patch | y | oldf | idx | id | msg | proj | lang |
---|---|---|---|---|---|---|---|
@@ -161,8 +161,6 @@ class Testinfra(Verifier):
cmd=["pytest", *util.dict2args(options), *self._tests, *args],
cwd=self._config.scenario.directory,
env=self.env,
- stdout=LOG.out,
- stderr=LOG.error,
)
# print(self._testinfra_command.cmd)
| 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Testinfra Verifier Module."""
import glob
import os
from molecule import logger, util
from molecule.api import Verifier
LOG = logger.get_logger(__name__)
class Testinfra(Verifier):
"""
`Testinfra`_ is no longer the default test verifier since version 3.0.
Additional options can be passed to ``testinfra`` through the options
dict. Any option set in this section will override the defaults.
.. note::
Molecule will remove any options matching '^[v]+$', and pass ``-vvv``
to the underlying ``pytest`` command when executing ``molecule
--debug``.
.. code-block:: yaml
verifier:
name: testinfra
options:
n: 1
The testing can be disabled by setting ``enabled`` to False.
.. code-block:: yaml
verifier:
name: testinfra
enabled: False
Environment variables can be passed to the verifier.
.. code-block:: yaml
verifier:
name: testinfra
env:
FOO: bar
Change path to the test directory.
.. code-block:: yaml
verifier:
name: testinfra
directory: /foo/bar/
Additional tests from another file or directory relative to the scenario's
tests directory (supports regexp).
.. code-block:: yaml
verifier:
name: testinfra
additional_files_or_dirs:
- ../path/to/test_1.py
- ../path/to/test_2.py
- ../path/to/directory/*
.. _`Testinfra`: https://testinfra.readthedocs.io
"""
def __init__(self, config=None):
"""
Set up the requirements to execute ``testinfra`` and return None.
:param config: An instance of a Molecule config.
:return: None
"""
super(Testinfra, self).__init__(config)
self._testinfra_command = None
if config:
self._tests = self._get_tests()
@property
def name(self):
return "testinfra"
@property
def default_options(self):
d = self._config.driver.testinfra_options
d["p"] = "no:cacheprovider"
if self._config.debug:
d["debug"] = True
d["vvv"] = True
if self._config.args.get("sudo"):
d["sudo"] = True
return d
# NOTE(retr0h): Override the base classes' options() to handle
# ``ansible-galaxy`` one-off.
@property
def options(self):
o = self._config.config["verifier"]["options"]
# NOTE(retr0h): Remove verbose options added by the user while in
# debug.
if self._config.debug:
o = util.filter_verbose_permutation(o)
return util.merge_dicts(self.default_options, o)
@property
def default_env(self):
env = util.merge_dicts(os.environ, self._config.env)
env = util.merge_dicts(env, self._config.provisioner.env)
return env
@property
def additional_files_or_dirs(self):
files_list = []
c = self._config.config
for f in c["verifier"]["additional_files_or_dirs"]:
glob_path = os.path.join(self._config.verifier.directory, f)
glob_list = glob.glob(glob_path)
if glob_list:
files_list.extend(glob_list)
return files_list
def bake(self):
"""
Bake a ``testinfra`` command so it's ready to execute and return None.
:return: None
"""
options = self.options
verbose_flag = util.verbose_flag(options)
args = verbose_flag + self.additional_files_or_dirs
self._testinfra_command = util.BakedCommand(
cmd=["pytest", *util.dict2args(options), *self._tests, *args],
cwd=self._config.scenario.directory,
env=self.env,
stdout=LOG.out,
stderr=LOG.error,
)
# print(self._testinfra_command.cmd)
def execute(self):
if not self.enabled:
msg = "Skipping, verifier is disabled."
LOG.warning(msg)
return
if not len(self._tests) > 0:
msg = "Skipping, no tests found."
LOG.warning(msg)
return
if self._testinfra_command is None:
self.bake()
msg = "Executing Testinfra tests found in {}/...".format(self.directory)
LOG.info(msg)
result = util.run_command(self._testinfra_command, debug=self._config.debug)
if result.returncode == 0:
msg = "Verifier completed successfully."
LOG.success(msg)
else:
util.sysexit(result.returncode)
def _get_tests(self):
"""
Walk the verifier's directory for tests and return a list.
:return: list
"""
return sorted(
[filename for filename in util.os_walk(self.directory, "test_*.py")]
)
def schema(self):
return {
"verifier": {
"type": "dict",
"schema": {"name": {"type": "string", "allowed": ["testinfra"]}},
}
}
| 1 | 10,905 | Why did we leave the `stderr` param in this case while we removed it from other bake methods? | ansible-community-molecule | py |
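The review question above concerns the stdout/stderr parameters that the patch drops from the baked command. As a hedged illustration of the direction this takes, where the command runner owns output handling instead of having logger streams baked into the command at bake time, here is a minimal hypothetical sketch. BakedCommand and run_command below are simplified stand-ins for molecule's util helpers, not the actual implementations.

import subprocess
from dataclasses import dataclass
from typing import Dict, List, Optional


@dataclass
class BakedCommand:
    """Hypothetical stand-in for molecule's util.BakedCommand.

    There are no stdout/stderr fields; output handling is deferred to
    run_command below.
    """

    cmd: List[str]
    cwd: Optional[str] = None
    env: Optional[Dict[str, str]] = None


def run_command(baked: BakedCommand, debug: bool = False) -> subprocess.CompletedProcess:
    """Run a baked command; the runner decides how output is handled."""
    if debug:
        print(f"COMMAND: {' '.join(baked.cmd)}")
    return subprocess.run(
        baked.cmd,
        cwd=baked.cwd,
        env=baked.env,
        check=False,  # the caller inspects returncode, as execute() does
    )


# Usage mirroring Testinfra.execute():
# result = run_command(BakedCommand(cmd=["pytest", "-vvv"]), debug=True)
# if result.returncode != 0:
#     raise SystemExit(result.returncode)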
@@ -30,6 +30,13 @@ func VerifyTrustDomainMemberID(td spiffeid.TrustDomain, id spiffeid.ID) error {
return nil
}
+func VerifySameTrustDomain(td spiffeid.TrustDomain, id spiffeid.ID) error {
+ if !id.MemberOf(td) {
+ return fmt.Errorf("%q is not a member of trust domain %q", id, td)
+ }
+ return nil
+}
+
func TrustDomainAgentIDFromProto(td spiffeid.TrustDomain, protoID *types.SPIFFEID) (spiffeid.ID, error) {
id, err := idFromProto(protoID)
if err != nil { | 1 | package api
import (
"errors"
"fmt"
"github.com/spiffe/go-spiffe/v2/spiffeid"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/proto/spire/types"
)
func TrustDomainMemberIDFromProto(td spiffeid.TrustDomain, protoID *types.SPIFFEID) (spiffeid.ID, error) {
id, err := idFromProto(protoID)
if err != nil {
return spiffeid.ID{}, err
}
if err := VerifyTrustDomainMemberID(td, id); err != nil {
return spiffeid.ID{}, err
}
return id, nil
}
func VerifyTrustDomainMemberID(td spiffeid.TrustDomain, id spiffeid.ID) error {
if !id.MemberOf(td) {
return fmt.Errorf("%q is not a member of trust domain %q", id, td)
}
if id.Path() == "" {
return fmt.Errorf("%q is not a member of trust domain %q; path is empty", id, td)
}
return nil
}
func TrustDomainAgentIDFromProto(td spiffeid.TrustDomain, protoID *types.SPIFFEID) (spiffeid.ID, error) {
id, err := idFromProto(protoID)
if err != nil {
return spiffeid.ID{}, err
}
if err := VerifyTrustDomainAgentID(td, id); err != nil {
return spiffeid.ID{}, err
}
return id, nil
}
func VerifyTrustDomainAgentID(td spiffeid.TrustDomain, id spiffeid.ID) error {
if !id.MemberOf(td) {
return fmt.Errorf("%q is not a member of trust domain %q", id, td)
}
if id.Path() == "" {
return fmt.Errorf("%q is not an agent in trust domain %q; path is empty", id, td)
}
if !idutil.IsAgentPath(id.Path()) {
return fmt.Errorf("%q is not an agent in trust domain %q; path is not in the agent namespace", id, td)
}
return nil
}
func TrustDomainWorkloadIDFromProto(td spiffeid.TrustDomain, protoID *types.SPIFFEID) (spiffeid.ID, error) {
id, err := idFromProto(protoID)
if err != nil {
return spiffeid.ID{}, err
}
if err := VerifyTrustDomainWorkloadID(td, id); err != nil {
return spiffeid.ID{}, err
}
return id, nil
}
func VerifyTrustDomainWorkloadID(td spiffeid.TrustDomain, id spiffeid.ID) error {
if !id.MemberOf(td) {
return fmt.Errorf("%q is not a member of trust domain %q", id, td)
}
if id.Path() == "" {
return fmt.Errorf("%q is not a workload in trust domain %q; path is empty", id, td)
}
if idutil.IsReservedPath(id.Path()) {
return fmt.Errorf("%q is not a workload in trust domain %q; path is in the reserved namespace", id, td)
}
return nil
}
// ProtoFromID converts a SPIFFE ID from the given spiffeid.ID to
// types.SPIFFEID
func ProtoFromID(id spiffeid.ID) *types.SPIFFEID {
return &types.SPIFFEID{
TrustDomain: id.TrustDomain().String(),
Path: id.Path(),
}
}
func idFromProto(protoID *types.SPIFFEID) (spiffeid.ID, error) {
if protoID == nil {
return spiffeid.ID{}, errors.New("request must specify SPIFFE ID")
}
return spiffeid.New(protoID.TrustDomain, protoID.Path)
}
| 1 | 15,120 | This seems to only be used by the server/ca package.... maybe this can live there instead of being lumped into these API implementation helpers? | spiffe-spire | go |
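The review comment above notes that VerifySameTrustDomain appears to be used only by the server/ca package. As a minimal sketch, written as if it lived in this same api package, here is how the new helper differs from VerifyTrustDomainMemberID: an ID whose path is empty (the trust domain ID itself) satisfies the membership-only check but fails the member-ID check. This assumes spiffeid.New with no path segments yields such an empty-path ID; errors are elided for brevity and the function is illustrative only, not part of the original file.

package api

import (
	"fmt"

	"github.com/spiffe/go-spiffe/v2/spiffeid"
)

// sketchTrustDomainChecks contrasts the membership-only check with the
// member-ID check for an ID that has no path component.
func sketchTrustDomainChecks() {
	workloadID, _ := spiffeid.New("example.org", "workload")
	tdOnlyID, _ := spiffeid.New("example.org") // assumed: empty path
	td := workloadID.TrustDomain()

	fmt.Println(VerifySameTrustDomain(td, workloadID))   // <nil>
	fmt.Println(VerifySameTrustDomain(td, tdOnlyID))     // <nil>: membership only
	fmt.Println(VerifyTrustDomainMemberID(td, tdOnlyID)) // error: path is empty
}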
@@ -1035,6 +1035,15 @@ func TestProvisionContainerResourcesSetPausePIDInVolumeResources(t *testing.T) {
}, nil),
mockCNIClient.EXPECT().SetupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nsResult, nil),
)
+ // These mock calls would be made only for Windows.
+ dockerClient.EXPECT().CreateContainerExec(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ &types.IDResponse{ID: containerID}, nil).MinTimes(0).MaxTimes(1)
+ dockerClient.EXPECT().StartContainerExec(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).
+ MinTimes(0).MaxTimes(1)
+ dockerClient.EXPECT().InspectContainerExec(gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.ContainerExecInspect{
+ ExitCode: 0,
+ Running: false,
+ }, nil).MinTimes(0).MaxTimes(1)
require.Nil(t, taskEngine.(*DockerTaskEngine).provisionContainerResources(testTask, pauseContainer).Error)
assert.Equal(t, strconv.Itoa(containerPid), volRes.GetPauseContainerPID()) | 1 | // +build unit
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/api/appmesh"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/asm"
mock_asm_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks"
mock_secretsmanageriface "github.com/aws/amazon-ecs-agent/agent/asm/mocks"
"github.com/aws/amazon-ecs-agent/agent/config"
mock_containermetadata "github.com/aws/amazon-ecs-agent/agent/containermetadata/mocks"
"github.com/aws/amazon-ecs-agent/agent/credentials"
mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks"
mock_ecscni "github.com/aws/amazon-ecs-agent/agent/ecscni/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/engine/execcmd"
mock_execcmdagent "github.com/aws/amazon-ecs-agent/agent/engine/execcmd/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/image"
mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/testdata"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
mock_ssm_factory "github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks"
mock_ssmiface "github.com/aws/amazon-ecs-agent/agent/ssm/mocks"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth"
"github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
mock_taskresource "github.com/aws/amazon-ecs-agent/agent/taskresource/mocks"
"github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
mock_ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/ssm"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/golang/mock/gomock"
"github.com/pborman/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
credentialsID = "credsid"
ipv4 = "10.0.0.1"
gatewayIPv4 = "10.0.0.2/20"
mac = "1.2.3.4"
ipv6 = "f0:234:23"
dockerContainerName = "docker-container-name"
containerPid = 123
taskIP = "169.254.170.3"
exitCode = 1
labelsTaskARN = "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe"
taskSteadyStatePollInterval = time.Millisecond
secretID = "meaning-of-life"
region = "us-west-2"
username = "irene"
password = "sher"
ignoredUID = "1337"
proxyIngressPort = "15000"
proxyEgressPort = "15001"
appPort = "9000"
egressIgnoredIP = "169.254.169.254"
expectedDelaySeconds = 10
expectedDelay = expectedDelaySeconds * time.Second
networkBridgeIP = "bridgeIP"
networkModeBridge = "bridge"
networkModeAWSVPC = "awsvpc"
testTaskARN = "arn:aws:ecs:region:account-id:task/task-id"
containerNetworkMode = "none"
)
var (
defaultConfig config.Config
nsResult = mockSetupNSResult()
mockENI = &apieni.ENI{
ID: "eni-id",
IPV4Addresses: []*apieni.ENIIPV4Address{
{
Primary: true,
Address: ipv4,
},
},
MacAddress: mac,
IPV6Addresses: []*apieni.ENIIPV6Address{
{
Address: ipv6,
},
},
SubnetGatewayIPV4Address: gatewayIPv4,
}
// createdContainerName is used to save the name of the created
// container from the validateContainerRunWorkflow method. This
// variable should never be accessed directly.
// The `getCreatedContainerName` and `setCreatedContainerName`
// methods should be used instead.
createdContainerName string
// createdContainerNameLock guards access to the createdContainerName
// var.
createdContainerNameLock sync.Mutex
)
func init() {
defaultConfig = config.DefaultConfig()
defaultConfig.TaskCPUMemLimit.Value = config.ExplicitlyDisabled
}
func getCreatedContainerName() string {
createdContainerNameLock.Lock()
defer createdContainerNameLock.Unlock()
return createdContainerName
}
func setCreatedContainerName(name string) {
createdContainerNameLock.Lock()
defer createdContainerNameLock.Unlock()
createdContainerName = name
}
func mocks(t *testing.T, ctx context.Context, cfg *config.Config) (*gomock.Controller,
*mock_dockerapi.MockDockerClient, *mock_ttime.MockTime, TaskEngine,
*mock_credentials.MockManager, *mock_engine.MockImageManager, *mock_containermetadata.MockManager) {
ctrl := gomock.NewController(t)
client := mock_dockerapi.NewMockDockerClient(ctrl)
mockTime := mock_ttime.NewMockTime(ctrl)
credentialsManager := mock_credentials.NewMockManager(ctrl)
containerChangeEventStream := eventstream.NewEventStream("TESTTASKENGINE", ctx)
containerChangeEventStream.StartListening()
imageManager := mock_engine.NewMockImageManager(ctrl)
metadataManager := mock_containermetadata.NewMockManager(ctrl)
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
taskEngine := NewTaskEngine(cfg, client, credentialsManager, containerChangeEventStream,
imageManager, dockerstate.NewTaskEngineState(), metadataManager, nil, execCmdMgr)
taskEngine.(*DockerTaskEngine)._time = mockTime
taskEngine.(*DockerTaskEngine).ctx = ctx
return ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, metadataManager
}
func mockSetupNSResult() *current.Result {
_, ip, _ := net.ParseCIDR(taskIP + "/32")
return &current.Result{
IPs: []*current.IPConfig{
{
Address: *ip,
},
},
}
}
func TestBatchContainerHappyPath(t *testing.T) {
testcases := []struct {
name string
metadataCreateError error
metadataUpdateError error
metadataCleanError error
taskCPULimit config.Conditional
execCommandAgentEnabled bool
}{
{
name: "Metadata Manager Succeeds",
metadataCreateError: nil,
metadataUpdateError: nil,
metadataCleanError: nil,
taskCPULimit: config.ExplicitlyDisabled,
},
{
name: "ExecCommandAgent is started",
metadataCreateError: nil,
metadataUpdateError: nil,
metadataCleanError: nil,
taskCPULimit: config.ExplicitlyDisabled,
execCommandAgentEnabled: true,
},
{
name: "Metadata Manager Fails to Create, Update and Cleanup",
metadataCreateError: errors.New("create metadata error"),
metadataUpdateError: errors.New("update metadata error"),
metadataCleanError: errors.New("clean metadata error"),
taskCPULimit: config.ExplicitlyDisabled,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
metadataConfig := defaultConfig
metadataConfig.TaskCPUMemLimit.Value = tc.taskCPULimit
metadataConfig.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, metadataManager := mocks(
t, ctx, &metadataConfig)
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
taskEngine.(*DockerTaskEngine).execCmdMgr = execCmdMgr
defer ctrl.Finish()
roleCredentials := credentials.TaskIAMRoleCredentials{
IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: "credsid"},
}
credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(roleCredentials, true).AnyTimes()
credentialsManager.EXPECT().RemoveCredentials(credentialsID)
sleepTask := testdata.LoadTask("sleep5")
if tc.execCommandAgentEnabled && len(sleepTask.Containers) > 0 {
enableExecCommandAgentForContainer(sleepTask.Containers[0], apicontainer.ManagedAgentState{})
}
sleepTask.SetCredentialsID(credentialsID)
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
// containerEventsWG is used to force the test to wait until the container created and started
// events are processed
containerEventsWG := sync.WaitGroup{}
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
containerName := make(chan string)
go func() {
name := <-containerName
setCreatedContainerName(name)
}()
for _, container := range sleepTask.Containers {
validateContainerRunWorkflow(t, container, sleepTask, imageManager,
client, &roleCredentials, &containerEventsWG,
eventStream, containerName, func() {
metadataManager.EXPECT().Create(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any(), gomock.Any()).Return(tc.metadataCreateError)
metadataManager.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any()).Return(tc.metadataUpdateError)
if tc.execCommandAgentEnabled {
execCmdMgr.EXPECT().InitializeContainer(gomock.Any(), container, gomock.Any()).Times(1)
// TODO: [ecs-exec] validate call control plane to report ExecCommandAgent SUCCESS/FAIL here
execCmdMgr.EXPECT().StartAgent(gomock.Any(), client, sleepTask, sleepTask.Containers[0], containerID)
}
})
}
client.EXPECT().Info(gomock.Any(), gomock.Any()).Return(
types.Info{}, nil)
addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG)
cleanup := make(chan time.Time, 1)
defer close(cleanup)
mockTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1)
client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()).AnyTimes()
// Simulate a container stop event from docker
eventStream <- dockerapi.DockerContainerChangeEvent{
Status: apicontainerstatus.ContainerStopped,
DockerContainerMetadata: dockerapi.DockerContainerMetadata{
DockerID: containerID,
ExitCode: aws.Int(exitCode),
},
}
// StopContainer might be invoked if the test execution is slow, during
// the cleanup phase. Account for that.
client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(
dockerapi.DockerContainerMetadata{DockerID: containerID}).AnyTimes()
waitForStopEvents(t, taskEngine.StateChangeEvents(), true, tc.execCommandAgentEnabled)
// This ensures that managedTask.waitForStopReported makes progress
sleepTask.SetSentStatus(apitaskstatus.TaskStopped)
// Extra events should not block forever; duplicate acs and docker events are possible
go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }()
go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }()
sleepTaskStop := testdata.LoadTask("sleep5")
sleepTaskStop.SetCredentialsID(credentialsID)
sleepTaskStop.SetDesiredStatus(apitaskstatus.TaskStopped)
taskEngine.AddTask(sleepTaskStop)
// As above, duplicate events should not be a problem
taskEngine.AddTask(sleepTaskStop)
taskEngine.AddTask(sleepTaskStop)
// Expect a bunch of steady state 'poll' describes when we trigger cleanup
client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, removedContainerName string, timeout time.Duration) {
assert.Equal(t, containerID, removedContainerName,
"Container name mismatch")
}).Return(nil)
imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any())
metadataManager.EXPECT().Clean(gomock.Any()).Return(tc.metadataCleanError)
// trigger cleanup
cleanup <- time.Now()
go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }()
// Wait for the task to actually be dead; if we just fallthrough immediately,
// the remove might not have happened (expectation failure)
for {
tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks()
if len(tasks) == 0 {
break
}
time.Sleep(5 * time.Millisecond)
}
})
}
}
// TestRemoveEvents tests if the task engine can handle task events while the task is being
// cleaned up. This test ensures that there's no regression in the task engine and ensures
// there's no deadlock as seen in #313
func TestRemoveEvents(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
sleepTask := testdata.LoadTask("sleep5")
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
// containerEventsWG is used to force the test to wait until the container created and started
// events are processed
containerEventsWG := sync.WaitGroup{}
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
containerName := make(chan string)
go func() {
name := <-containerName
setCreatedContainerName(name)
}()
for _, container := range sleepTask.Containers {
validateContainerRunWorkflow(t, container, sleepTask, imageManager,
client, nil, &containerEventsWG,
eventStream, containerName, func() {
})
}
addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG)
cleanup := make(chan time.Time, 1)
defer close(cleanup)
mockTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1)
client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()).AnyTimes()
// Simulate a container stop event from docker
eventStream <- dockerapi.DockerContainerChangeEvent{
Status: apicontainerstatus.ContainerStopped,
DockerContainerMetadata: dockerapi.DockerContainerMetadata{
DockerID: containerID,
ExitCode: aws.Int(exitCode),
},
}
waitForStopEvents(t, taskEngine.StateChangeEvents(), true, false)
sleepTaskStop := testdata.LoadTask("sleep5")
sleepTaskStop.SetDesiredStatus(apitaskstatus.TaskStopped)
taskEngine.AddTask(sleepTaskStop)
client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, removedContainerName string, timeout time.Duration) {
assert.Equal(t, containerID, removedContainerName,
"Container name mismatch")
// Emit a couple of events for the task before cleanup finishes. This forces
// discardEventsUntil to be invoked and should test the code path that
// caused the deadlock, which was fixed with #320
eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped)
eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped)
}).Return(nil)
imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any())
// This ensures that managedTask.waitForStopReported makes progress
sleepTask.SetSentStatus(apitaskstatus.TaskStopped)
// trigger cleanup
cleanup <- time.Now()
// Wait for the task to actually be dead; if we just fallthrough immediately,
// the remove might not have happened (expectation failure)
for {
tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks()
if len(tasks) == 0 {
break
}
time.Sleep(5 * time.Millisecond)
}
}
func TestStartTimeoutThenStart(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
sleepTask := testdata.LoadTask("sleep5")
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
testTime.EXPECT().Now().Return(time.Now()).AnyTimes()
testTime.EXPECT().After(gomock.Any())
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil)
for _, container := range sleepTask.Containers {
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes()
client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{})
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false)
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, x, y, z, timeout interface{}) {
go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) }()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID})
client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Return(dockerapi.DockerContainerMetadata{
Error: &dockerapi.DockerTimeoutError{},
})
}
// Start timeout triggers a container stop as we force stop containers
// when startcontainer times out. See #1043 for details
client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStartContainerError{fmt.Errorf("cannot start container")},
}).AnyTimes()
err := taskEngine.Init(ctx)
assert.NoError(t, err)
stateChangeEvents := taskEngine.StateChangeEvents()
taskEngine.AddTask(sleepTask)
waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false)
// Now surprise surprise, it actually did start!
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
// However, if it starts again, we should not see it be killed; no additional expect
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
select {
case <-stateChangeEvents:
t.Fatal("Should be out of events")
default:
}
}
func TestSteadyStatePoll(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
taskEngine.(*DockerTaskEngine).taskSteadyStatePollInterval = taskSteadyStatePollInterval
containerEventsWG := sync.WaitGroup{}
sleepTask := testdata.LoadTask("sleep5")
sleepTask.Arn = uuid.New()
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
containerName := make(chan string)
go func() {
<-containerName
}()
// set up expectations for each container in the task calling create + start
for _, container := range sleepTask.Containers {
validateContainerRunWorkflow(t, container, sleepTask, imageManager,
client, nil, &containerEventsWG,
eventStream, containerName, func() {
})
}
testTime.EXPECT().Now().Return(time.Now()).MinTimes(1)
var wg sync.WaitGroup
wg.Add(1)
client.EXPECT().DescribeContainer(gomock.Any(), containerID).Return(
apicontainerstatus.ContainerStopped,
dockerapi.DockerContainerMetadata{
DockerID: containerID,
}).Do(func(ctx interface{}, x interface{}) {
wg.Done()
})
client.EXPECT().DescribeContainer(gomock.Any(), containerID).Return(
apicontainerstatus.ContainerStopped,
dockerapi.DockerContainerMetadata{
DockerID: containerID,
}).AnyTimes()
client.EXPECT().StopContainer(gomock.Any(), containerID, 30*time.Second).AnyTimes()
err := taskEngine.Init(ctx) // start the task engine
assert.NoError(t, err)
taskEngine.AddTask(sleepTask) // actually add the task we created
waitForRunningEvents(t, taskEngine.StateChangeEvents())
containerMap, ok := taskEngine.(*DockerTaskEngine).State().ContainerMapByArn(sleepTask.Arn)
assert.True(t, ok)
dockerContainer, ok := containerMap[sleepTask.Containers[0].Name]
assert.True(t, ok)
// Wait for container create and start events to be processed
containerEventsWG.Wait()
wg.Wait()
cleanup := make(chan time.Time)
defer close(cleanup)
testTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1)
client.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerID, dockerclient.RemoveContainerTimeout).Return(nil)
imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()).Return(nil)
waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false)
// trigger cleanup, this ensures all the goroutines were finished
sleepTask.SetSentStatus(apitaskstatus.TaskStopped)
cleanup <- time.Now()
for {
tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks()
if len(tasks) == 0 {
break
}
time.Sleep(5 * time.Millisecond)
}
}
func TestStopWithPendingStops(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
testTime.EXPECT().Now().Return(time.Now()).AnyTimes()
testTime.EXPECT().After(gomock.Any()).AnyTimes()
sleepTask1 := testdata.LoadTask("sleep5")
sleepTask1.StartSequenceNumber = 5
sleepTask2 := testdata.LoadTask("sleep5")
sleepTask2.Arn = "arn2"
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
err := taskEngine.Init(ctx)
assert.NoError(t, err)
stateChangeEvents := taskEngine.StateChangeEvents()
defer discardEvents(stateChangeEvents)()
pullDone := make(chan bool)
pullInvoked := make(chan bool)
client.EXPECT().PullImage(gomock.Any(), gomock.Any(), nil, gomock.Any()).Do(func(w, x, y, z interface{}) {
pullInvoked <- true
<-pullDone
}).MaxTimes(2)
imageManager.EXPECT().RecordContainerReference(gomock.Any()).AnyTimes()
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).AnyTimes()
taskEngine.AddTask(sleepTask2)
<-pullInvoked
stopSleep2 := testdata.LoadTask("sleep5")
stopSleep2.Arn = "arn2"
stopSleep2.SetDesiredStatus(apitaskstatus.TaskStopped)
stopSleep2.StopSequenceNumber = 4
taskEngine.AddTask(stopSleep2)
taskEngine.AddTask(sleepTask1)
stopSleep1 := testdata.LoadTask("sleep5")
stopSleep1.SetDesiredStatus(apitaskstatus.TaskStopped)
stopSleep1.StopSequenceNumber = 5
taskEngine.AddTask(stopSleep1)
pullDone <- true
// this means the PullImage is only called once because the task is stopped before it
// gets the pull image lock
}
func TestCreateContainerSaveDockerIDAndName(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
dataClient, cleanup := newTestDataClient(t)
defer cleanup()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine.SetDataClient(dataClient)
sleepTask := testdata.LoadTask("sleep5")
sleepTask.Arn = testTaskARN
sleepContainer, _ := sleepTask.ContainerByName("sleep5")
sleepContainer.TaskARNUnsafe = testTaskARN
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.DockerContainerMetadata{
DockerID: testDockerID,
})
metadata := taskEngine.createContainer(sleepTask, sleepContainer)
require.NoError(t, metadata.Error)
containers, err := dataClient.GetContainers()
require.NoError(t, err)
require.Len(t, containers, 1)
assert.Equal(t, testDockerID, containers[0].DockerID)
assert.Contains(t, containers[0].DockerName, sleepContainer.Name)
}
func TestCreateContainerMetadata(t *testing.T) {
testcases := []struct {
name string
info types.Info
error error
}{
{
name: "Selinux Security Option",
info: types.Info{SecurityOptions: []string{"selinux"}},
error: nil,
},
{
name: "Docker Info Error",
info: types.Info{},
error: errors.New("Error getting docker info"),
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, _, metadataManager := mocks(t, ctx, &config.Config{})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine.cfg.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}
sleepTask := testdata.LoadTask("sleep5")
sleepContainer, _ := sleepTask.ContainerByName("sleep5")
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil)
client.EXPECT().Info(ctx, dockerclient.InfoTimeout).Return(tc.info, tc.error)
metadataManager.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), tc.info.SecurityOptions)
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
metadata := taskEngine.createContainer(sleepTask, sleepContainer)
assert.NoError(t, metadata.Error)
})
}
}
func TestCreateContainerMergesLabels(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
testTask := &apitask.Task{
Arn: labelsTaskARN,
Family: "myFamily",
Version: "1",
Containers: []*apicontainer.Container{
{
Name: "c1",
DockerConfig: apicontainer.DockerConfig{
Config: aws.String(`{"Labels":{"key":"value"}}`),
},
},
},
}
expectedConfig, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion)
if err != nil {
t.Fatal(err)
}
expectedConfig.Labels = map[string]string{
"com.amazonaws.ecs.task-arn": labelsTaskARN,
"com.amazonaws.ecs.container-name": "c1",
"com.amazonaws.ecs.task-definition-family": "myFamily",
"com.amazonaws.ecs.task-definition-version": "1",
"com.amazonaws.ecs.cluster": "",
"key": "value",
}
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any())
taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0])
}
// TestCreateContainerAddV3EndpointIDToState tests that in createContainer, when the
// container's v3 endpoint id is set, we will add mappings to engine state
func TestCreateContainerAddV3EndpointIDToState(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
testContainer := &apicontainer.Container{
Name: "c1",
V3EndpointID: "v3EndpointID",
}
testTask := &apitask.Task{
Arn: "myTaskArn",
Family: "myFamily",
Version: "1",
Containers: []*apicontainer.Container{
testContainer,
},
}
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
// V3EndpointID mappings are only added to state when dockerID is available. So return one here.
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.DockerContainerMetadata{
DockerID: "dockerID",
})
taskEngine.createContainer(testTask, testContainer)
// check that we have added v3 endpoint mappings to state
state := taskEngine.state
addedTaskARN, ok := state.TaskARNByV3EndpointID("v3EndpointID")
assert.True(t, ok)
assert.Equal(t, testTask.Arn, addedTaskARN)
addedDockerID, ok := state.DockerIDByV3EndpointID("v3EndpointID")
assert.True(t, ok)
assert.Equal(t, "dockerID", addedDockerID)
}
// TestTaskTransitionWhenStopContainerTimesout tests that task transitions to stopped
// only when terminal events are received from docker event stream when
// StopContainer times out
func TestTaskTransitionWhenStopContainerTimesout(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
sleepTask := testdata.LoadTask("sleep5")
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
mockTime.EXPECT().Now().Return(time.Now()).AnyTimes()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
containerStopTimeoutError := dockerapi.DockerContainerMetadata{
Error: &dockerapi.DockerTimeoutError{
Transition: "stop",
Duration: 30 * time.Second,
},
}
dockerEventSent := make(chan int)
for _, container := range sleepTask.Containers {
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes()
client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{})
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false)
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil)
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, x, y, z, timeout interface{}) {
go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) }()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID})
gomock.InOrder(
client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Do(
func(ctx interface{}, id string, timeout time.Duration) {
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}),
// StopContainer times out
client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(containerStopTimeoutError),
// Since task is not in steady state, progressContainers causes
// another invocation of StopContainer. Return a timeout error
// for that as well.
client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Do(
func(ctx interface{}, id string, timeout time.Duration) {
go func() {
dockerEventSent <- 1
// Emit 'ContainerStopped' event to the container event stream
// This should cause the container and the task to transition
// to 'STOPPED'
eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped)
}()
}).Return(containerStopTimeoutError).MinTimes(1),
)
}
err := taskEngine.Init(ctx)
assert.NoError(t, err)
stateChangeEvents := taskEngine.StateChangeEvents()
go taskEngine.AddTask(sleepTask)
// wait for task running
waitForRunningEvents(t, taskEngine.StateChangeEvents())
// Set the task desired status to be stopped and StopContainer will be called
updateSleepTask := testdata.LoadTask("sleep5")
updateSleepTask.SetDesiredStatus(apitaskstatus.TaskStopped)
go taskEngine.AddTask(updateSleepTask)
// StopContainer timeout error shouldn't cause container/task status change
// until receive stop event from docker event stream
select {
case <-stateChangeEvents:
t.Error("Should not get task events")
case <-dockerEventSent:
t.Logf("Send docker stop event")
go func() {
for {
select {
case <-dockerEventSent:
case <-ctx.Done():
return
}
}
}()
}
// StopContainer was called again and received stop event from docker event stream
// Expect it to go to stopped
waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false)
}
// TestTaskTransitionWhenStopContainerReturnsUnretriableError tests if the task transitions
// to stopped without retrying stopping the container in the task when the initial
// stop container call returns an unretriable error from docker, specifically the
// NoSuchContainer error
func TestTaskTransitionWhenStopContainerReturnsUnretriableError(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
sleepTask := testdata.LoadTask("sleep5")
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
mockTime.EXPECT().Now().Return(time.Now()).AnyTimes()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
containerEventsWG := sync.WaitGroup{}
for _, container := range sleepTask.Containers {
gomock.InOrder(
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(),
client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
imageManager.EXPECT().RecordContainerReference(container),
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false),
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil),
// Simulate successful create container
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, x, y, z, timeout interface{}) {
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}),
// Simulate successful start container
client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Do(
func(ctx interface{}, id string, timeout time.Duration) {
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}),
// StopContainer errors out. However, since this is a known unretriable error,
// the task engine should not retry stopping the container and move on.
// If there's a delay in task engine's processing of the ContainerRunning
// event, StopContainer will be invoked again as the engine considers it
// as a stopped container coming back. MinTimes() should guarantee that
// StopContainer is invoked at least once, protecting against a test
// failure when there's a delay in task engine processing the ContainerRunning
// event.
client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStopContainerError{dockerapi.NoSuchContainerError{}},
}).MinTimes(1),
)
}
err := taskEngine.Init(ctx)
assert.NoError(t, err)
go taskEngine.AddTask(sleepTask)
// wait for task running
waitForRunningEvents(t, taskEngine.StateChangeEvents())
containerEventsWG.Wait()
// Set the task desired status to be stopped and StopContainer will be called
updateSleepTask := testdata.LoadTask("sleep5")
updateSleepTask.SetDesiredStatus(apitaskstatus.TaskStopped)
go taskEngine.AddTask(updateSleepTask)
// StopContainer was called again and received stop event from docker event stream
// Expect it to go to stopped
waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false)
}
// TestTaskTransitionWhenStopContainerReturnsTransientErrorBeforeSucceeding tests if the task
// transitions to stopped only after receiving the container stopped event from docker when
// the initial stop container call fails with an unknown error.
func TestTaskTransitionWhenStopContainerReturnsTransientErrorBeforeSucceeding(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
sleepTask := testdata.LoadTask("sleep5")
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
mockTime.EXPECT().Now().Return(time.Now()).AnyTimes()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
containerStoppingError := dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStopContainerError{errors.New("Error stopping container")},
}
for _, container := range sleepTask.Containers {
gomock.InOrder(
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(),
client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
imageManager.EXPECT().RecordContainerReference(container),
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false),
// Simulate successful create container
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil),
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
dockerapi.DockerContainerMetadata{DockerID: containerID}),
// Simulate successful start container
client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Return(
dockerapi.DockerContainerMetadata{DockerID: containerID}),
// StopContainer errors out a couple of times
client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(containerStoppingError).Times(2),
// Since task is not in steady state, progressContainers causes
// another invocation of StopContainer. Return the 'succeed' response,
// which should cause the task engine to stop invoking this again and
// transition the task to stopped.
client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
)
}
err := taskEngine.Init(ctx)
assert.NoError(t, err)
go taskEngine.AddTask(sleepTask)
// wait for task running
waitForRunningEvents(t, taskEngine.StateChangeEvents())
// Set the task desired status to be stopped and StopContainer will be called
updateSleepTask := testdata.LoadTask("sleep5")
updateSleepTask.SetDesiredStatus(apitaskstatus.TaskStopped)
go taskEngine.AddTask(updateSleepTask)
// StopContainer invocation should have caused it to stop eventually.
waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false)
}
func TestGetTaskByArn(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
// Need a mock client as AddTask not only adds a task to the engine, but
// also causes the engine to progress the task.
ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
mockTime.EXPECT().Now().Return(time.Now()).AnyTimes()
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes()
imageManager.EXPECT().RecordContainerReference(gomock.Any()).AnyTimes()
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).AnyTimes()
err := taskEngine.Init(ctx)
assert.NoError(t, err)
defer taskEngine.Disable()
sleepTask := testdata.LoadTask("sleep5")
sleepTask.SetDesiredStatus(apitaskstatus.TaskStopped)
sleepTaskArn := sleepTask.Arn
sleepTask.SetDesiredStatus(apitaskstatus.TaskStopped)
taskEngine.AddTask(sleepTask)
_, found := taskEngine.GetTaskByArn(sleepTaskArn)
assert.True(t, found, "Task %s not found", sleepTaskArn)
_, found = taskEngine.GetTaskByArn(sleepTaskArn + "arn")
assert.False(t, found, "Task with invalid arn found in the task engine")
}
func TestProvisionContainerResourcesSetPausePIDInVolumeResources(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
dataClient, cleanup := newTestDataClient(t)
defer cleanup()
taskEngine.SetDataClient(dataClient)
mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl)
taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient
testTask := testdata.LoadTask("sleep5")
pauseContainer := &apicontainer.Container{
Name: "pausecontainer",
Type: apicontainer.ContainerCNIPause,
}
testTask.Containers = append(testTask.Containers, pauseContainer)
testTask.AddTaskENI(mockENI)
volRes := &taskresourcevolume.VolumeResource{}
testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{
"dockerVolume": {volRes},
}
taskEngine.(*DockerTaskEngine).State().AddTask(testTask)
taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{
DockerID: containerID,
DockerName: dockerContainerName,
Container: pauseContainer,
}, testTask)
gomock.InOrder(
dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: containerID,
State: &types.ContainerState{Pid: containerPid},
HostConfig: &dockercontainer.HostConfig{
NetworkMode: containerNetworkMode,
},
},
}, nil),
mockCNIClient.EXPECT().SetupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nsResult, nil),
)
require.Nil(t, taskEngine.(*DockerTaskEngine).provisionContainerResources(testTask, pauseContainer).Error)
assert.Equal(t, strconv.Itoa(containerPid), volRes.GetPauseContainerPID())
assert.Equal(t, taskIP, testTask.GetLocalIPAddress())
savedTasks, err := dataClient.GetTasks()
require.NoError(t, err)
assert.Len(t, savedTasks, 1)
}
func TestProvisionContainerResourcesInspectError(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl)
taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient
testTask := testdata.LoadTask("sleep5")
pauseContainer := &apicontainer.Container{
Name: "pausecontainer",
Type: apicontainer.ContainerCNIPause,
}
testTask.Containers = append(testTask.Containers, pauseContainer)
testTask.AddTaskENI(mockENI)
taskEngine.(*DockerTaskEngine).State().AddTask(testTask)
taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{
DockerID: containerID,
DockerName: dockerContainerName,
Container: pauseContainer,
}, testTask)
dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(nil, errors.New("test error"))
assert.NotNil(t, taskEngine.(*DockerTaskEngine).provisionContainerResources(testTask, pauseContainer).Error)
}
// TestStopPauseContainerCleanupCalled tests when stopping the pause container
// its network namespace should be cleaned up first
func TestStopPauseContainerCleanupCalled(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl)
taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient
testTask := testdata.LoadTask("sleep5")
pauseContainer := &apicontainer.Container{
Name: "pausecontainer",
Type: apicontainer.ContainerCNIPause,
DesiredStatusUnsafe: apicontainerstatus.ContainerStopped,
}
testTask.Containers = append(testTask.Containers, pauseContainer)
testTask.AddTaskENI(mockENI)
testTask.SetAppMesh(&appmesh.AppMesh{
IgnoredUID: ignoredUID,
ProxyIngressPort: proxyIngressPort,
ProxyEgressPort: proxyEgressPort,
AppPorts: []string{
appPort,
},
EgressIgnoredIPs: []string{
egressIgnoredIP,
},
})
taskEngine.(*DockerTaskEngine).State().AddTask(testTask)
taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{
DockerID: containerID,
DockerName: dockerContainerName,
Container: pauseContainer,
}, testTask)
gomock.InOrder(
dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: containerID,
State: &types.ContainerState{Pid: containerPid},
HostConfig: &dockercontainer.HostConfig{
NetworkMode: containerNetworkMode,
},
},
}, nil),
mockCNIClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
dockerClient.EXPECT().StopContainer(gomock.Any(),
containerID,
defaultConfig.DockerStopTimeout,
).Return(dockerapi.DockerContainerMetadata{}),
)
taskEngine.(*DockerTaskEngine).stopContainer(testTask, pauseContainer)
require.True(t, pauseContainer.IsContainerTornDown())
}
// TestStopPauseContainerCleanupDelay tests that cleanup of the pause container's
// network namespace is delayed by the configured ENIPauseContainerCleanupDelaySeconds
func TestStopPauseContainerCleanupDelay(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cfg := config.DefaultConfig()
cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled
cfg.ENIPauseContainerCleanupDelaySeconds = expectedDelaySeconds
delayedChan := make(chan time.Duration, 1)
ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &cfg)
taskEngine.(*DockerTaskEngine).handleDelay = func(d time.Duration) {
delayedChan <- d
}
mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl)
taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient
testTask := testdata.LoadTask("sleep5")
pauseContainer := &apicontainer.Container{
Name: "pausecontainer",
Type: apicontainer.ContainerCNIPause,
DesiredStatusUnsafe: apicontainerstatus.ContainerStopped,
}
testTask.Containers = append(testTask.Containers, pauseContainer)
testTask.AddTaskENI(mockENI)
taskEngine.(*DockerTaskEngine).State().AddTask(testTask)
taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{
DockerID: containerID,
DockerName: dockerContainerName,
Container: pauseContainer,
}, testTask)
gomock.InOrder(
dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: containerID,
State: &types.ContainerState{Pid: containerPid},
HostConfig: &dockercontainer.HostConfig{
NetworkMode: containerNetworkMode,
},
},
}, nil),
mockCNIClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),
dockerClient.EXPECT().StopContainer(gomock.Any(),
containerID,
defaultConfig.DockerStopTimeout,
).Return(dockerapi.DockerContainerMetadata{}),
)
taskEngine.(*DockerTaskEngine).stopContainer(testTask, pauseContainer)
select {
case actualDelay := <-delayedChan:
assert.Equal(t, expectedDelay, actualDelay)
require.True(t, pauseContainer.IsContainerTornDown())
default:
assert.Fail(t, "engine.handleDelay wasn't called")
}
}
// TestCheckTearDownPauseContainer tests that the pause container teardown works and is idempotent
func TestCheckTearDownPauseContainer(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl)
taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient
testTask := testdata.LoadTask("sleep5")
pauseContainer := &apicontainer.Container{
Name: "pausecontainer",
Type: apicontainer.ContainerCNIPause,
DesiredStatusUnsafe: apicontainerstatus.ContainerStopped,
}
testTask.Containers = append(testTask.Containers, pauseContainer)
testTask.AddTaskENI(mockENI)
testTask.SetAppMesh(&appmesh.AppMesh{
IgnoredUID: ignoredUID,
ProxyIngressPort: proxyIngressPort,
ProxyEgressPort: proxyEgressPort,
AppPorts: []string{
appPort,
},
EgressIgnoredIPs: []string{
egressIgnoredIP,
},
})
taskEngine.(*DockerTaskEngine).State().AddTask(testTask)
taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{
DockerID: containerID,
DockerName: dockerContainerName,
Container: pauseContainer,
}, testTask)
gomock.InOrder(
dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: containerID,
State: &types.ContainerState{Pid: containerPid},
HostConfig: &dockercontainer.HostConfig{
NetworkMode: containerNetworkMode,
},
},
}, nil).MaxTimes(1),
mockCNIClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1),
)
taskEngine.(*DockerTaskEngine).checkTearDownPauseContainer(testTask)
require.True(t, pauseContainer.IsContainerTornDown())
// Invoke one more time to check for idempotency (mocks configured with maxTimes = 1)
taskEngine.(*DockerTaskEngine).checkTearDownPauseContainer(testTask)
}
// TestTaskWithCircularDependency tests the task with containers of which the
// dependencies can't be resolved
func TestTaskWithCircularDependency(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
client.EXPECT().ContainerEvents(gomock.Any())
task := testdata.LoadTask("circular_dependency")
err := taskEngine.Init(ctx)
assert.NoError(t, err)
events := taskEngine.StateChangeEvents()
go taskEngine.AddTask(task)
event := <-events
assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped, "Expected task to move to stopped directly")
_, ok := taskEngine.(*DockerTaskEngine).state.TaskByArn(task.Arn)
assert.True(t, ok, "Task state should be added to the agent state")
_, ok = taskEngine.(*DockerTaskEngine).managedTasks[task.Arn]
assert.False(t, ok, "Task should not be added to task manager for processing")
}
// TestCreateContainerOnAgentRestart tests that, when the agent restarts, the docker
// container name restored from the agent state file is used to create the container
func TestCreateContainerOnAgentRestart(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &config.Config{})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
state := taskEngine.State()
sleepTask := testdata.LoadTask("sleep5")
sleepContainer, _ := sleepTask.ContainerByName("sleep5")
// Store the generated container name to state
state.AddContainer(&apicontainer.DockerContainer{DockerID: "dockerID", DockerName: "docker_container_name", Container: sleepContainer}, sleepTask)
gomock.InOrder(
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil),
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), "docker_container_name", gomock.Any()),
)
metadata := taskEngine.createContainer(sleepTask, sleepContainer)
if metadata.Error != nil {
t.Error("Unexpected error", metadata.Error)
}
}
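// TestPullCNIImage tests that pullContainer skips the image pull for CNI pause
// containers and returns empty metadata.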
func TestPullCNIImage(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &config.Config{})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
container := &apicontainer.Container{
Type: apicontainer.ContainerCNIPause,
}
task := &apitask.Task{
Containers: []*apicontainer.Container{container},
}
metadata := taskEngine.pullContainer(task, container)
assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata")
}
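// TestPullNormalImage tests that a normal container's image is pulled and recorded
// with the image manager.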
func TestPullNormalImage(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine._time = nil
imageName := "image"
container := &apicontainer.Container{
Type: apicontainer.ContainerNormal,
Image: imageName,
}
task := &apitask.Task{
Containers: []*apicontainer.Container{container},
}
imageState := &image.ImageState{
Image: &image.Image{ImageID: "id"},
}
client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any())
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true)
metadata := taskEngine.pullContainer(task, container)
assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata")
}
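// TestPullImageWithImagePullOnceBehavior tests that, with the "once" pull behavior,
// the image is pulled only when the cached image state does not record a successful pull.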
func TestPullImageWithImagePullOnceBehavior(t *testing.T) {
testcases := []struct {
name string
pullSucceeded bool
}{
{
name: "PullSucceeded is true",
pullSucceeded: true,
},
{
name: "PullSucceeded is false",
pullSucceeded: false,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{ImagePullBehavior: config.ImagePullOnceBehavior})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine._time = nil
imageName := "image"
container := &apicontainer.Container{
Type: apicontainer.ContainerNormal,
Image: imageName,
}
task := &apitask.Task{
Containers: []*apicontainer.Container{container},
}
imageState := &image.ImageState{
Image: &image.Image{ImageID: "id"},
PullSucceeded: tc.pullSucceeded,
}
if !tc.pullSucceeded {
client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any())
}
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true).Times(2)
metadata := taskEngine.pullContainer(task, container)
assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata")
})
}
}
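// TestPullImageWithImagePullPreferCachedBehaviorWithCachedImage tests that, with the
// "prefer-cached" pull behavior, the pull is skipped when the image can be inspected locally.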
func TestPullImageWithImagePullPreferCachedBehaviorWithCachedImage(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{ImagePullBehavior: config.ImagePullPreferCachedBehavior})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine._time = nil
imageName := "image"
container := &apicontainer.Container{
Type: apicontainer.ContainerNormal,
Image: imageName,
}
task := &apitask.Task{
Containers: []*apicontainer.Container{container},
}
imageState := &image.ImageState{
Image: &image.Image{ImageID: "id"},
}
client.EXPECT().InspectImage(imageName).Return(nil, nil)
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true)
metadata := taskEngine.pullContainer(task, container)
assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata")
}
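// TestPullImageWithImagePullPreferCachedBehaviorWithoutCachedImage tests that, with the
// "prefer-cached" pull behavior, the image is pulled when it cannot be inspected locally.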
func TestPullImageWithImagePullPreferCachedBehaviorWithoutCachedImage(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{ImagePullBehavior: config.ImagePullPreferCachedBehavior})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine._time = nil
imageName := "image"
container := &apicontainer.Container{
Type: apicontainer.ContainerNormal,
Image: imageName,
}
task := &apitask.Task{
Containers: []*apicontainer.Container{container},
}
imageState := &image.ImageState{
Image: &image.Image{ImageID: "id"},
}
client.EXPECT().InspectImage(imageName).Return(nil, errors.New("error"))
client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any())
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true)
metadata := taskEngine.pullContainer(task, container)
assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata")
}
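// TestUpdateContainerReference tests that a successful pull marks PullSucceeded on the
// image state.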
func TestUpdateContainerReference(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{})
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine._time = nil
imageName := "image"
container := &apicontainer.Container{
Type: apicontainer.ContainerNormal,
Image: imageName,
}
task := &apitask.Task{
Containers: []*apicontainer.Container{container},
}
imageState := &image.ImageState{
Image: &image.Image{ImageID: "id"},
}
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true)
taskEngine.updateContainerReference(true, container, task.Arn)
assert.True(t, imageState.PullSucceeded, "PullSucceeded set to false")
}
// TestPullAndUpdateContainerReference checks whether a container is added to task engine state for the following cases:
// Test # | Image availability | DependentContainersPullUpfront | ImagePullBehavior
// -----------------------------------------------------------------------------------
// 1 | remote | enabled | default
// 2 | remote | disabled | default
// 3 | local | enabled | default
// 4 | local | enabled | once
// 5 | local | enabled | prefer-cached
// 6 | local | enabled | always
func TestPullAndUpdateContainerReference(t *testing.T) {
testcases := []struct {
Name string
ImagePullUpfront config.BooleanDefaultFalse
ImagePullBehavior config.ImagePullBehaviorType
ImageState *image.ImageState
ImageInspect *types.ImageInspect
InspectImage bool
NumOfPulledContainer int
PullImageErr apierrors.NamedError
}{
{
Name: "DependentContainersPullUpfrontEnabledWithRemoteImage",
ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
ImagePullBehavior: config.ImagePullDefaultBehavior,
ImageState: &image.ImageState{
Image: &image.Image{ImageID: "id"},
},
InspectImage: false,
NumOfPulledContainer: 1,
PullImageErr: nil,
},
{
Name: "DependentContainersPullUpfrontDisabledWithRemoteImage",
ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled},
ImagePullBehavior: config.ImagePullDefaultBehavior,
ImageState: &image.ImageState{
Image: &image.Image{ImageID: "id"},
},
InspectImage: false,
NumOfPulledContainer: 1,
PullImageErr: nil,
},
{
Name: "DependentContainersPullUpfrontEnabledWithCachedImage",
ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
ImagePullBehavior: config.ImagePullDefaultBehavior,
ImageState: nil,
ImageInspect: nil,
InspectImage: true,
NumOfPulledContainer: 1,
PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")},
},
{
Name: "DependentContainersPullUpfrontEnabledAndImagePullOnceBehavior",
ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
ImagePullBehavior: config.ImagePullOnceBehavior,
ImageState: nil,
ImageInspect: nil,
InspectImage: true,
NumOfPulledContainer: 1,
PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")},
},
{
Name: "DependentContainersPullUpfrontEnabledAndImagePullPreferCachedBehavior",
ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
ImagePullBehavior: config.ImagePullPreferCachedBehavior,
ImageState: nil,
ImageInspect: nil,
InspectImage: true,
NumOfPulledContainer: 1,
PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")},
},
{
Name: "DependentContainersPullUpfrontEnabledAndImagePullAlwaysBehavior",
ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
ImagePullBehavior: config.ImagePullAlwaysBehavior,
ImageState: nil,
ImageInspect: nil,
InspectImage: false,
NumOfPulledContainer: 0,
PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")},
},
}
for _, tc := range testcases {
t.Run(tc.Name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cfg := &config.Config{
DependentContainersPullUpfront: tc.ImagePullUpfront,
ImagePullBehavior: tc.ImagePullBehavior,
}
ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, cfg)
defer ctrl.Finish()
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
taskEngine._time = nil
imageName := "image"
taskArn := "taskArn"
container := &apicontainer.Container{
Type: apicontainer.ContainerNormal,
Image: imageName,
Essential: true,
}
task := &apitask.Task{
Arn: taskArn,
Containers: []*apicontainer.Container{container},
}
client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any()).
Return(dockerapi.DockerContainerMetadata{Error: tc.PullImageErr})
if tc.InspectImage {
client.EXPECT().InspectImage(imageName).Return(tc.ImageInspect, nil)
}
imageManager.EXPECT().RecordContainerReference(container)
imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(tc.ImageState, false)
metadata := taskEngine.pullAndUpdateContainerReference(task, container)
pulledContainersMap, _ := taskEngine.State().PulledContainerMapByArn(taskArn)
require.Len(t, pulledContainersMap, tc.NumOfPulledContainer)
assert.Equal(t, dockerapi.DockerContainerMetadata{Error: tc.PullImageErr},
metadata, "expected metadata with error")
})
}
}
// TestMetadataFileUpdatedAgentRestart checks whether metadataManager.Update(...) is
// invoked in the path DockerTaskEngine.Init() -> .synchronizeState() -> .updateMetadataFile(...)
// for the following case:
// agent starts, container is created, metadata file is created, agent restarts,
// container is recovered during task engine init, and the metadata file is updated
func TestMetadataFileUpdatedAgentRestart(t *testing.T) {
conf := &defaultConfig
conf.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, imageManager, metadataManager := mocks(t, ctx, conf)
defer ctrl.Finish()
var metadataUpdateWG sync.WaitGroup
metadataUpdateWG.Add(1)
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine)
assert.True(t, taskEngine.cfg.ContainerMetadataEnabled.Enabled(), "ContainerMetadataEnabled set to false.")
taskEngine._time = nil
state := taskEngine.State()
task := testdata.LoadTask("sleep5")
container, _ := task.ContainerByName("sleep5")
assert.False(t, container.MetadataFileUpdated)
container.SetKnownStatus(apicontainerstatus.ContainerRunning)
dockerContainer := &apicontainer.DockerContainer{DockerID: containerID, Container: container}
expectedTaskARN := task.Arn
expectedDockerID := dockerContainer.DockerID
expectedContainerName := container.Name
state.AddTask(task)
state.AddContainer(dockerContainer, task)
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any())
imageManager.EXPECT().RecordContainerReference(gomock.Any())
metadataManager.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, dockerID string, task *apitask.Task, containerName string) {
assert.Equal(t, expectedTaskARN, task.Arn)
assert.Equal(t, expectedContainerName, containerName)
assert.Equal(t, expectedDockerID, dockerID)
metadataUpdateWG.Done()
})
err := taskEngine.Init(ctx)
assert.NoError(t, err)
defer taskEngine.Disable()
metadataUpdateWG.Wait()
}
// TestTaskUseExecutionRolePullECRImage tests that the agent uses the execution role
// credentials to pull from an ECR repository
func TestTaskUseExecutionRolePullECRImage(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, _ := mocks(
t, ctx, &defaultConfig)
defer ctrl.Finish()
credentialsID := "execution role"
accessKeyID := "akid"
secretAccessKey := "sakid"
sessionToken := "token"
executionRoleCredentials := credentials.IAMRoleCredentials{
CredentialsID: credentialsID,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
SessionToken: sessionToken,
}
testTask := testdata.LoadTask("sleep5")
// Configure the task and container to use execution role
testTask.SetExecutionRoleCredentialsID(credentialsID)
testTask.Containers[0].RegistryAuthentication = &apicontainer.RegistryAuthenticationData{
Type: "ecr",
ECRAuthData: &apicontainer.ECRAuthData{
UseExecutionRole: true,
},
}
container := testTask.Containers[0]
mockTime.EXPECT().Now().AnyTimes()
credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(credentials.TaskIAMRoleCredentials{
ARN: "",
IAMRoleCredentials: executionRoleCredentials,
}, true)
client.EXPECT().PullImage(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, image string, auth *apicontainer.RegistryAuthenticationData, timeout interface{}) {
assert.Equal(t, container.Image, image)
assert.Equal(t, auth.ECRAuthData.GetPullCredentials(), executionRoleCredentials)
}).Return(dockerapi.DockerContainerMetadata{})
imageManager.EXPECT().RecordContainerReference(container).Return(nil)
imageManager.EXPECT().GetImageStateFromImageName(container.Image)
taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
}
// TestTaskUseExecutionRolePullPrivateRegistryImage tests that the agent uses the
// execution role credentials to pull from a private registry
func TestTaskUseExecutionRolePullPrivateRegistryImage(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, _ := mocks(
t, ctx, &defaultConfig)
defer ctrl.Finish()
credentialsID := "execution role"
accessKeyID := "akid"
secretAccessKey := "sakid"
sessionToken := "token"
executionRoleCredentials := credentials.IAMRoleCredentials{
CredentialsID: credentialsID,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
SessionToken: sessionToken,
}
testTask := testdata.LoadTask("sleep5")
// Configure the task and container to use execution role
testTask.SetExecutionRoleCredentialsID(credentialsID)
asmAuthData := &apicontainer.ASMAuthData{
CredentialsParameter: secretID,
Region: region,
}
testTask.Containers[0].RegistryAuthentication = &apicontainer.RegistryAuthenticationData{
Type: "asm",
ASMAuthData: asmAuthData,
}
requiredASMResources := []*apicontainer.ASMAuthData{asmAuthData}
asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl)
asmAuthRes := asmauth.NewASMAuthResource(testTask.Arn, requiredASMResources,
credentialsID, credentialsManager, asmClientCreator)
testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{
asmauth.ResourceName: {asmAuthRes},
}
mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl)
asmAuthDataBytes, _ := json.Marshal(&asm.AuthDataValue{
Username: aws.String(username),
Password: aws.String(password),
})
asmAuthDataVal := string(asmAuthDataBytes)
asmSecretValue := &secretsmanager.GetSecretValueOutput{
SecretString: aws.String(asmAuthDataVal),
}
gomock.InOrder(
credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(
credentials.TaskIAMRoleCredentials{
ARN: "",
IAMRoleCredentials: executionRoleCredentials,
}, true),
asmClientCreator.EXPECT().NewASMClient(region, executionRoleCredentials).Return(mockASMClient),
mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Return(asmSecretValue, nil),
)
require.NoError(t, asmAuthRes.Create())
container := testTask.Containers[0]
mockTime.EXPECT().Now().AnyTimes()
client.EXPECT().PullImage(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, image string, auth *apicontainer.RegistryAuthenticationData, timeout interface{}) {
assert.Equal(t, container.Image, image)
dac := auth.ASMAuthData.GetDockerAuthConfig()
assert.Equal(t, username, dac.Username)
assert.Equal(t, password, dac.Password)
}).Return(dockerapi.DockerContainerMetadata{})
imageManager.EXPECT().RecordContainerReference(container).Return(nil)
imageManager.EXPECT().GetImageStateFromImageName(container.Image)
ret := taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
assert.Nil(t, ret.Error)
}
// TestTaskUseExecutionRolePullPrivateRegistryImageNoASMResource tests that the
// docker task engine returns an error when the required ASM auth resource is missing
func TestTaskUseExecutionRolePullPrivateRegistryImageNoASMResource(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, mockTime, taskEngine, _, _, _ := mocks(
t, ctx, &defaultConfig)
defer ctrl.Finish()
testTask := testdata.LoadTask("sleep5")
// Configure the task and container to use execution role
testTask.SetExecutionRoleCredentialsID(credentialsID)
asmAuthData := &apicontainer.ASMAuthData{
CredentialsParameter: secretID,
Region: region,
}
testTask.Containers[0].RegistryAuthentication = &apicontainer.RegistryAuthenticationData{
Type: "asm",
ASMAuthData: asmAuthData,
}
// no asm auth resource in task
testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{}
container := testTask.Containers[0]
mockTime.EXPECT().Now().AnyTimes()
// ensure pullContainer returns error
ret := taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
assert.NotNil(t, ret.Error)
}
// TestNewTaskTransitionOnRestart tests that the agent processes the task recorded in
// the state file on restart
func TestNewTaskTransitionOnRestart(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
mockTime.EXPECT().Now().AnyTimes()
mockTime.EXPECT().After(gomock.Any()).AnyTimes()
client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1)
client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1)
err := taskEngine.Init(ctx)
assert.NoError(t, err)
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
state := dockerTaskEngine.State()
testTask := testdata.LoadTask("sleep5")
// add the task to the state to simulate the agent restoring its state on restart
state.AddTask(testTask)
// Set the task to be stopped so that processing finishes quickly
testTask.SetDesiredStatus(apitaskstatus.TaskStopped)
dockerTaskEngine.synchronizeState()
_, ok := dockerTaskEngine.managedTasks[testTask.Arn]
assert.True(t, ok, "task wasnot started")
}
// TestTaskWaitForHostResourceOnRestart tests that a task stopped by ACS that has not
// yet reached the stopped state blocks a later task from starting
func TestTaskWaitForHostResourceOnRestart(t *testing.T) {
// Task 1 stopped by backend
taskStoppedByACS := testdata.LoadTask("sleep5")
taskStoppedByACS.SetDesiredStatus(apitaskstatus.TaskStopped)
taskStoppedByACS.SetStopSequenceNumber(1)
taskStoppedByACS.SetKnownStatus(apitaskstatus.TaskRunning)
// Task 2 has essential container stopped
taskEssentialContainerStopped := testdata.LoadTask("sleep5")
taskEssentialContainerStopped.Arn = "task_Essential_Container_Stopped"
taskEssentialContainerStopped.SetDesiredStatus(apitaskstatus.TaskStopped)
taskEssentialContainerStopped.SetKnownStatus(apitaskstatus.TaskRunning)
// Normal task 3 needs to be started
taskNotStarted := testdata.LoadTask("sleep5")
taskNotStarted.Arn = "task_Not_started"
conf := &defaultConfig
conf.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, conf)
defer ctrl.Finish()
client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1)
client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1)
err := privateTaskEngine.Init(ctx)
assert.NoError(t, err)
taskEngine := privateTaskEngine.(*DockerTaskEngine)
taskEngine.State().AddTask(taskStoppedByACS)
taskEngine.State().AddTask(taskNotStarted)
taskEngine.State().AddTask(taskEssentialContainerStopped)
taskEngine.State().AddContainer(&apicontainer.DockerContainer{
Container: taskStoppedByACS.Containers[0],
DockerID: containerID + "1",
DockerName: dockerContainerName + "1",
}, taskStoppedByACS)
taskEngine.State().AddContainer(&apicontainer.DockerContainer{
Container: taskNotStarted.Containers[0],
DockerID: containerID + "2",
DockerName: dockerContainerName + "2",
}, taskNotStarted)
taskEngine.State().AddContainer(&apicontainer.DockerContainer{
Container: taskEssentialContainerStopped.Containers[0],
DockerID: containerID + "3",
DockerName: dockerContainerName + "3",
}, taskEssentialContainerStopped)
// these are performed in synchronizeState on restart
client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()).Return(apicontainerstatus.ContainerRunning, dockerapi.DockerContainerMetadata{
DockerID: containerID,
}).Times(3)
imageManager.EXPECT().RecordContainerReference(gomock.Any()).Times(3)
// start the two tasks
taskEngine.synchronizeState()
var waitStopWG sync.WaitGroup
waitStopWG.Add(1)
go func() {
// This is to confirm the other task is waiting
time.Sleep(1 * time.Second)
// Remove the task sequence number 1 from waitgroup
taskEngine.taskStopGroup.Done(1)
waitStopWG.Done()
}()
// task with sequence number 2 should wait until 1 is removed from the waitgroup
taskEngine.taskStopGroup.Wait(2)
waitStopWG.Wait()
}
// TestPullStartedStoppedAtWasSetCorrectly tests that PullStartedAt and PullStoppedAt
// are set correctly
func TestPullStartedStoppedAtWasSetCorrectly(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
testTask := &apitask.Task{
Arn: "taskArn",
}
container := &apicontainer.Container{
Image: "image1",
}
startTime1 := time.Now()
startTime2 := startTime1.Add(time.Second)
startTime3 := startTime2.Add(time.Second)
stopTime1 := startTime3.Add(time.Second)
stopTime2 := stopTime1.Add(time.Second)
stopTime3 := stopTime2.Add(time.Second)
client.EXPECT().PullImage(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
imageManager.EXPECT().RecordContainerReference(gomock.Any()).Times(3)
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false).Times(3)
gomock.InOrder(
// three container pull start timestamps
mockTime.EXPECT().Now().Return(startTime1),
mockTime.EXPECT().Now().Return(startTime2),
mockTime.EXPECT().Now().Return(startTime3),
// three container pull stop timestamps
mockTime.EXPECT().Now().Return(stopTime1),
mockTime.EXPECT().Now().Return(stopTime2),
mockTime.EXPECT().Now().Return(stopTime3),
)
// Pull the three images; PullStartedAt should be the start of the first pull
// and PullStoppedAt should be the completion of the last pull
taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
assert.Equal(t, testTask.PullStartedAtUnsafe, startTime1)
assert.Equal(t, testTask.PullStoppedAtUnsafe, stopTime3)
}
// TestPullStoppedAtWasSetCorrectlyWhenPullFail tests that PullStoppedAt is set
// correctly when a pull fails
func TestPullStoppedAtWasSetCorrectlyWhenPullFail(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
testTask := &apitask.Task{
Arn: "taskArn",
}
container := &apicontainer.Container{
Image: "image1",
}
startTime1 := time.Now()
startTime2 := startTime1.Add(time.Second)
startTime3 := startTime2.Add(time.Second)
stopTime1 := startTime3.Add(time.Second)
stopTime2 := stopTime1.Add(time.Second)
stopTime3 := stopTime2.Add(time.Second)
gomock.InOrder(
client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(
dockerapi.DockerContainerMetadata{Error: dockerapi.CannotPullContainerError{fmt.Errorf("error")}}),
)
imageManager.EXPECT().RecordContainerReference(gomock.Any()).Times(3)
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false).Times(3)
gomock.InOrder(
// three container pull start timestamps
mockTime.EXPECT().Now().Return(startTime1),
mockTime.EXPECT().Now().Return(startTime2),
mockTime.EXPECT().Now().Return(startTime3),
// three container pull stop timestamps
mockTime.EXPECT().Now().Return(stopTime1),
mockTime.EXPECT().Now().Return(stopTime2),
mockTime.EXPECT().Now().Return(stopTime3),
)
// Pull the three images; PullStartedAt should be the start of the first pull
// and PullStoppedAt should be the completion of the last pull, even though it fails
taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
taskEngine.(*DockerTaskEngine).pullContainer(testTask, container)
assert.Equal(t, testTask.PullStartedAtUnsafe, startTime1)
assert.Equal(t, testTask.PullStoppedAtUnsafe, stopTime3)
}
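// TestSynchronizeContainerStatus tests that container metadata (labels, creation time,
// volumes) is restored from DescribeContainer during state synchronization.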
func TestSynchronizeContainerStatus(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
dockerID := "1234"
dockerContainer := &apicontainer.DockerContainer{
DockerID: dockerID,
DockerName: "c1",
Container: &apicontainer.Container{},
}
labels := map[string]string{
"name": "metadata",
}
volumes := []types.MountPoint{
{
Name: "volume",
Source: "/src/vol",
Destination: "/vol",
},
}
created := time.Now()
gomock.InOrder(
client.EXPECT().DescribeContainer(gomock.Any(), dockerID).Return(apicontainerstatus.ContainerRunning,
dockerapi.DockerContainerMetadata{
Labels: labels,
DockerID: dockerID,
CreatedAt: created,
Volumes: volumes,
}),
imageManager.EXPECT().RecordContainerReference(dockerContainer.Container),
)
taskEngine.(*DockerTaskEngine).synchronizeContainerStatus(dockerContainer, nil)
assert.Equal(t, created, dockerContainer.Container.GetCreatedAt())
assert.Equal(t, labels, dockerContainer.Container.GetLabels())
assert.Equal(t, volumes, dockerContainer.Container.GetVolumes())
}
// TestHandleDockerHealthEvent tests that a docker health event only changes the
// container's health status
func TestHandleDockerHealthEvent(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
state := taskEngine.(*DockerTaskEngine).State()
testTask := testdata.LoadTask("sleep5")
testContainer := testTask.Containers[0]
testContainer.HealthCheckType = "docker"
state.AddTask(testTask)
state.AddContainer(&apicontainer.DockerContainer{DockerID: "id",
DockerName: "container_name",
Container: testContainer,
}, testTask)
taskEngine.(*DockerTaskEngine).handleDockerEvent(dockerapi.DockerContainerChangeEvent{
Status: apicontainerstatus.ContainerRunning,
Type: apicontainer.ContainerHealthEvent,
DockerContainerMetadata: dockerapi.DockerContainerMetadata{
DockerID: "id",
Health: apicontainer.HealthStatus{
Status: apicontainerstatus.ContainerHealthy,
},
},
})
assert.Equal(t, testContainer.Health.Status, apicontainerstatus.ContainerHealthy)
}
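// TestContainerMetadataUpdatedOnRestart tests that container metadata is synchronized
// correctly when the agent restarts while a container is in the created, started,
// stopped, or failed stage.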
func TestContainerMetadataUpdatedOnRestart(t *testing.T) {
dockerID := "dockerID_created"
labels := map[string]string{
"name": "metadata",
}
testCases := []struct {
stage string
status apicontainerstatus.ContainerStatus
created time.Time
started time.Time
finished time.Time
portBindings []apicontainer.PortBinding
exitCode *int
err dockerapi.DockerStateError
}{
{
stage: "created",
status: apicontainerstatus.ContainerCreated,
created: time.Now(),
},
{
stage: "started",
status: apicontainerstatus.ContainerRunning,
started: time.Now(),
portBindings: []apicontainer.PortBinding{
{
ContainerPort: 80,
HostPort: 80,
BindIP: "0.0.0.0/0",
Protocol: apicontainer.TransportProtocolTCP,
},
},
},
{
stage: "stopped",
status: apicontainerstatus.ContainerStopped,
finished: time.Now(),
exitCode: aws.Int(1),
},
{
stage: "failed",
status: apicontainerstatus.ContainerStopped,
err: dockerapi.NewDockerStateError("error"),
exitCode: aws.Int(1),
},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("Agent restarted during container: %s", tc.stage), func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
dockerContainer := &apicontainer.DockerContainer{
DockerID: dockerID,
DockerName: fmt.Sprintf("docker%s", tc.stage),
Container: &apicontainer.Container{},
}
task := &apitask.Task{}
if tc.stage == "created" {
dockerContainer.DockerID = ""
task.Volumes = []apitask.TaskVolume{
{
Name: "empty",
Volume: &taskresourcevolume.LocalDockerVolume{},
},
}
client.EXPECT().InspectContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(&types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: dockerID,
Created: (tc.created).Format(time.RFC3339),
State: &types.ContainerState{
Health: &types.Health{},
},
HostConfig: &dockercontainer.HostConfig{
NetworkMode: containerNetworkMode,
},
},
Config: &dockercontainer.Config{
Labels: labels,
},
}, nil)
imageManager.EXPECT().RecordContainerReference(dockerContainer.Container).AnyTimes()
} else {
client.EXPECT().DescribeContainer(gomock.Any(), dockerID).Return(tc.status, dockerapi.DockerContainerMetadata{
Labels: labels,
DockerID: dockerID,
CreatedAt: tc.created,
StartedAt: tc.started,
FinishedAt: tc.finished,
PortBindings: tc.portBindings,
ExitCode: tc.exitCode,
Error: tc.err,
})
imageManager.EXPECT().RecordContainerReference(dockerContainer.Container).AnyTimes()
}
taskEngine.(*DockerTaskEngine).synchronizeContainerStatus(dockerContainer, task)
assert.Equal(t, labels, dockerContainer.Container.GetLabels())
assert.Equal(t, (tc.created).Format(time.RFC3339), (dockerContainer.Container.GetCreatedAt()).Format(time.RFC3339))
assert.Equal(t, (tc.started).Format(time.RFC3339), (dockerContainer.Container.GetStartedAt()).Format(time.RFC3339))
assert.Equal(t, (tc.finished).Format(time.RFC3339), (dockerContainer.Container.GetFinishedAt()).Format(time.RFC3339))
if tc.stage == "started" {
assert.Equal(t, uint16(80), dockerContainer.Container.KnownPortBindingsUnsafe[0].ContainerPort)
}
if tc.stage == "finished" {
assert.False(t, task.GetExecutionStoppedAt().IsZero())
assert.Equal(t, tc.exitCode, dockerContainer.Container.GetKnownExitCode())
}
if tc.stage == "failed" {
assert.Equal(t, tc.exitCode, dockerContainer.Container.GetKnownExitCode())
assert.NotNil(t, dockerContainer.Container.ApplyingError)
}
})
}
}
// TestContainerProgressParallize tests that containers can be processed in parallel
func TestContainerProgressParallize(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
stateChangeEvents := taskEngine.StateChangeEvents()
eventStream := make(chan dockerapi.DockerContainerChangeEvent)
state := taskEngine.(*DockerTaskEngine).State()
fastPullImage := "fast-pull-image"
slowPullImage := "slow-pull-image"
testTask := testdata.LoadTask("sleep5")
containerTwo := &apicontainer.Container{
Name: fastPullImage,
Image: fastPullImage,
}
testTask.Containers = append(testTask.Containers, containerTwo)
testTask.Containers[0].Image = slowPullImage
testTask.Containers[0].Name = slowPullImage
var fastContainerDockerName string
var slowContainerDockerName string
fastContainerDockerID := "fast-pull-container-id"
slowContainerDockerID := "slow-pull-container-id"
var waitForFastPullContainer sync.WaitGroup
waitForFastPullContainer.Add(1)
client.EXPECT().Version(gomock.Any(), gomock.Any()).Return("17.12.0", nil).AnyTimes()
testTime.EXPECT().Now().Return(time.Now()).AnyTimes()
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes()
imageManager.EXPECT().RecordContainerReference(gomock.Any()).Return(nil).AnyTimes()
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false).AnyTimes()
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
client.EXPECT().PullImage(gomock.Any(), fastPullImage, gomock.Any(), gomock.Any())
client.EXPECT().PullImage(gomock.Any(), slowPullImage, gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, image interface{}, auth interface{}, timeout interface{}) {
waitForFastPullContainer.Wait()
})
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, cfg interface{}, hostconfig interface{}, name string, duration interface{}) {
if strings.Contains(name, slowPullImage) {
slowContainerDockerName = name
state.AddContainer(&apicontainer.DockerContainer{
DockerID: slowContainerDockerID,
DockerName: slowContainerDockerName,
Container: testTask.Containers[0],
}, testTask)
go func() {
event := createDockerEvent(apicontainerstatus.ContainerCreated)
event.DockerID = slowContainerDockerID
eventStream <- event
}()
} else if strings.Contains(name, fastPullImage) {
fastContainerDockerName = name
state.AddTask(testTask)
state.AddContainer(&apicontainer.DockerContainer{
DockerID: fastContainerDockerID,
DockerName: fastContainerDockerName,
Container: testTask.Containers[1],
}, testTask)
go func() {
event := createDockerEvent(apicontainerstatus.ContainerCreated)
event.DockerID = fastContainerDockerID
eventStream <- event
}()
} else {
t.Fatalf("Got unexpected name for creating container: %s", name)
}
}).Times(2)
client.EXPECT().StartContainer(gomock.Any(), fastContainerDockerID, gomock.Any()).Do(
func(ctx interface{}, id string, duration interface{}) {
go func() {
event := createDockerEvent(apicontainerstatus.ContainerRunning)
event.DockerID = fastContainerDockerID
eventStream <- event
}()
})
client.EXPECT().StartContainer(gomock.Any(), slowContainerDockerID, gomock.Any()).Do(
func(ctx interface{}, id string, duration interface{}) {
go func() {
event := createDockerEvent(apicontainerstatus.ContainerRunning)
event.DockerID = slowContainerDockerID
eventStream <- event
}()
})
taskEngine.Init(ctx)
taskEngine.AddTask(testTask)
// Expect the fast-pulled container to be running first
fastPullContainerRunning := false
for event := range stateChangeEvents {
containerEvent, ok := event.(api.ContainerStateChange)
if ok && containerEvent.Status == apicontainerstatus.ContainerRunning {
if containerEvent.ContainerName == fastPullImage {
fastPullContainerRunning = true
// The second container should start processing now
waitForFastPullContainer.Done()
continue
}
assert.True(t, fastPullContainerRunning, "got the slower pulled container running events first")
continue
}
taskEvent, ok := event.(api.TaskStateChange)
if ok && taskEvent.Status == apitaskstatus.TaskRunning {
break
}
t.Errorf("Got unexpected task event: %v", taskEvent.String())
}
defer discardEvents(stateChangeEvents)()
// stop and clean up the task
cleanup := make(chan time.Time)
client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(
dockerapi.DockerContainerMetadata{DockerID: fastContainerDockerID}).AnyTimes()
client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(
dockerapi.DockerContainerMetadata{DockerID: slowContainerDockerID}).AnyTimes()
testTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1)
client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2)
imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()).Return(nil).Times(2)
containerStoppedEvent := createDockerEvent(apicontainerstatus.ContainerStopped)
containerStoppedEvent.DockerID = slowContainerDockerID
eventStream <- containerStoppedEvent
testTask.SetSentStatus(apitaskstatus.TaskStopped)
cleanup <- time.Now()
for {
tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks()
if len(tasks) == 0 {
break
}
time.Sleep(5 * time.Millisecond)
}
}
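// TestSynchronizeResource tests that task resources (a cgroup resource here) are
// initialized during state synchronization on restart.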
func TestSynchronizeResource(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
mockTime.EXPECT().Now().AnyTimes()
client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1)
client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1)
err := taskEngine.Init(ctx)
assert.NoError(t, err)
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
state := dockerTaskEngine.State()
cgroupResource := mock_taskresource.NewMockTaskResource(ctrl)
testTask := testdata.LoadTask("sleep5")
testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{
"cgroup": {
cgroupResource,
},
}
// add the task to the state to simulate the agent restoring its state on restart
state.AddTask(testTask)
cgroupResource.EXPECT().Initialize(gomock.Any(), gomock.Any(), gomock.Any())
cgroupResource.EXPECT().SetDesiredStatus(gomock.Any()).MaxTimes(1)
cgroupResource.EXPECT().GetDesiredStatus().MaxTimes(2)
cgroupResource.EXPECT().TerminalStatus().MaxTimes(1)
cgroupResource.EXPECT().SteadyState().MaxTimes(1)
cgroupResource.EXPECT().GetKnownStatus().MaxTimes(1)
cgroupResource.EXPECT().GetName().AnyTimes().Return("cgroup")
cgroupResource.EXPECT().StatusString(gomock.Any()).AnyTimes()
// Set the task to be stopped so that processing finishes quickly
testTask.SetDesiredStatus(apitaskstatus.TaskStopped)
dockerTaskEngine.synchronizeState()
}
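// TestSynchronizeENIAttachment tests that an ENI attachment restored from state has
// its ack timer initialized by synchronizeState.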
func TestSynchronizeENIAttachment(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
mockTime.EXPECT().Now().AnyTimes()
client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1)
client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1)
err := taskEngine.Init(ctx)
assert.NoError(t, err)
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
state := dockerTaskEngine.State()
testTask := testdata.LoadTask("sleep5")
expiresAt := time.Now().Unix() + 1
attachment := &apieni.ENIAttachment{
TaskARN: "TaskARN",
AttachmentARN: "AttachmentARN",
MACAddress: "MACAddress",
Status: apieni.ENIAttachmentNone,
ExpiresAt: time.Unix(expiresAt, 0),
}
state.AddENIAttachment(attachment)
state.AddTask(testTask)
testTask.SetDesiredStatus(apitaskstatus.TaskStopped)
dockerTaskEngine.synchronizeState()
// If the call below does not panic with a nil pointer dereference, the ENI attachment
// was properly initialized in synchronizeState.
attachment.StopAckTimer()
}
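// TestSynchronizeENIAttachmentRemoveData tests that ENI attachment data is removed
// from the data client during synchronizeState when the attach status was never sent.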
func TestSynchronizeENIAttachmentRemoveData(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
dataClient, cleanup := newTestDataClient(t)
defer cleanup()
client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1)
err := taskEngine.Init(ctx)
assert.NoError(t, err)
taskEngine.(*DockerTaskEngine).dataClient = dataClient
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
attachment := &apieni.ENIAttachment{
TaskARN: "TaskARN",
AttachmentARN: testAttachmentArn,
MACAddress: "MACAddress",
Status: apieni.ENIAttachmentNone,
AttachStatusSent: false,
}
// eni attachment data is removed if AttachStatusSent is unset
dockerTaskEngine.state.AddENIAttachment(attachment)
assert.NoError(t, dockerTaskEngine.dataClient.SaveENIAttachment(attachment))
dockerTaskEngine.synchronizeState()
attachments, err := dockerTaskEngine.dataClient.GetENIAttachments()
assert.NoError(t, err)
assert.Len(t, attachments, 0)
}
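// TestTaskSecretsEnvironmentVariables tests that SSM and ASM secrets of type
// ENVIRONMENT_VARIABLE are injected into the container config as environment
// variables in createContainer.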
func TestTaskSecretsEnvironmentVariables(t *testing.T) {
// metadata required for createContainer workflow validation
taskARN := "secretsTask"
taskFamily := "secretsTaskFamily"
taskVersion := "1"
taskContainerName := "secretsContainer"
// metadata required for ssm secret resource validation
ssmSecretName := "mySSMSecret"
ssmSecretValueFrom := "ssm/mySecret"
ssmSecretRetrievedValue := "mySSMSecretValue"
ssmSecretRegion := "us-west-2"
// metadata required for asm secret resource validation
asmSecretName := "myASMSecret"
asmSecretValueFrom := "arn:aws:secretsmanager:region:account-id:secret:" + asmSecretName
asmSecretRetrievedValue := "myASMSecretValue"
asmSecretRegion := "us-west-2"
asmSecretKey := asmSecretValueFrom + "_" + asmSecretRegion
ssmExpectedEnvVar := ssmSecretName + "=" + ssmSecretRetrievedValue
asmExpectedEnvVar := asmSecretName + "=" + asmSecretRetrievedValue
testCases := []struct {
name string
secrets []apicontainer.Secret
ssmSecret apicontainer.Secret
asmSecret apicontainer.Secret
expectedEnv []string
}{
{
name: "ASMSecretAsEnv",
secrets: []apicontainer.Secret{
{
Name: ssmSecretName,
ValueFrom: ssmSecretValueFrom,
Region: ssmSecretRegion,
Target: "LOG_DRIVER",
Provider: "ssm",
},
{
Name: asmSecretName,
ValueFrom: asmSecretValueFrom,
Region: asmSecretRegion,
Type: "ENVIRONMENT_VARIABLE",
Provider: "asm",
},
},
ssmSecret: apicontainer.Secret{
Name: ssmSecretName,
ValueFrom: ssmSecretValueFrom,
Region: ssmSecretRegion,
Target: "LOG_DRIVER",
Provider: "ssm",
},
asmSecret: apicontainer.Secret{
Name: asmSecretName,
ValueFrom: asmSecretValueFrom,
Region: asmSecretRegion,
Type: "ENVIRONMENT_VARIABLE",
Provider: "asm",
},
expectedEnv: []string{asmExpectedEnvVar},
},
{
name: "SSMSecretAsEnv",
secrets: []apicontainer.Secret{
{
Name: ssmSecretName,
ValueFrom: ssmSecretValueFrom,
Region: ssmSecretRegion,
Type: "ENVIRONMENT_VARIABLE",
Provider: "ssm",
},
{
Name: asmSecretName,
ValueFrom: asmSecretValueFrom,
Region: asmSecretRegion,
Target: "LOG_DRIVER",
Provider: "asm",
},
},
ssmSecret: apicontainer.Secret{
Name: ssmSecretName,
ValueFrom: ssmSecretValueFrom,
Region: ssmSecretRegion,
Type: "ENVIRONMENT_VARIABLE",
Provider: "ssm",
},
asmSecret: apicontainer.Secret{
Name: asmSecretName,
ValueFrom: asmSecretValueFrom,
Region: asmSecretRegion,
Target: "LOG_DRIVER",
Provider: "asm",
},
expectedEnv: []string{ssmExpectedEnvVar},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, mockTime, taskEngine, credentialsManager, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
// sample test task with the test case's secrets configured on its container
testTask := &apitask.Task{
Arn: taskARN,
Family: taskFamily,
Version: taskVersion,
Containers: []*apicontainer.Container{
{
Name: taskContainerName,
Secrets: tc.secrets,
},
},
}
// metadata required for execution role authentication workflow
credentialsID := "execution role"
executionRoleCredentials := credentials.IAMRoleCredentials{
CredentialsID: credentialsID,
}
taskIAMcreds := credentials.TaskIAMRoleCredentials{
IAMRoleCredentials: executionRoleCredentials,
}
// configure the task and container to use execution role
testTask.SetExecutionRoleCredentialsID(credentialsID)
// validate base config
expectedConfig, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion)
if err != nil {
t.Fatal(err)
}
expectedConfig.Labels = map[string]string{
"com.amazonaws.ecs.task-arn": taskARN,
"com.amazonaws.ecs.container-name": taskContainerName,
"com.amazonaws.ecs.task-definition-family": taskFamily,
"com.amazonaws.ecs.task-definition-version": taskVersion,
"com.amazonaws.ecs.cluster": "",
}
// required to validate container config includes secrets as environment variables
expectedConfig.Env = tc.expectedEnv
// required for validating ssm workflows
ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl)
mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl)
ssmRequirements := map[string][]apicontainer.Secret{
ssmSecretRegion: []apicontainer.Secret{
tc.ssmSecret,
},
}
ssmSecretRes := ssmsecret.NewSSMSecretResource(
testTask.Arn,
ssmRequirements,
credentialsID,
credentialsManager,
ssmClientCreator)
// required for validating asm workflows
asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl)
mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl)
asmRequirements := map[string]apicontainer.Secret{
asmSecretKey: tc.asmSecret,
}
asmSecretRes := asmsecret.NewASMSecretResource(
testTask.Arn,
asmRequirements,
credentialsID,
credentialsManager,
asmClientCreator)
testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{
ssmsecret.ResourceName: {ssmSecretRes},
asmsecret.ResourceName: {asmSecretRes},
}
ssmClientOutput := &ssm.GetParametersOutput{
InvalidParameters: []*string{},
Parameters: []*ssm.Parameter{
&ssm.Parameter{
Name: aws.String(ssmSecretValueFrom),
Value: aws.String(ssmSecretRetrievedValue),
},
},
}
asmClientOutput := &secretsmanager.GetSecretValueOutput{
SecretString: aws.String(asmSecretRetrievedValue),
}
reqSecretNames := []*string{aws.String(ssmSecretValueFrom)}
credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(taskIAMcreds, true).Times(2)
ssmClientCreator.EXPECT().NewSSMClient(region, executionRoleCredentials).Return(mockSSMClient)
asmClientCreator.EXPECT().NewASMClient(region, executionRoleCredentials).Return(mockASMClient)
mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) {
assert.Equal(t, in.Names, reqSecretNames)
}).Return(ssmClientOutput, nil).Times(1)
mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) {
assert.Equal(t, asmSecretValueFrom, aws.StringValue(in.SecretId))
}).Return(asmClientOutput, nil).Times(1)
require.NoError(t, ssmSecretRes.Create())
require.NoError(t, asmSecretRes.Create())
mockTime.EXPECT().Now().AnyTimes()
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
// validate that the expected config includes the secrets appended as
// environment variables
client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any())
ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0])
assert.Nil(t, ret.Error)
})
}
}
// TestCreateContainerAddFirelensLogDriverConfig tests that in createContainer, when the
// container is using firelens log driver, its logConfig is properly set.
func TestCreateContainerAddFirelensLogDriverConfig(t *testing.T) {
taskName := "logSenderTask"
taskARN := "arn:aws:ecs:region:account-id:task/task-id"
taskID := "task-id"
taskFamily := "logSenderTaskFamily"
taskVersion := "1"
logDriverTypeFirelens := "awsfirelens"
dataLogDriverPath := "/data/firelens/"
dataLogDriverSocketPath := "/socket/fluent.sock"
socketPathPrefix := "unix://"
networkModeBridge := "bridge"
networkModeAWSVPC := "awsvpc"
bridgeIPAddr := "bridgeIP"
envVarBridgeMode := "FLUENT_HOST=bridgeIP"
envVarPort := "FLUENT_PORT=24224"
envVarAWSVPCMode := "FLUENT_HOST=127.0.0.1"
eniIPv4Address := "10.0.0.2"
getTask := func(logDriverType string, networkMode string) *apitask.Task {
rawHostConfigInput := dockercontainer.HostConfig{
LogConfig: dockercontainer.LogConfig{
Type: logDriverType,
Config: map[string]string{
"key1": "value1",
"key2": "value2",
},
},
}
rawHostConfig, err := json.Marshal(&rawHostConfigInput)
require.NoError(t, err)
return &apitask.Task{
Arn: taskARN,
Version: taskVersion,
Family: taskFamily,
Containers: []*apicontainer.Container{
{
Name: taskName,
DockerConfig: apicontainer.DockerConfig{
HostConfig: func() *string {
s := string(rawHostConfig)
return &s
}(),
},
NetworkModeUnsafe: networkMode,
},
{
Name: "test-container",
FirelensConfig: &apicontainer.FirelensConfig{
Type: "fluentd",
},
NetworkModeUnsafe: networkMode,
NetworkSettingsUnsafe: &types.NetworkSettings{
DefaultNetworkSettings: types.DefaultNetworkSettings{
IPAddress: bridgeIPAddr,
},
},
},
},
}
}
getTaskWithENI := func(logDriverType string, networkMode string) *apitask.Task {
rawHostConfigInput := dockercontainer.HostConfig{
LogConfig: dockercontainer.LogConfig{
Type: logDriverType,
Config: map[string]string{
"key1": "value1",
"key2": "value2",
},
},
}
rawHostConfig, err := json.Marshal(&rawHostConfigInput)
require.NoError(t, err)
return &apitask.Task{
Arn: taskARN,
Version: taskVersion,
Family: taskFamily,
ENIs: []*apieni.ENI{
{
IPV4Addresses: []*apieni.ENIIPV4Address{
{
Address: eniIPv4Address,
},
},
},
},
Containers: []*apicontainer.Container{
{
Name: taskName,
DockerConfig: apicontainer.DockerConfig{
HostConfig: func() *string {
s := string(rawHostConfig)
return &s
}(),
},
NetworkModeUnsafe: networkMode,
},
{
Name: "test-container",
FirelensConfig: &apicontainer.FirelensConfig{
Type: "fluentd",
},
NetworkModeUnsafe: networkMode,
NetworkSettingsUnsafe: &types.NetworkSettings{
DefaultNetworkSettings: types.DefaultNetworkSettings{
IPAddress: bridgeIPAddr,
},
},
},
},
}
}
testCases := []struct {
name string
task *apitask.Task
expectedLogConfigType string
expectedLogConfigTag string
expectedLogConfigFluentAddress string
expectedFluentdAsyncConnect string
expectedSubSecondPrecision string
expectedIPAddress string
expectedPort string
}{
{
name: "test container that uses firelens log driver with default mode",
task: getTask(logDriverTypeFirelens, ""),
expectedLogConfigType: logDriverTypeFluentd,
expectedLogConfigTag: taskName + "-firelens-" + taskID,
expectedFluentdAsyncConnect: strconv.FormatBool(true),
expectedSubSecondPrecision: strconv.FormatBool(true),
expectedLogConfigFluentAddress: socketPathPrefix + filepath.Join(defaultConfig.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath),
expectedIPAddress: envVarBridgeMode,
expectedPort: envVarPort,
},
{
name: "test container that uses firelens log driver with bridge mode",
task: getTask(logDriverTypeFirelens, networkModeBridge),
expectedLogConfigType: logDriverTypeFluentd,
expectedLogConfigTag: taskName + "-firelens-" + taskID,
expectedFluentdAsyncConnect: strconv.FormatBool(true),
expectedSubSecondPrecision: strconv.FormatBool(true),
expectedLogConfigFluentAddress: socketPathPrefix + filepath.Join(defaultConfig.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath),
expectedIPAddress: envVarBridgeMode,
expectedPort: envVarPort,
},
{
name: "test container that uses firelens log driver with awsvpc mode",
task: getTaskWithENI(logDriverTypeFirelens, networkModeAWSVPC),
expectedLogConfigType: logDriverTypeFluentd,
expectedLogConfigTag: taskName + "-firelens-" + taskID,
expectedFluentdAsyncConnect: strconv.FormatBool(true),
expectedSubSecondPrecision: strconv.FormatBool(true),
expectedLogConfigFluentAddress: socketPathPrefix + filepath.Join(defaultConfig.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath),
expectedIPAddress: envVarAWSVPCMode,
expectedPort: envVarPort,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx context.Context,
config *dockercontainer.Config,
hostConfig *dockercontainer.HostConfig,
name string,
timeout time.Duration) {
assert.Equal(t, tc.expectedLogConfigType, hostConfig.LogConfig.Type)
assert.Equal(t, tc.expectedLogConfigTag, hostConfig.LogConfig.Config["tag"])
assert.Equal(t, tc.expectedLogConfigFluentAddress, hostConfig.LogConfig.Config["fluentd-address"])
assert.Equal(t, tc.expectedFluentdAsyncConnect, hostConfig.LogConfig.Config["fluentd-async-connect"])
assert.Equal(t, tc.expectedSubSecondPrecision, hostConfig.LogConfig.Config["fluentd-sub-second-precision"])
assert.Contains(t, config.Env, tc.expectedIPAddress)
assert.Contains(t, config.Env, tc.expectedPort)
})
ret := taskEngine.(*DockerTaskEngine).createContainer(tc.task, tc.task.Containers[0])
assert.NoError(t, ret.Error)
})
}
}
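// TestCreateFirelensContainerSetFluentdUID tests that a fluentd firelens container is
// created with FLUENT_UID=0 in its environment.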
func TestCreateFirelensContainerSetFluentdUID(t *testing.T) {
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/test-task-arn",
Containers: []*apicontainer.Container{
{
Name: "test-container",
FirelensConfig: &apicontainer.FirelensConfig{
Type: "fluentd",
},
},
},
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx context.Context,
config *dockercontainer.Config,
hostConfig *dockercontainer.HostConfig,
name string,
timeout time.Duration) {
assert.Contains(t, config.Env, "FLUENT_UID=0")
})
ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0])
assert.NoError(t, ret.Error)
}
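// TestGetBridgeIP tests how getContainerHostIP picks the container host IP from the
// docker network settings.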
func TestGetBridgeIP(t *testing.T) {
networkDefaultIP := "defaultIP"
getNetwork := func(defaultIP string, bridgeIP string, networkMode string) *types.NetworkSettings {
endPoint := network.EndpointSettings{
IPAddress: bridgeIP,
}
return &types.NetworkSettings{
DefaultNetworkSettings: types.DefaultNetworkSettings{
IPAddress: defaultIP,
},
Networks: map[string]*network.EndpointSettings{
networkMode: &endPoint,
},
}
}
testCases := []struct {
defaultIP string
bridgeIP string
networkMode string
expectedOk bool
expectedIPAddress string
}{
{
defaultIP: networkDefaultIP,
bridgeIP: networkBridgeIP,
networkMode: networkModeBridge,
expectedOk: true,
expectedIPAddress: networkDefaultIP,
},
{
defaultIP: "",
bridgeIP: networkBridgeIP,
networkMode: networkModeBridge,
expectedOk: true,
expectedIPAddress: networkBridgeIP,
},
{
defaultIP: "",
bridgeIP: networkBridgeIP,
networkMode: networkModeAWSVPC,
expectedOk: false,
expectedIPAddress: "",
},
{
defaultIP: "",
bridgeIP: "",
networkMode: networkModeBridge,
expectedOk: false,
expectedIPAddress: "",
},
}
for _, tc := range testCases {
IPAddress, ok := getContainerHostIP(getNetwork(tc.defaultIP, tc.bridgeIP, tc.networkMode))
assert.Equal(t, tc.expectedOk, ok)
assert.Equal(t, tc.expectedIPAddress, IPAddress)
}
}
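// TestStartFirelensContainerRetryForContainerIP tests that, after starting a
// bridge-mode firelens container, the engine retries InspectContainer until the
// container's network settings report an IP address.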
func TestStartFirelensContainerRetryForContainerIP(t *testing.T) {
dockerMetaDataWithoutNetworkSettings := dockerapi.DockerContainerMetadata{
DockerID: containerID,
Volumes: []types.MountPoint{
{
Name: "volume",
Source: "/src/vol",
Destination: "/vol",
},
},
}
rawHostConfigInput := dockercontainer.HostConfig{
LogConfig: dockercontainer.LogConfig{
Type: "fluentd",
Config: map[string]string{
"key1": "value1",
"key2": "value2",
},
},
}
jsonBaseWithoutNetwork := &types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: containerID,
State: &types.ContainerState{Pid: containerPid},
HostConfig: &dockercontainer.HostConfig{
NetworkMode: containerNetworkMode,
},
},
}
jsonBaseWithNetwork := &types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: containerID,
State: &types.ContainerState{Pid: containerPid},
HostConfig: &dockercontainer.HostConfig{
NetworkMode: containerNetworkMode,
},
},
NetworkSettings: &types.NetworkSettings{
DefaultNetworkSettings: types.DefaultNetworkSettings{
IPAddress: networkBridgeIP,
},
Networks: map[string]*network.EndpointSettings{
apitask.BridgeNetworkMode: &network.EndpointSettings{
IPAddress: networkBridgeIP,
},
},
},
}
rawHostConfig, err := json.Marshal(&rawHostConfigInput)
require.NoError(t, err)
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/task-id",
Version: "1",
Family: "logSenderTaskFamily",
Containers: []*apicontainer.Container{
{
Name: "logSenderTask",
DockerConfig: apicontainer.DockerConfig{
HostConfig: func() *string {
s := string(rawHostConfig)
return &s
}(),
},
NetworkModeUnsafe: apitask.BridgeNetworkMode,
},
{
Name: "test-container",
FirelensConfig: &apicontainer.FirelensConfig{
Type: "fluentd",
},
NetworkModeUnsafe: apitask.BridgeNetworkMode,
},
},
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
taskEngine.(*DockerTaskEngine).state.AddTask(testTask)
taskEngine.(*DockerTaskEngine).state.AddContainer(&apicontainer.DockerContainer{
Container: testTask.Containers[1],
DockerName: dockerContainerName,
DockerID: containerID,
}, testTask)
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
client.EXPECT().StartContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerMetaDataWithoutNetworkSettings).AnyTimes()
gomock.InOrder(
client.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).
Return(jsonBaseWithoutNetwork, nil),
client.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).
Return(jsonBaseWithoutNetwork, nil),
client.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).
Return(jsonBaseWithNetwork, nil),
)
ret := taskEngine.(*DockerTaskEngine).startContainer(testTask, testTask.Containers[1])
assert.NoError(t, ret.Error)
assert.Equal(t, jsonBaseWithNetwork.NetworkSettings, ret.NetworkSettings)
}
func TestStartExecAgent(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
nowTime := time.Now()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
dockerTaskEngine.execCmdMgr = execCmdMgr
defer ctrl.Finish()
const (
testContainerId = "123"
)
testCases := []struct {
execCommandAgentEnabled bool
expectContainerEvent bool
execAgentStatus apicontainerstatus.ManagedAgentStatus
execAgentInitFailed bool
execAgentStartError error
}{
{
execCommandAgentEnabled: false,
expectContainerEvent: false,
execAgentStatus: apicontainerstatus.ManagedAgentStopped,
execAgentInitFailed: false,
},
{
execCommandAgentEnabled: true,
expectContainerEvent: true,
execAgentStatus: apicontainerstatus.ManagedAgentRunning,
execAgentInitFailed: false,
},
{
execCommandAgentEnabled: true,
expectContainerEvent: true,
execAgentStatus: apicontainerstatus.ManagedAgentStopped,
execAgentStartError: errors.New("mock error"),
},
{
execCommandAgentEnabled: true,
expectContainerEvent: false,
execAgentStatus: apicontainerstatus.ManagedAgentStopped,
execAgentInitFailed: true,
},
}
for _, tc := range testCases {
stateChangeEvents := taskEngine.StateChangeEvents()
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/test-task-arn",
Containers: []*apicontainer.Container{
{
Name: "test-container",
RuntimeID: testContainerId,
KnownStatusUnsafe: apicontainerstatus.ContainerStopped,
},
},
}
if tc.execCommandAgentEnabled {
enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{
LastStartedAt: nowTime,
Status: tc.execAgentStatus,
InitFailed: tc.execAgentInitFailed,
})
}
mTestTask := &managedTask{
Task: testTask,
engine: dockerTaskEngine,
ctx: ctx,
stateChangeEvents: stateChangeEvents,
}
dockerTaskEngine.state.AddTask(testTask)
dockerTaskEngine.managedTasks[testTask.Arn] = mTestTask
// check for expected taskEvent in stateChangeEvents
waitDone := make(chan struct{})
var reason string
if tc.expectContainerEvent {
reason = "ExecuteCommandAgent started"
}
if tc.execAgentStartError != nil {
reason = tc.execAgentStartError.Error()
}
expectedManagedAgent := apicontainer.ManagedAgent{
ManagedAgentState: apicontainer.ManagedAgentState{
Status: tc.execAgentStatus,
InitFailed: tc.execAgentInitFailed,
Reason: reason,
},
}
go checkManagedAgentEvents(t, tc.expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone)
client.EXPECT().StartContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(
dockerapi.DockerContainerMetadata{DockerID: containerID}).AnyTimes()
if tc.execCommandAgentEnabled {
execCmdMgr.EXPECT().InitializeContainer(gomock.Any(), testTask.Containers[0], gomock.Any()).AnyTimes()
if !tc.execAgentInitFailed {
execCmdMgr.EXPECT().StartAgent(gomock.Any(), client, testTask, testTask.Containers[0], testContainerId).
Return(tc.execAgentStartError).
AnyTimes()
}
}
ret := taskEngine.(*DockerTaskEngine).startContainer(testTask, testTask.Containers[0])
assert.NoError(t, ret.Error)
timeout := false
select {
case <-waitDone:
case <-time.After(time.Second):
timeout = true
}
assert.False(t, timeout)
}
}
func TestMonitorExecAgentRunning(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
dockerTaskEngine.execCmdMgr = execCmdMgr
dockerTaskEngine.monitorExecAgentsInterval = 2 * time.Millisecond
defer ctrl.Finish()
const (
testContainerId = "123"
)
testCases := []struct {
containerStatus apicontainerstatus.ContainerStatus
execCommandAgentState apicontainer.ManagedAgentState
execAgentStatus apicontainerstatus.ManagedAgentStatus
restartStatus execcmd.RestartStatus
simulateBadContainerId bool
expectedRestartInUnhealthyCall bool
expectContainerEvent bool
}{
{
containerStatus: apicontainerstatus.ContainerStopped,
execAgentStatus: apicontainerstatus.ManagedAgentStopped,
restartStatus: execcmd.NotRestarted,
expectContainerEvent: false,
},
{
containerStatus: apicontainerstatus.ContainerRunning,
simulateBadContainerId: true,
execAgentStatus: apicontainerstatus.ManagedAgentStopped,
restartStatus: execcmd.NotRestarted,
expectContainerEvent: false,
},
{
containerStatus: apicontainerstatus.ContainerRunning,
execAgentStatus: apicontainerstatus.ManagedAgentRunning,
restartStatus: execcmd.NotRestarted,
expectContainerEvent: false,
},
{
containerStatus: apicontainerstatus.ContainerRunning,
execAgentStatus: apicontainerstatus.ManagedAgentRunning,
restartStatus: execcmd.Restarted,
expectContainerEvent: true,
},
}
for _, tc := range testCases {
nowTime := time.Now()
stateChangeEvents := taskEngine.StateChangeEvents()
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/test-task-arn",
Containers: []*apicontainer.Container{
{
Name: "test-container",
RuntimeID: testContainerId,
KnownStatusUnsafe: tc.containerStatus,
},
},
}
enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{
LastStartedAt: nowTime,
Status: tc.execAgentStatus,
})
mTestTask := &managedTask{
Task: testTask,
engine: dockerTaskEngine,
ctx: ctx,
stateChangeEvents: stateChangeEvents,
}
dockerTaskEngine.state.AddTask(testTask)
if tc.simulateBadContainerId {
testTask.Containers[0].RuntimeID = ""
}
if tc.containerStatus == apicontainerstatus.ContainerRunning && !tc.simulateBadContainerId {
execCmdMgr.EXPECT().RestartAgentIfStopped(dockerTaskEngine.ctx, dockerTaskEngine.client, testTask,
testTask.Containers[0], testContainerId).
Return(tc.restartStatus, nil).
Times(1)
}
// check for expected containerEvent in stateChangeEvents
waitDone := make(chan struct{})
expectedManagedAgent := apicontainer.ManagedAgent{
ManagedAgentState: apicontainer.ManagedAgentState{
Status: apicontainerstatus.ManagedAgentRunning,
Reason: "ExecuteCommandAgent restarted",
},
}
// only if we expect restart will we also expect a managed agent container event
go checkManagedAgentEvents(t, tc.expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone)
taskEngine.(*DockerTaskEngine).monitorExecAgentRunning(ctx, mTestTask, testTask.Containers[0])
timeout := false
select {
case <-waitDone:
case <-time.After(time.Second):
timeout = true
}
assert.False(t, timeout)
}
}
func TestMonitorExecAgentProcesses(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
nowTime := time.Now()
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
dockerTaskEngine.execCmdMgr = execCmdMgr
dockerTaskEngine.monitorExecAgentsInterval = 2 * time.Millisecond
defer ctrl.Finish()
testCases := []struct {
execAgentStatus apicontainerstatus.ManagedAgentStatus
expectContainerEvent bool
execAgentInitfailed bool
}{
{
execAgentStatus: apicontainerstatus.ManagedAgentRunning,
expectContainerEvent: true,
execAgentInitfailed: false,
},
{
execAgentStatus: apicontainerstatus.ManagedAgentStopped,
expectContainerEvent: false,
execAgentInitfailed: true,
},
}
for _, tc := range testCases {
stateChangeEvents := taskEngine.StateChangeEvents()
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/test-task-arn",
Containers: []*apicontainer.Container{
{
Name: "test-container",
RuntimeID: "runtime-ID",
KnownStatusUnsafe: apicontainerstatus.ContainerRunning,
},
},
KnownStatusUnsafe: apitaskstatus.TaskRunning,
}
enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{
LastStartedAt: nowTime,
Status: apicontainerstatus.ManagedAgentRunning,
InitFailed: tc.execAgentInitfailed,
})
mTestTask := &managedTask{
Task: testTask,
engine: dockerTaskEngine,
ctx: ctx,
stateChangeEvents: stateChangeEvents,
}
dockerTaskEngine.state.AddTask(testTask)
dockerTaskEngine.managedTasks[testTask.Arn] = mTestTask
restartCtx, restartCancel := context.WithTimeout(context.Background(), time.Second)
defer restartCancel()
// return execcmd.Restarted to ensure container event emission
if !tc.execAgentInitfailed {
execCmdMgr.EXPECT().RestartAgentIfStopped(dockerTaskEngine.ctx, dockerTaskEngine.client, testTask, testTask.Containers[0], testTask.Containers[0].RuntimeID).
DoAndReturn(
func(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) (execcmd.RestartStatus, error) {
defer restartCancel()
return execcmd.Restarted, nil
}).
Times(1)
}
expectContainerEvent := tc.expectContainerEvent
waitDone := make(chan struct{})
expectedManagedAgent := apicontainer.ManagedAgent{
Name: execcmd.ExecuteCommandAgentName,
ManagedAgentState: apicontainer.ManagedAgentState{
Status: tc.execAgentStatus,
Reason: "ExecuteCommandAgent restarted",
LastStartedAt: nowTime,
},
}
go checkManagedAgentEvents(t, expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone)
dockerTaskEngine.monitorExecAgentProcesses(dockerTaskEngine.ctx)
<-restartCtx.Done()
time.Sleep(5 * time.Millisecond)
timeout := false
select {
case <-waitDone:
case <-time.After(time.Second):
timeout = true
}
assert.False(t, timeout)
}
}
func TestMonitorExecAgentProcessExecDisabled(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
dockerTaskEngine.execCmdMgr = execCmdMgr
defer ctrl.Finish()
tt := []struct {
execCommandAgentEnabled bool
taskStatus apitaskstatus.TaskStatus
}{
{
execCommandAgentEnabled: false,
taskStatus: apitaskstatus.TaskRunning,
},
{
execCommandAgentEnabled: true,
taskStatus: apitaskstatus.TaskStopped,
},
}
for _, test := range tt {
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/test-task-arn",
Containers: []*apicontainer.Container{
{
Name: "test-container",
RuntimeID: "runtime-ID",
KnownStatusUnsafe: apicontainerstatus.ContainerRunning,
},
},
KnownStatusUnsafe: test.taskStatus,
}
if test.execCommandAgentEnabled {
enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{})
}
dockerTaskEngine.state.AddTask(testTask)
dockerTaskEngine.managedTasks[testTask.Arn] = &managedTask{Task: testTask}
dockerTaskEngine.monitorExecAgentProcesses(ctx)
// absence of top container expect call indicates it shouldn't have been called
time.Sleep(10 * time.Millisecond)
}
}
func TestMonitorExecAgentsMultipleContainers(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
dockerTaskEngine := taskEngine.(*DockerTaskEngine)
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
dockerTaskEngine.execCmdMgr = execCmdMgr
dockerTaskEngine.monitorExecAgentsInterval = 2 * time.Millisecond
defer ctrl.Finish()
stateChangeEvents := taskEngine.StateChangeEvents()
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/test-task-arn",
Containers: []*apicontainer.Container{
{
Name: "test-container1",
RuntimeID: "runtime-ID1",
KnownStatusUnsafe: apicontainerstatus.ContainerRunning,
},
{
Name: "test-container2",
RuntimeID: "runtime-ID2",
KnownStatusUnsafe: apicontainerstatus.ContainerRunning,
},
},
KnownStatusUnsafe: apitaskstatus.TaskRunning,
}
for _, c := range testTask.Containers {
enableExecCommandAgentForContainer(c, apicontainer.ManagedAgentState{})
}
mTestTask := &managedTask{
Task: testTask,
engine: dockerTaskEngine,
ctx: ctx,
stateChangeEvents: stateChangeEvents,
}
dockerTaskEngine.state.AddTask(testTask)
dockerTaskEngine.managedTasks[testTask.Arn] = mTestTask
wg := &sync.WaitGroup{}
numContainers := len(testTask.Containers)
wg.Add(numContainers)
for i := 0; i < numContainers; i++ {
execCmdMgr.EXPECT().RestartAgentIfStopped(dockerTaskEngine.ctx, dockerTaskEngine.client, testTask, testTask.Containers[i], testTask.Containers[i].RuntimeID).
DoAndReturn(
func(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) (execcmd.RestartStatus, error) {
defer wg.Done()
defer discardEvents(stateChangeEvents)()
return execcmd.NotRestarted, nil
}).
Times(1)
}
taskEngine.(*DockerTaskEngine).monitorExecAgentProcesses(dockerTaskEngine.ctx)
waitDone := make(chan struct{})
go func() {
wg.Wait()
close(waitDone)
}()
timeout := false
select {
case <-waitDone:
case <-time.After(time.Second):
timeout = true
}
assert.False(t, timeout)
}
func TestPeriodicExecAgentsMonitoring(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig)
defer ctrl.Finish()
execAgentPID := "1234"
resp := &dockercontainer.ContainerTopOKBody{
Processes: [][]string{{"root", execAgentPID}},
}
testTask := &apitask.Task{
Arn: "arn:aws:ecs:region:account-id:task/test-task-arn",
Containers: []*apicontainer.Container{
{
Name: "test-container",
RuntimeID: "runtime-ID",
},
},
}
enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{
Metadata: map[string]interface{}{
"PID": execAgentPID,
}})
taskEngine.(*DockerTaskEngine).monitorExecAgentsInterval = 2 * time.Millisecond
taskEngine.(*DockerTaskEngine).state.AddTask(testTask)
taskEngine.(*DockerTaskEngine).managedTasks[testTask.Arn] = &managedTask{Task: testTask}
topCtx, topCancel := context.WithTimeout(context.Background(), time.Second)
defer topCancel()
client.EXPECT().TopContainer(gomock.Any(), testTask.Containers[0].RuntimeID, 30*time.Second, execAgentPID).DoAndReturn(
func(ctx context.Context, containerID string, timeout time.Duration, psArgs ...string) (*dockercontainer.ContainerTopOKBody, error) {
defer topCancel()
return resp, nil
}).AnyTimes()
go taskEngine.(*DockerTaskEngine).startPeriodicExecAgentsMonitoring(ctx)
<-topCtx.Done()
time.Sleep(5 * time.Millisecond)
execCmdAgent, ok := testTask.Containers[0].GetManagedAgentByName(execcmd.ExecuteCommandAgentName)
assert.True(t, ok)
execMD := execcmd.MapToAgentMetadata(execCmdAgent.Metadata)
assert.Equal(t, execAgentPID, execMD.PID)
}
func TestCreateContainerWithExecAgent(t *testing.T) {
testcases := []struct {
name string
error error
expectContainerEvent bool
execAgentInitFailed bool
execAgentStatus apicontainerstatus.ManagedAgentStatus
}{
{
name: "ExecAgent config mount success",
error: nil,
expectContainerEvent: false,
execAgentInitFailed: false,
},
{
name: "ExecAgent config mount Error",
error: errors.New("mount error"),
expectContainerEvent: true,
execAgentInitFailed: true,
execAgentStatus: apicontainerstatus.ManagedAgentStopped,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ctrl, client, _, engine, _, _, _ := mocks(t, ctx, &config.Config{})
defer ctrl.Finish()
taskEngine, _ := engine.(*DockerTaskEngine)
stateChangeEvents := engine.StateChangeEvents()
execCmdMgr := mock_execcmdagent.NewMockManager(ctrl)
taskEngine.execCmdMgr = execCmdMgr
sleepTask := testdata.LoadTask("sleep5")
sleepContainer, _ := sleepTask.ContainerByName("sleep5")
enableExecCommandAgentForContainer(sleepContainer, apicontainer.ManagedAgentState{
Status: tc.execAgentStatus,
InitFailed: tc.execAgentInitFailed,
})
mTestTask := &managedTask{
Task: sleepTask,
engine: taskEngine,
ctx: ctx,
stateChangeEvents: stateChangeEvents,
}
taskEngine.state.AddTask(sleepTask)
taskEngine.managedTasks[sleepTask.Arn] = mTestTask
waitDone := make(chan struct{})
var reason string
if tc.error != nil {
reason = fmt.Sprintf("ExecuteCommandAgent Initialization failed - %v", tc.error)
}
expectedManagedAgent := apicontainer.ManagedAgent{
ManagedAgentState: apicontainer.ManagedAgentState{
Status: apicontainerstatus.ManagedAgentStopped,
Reason: reason,
},
}
go checkManagedAgentEvents(t, tc.expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone)
execCmdMgr.EXPECT().InitializeContainer(gomock.Any(), sleepContainer, gomock.Any()).Return(tc.error)
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil)
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
metadata := taskEngine.createContainer(sleepTask, sleepContainer)
assert.NoError(t, metadata.Error)
timeout := false
select {
case <-waitDone:
case <-time.After(time.Second):
timeout = true
}
assert.False(t, timeout)
})
}
}
| 1 | 26,055 | hmm.. this is Hacky, the test will succeed in Windows even if you remove the calls to these, right? | aws-amazon-ecs-agent | go |
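The review message above argues that the test stays green even when the guarded calls are removed. A minimal Go sketch of the general remedy — recording the call on a hand-rolled fake and asserting the count so a dropped call fails the test on every platform — with all names invented for illustration (this is not code from the patch under review):

// Hypothetical illustration of the reviewer's concern; not from the aws-amazon-ecs-agent patch.
package example

import "testing"

// logSink stands in for whatever dependency the production code is expected to call.
type logSink interface {
    Info(msg string)
}

// recordingSink counts calls so the test can assert the call actually happened.
type recordingSink struct{ calls int }

func (r *recordingSink) Info(msg string) { r.calls++ }

// handle is the code under test; if it stops calling Info, TestHandleLogs fails.
func handle(s logSink) {
    s.Info("handling request")
}

func TestHandleLogs(t *testing.T) {
    sink := &recordingSink{}
    handle(sink)
    if sink.calls != 1 {
        t.Fatalf("expected exactly one Info call, got %d", sink.calls)
    }
}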
@@ -1,4 +1,9 @@
-test_name 'Install beaker and checkout branch if necessary' do
+ruby_version, ruby_source = ENV['RUBY_VER'], "job parameter"
+unless ruby_version
+ ruby_version = "2.3.1"
+ ruby_source = "default"
+end
+test_name 'Install and configure Ruby #{ruby_version} (from #{ruby_source}) on the SUT' do
step 'Download the beaker git repo' do
on default, 'git clone https://github.com/puppetlabs/beaker.git /opt/beaker/' | 1 | test_name 'Install beaker and checkout branch if necessary' do
step 'Download the beaker git repo' do
on default, 'git clone https://github.com/puppetlabs/beaker.git /opt/beaker/'
end
step 'Detect if checking out branch for testing and checkout' do
if ENV['BEAKER_PULL_ID']
logger.notify "Pull Request detected, checking out PR branch"
on(default, 'cd /opt/beaker/;git -c core.askpass=true fetch --tags --progress https://github.com/puppetlabs/beaker.git +refs/pull/*:refs/remotes/origin/pr/*')
on(default, "cd /opt/beaker/;git merge origin/pr/#{ENV['BEAKER_PULL_ID']}/head --no-edit")
else
logger.notify 'No PR branch detected, building from master'
end
end
step 'Build the gem and install it on the local system' do
build_output = on(default, 'cd /opt/beaker/;gem build beaker.gemspec').stdout
version = build_output.match(/^ File: (.+)$/)[1]
on(default, "cd /opt/beaker/;gem install #{version} --no-rdoc --no-ri; gem install beaker-vmpooler")
end
end
| 1 | 16,176 | should this block be in the file `05_install_ruby.rb`? | voxpupuli-beaker | rb |
@@ -167,7 +167,9 @@ func withDisconnectedClient(t *testing.T, recorder *Recorder, f func(raw.Client)
Unary: http.NewOutbound("http://localhost:65535"),
},
},
- Filter: recorder,
+ Filters: yarpc.Filters{
+ UnaryFilter: recorder,
+ },
})
require.NoError(t, clientDisp.Start())
defer clientDisp.Stop() | 1 | package recorder
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"testing"
"time"
"go.uber.org/yarpc"
"go.uber.org/yarpc/encoding/raw"
"go.uber.org/yarpc/transport"
"go.uber.org/yarpc/transport/http"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSanitizeFilename(t *testing.T) {
assert.EqualValues(t, sanitizeFilename(`hello`), `hello`)
assert.EqualValues(t, sanitizeFilename(`h/e\l?l%o*`), `h_e_l_l_o_`)
assert.EqualValues(t, sanitizeFilename(`:h|e"l<l>o.`), `_h_e_l_l_o.`)
assert.EqualValues(t, sanitizeFilename(`10€|çí¹`), `10__çí¹`)
assert.EqualValues(t, sanitizeFilename("hel\x00lo"), `hel_lo`)
}
type randomGenerator struct {
randsrc *rand.Rand
}
func newRandomGenerator(seed int64) randomGenerator {
return randomGenerator{
randsrc: rand.New(rand.NewSource(seed)),
}
}
// Atom returns an ASCII string.
func (r *randomGenerator) Atom() string {
length := 3 + r.randsrc.Intn(13)
atom := make([]byte, length)
for i := 0; i < length; i++ {
letter := r.randsrc.Intn(2 * 26)
if letter < 26 {
atom[i] = 'A' + byte(letter)
} else {
atom[i] = 'a' + byte(letter-26)
}
}
return string(atom)
}
// Headers returns a new randomized header.
func (r *randomGenerator) Headers() transport.Headers {
headers := transport.NewHeaders()
size := 2 + r.randsrc.Intn(6)
for i := 0; i < size; i++ {
headers = headers.With(r.Atom(), r.Atom())
}
return headers
}
// Request returns a new randomized request.
func (r *randomGenerator) Request() transport.Request {
bodyData := []byte(r.Atom())
return transport.Request{
Caller: r.Atom(),
Service: r.Atom(),
Encoding: transport.Encoding(r.Atom()),
Procedure: r.Atom(),
Headers: r.Headers(),
ShardKey: r.Atom(),
RoutingKey: r.Atom(),
RoutingDelegate: r.Atom(),
Body: ioutil.NopCloser(bytes.NewReader(bodyData)),
}
}
func TestHash(t *testing.T) {
rgen := newRandomGenerator(42)
request := rgen.Request()
recorder := NewRecorder(t)
requestRecord := recorder.requestToRequestRecord(&request)
referenceHash := recorder.hashRequestRecord(&requestRecord)
require.Equal(t, "7195d5a712201d2a", referenceHash)
// Caller
r := request
r.Caller = rgen.Atom()
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// Service
r = request
r.Service = rgen.Atom()
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// Encoding
r = request
r.Encoding = transport.Encoding(rgen.Atom())
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// Procedure
r = request
r.Procedure = rgen.Atom()
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// Headers
r = request
r.Headers = rgen.Headers()
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// ShardKey
r = request
r.ShardKey = rgen.Atom()
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// RoutingKey
r = request
r.RoutingKey = rgen.Atom()
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// RoutingDelegate
r = request
r.RoutingDelegate = rgen.Atom()
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
// Body
r = request
request.Body = ioutil.NopCloser(bytes.NewReader([]byte(rgen.Atom())))
requestRecord = recorder.requestToRequestRecord(&r)
assert.NotEqual(t, recorder.hashRequestRecord(&requestRecord), referenceHash)
}
var testingTMockFatal = struct{}{}
type testingTMock struct {
*testing.T
fatalCount int
}
func (t *testingTMock) Fatal(args ...interface{}) {
t.Logf("counting fatal: %s", args)
t.fatalCount++
panic(testingTMockFatal)
}
func withDisconnectedClient(t *testing.T, recorder *Recorder, f func(raw.Client)) {
clientDisp := yarpc.NewDispatcher(yarpc.Config{
Name: "client",
Outbounds: yarpc.Outbounds{
"server": {
Unary: http.NewOutbound("http://localhost:65535"),
},
},
Filter: recorder,
})
require.NoError(t, clientDisp.Start())
defer clientDisp.Stop()
client := raw.New(clientDisp.Channel("server"))
f(client)
}
func withConnectedClient(t *testing.T, recorder *Recorder, f func(raw.Client)) {
serverHTTP := http.NewInbound(":0")
serverDisp := yarpc.NewDispatcher(yarpc.Config{
Name: "server",
Inbounds: yarpc.Inbounds{serverHTTP},
})
serverDisp.Register(raw.Procedure("hello",
func(ctx context.Context, reqMeta yarpc.ReqMeta, body []byte) ([]byte, yarpc.ResMeta, error) {
return append(body, []byte(", World")...), nil, nil
}))
require.NoError(t, serverDisp.Start())
defer serverDisp.Stop()
clientDisp := yarpc.NewDispatcher(yarpc.Config{
Name: "client",
Outbounds: yarpc.Outbounds{
"server": {
Unary: http.NewOutbound(fmt.Sprintf("http://%s", serverHTTP.Addr().String())),
},
},
Filter: recorder,
})
require.NoError(t, clientDisp.Start())
defer clientDisp.Stop()
client := raw.New(clientDisp.Channel("server"))
f(client)
}
func TestEndToEnd(t *testing.T) {
tMock := testingTMock{t, 0}
dir, err := ioutil.TempDir("", "yarpcgorecorder")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // clean up
// First we double check that our cache is empty.
recorder := NewRecorder(&tMock, RecordMode(Replay), RecordsPath(dir))
withDisconnectedClient(t, recorder, func(client raw.Client) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
require.Panics(t, func() {
client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
})
assert.Equal(t, tMock.fatalCount, 1)
})
// Now let's record our call.
recorder = NewRecorder(&tMock, RecordMode(Overwrite), RecordsPath(dir))
withConnectedClient(t, recorder, func(client raw.Client) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
rbody, _, err := client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
require.NoError(t, err)
assert.Equal(t, rbody, []byte("Hello, World"))
})
// Now replay the call.
recorder = NewRecorder(&tMock, RecordMode(Replay), RecordsPath(dir))
withDisconnectedClient(t, recorder, func(client raw.Client) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
rbody, _, err := client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
require.NoError(t, err)
assert.Equal(t, rbody, []byte("Hello, World"))
})
}
func TestEmptyReplay(t *testing.T) {
tMock := testingTMock{t, 0}
dir, err := ioutil.TempDir("", "yarpcgorecorder")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // clean up
recorder := NewRecorder(&tMock, RecordMode(Replay), RecordsPath(dir))
withDisconnectedClient(t, recorder, func(client raw.Client) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
require.Panics(t, func() {
client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
})
assert.Equal(t, tMock.fatalCount, 1)
})
}
const refRecordFilename = `server.hello.254fa3bab61fc27f.yaml`
const refRecordContent = `version: 1
request:
caller: client
service: server
procedure: hello
encoding: raw
headers: {}
shardkey: ""
routingkey: ""
routingdelegate: ""
body: SGVsbG8=
response:
headers: {}
body: SGVsbG8sIFdvcmxk
`
func TestRecording(t *testing.T) {
tMock := testingTMock{t, 0}
dir, err := ioutil.TempDir("", "yarpcgorecorder")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // clean up
recorder := NewRecorder(&tMock, RecordMode(Append), RecordsPath(dir))
withConnectedClient(t, recorder, func(client raw.Client) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
rbody, _, err := client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
require.NoError(t, err)
assert.Equal(t, []byte("Hello, World"), rbody)
})
recordPath := path.Join(dir, refRecordFilename)
_, err = os.Stat(recordPath)
require.NoError(t, err)
recordContent, err := ioutil.ReadFile(recordPath)
require.NoError(t, err)
assert.Equal(t, refRecordContent, string(recordContent))
}
func TestReplaying(t *testing.T) {
tMock := testingTMock{t, 0}
dir, err := ioutil.TempDir("", "yarpcgorecorder")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir) // clean up
recorder := NewRecorder(&tMock, RecordMode(Replay), RecordsPath(dir))
recordPath := path.Join(dir, refRecordFilename)
err = ioutil.WriteFile(recordPath, []byte(refRecordContent), 0444)
require.NoError(t, err)
withDisconnectedClient(t, recorder, func(client raw.Client) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
rbody, _, err := client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
require.NoError(t, err)
assert.Equal(t, rbody, []byte("Hello, World"))
})
}
| 1 | 11,367 | To match outbounds, let's just call this `Unary: recorder`, `Oneway: ...`. | yarpc-yarpc-go | go |
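The review message above is a naming suggestion: the outbound spec in the test reads `Unary: http.NewOutbound(...)`, so the filter struct would read `Unary: recorder` rather than `UnaryFilter: recorder`. A rough sketch of that shape; the type names here are stand-ins, not the actual yarpc-go API:

// Hypothetical sketch of the suggested naming symmetry between filters and outbounds.
package example

// UnaryFilter and OnewayFilter stand in for the real per-RPC-type filter interfaces.
type UnaryFilter interface{ applyUnary() }
type OnewayFilter interface{ applyOneway() }

// Filters groups filters by RPC type, reusing the field names of the outbound spec.
type Filters struct {
    Unary  UnaryFilter
    Oneway OnewayFilter
}

// config mirrors the shape of the dispatcher config used in the test file above.
type config struct {
    Name    string
    Filters Filters
}

// withRecorder shows how the test's wiring would read with the suggested naming.
func withRecorder(rec UnaryFilter) config {
    return config{
        Name:    "client",
        Filters: Filters{Unary: rec},
    }
}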
@@ -49,7 +49,7 @@ def stripControlChars(string):
def compactHash(string):
hash = md5()
- hash.update(string)
+ hash.update(string.encode('unicode_escape'))
return hash.hexdigest()
| 1 | """Copyright 2008 Orbitz WorldWide
Copyright 2011 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from graphite.logger import log
import time
try:
from hashlib import md5
except ImportError:
from md5 import md5
import bisect
def hashRequest(request):
# Normalize the request parameters so ensure we're deterministic
queryParams = ["%s=%s" % (key, '&'.join(values))
for (key,values) in request.GET.lists()
if not key.startswith('_')]
normalizedParams = ','.join( sorted(queryParams) ) or 'noParam'
myHash = stripControlChars(normalizedParams) #memcached doesn't like unprintable characters in its keys
return compactHash(myHash)
def hashData(targets, startTime, endTime):
targetsString = ','.join(targets)
startTimeString = startTime.strftime("%Y%m%d_%H%M")
endTimeString = endTime.strftime("%Y%m%d_%H%M")
myHash = targetsString + '@' + startTimeString + ':' + endTimeString
myHash = stripControlChars(myHash)
return compactHash(myHash)
def stripControlChars(string):
return filter(lambda char: ord(char) >= 33, string)
def compactHash(string):
hash = md5()
hash.update(string)
return hash.hexdigest()
class ConsistentHashRing:
def __init__(self, nodes, replica_count=100):
self.ring = []
self.ring_len = len(self.ring)
self.nodes = set()
self.nodes_len = len(self.nodes)
self.replica_count = replica_count
for node in nodes:
self.add_node(node)
def compute_ring_position(self, key):
big_hash = md5( str(key) ).hexdigest()
small_hash = int(big_hash[:4], 16)
return small_hash
def add_node(self, key):
self.nodes.add(key)
self.nodes_len = len(self.nodes)
for i in range(self.replica_count):
replica_key = "%s:%d" % (key, i)
position = self.compute_ring_position(replica_key)
entry = (position, key)
bisect.insort(self.ring, entry)
self.ring_len = len(self.ring)
def remove_node(self, key):
self.nodes.discard(key)
self.nodes_len = len(self.nodes)
self.ring = [entry for entry in self.ring if entry[1] != key]
self.ring_len = len(self.ring)
def get_node(self, key):
assert self.ring
position = self.compute_ring_position(key)
search_entry = (position, None)
index = bisect.bisect_left(self.ring, search_entry) % self.ring_len
entry = self.ring[index]
return entry[1]
def get_nodes(self, key):
nodes = []
position = self.compute_ring_position(key)
search_entry = (position, None)
index = bisect.bisect_left(self.ring, search_entry) % self.ring_len
last_index = (index - 1) % self.ring_len
nodes_len = len(nodes)
while nodes_len < self.nodes_len and index != last_index:
next_entry = self.ring[index]
(position, next_node) = next_entry
if next_node not in nodes:
nodes.append(next_node)
nodes_len += 1
index = (index + 1) % self.ring_len
return nodes
| 1 | 8,373 | `string.encode('utf-8')` is more common but I guess this is mostly cosmetic :) | graphite-project-graphite-web | py |
@@ -23,6 +23,6 @@ func NewLoggingHandler(handler http.Handler) LoggingHandler {
}
func (lh LoggingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- log.Info("Handling http request", "method", r.Method, "from", r.RemoteAddr, "uri", r.RequestURI)
+ log.Info("Handling http request", "method", r.Method, "from", r.RemoteAddr)
lh.h.ServeHTTP(w, r)
} | 1 | // Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package handlers
import "net/http"
type LoggingHandler struct{ h http.Handler }
// NewLoggingHandler creates a new LoggingHandler object.
func NewLoggingHandler(handler http.Handler) LoggingHandler {
return LoggingHandler{h: handler}
}
func (lh LoggingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log.Info("Handling http request", "method", r.Method, "from", r.RemoteAddr, "uri", r.RequestURI)
lh.h.ServeHTTP(w, r)
}
| 1 | 19,432 | Consider logging `r.Url.Path`? Either way, ship it! | aws-amazon-ecs-agent | go |
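The review message above proposes logging the request path. In the standard net/http package that field is `r.URL.Path`, which, unlike `r.RequestURI`, excludes the query string. A minimal sketch of the wrapper with that change, using the standard library logger in place of the agent's own logger:

// Sketch of the suggested change: log the path rather than the full request URI.
package example

import (
    "log"
    "net/http"
)

// loggingHandler mirrors the wrapper above but records the path instead of the full URI.
type loggingHandler struct{ h http.Handler }

func (lh loggingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // r.URL.Path drops the query string, so query parameters never reach the log line.
    log.Printf("handling http request method=%s from=%s path=%s", r.Method, r.RemoteAddr, r.URL.Path)
    lh.h.ServeHTTP(w, r)
}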
@@ -54,14 +54,16 @@ type Context struct {
// Group data are omitted because they are committed to in the
// transaction and its ID.
type Params struct {
- CurrSpecAddrs transactions.SpecialAddresses
- CurrProto protocol.ConsensusVersion
+ CurrSpecAddrs transactions.SpecialAddresses
+ CurrProto protocol.ConsensusVersion
+ MinTealVersion uint64
}
// PrepareContexts prepares verification contexts for a transaction
// group.
func PrepareContexts(group []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader) []Context {
ctxs := make([]Context, len(group))
+ minTealVersion := logic.ComputeMinTealVersion(group)
for i := range group {
spec := transactions.SpecialAddresses{
FeeSink: contextHdr.FeeSink, | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package verify
import (
"context"
"encoding/binary"
"errors"
"fmt"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/execpool"
"github.com/algorand/go-algorand/util/metrics"
)
var logicGoodTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_ledger_logic_ok", Description: "Total transaction scripts executed and accepted"})
var logicRejTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_ledger_logic_rej", Description: "Total transaction scripts executed and rejected"})
var logicErrTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_ledger_logic_err", Description: "Total transaction scripts executed and errored"})
// Context encapsulates the context needed to perform stateless checks
// on a signed transaction.
type Context struct {
Params
Group []transactions.SignedTxn
GroupIndex int
}
// Params is the set of parameters external to a transaction which
// stateless checks are performed against.
//
// For efficient caching, these parameters should either be constant
// or change slowly over time.
//
// Group data are omitted because they are committed to in the
// transaction and its ID.
type Params struct {
CurrSpecAddrs transactions.SpecialAddresses
CurrProto protocol.ConsensusVersion
}
// PrepareContexts prepares verification contexts for a transaction
// group.
func PrepareContexts(group []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader) []Context {
ctxs := make([]Context, len(group))
for i := range group {
spec := transactions.SpecialAddresses{
FeeSink: contextHdr.FeeSink,
RewardsPool: contextHdr.RewardsPool,
}
ctx := Context{
Params: Params{
CurrSpecAddrs: spec,
CurrProto: contextHdr.CurrentProtocol,
},
Group: group,
GroupIndex: i,
}
ctxs[i] = ctx
}
return ctxs
}
// TxnPool verifies that a SignedTxn has a good signature and that the underlying
// transaction is properly constructed.
// Note that this does not check whether a payset is valid against the ledger:
// a SignedTxn may be well-formed, but a payset might contain an overspend.
//
// This version of verify is performing the verification over the provided execution pool.
func TxnPool(s *transactions.SignedTxn, ctx Context, verificationPool execpool.BacklogPool) error {
proto, ok := config.Consensus[ctx.CurrProto]
if !ok {
return protocol.Error(ctx.CurrProto)
}
if err := s.Txn.WellFormed(ctx.CurrSpecAddrs, proto); err != nil {
return err
}
zeroAddress := basics.Address{}
if s.Txn.Src() == zeroAddress {
return errors.New("empty address")
}
if !proto.SupportRekeying && (s.AuthAddr != basics.Address{}) {
return errors.New("nonempty AuthAddr but rekeying not supported")
}
outCh := make(chan error, 1)
cx := asyncVerifyContext{s: s, outCh: outCh, ctx: &ctx}
verificationPool.EnqueueBacklog(context.Background(), stxnAsyncVerify, &cx, nil)
if err, hasErr := <-outCh; hasErr {
return err
}
return nil
}
// Txn verifies a SignedTxn as being signed and having no obviously inconsistent data.
// Block-assembly time checks of LogicSig and accounting rules may still block the txn.
func Txn(s *transactions.SignedTxn, ctx Context) error {
proto, ok := config.Consensus[ctx.CurrProto]
if !ok {
return protocol.Error(ctx.CurrProto)
}
if err := s.Txn.WellFormed(ctx.CurrSpecAddrs, proto); err != nil {
return err
}
zeroAddress := basics.Address{}
if s.Txn.Src() == zeroAddress {
return errors.New("empty address")
}
if !proto.SupportRekeying && (s.AuthAddr != basics.Address{}) {
return errors.New("nonempty AuthAddr but rekeying not supported")
}
return stxnVerifyCore(s, &ctx)
}
type asyncVerifyContext struct {
s *transactions.SignedTxn
outCh chan error
ctx *Context
}
func stxnAsyncVerify(arg interface{}) interface{} {
cx := arg.(*asyncVerifyContext)
err := stxnVerifyCore(cx.s, cx.ctx)
if err != nil {
cx.outCh <- err
} else {
close(cx.outCh)
}
return nil
}
func stxnVerifyCore(s *transactions.SignedTxn, ctx *Context) error {
numSigs := 0
hasSig := false
hasMsig := false
hasLogicSig := false
if s.Sig != (crypto.Signature{}) {
numSigs++
hasSig = true
}
if !s.Msig.Blank() {
numSigs++
hasMsig = true
}
if !s.Lsig.Blank() {
numSigs++
hasLogicSig = true
}
if numSigs == 0 {
return errors.New("signedtxn has no sig")
}
if numSigs > 1 {
return errors.New("signedtxn should only have one of Sig or Msig or LogicSig")
}
if hasSig {
if crypto.SignatureVerifier(s.Authorizer()).Verify(s.Txn, s.Sig) {
return nil
}
return errors.New("signature validation failed")
}
if hasMsig {
if ok, _ := crypto.MultisigVerify(s.Txn, crypto.Digest(s.Authorizer()), s.Msig); ok {
return nil
}
return errors.New("multisig validation failed")
}
if hasLogicSig {
return LogicSig(s, ctx)
}
return errors.New("has one mystery sig. WAT?")
}
// LogicSigSanityCheck checks that the signature is valid and that the program is basically well formed.
// It does not evaluate the logic.
func LogicSigSanityCheck(txn *transactions.SignedTxn, ctx *Context) error {
lsig := txn.Lsig
proto, ok := config.Consensus[ctx.CurrProto]
if !ok {
return protocol.Error(ctx.CurrProto)
}
if proto.LogicSigVersion == 0 {
return errors.New("LogicSig not enabled")
}
if len(lsig.Logic) == 0 {
return errors.New("LogicSig.Logic empty")
}
version, vlen := binary.Uvarint(lsig.Logic)
if vlen <= 0 {
return errors.New("LogicSig.Logic bad version")
}
if version > proto.LogicSigVersion {
return errors.New("LogicSig.Logic version too new")
}
if uint64(lsig.Len()) > proto.LogicSigMaxSize {
return errors.New("LogicSig.Logic too long")
}
ep := logic.EvalParams{
Txn: txn,
Proto: &proto,
TxnGroup: ctx.Group,
GroupIndex: ctx.GroupIndex,
}
cost, err := logic.Check(lsig.Logic, ep)
if err != nil {
return err
}
if cost > int(proto.LogicSigMaxCost) {
return fmt.Errorf("LogicSig.Logic too slow, %d > %d", cost, proto.LogicSigMaxCost)
}
hasMsig := false
numSigs := 0
if lsig.Sig != (crypto.Signature{}) {
numSigs++
}
if !lsig.Msig.Blank() {
hasMsig = true
numSigs++
}
if numSigs == 0 {
// if the txn.Authorizer() == hash(Logic) then this is a (potentially) valid operation on a contract-only account
program := logic.Program(lsig.Logic)
lhash := crypto.HashObj(&program)
if crypto.Digest(txn.Authorizer()) == lhash {
return nil
}
return errors.New("LogicNot signed and not a Logic-only account")
}
if numSigs > 1 {
return errors.New("LogicSig should only have one of Sig or Msig but has more than one")
}
if !hasMsig {
program := logic.Program(lsig.Logic)
if !crypto.SignatureVerifier(txn.Authorizer()).Verify(&program, lsig.Sig) {
return errors.New("logic signature validation failed")
}
} else {
program := logic.Program(lsig.Logic)
if ok, _ := crypto.MultisigVerify(&program, crypto.Digest(txn.Authorizer()), lsig.Msig); !ok {
return errors.New("logic multisig validation failed")
}
}
return nil
}
// LogicSig checks that the signature is valid, executing the program.
func LogicSig(txn *transactions.SignedTxn, ctx *Context) error {
proto, ok := config.Consensus[ctx.CurrProto]
if !ok {
return protocol.Error(ctx.CurrProto)
}
err := LogicSigSanityCheck(txn, ctx)
if err != nil {
return err
}
ep := logic.EvalParams{
Txn: txn,
Proto: &proto,
TxnGroup: ctx.Group,
GroupIndex: ctx.GroupIndex,
}
pass, err := logic.Eval(txn.Lsig.Logic, ep)
if err != nil {
logicErrTotal.Inc(nil)
return fmt.Errorf("transaction %v: rejected by logic err=%v", txn.ID(), err)
}
if !pass {
logicRejTotal.Inc(nil)
return fmt.Errorf("transaction %v: rejected by logic", txn.ID())
}
logicGoodTotal.Inc(nil)
return nil
}
| 1 | 39,566 | this probably should be done lazily only if logic/app call txn in the group. Or even done in LogicSigSanityCheck? | algorand-go-algorand | go |
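The review message above asks for the group scan to run lazily — only when some transaction in the group carries a logic sig or is an application call — or to move it into LogicSigSanityCheck. A rough sketch of the lazy gating, using minimal stand-in types rather than the real go-algorand definitions:

// Hypothetical sketch of lazy evaluation; types and fields are invented stand-ins.
package example

// signedTxn carries only the fields the gating needs.
type signedTxn struct {
    hasLogicSig bool
    isAppCall   bool
}

// computeMinTealVersion stands in for logic.ComputeMinTealVersion; the real function
// scans the group for features that raise the minimum allowed TEAL version.
func computeMinTealVersion(group []signedTxn) uint64 {
    return 1
}

// minTealVersionIfNeeded pays for the group scan only when something can use the result.
func minTealVersionIfNeeded(group []signedTxn) uint64 {
    for _, s := range group {
        if s.hasLogicSig || s.isAppCall {
            return computeMinTealVersion(group)
        }
    }
    // No logic sigs or app calls in the group: the value is never consulted.
    return 0
}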
@@ -1639,6 +1639,10 @@ func (e *mutableStateBuilder) addWorkflowExecutionStartedEventForContinueAsNew(
SearchAttributes: attributes.SearchAttributes,
}
+ if attributes.GetInitiator() == enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED {
+ attributes.Initiator = enumspb.CONTINUE_AS_NEW_INITIATOR_WORKFLOW
+ }
+
req := &historyservice.StartWorkflowExecutionRequest{
NamespaceId: e.namespaceEntry.GetInfo().Id,
StartRequest: createRequest, | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"fmt"
"math/rand"
"time"
"github.com/pborman/uuid"
commandpb "go.temporal.io/api/command/v1"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
failurepb "go.temporal.io/api/failure/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"
taskqueuepb "go.temporal.io/api/taskqueue/v1"
workflowpb "go.temporal.io/api/workflow/v1"
"go.temporal.io/api/workflowservice/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/historyservice/v1"
"go.temporal.io/server/api/persistenceblobs/v1"
workflowspb "go.temporal.io/server/api/workflow/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/checksum"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/definition"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/payload"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
)
const (
emptyUUID = "emptyUuid"
mutableStateInvalidHistoryActionMsg = "invalid history builder state for action"
mutableStateInvalidHistoryActionMsgTemplate = mutableStateInvalidHistoryActionMsg + ": %v"
)
var (
// ErrWorkflowFinished indicates trying to mutate mutable state after workflow finished
ErrWorkflowFinished = serviceerror.NewInternal("invalid mutable state action: mutation after finish")
// ErrMissingTimerInfo indicates missing timer info
ErrMissingTimerInfo = serviceerror.NewInternal("unable to get timer info")
// ErrMissingActivityInfo indicates missing activity info
ErrMissingActivityInfo = serviceerror.NewInternal("unable to get activity info")
// ErrMissingChildWorkflowInfo indicates missing child workflow info
ErrMissingChildWorkflowInfo = serviceerror.NewInternal("unable to get child workflow info")
// ErrMissingRequestCancelInfo indicates missing request cancel info
ErrMissingRequestCancelInfo = serviceerror.NewInternal("unable to get request cancel info")
// ErrMissingSignalInfo indicates missing signal external
ErrMissingSignalInfo = serviceerror.NewInternal("unable to get signal info")
// ErrMissingWorkflowStartEvent indicates missing workflow start event
ErrMissingWorkflowStartEvent = serviceerror.NewInternal("unable to get workflow start event")
// ErrMissingWorkflowCompletionEvent indicates missing workflow completion event
ErrMissingWorkflowCompletionEvent = serviceerror.NewInternal("unable to get workflow completion event")
// ErrMissingActivityScheduledEvent indicates missing workflow activity scheduled event
ErrMissingActivityScheduledEvent = serviceerror.NewInternal("unable to get activity scheduled event")
// ErrMissingChildWorkflowInitiatedEvent indicates missing child workflow initiated event
ErrMissingChildWorkflowInitiatedEvent = serviceerror.NewInternal("unable to get child workflow initiated event")
)
type (
mutableStateBuilder struct {
pendingActivityTimerHeartbeats map[int64]time.Time // Schedule Event ID -> LastHeartbeatTimeoutVisibilityInSeconds.
pendingActivityInfoIDs map[int64]*persistenceblobs.ActivityInfo // Schedule Event ID -> Activity Info.
pendingActivityIDToEventID map[string]int64 // Activity ID -> Schedule Event ID of the activity.
updateActivityInfos map[*persistenceblobs.ActivityInfo]struct{} // Modified activities from last update.
deleteActivityInfos map[int64]struct{} // Deleted activities from last update.
syncActivityTasks map[int64]struct{} // Activity to be sync to remote
pendingTimerInfoIDs map[string]*persistenceblobs.TimerInfo // User Timer ID -> Timer Info.
pendingTimerEventIDToID map[int64]string // User Timer Start Event ID -> User Timer ID.
updateTimerInfos map[*persistenceblobs.TimerInfo]struct{} // Modified timers from last update.
deleteTimerInfos map[string]struct{} // Deleted timers from last update.
pendingChildExecutionInfoIDs map[int64]*persistenceblobs.ChildExecutionInfo // Initiated Event ID -> Child Execution Info
updateChildExecutionInfos map[*persistenceblobs.ChildExecutionInfo]struct{} // Modified ChildExecution Infos since last update
deleteChildExecutionInfo *int64 // Deleted ChildExecution Info since last update
pendingRequestCancelInfoIDs map[int64]*persistenceblobs.RequestCancelInfo // Initiated Event ID -> RequestCancelInfo
updateRequestCancelInfos map[*persistenceblobs.RequestCancelInfo]struct{} // Modified RequestCancel Infos since last update, for persistence update
deleteRequestCancelInfo *int64 // Deleted RequestCancel Info since last update, for persistence update
pendingSignalInfoIDs map[int64]*persistenceblobs.SignalInfo // Initiated Event ID -> SignalInfo
updateSignalInfos map[*persistenceblobs.SignalInfo]struct{} // Modified SignalInfo since last update
deleteSignalInfo *int64 // Deleted SignalInfo since last update
pendingSignalRequestedIDs map[string]struct{} // Set of signaled requestIds
updateSignalRequestedIDs map[string]struct{} // Set of signaled requestIds since last update
deleteSignalRequestedID string // Deleted signaled requestId
bufferedEvents []*historypb.HistoryEvent // buffered history events that are already persisted
updateBufferedEvents []*historypb.HistoryEvent // buffered history events that needs to be persisted
clearBufferedEvents bool // delete buffered events from persistence
executionInfo *persistence.WorkflowExecutionInfo // Workflow mutable state info.
versionHistories *persistence.VersionHistories
hBuilder *historyBuilder
// in memory only attributes
// indicate the current version
currentVersion int64
// indicates whether there are buffered events in persistence
hasBufferedEventsInDB bool
// indicates the workflow state in DB, can be used to calculate
// whether this workflow is pointed by current workflow record
stateInDB enumsspb.WorkflowExecutionState
// indicates the next event ID in DB, for conditional update
nextEventIDInDB int64
// namespace entry contains a snapshot of namespace
// NOTE: do not use the failover version inside, use currentVersion above
namespaceEntry *cache.NamespaceCacheEntry
// record if a event has been applied to mutable state
// TODO: persist this to db
appliedEvents map[string]struct{}
insertTransferTasks []persistence.Task
insertReplicationTasks []persistence.Task
insertTimerTasks []persistence.Task
// do not rely on this, this is only updated on
// Load() and closeTransactionXXX methods. So when
// a transaction is in progress, this value will be
// wrong. This exist primarily for visibility via CLI
checksum checksum.Checksum
taskGenerator mutableStateTaskGenerator
workflowTaskManager mutableStateWorkflowTaskManager
queryRegistry queryRegistry
shard ShardContext
clusterMetadata cluster.Metadata
eventsCache eventsCache
config *Config
timeSource clock.TimeSource
logger log.Logger
metricsClient metrics.Client
}
)
var _ mutableState = (*mutableStateBuilder)(nil)
func newMutableStateBuilder(
shard ShardContext,
eventsCache eventsCache,
logger log.Logger,
namespaceEntry *cache.NamespaceCacheEntry,
) *mutableStateBuilder {
s := &mutableStateBuilder{
updateActivityInfos: make(map[*persistenceblobs.ActivityInfo]struct{}),
pendingActivityTimerHeartbeats: make(map[int64]time.Time),
pendingActivityInfoIDs: make(map[int64]*persistenceblobs.ActivityInfo),
pendingActivityIDToEventID: make(map[string]int64),
deleteActivityInfos: make(map[int64]struct{}),
syncActivityTasks: make(map[int64]struct{}),
pendingTimerInfoIDs: make(map[string]*persistenceblobs.TimerInfo),
pendingTimerEventIDToID: make(map[int64]string),
updateTimerInfos: make(map[*persistenceblobs.TimerInfo]struct{}),
deleteTimerInfos: make(map[string]struct{}),
updateChildExecutionInfos: make(map[*persistenceblobs.ChildExecutionInfo]struct{}),
pendingChildExecutionInfoIDs: make(map[int64]*persistenceblobs.ChildExecutionInfo),
deleteChildExecutionInfo: nil,
updateRequestCancelInfos: make(map[*persistenceblobs.RequestCancelInfo]struct{}),
pendingRequestCancelInfoIDs: make(map[int64]*persistenceblobs.RequestCancelInfo),
deleteRequestCancelInfo: nil,
updateSignalInfos: make(map[*persistenceblobs.SignalInfo]struct{}),
pendingSignalInfoIDs: make(map[int64]*persistenceblobs.SignalInfo),
deleteSignalInfo: nil,
updateSignalRequestedIDs: make(map[string]struct{}),
pendingSignalRequestedIDs: make(map[string]struct{}),
deleteSignalRequestedID: "",
currentVersion: namespaceEntry.GetFailoverVersion(),
hasBufferedEventsInDB: false,
stateInDB: enumsspb.WORKFLOW_EXECUTION_STATE_VOID,
nextEventIDInDB: 0,
namespaceEntry: namespaceEntry,
appliedEvents: make(map[string]struct{}),
queryRegistry: newQueryRegistry(),
shard: shard,
clusterMetadata: shard.GetClusterMetadata(),
eventsCache: eventsCache,
config: shard.GetConfig(),
timeSource: shard.GetTimeSource(),
logger: logger,
metricsClient: shard.GetMetricsClient(),
}
s.executionInfo = &persistence.WorkflowExecutionInfo{
WorkflowTaskVersion: common.EmptyVersion,
WorkflowTaskScheduleId: common.EmptyEventID,
WorkflowTaskStartedId: common.EmptyEventID,
WorkflowTaskRequestId: emptyUUID,
WorkflowTaskTimeout: timestamp.DurationFromSeconds(0),
WorkflowTaskAttempt: 1,
NextEventId: common.FirstEventID,
ExecutionState: &persistenceblobs.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED,
Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING},
LastProcessedEvent: common.EmptyEventID,
}
s.hBuilder = newHistoryBuilder(s, logger)
s.taskGenerator = newMutableStateTaskGenerator(shard.GetNamespaceCache(), s.logger, s)
s.workflowTaskManager = newMutableStateWorkflowTaskManager(s)
return s
}
func newMutableStateBuilderWithVersionHistories(
shard ShardContext,
eventsCache eventsCache,
logger log.Logger,
namespaceEntry *cache.NamespaceCacheEntry,
) *mutableStateBuilder {
s := newMutableStateBuilder(shard, eventsCache, logger, namespaceEntry)
s.versionHistories = persistence.NewVersionHistories(&persistence.VersionHistory{})
return s
}
func (e *mutableStateBuilder) CopyToPersistence() *persistence.WorkflowMutableState {
state := &persistence.WorkflowMutableState{}
state.ActivityInfos = e.pendingActivityInfoIDs
state.TimerInfos = e.pendingTimerInfoIDs
state.ChildExecutionInfos = e.pendingChildExecutionInfoIDs
state.RequestCancelInfos = e.pendingRequestCancelInfoIDs
state.SignalInfos = e.pendingSignalInfoIDs
state.SignalRequestedIDs = e.pendingSignalRequestedIDs
state.ExecutionInfo = e.executionInfo
state.BufferedEvents = e.bufferedEvents
state.VersionHistories = e.versionHistories
state.Checksum = e.checksum
return state
}
func (e *mutableStateBuilder) Load(
state *persistence.WorkflowMutableState,
) {
e.pendingActivityInfoIDs = state.ActivityInfos
for _, activityInfo := range state.ActivityInfos {
e.pendingActivityIDToEventID[activityInfo.ActivityId] = activityInfo.ScheduleId
}
e.pendingTimerInfoIDs = state.TimerInfos
for _, timerInfo := range state.TimerInfos {
e.pendingTimerEventIDToID[timerInfo.GetStartedId()] = timerInfo.GetTimerId()
}
e.pendingChildExecutionInfoIDs = state.ChildExecutionInfos
e.pendingRequestCancelInfoIDs = state.RequestCancelInfos
e.pendingSignalInfoIDs = state.SignalInfos
e.pendingSignalRequestedIDs = state.SignalRequestedIDs
e.executionInfo = state.ExecutionInfo
e.bufferedEvents = state.BufferedEvents
e.currentVersion = common.EmptyVersion
e.hasBufferedEventsInDB = len(e.bufferedEvents) > 0
e.stateInDB = state.ExecutionInfo.ExecutionState.State
e.nextEventIDInDB = state.ExecutionInfo.NextEventId
e.versionHistories = state.VersionHistories
e.checksum = state.Checksum
if len(state.Checksum.Value) > 0 {
switch {
case e.shouldInvalidateCheckum():
e.checksum = checksum.Checksum{}
e.metricsClient.IncCounter(metrics.WorkflowContextScope, metrics.MutableStateChecksumInvalidated)
case e.shouldVerifyChecksum():
if err := verifyMutableStateChecksum(e, state.Checksum); err != nil {
// we ignore checksum verification errors for now until this
// feature is tested and/or we have mechanisms in place to deal
// with these types of errors
e.metricsClient.IncCounter(metrics.WorkflowContextScope, metrics.MutableStateChecksumMismatch)
e.logError("mutable state checksum mismatch", tag.Error(err))
}
}
}
}
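// GetCurrentBranchToken returns the branch token of the current version history, or the execution info's event branch token when version histories are not in use.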
func (e *mutableStateBuilder) GetCurrentBranchToken() ([]byte, error) {
if e.versionHistories != nil {
currentVersionHistory, err := e.versionHistories.GetCurrentVersionHistory()
if err != nil {
return nil, err
}
return currentVersionHistory.GetBranchToken(), nil
}
return e.executionInfo.EventBranchToken, nil
}
func (e *mutableStateBuilder) GetVersionHistories() *persistence.VersionHistories {
return e.versionHistories
}
// SetHistoryTree creates the initial branch token for the given tree ID and sets it as the current branch token.
func (e *mutableStateBuilder) SetHistoryTree(
treeID string,
) error {
initialBranchToken, err := persistence.NewHistoryBranchToken(treeID)
if err != nil {
return err
}
return e.SetCurrentBranchToken(initialBranchToken)
}
func (e *mutableStateBuilder) SetCurrentBranchToken(
branchToken []byte,
) error {
exeInfo := e.GetExecutionInfo()
if e.versionHistories == nil {
exeInfo.EventBranchToken = branchToken
return nil
}
currentVersionHistory, err := e.versionHistories.GetCurrentVersionHistory()
if err != nil {
return err
}
return currentVersionHistory.SetBranchToken(branchToken)
}
func (e *mutableStateBuilder) SetVersionHistories(
versionHistories *persistence.VersionHistories,
) error {
e.versionHistories = versionHistories
return nil
}
func (e *mutableStateBuilder) GetHistoryBuilder() *historyBuilder {
return e.hBuilder
}
func (e *mutableStateBuilder) SetHistoryBuilder(hBuilder *historyBuilder) {
e.hBuilder = hBuilder
}
func (e *mutableStateBuilder) GetExecutionInfo() *persistence.WorkflowExecutionInfo {
return e.executionInfo
}
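// FlushBufferedEvents flushes buffered events into the committed history when no workflow task is in flight, reordering activity and child workflow completion events to the end; while a workflow task is in flight, newly buffered events are kept in the pending buffer. Event IDs and task IDs are assigned to the flushed events.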
func (e *mutableStateBuilder) FlushBufferedEvents() error {
// put new events into 2 buckets:
// 1) if the event was added while there was in-flight workflow task, then put it in buffered bucket
// 2) otherwise, put it in committed bucket
var newBufferedEvents []*historypb.HistoryEvent
var newCommittedEvents []*historypb.HistoryEvent
for _, event := range e.hBuilder.history {
if event.GetEventId() == common.BufferedEventID {
newBufferedEvents = append(newBufferedEvents, event)
} else {
newCommittedEvents = append(newCommittedEvents, event)
}
}
// Sometimes buffered events are out of order when read back from the database. This is mostly not an issue,
// except in the activity case where ActivityStarted and ActivityCompleted can get out of order. The following code
// reorders buffered events to guarantee all activity completion events are processed at the end.
var reorderedEvents []*historypb.HistoryEvent
reorderFunc := func(bufferedEvents []*historypb.HistoryEvent) {
for _, event := range bufferedEvents {
switch event.GetEventType() {
case enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED,
enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED,
enumspb.EVENT_TYPE_ACTIVITY_TASK_CANCELED,
enumspb.EVENT_TYPE_ACTIVITY_TASK_TIMED_OUT:
reorderedEvents = append(reorderedEvents, event)
case enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_COMPLETED,
enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_FAILED,
enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_CANCELED,
enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TIMED_OUT,
enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TERMINATED:
reorderedEvents = append(reorderedEvents, event)
default:
newCommittedEvents = append(newCommittedEvents, event)
}
}
}
// no workflow task in-flight, flush all buffered events to committed bucket
if !e.HasInFlightWorkflowTask() {
// flush persisted buffered events
if len(e.bufferedEvents) > 0 {
reorderFunc(e.bufferedEvents)
e.bufferedEvents = nil
}
if e.hasBufferedEventsInDB {
e.clearBufferedEvents = true
}
// flush pending buffered events
reorderFunc(e.updateBufferedEvents)
// clear pending buffered events
e.updateBufferedEvents = nil
// Put back all the reordered buffered events at the end
if len(reorderedEvents) > 0 {
newCommittedEvents = append(newCommittedEvents, reorderedEvents...)
}
// flush new buffered events that were not saved to persistence yet
newCommittedEvents = append(newCommittedEvents, newBufferedEvents...)
newBufferedEvents = nil
}
newCommittedEvents = e.trimEventsAfterWorkflowClose(newCommittedEvents)
e.hBuilder.history = newCommittedEvents
// make sure all new committed events have correct EventID
e.assignEventIDToBufferedEvents()
if err := e.assignTaskIDToEvents(); err != nil {
return err
}
// if the workflow task is not closed yet and there are new buffered events, put them into the pending buffer
if e.HasInFlightWorkflowTask() && len(newBufferedEvents) > 0 {
e.updateBufferedEvents = newBufferedEvents
}
return nil
}
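// UpdateCurrentVersion updates the current failover version. The version is first aligned with the last item of the current version history and then raised to the given version if it is higher, or overwritten when forceUpdate is set. It is a no-op when the workflow is already completed.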
func (e *mutableStateBuilder) UpdateCurrentVersion(
version int64,
forceUpdate bool,
) error {
if state, _ := e.GetWorkflowStateStatus(); state == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED {
// do not update the current version when the workflow is already completed
return nil
}
if e.versionHistories != nil {
versionHistory, err := e.versionHistories.GetCurrentVersionHistory()
if err != nil {
return err
}
if !versionHistory.IsEmpty() {
// this makes sure current version >= last write version
versionHistoryItem, err := versionHistory.GetLastItem()
if err != nil {
return err
}
e.currentVersion = versionHistoryItem.GetVersion()
}
if version > e.currentVersion || forceUpdate {
e.currentVersion = version
}
return nil
}
// TODO: All mutable state should have versioned histories. Even local namespace should have it to allow for
// re-replication of local namespaces to other clusters.
// We probably need an error if mutableState does not have versioned history.
e.currentVersion = common.EmptyVersion
return nil
}
func (e *mutableStateBuilder) GetCurrentVersion() int64 {
if e.versionHistories != nil {
return e.currentVersion
}
return common.EmptyVersion
}
func (e *mutableStateBuilder) GetStartVersion() (int64, error) {
if e.versionHistories != nil {
versionHistory, err := e.versionHistories.GetCurrentVersionHistory()
if err != nil {
return 0, err
}
firstItem, err := versionHistory.GetFirstItem()
if err != nil {
return 0, err
}
return firstItem.GetVersion(), nil
}
return common.EmptyVersion, nil
}
func (e *mutableStateBuilder) GetLastWriteVersion() (int64, error) {
if e.versionHistories != nil {
versionHistory, err := e.versionHistories.GetCurrentVersionHistory()
if err != nil {
return 0, err
}
lastItem, err := versionHistory.GetLastItem()
if err != nil {
return 0, err
}
return lastItem.GetVersion(), nil
}
return common.EmptyVersion, nil
}
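// scanForBufferedActivityCompletion looks for a buffered completion event (completed, failed, canceled or timed out) for the given activity schedule ID, checking both persisted and pending buffered events.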
func (e *mutableStateBuilder) scanForBufferedActivityCompletion(
scheduleID int64,
) *historypb.HistoryEvent {
var completionEvent *historypb.HistoryEvent
completionEvent = scanForBufferedActivityCompletion(scheduleID, e.bufferedEvents)
if completionEvent != nil {
return completionEvent
}
return scanForBufferedActivityCompletion(scheduleID, e.updateBufferedEvents)
}
func scanForBufferedActivityCompletion(
scheduleID int64,
events []*historypb.HistoryEvent,
) *historypb.HistoryEvent {
for _, event := range events {
switch event.GetEventType() {
case enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED:
if event.GetActivityTaskCompletedEventAttributes().GetScheduledEventId() == scheduleID {
return event
}
case enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED:
if event.GetActivityTaskFailedEventAttributes().GetScheduledEventId() == scheduleID {
return event
}
case enumspb.EVENT_TYPE_ACTIVITY_TASK_TIMED_OUT:
if event.GetActivityTaskTimedOutEventAttributes().GetScheduledEventId() == scheduleID {
return event
}
case enumspb.EVENT_TYPE_ACTIVITY_TASK_CANCELED:
if event.GetActivityTaskCanceledEventAttributes().GetScheduledEventId() == scheduleID {
return event
}
}
}
// Completion event not found
return nil
}
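// checkAndClearTimerFiredEvent removes a TimerFired event for the given timer ID from the persisted buffer, the pending buffer or the in-memory history, and returns the removed event if one was found.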
func (e *mutableStateBuilder) checkAndClearTimerFiredEvent(
timerID string,
) *historypb.HistoryEvent {
var timerEvent *historypb.HistoryEvent
e.bufferedEvents, timerEvent = checkAndClearTimerFiredEvent(e.bufferedEvents, timerID)
if timerEvent != nil {
return timerEvent
}
e.updateBufferedEvents, timerEvent = checkAndClearTimerFiredEvent(e.updateBufferedEvents, timerID)
if timerEvent != nil {
return timerEvent
}
e.hBuilder.history, timerEvent = checkAndClearTimerFiredEvent(e.hBuilder.history, timerID)
return timerEvent
}
func checkAndClearTimerFiredEvent(
events []*historypb.HistoryEvent,
timerID string,
) ([]*historypb.HistoryEvent, *historypb.HistoryEvent) {
// go over all history events. if we find a timer fired event for the given
// timerID, clear it
timerFiredIdx := -1
for idx, event := range events {
if event.GetEventType() == enumspb.EVENT_TYPE_TIMER_FIRED &&
event.GetTimerFiredEventAttributes().GetTimerId() == timerID {
timerFiredIdx = idx
break
}
}
if timerFiredIdx == -1 {
return events, nil
}
timerEvent := events[timerFiredIdx]
return append(events[:timerFiredIdx], events[timerFiredIdx+1:]...), timerEvent
}
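// trimEventsAfterWorkflowClose drops any events that follow a workflow close event (completed, failed, timed out, terminated, continued-as-new or canceled).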
func (e *mutableStateBuilder) trimEventsAfterWorkflowClose(
input []*historypb.HistoryEvent,
) []*historypb.HistoryEvent {
if len(input) == 0 {
return input
}
nextIndex := 0
loop:
for _, event := range input {
nextIndex++
switch event.GetEventType() {
case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED:
break loop
}
}
return input[0:nextIndex]
}
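// assignEventIDToBufferedEvents assigns real event IDs to previously buffered events and patches the started event ID references of the corresponding activity and child workflow records and completion events.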
func (e *mutableStateBuilder) assignEventIDToBufferedEvents() {
newCommittedEvents := e.hBuilder.history
scheduledIDToStartedID := make(map[int64]int64)
for _, event := range newCommittedEvents {
if event.GetEventId() != common.BufferedEventID {
continue
}
eventID := e.executionInfo.NextEventId
event.EventId = eventID
e.executionInfo.IncreaseNextEventID()
switch event.GetEventType() {
case enumspb.EVENT_TYPE_ACTIVITY_TASK_STARTED:
attributes := event.GetActivityTaskStartedEventAttributes()
scheduledID := attributes.GetScheduledEventId()
scheduledIDToStartedID[scheduledID] = eventID
if ai, ok := e.GetActivityInfo(scheduledID); ok {
ai.StartedId = eventID
e.updateActivityInfos[ai] = struct{}{}
}
case enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_STARTED:
attributes := event.GetChildWorkflowExecutionStartedEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
scheduledIDToStartedID[initiatedID] = eventID
if ci, ok := e.GetChildExecutionInfo(initiatedID); ok {
ci.StartedId = eventID
e.updateChildExecutionInfos[ci] = struct{}{}
}
case enumspb.EVENT_TYPE_ACTIVITY_TASK_COMPLETED:
attributes := event.GetActivityTaskCompletedEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetScheduledEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_ACTIVITY_TASK_FAILED:
attributes := event.GetActivityTaskFailedEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetScheduledEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_ACTIVITY_TASK_TIMED_OUT:
attributes := event.GetActivityTaskTimedOutEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetScheduledEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_ACTIVITY_TASK_CANCELED:
attributes := event.GetActivityTaskCanceledEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetScheduledEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_COMPLETED:
attributes := event.GetChildWorkflowExecutionCompletedEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetInitiatedEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_FAILED:
attributes := event.GetChildWorkflowExecutionFailedEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetInitiatedEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TIMED_OUT:
attributes := event.GetChildWorkflowExecutionTimedOutEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetInitiatedEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_CANCELED:
attributes := event.GetChildWorkflowExecutionCanceledEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetInitiatedEventId()]; ok {
attributes.StartedEventId = startedID
}
case enumspb.EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TERMINATED:
attributes := event.GetChildWorkflowExecutionTerminatedEventAttributes()
if startedID, ok := scheduledIDToStartedID[attributes.GetInitiatedEventId()]; ok {
attributes.StartedEventId = startedID
}
}
}
}
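// assignTaskIDToEvents assigns newly generated transfer task IDs to transient and normal history events that do not yet have one, and records the last assigned task ID on the execution info.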
func (e *mutableStateBuilder) assignTaskIDToEvents() error {
// assign task IDs to all history events
// first transient events
numTaskIDs := len(e.hBuilder.transientHistory)
if numTaskIDs > 0 {
taskIDs, err := e.shard.GenerateTransferTaskIDs(numTaskIDs)
if err != nil {
return err
}
for index, event := range e.hBuilder.transientHistory {
if event.GetTaskId() == common.EmptyEventTaskID {
taskID := taskIDs[index]
event.TaskId = taskID
e.executionInfo.LastEventTaskId = taskID
}
}
}
// then normal events
numTaskIDs = len(e.hBuilder.history)
if numTaskIDs > 0 {
taskIDs, err := e.shard.GenerateTransferTaskIDs(numTaskIDs)
if err != nil {
return err
}
for index, event := range e.hBuilder.history {
if event.GetTaskId() == common.EmptyEventTaskID {
taskID := taskIDs[index]
event.TaskId = taskID
e.executionInfo.LastEventTaskId = taskID
}
}
}
return nil
}
func (e *mutableStateBuilder) IsCurrentWorkflowGuaranteed() bool {
// stateInDB is used like a bloom filter:
//
// 1. stateInDB being created / running means that this workflow must be the current
// workflow (assuming there is no rebuild of mutable state).
// 2. stateInDB being completed does not guarantee this workflow being the current workflow
// 3. stateInDB being zombie guarantees this workflow not being the current workflow
// 4. stateInDB cannot be void, void is only possible when mutable state is just initialized
switch e.stateInDB {
case enumsspb.WORKFLOW_EXECUTION_STATE_VOID:
return false
case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:
return true
case enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:
return true
case enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED:
return false
case enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:
return false
case enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED:
return false
default:
panic(fmt.Sprintf("unknown workflow state: %v", e.executionInfo.ExecutionState.State))
}
}
func (e *mutableStateBuilder) GetNamespaceEntry() *cache.NamespaceCacheEntry {
return e.namespaceEntry
}
func (e *mutableStateBuilder) IsStickyTaskQueueEnabled() bool {
if e.executionInfo.StickyTaskQueue == "" {
return false
}
ttl := e.config.StickyTTL(e.GetNamespaceEntry().GetInfo().Name)
if e.timeSource.Now().After(timestamp.TimeValue(e.executionInfo.LastUpdatedTime).Add(ttl)) {
return false
}
return true
}
func (e *mutableStateBuilder) CreateNewHistoryEvent(
eventType enumspb.EventType,
) *historypb.HistoryEvent {
return e.CreateNewHistoryEventWithTime(eventType, e.timeSource.Now())
}
func (e *mutableStateBuilder) CreateNewHistoryEventWithTime(
eventType enumspb.EventType,
time time.Time,
) *historypb.HistoryEvent {
eventID := e.executionInfo.NextEventId
if e.shouldBufferEvent(eventType) {
eventID = common.BufferedEventID
} else {
// only increase NextEventID if event is not buffered
e.executionInfo.IncreaseNextEventID()
}
historyEvent := &historypb.HistoryEvent{}
historyEvent.EventId = eventID
historyEvent.EventTime = &time
historyEvent.EventType = eventType
historyEvent.Version = e.GetCurrentVersion()
historyEvent.TaskId = common.EmptyEventTaskID
return historyEvent
}
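// shouldBufferEvent returns true for events that must be buffered while a workflow task is in flight; workflow state changes, workflow task events and events generated directly from commands are never buffered.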
func (e *mutableStateBuilder) shouldBufferEvent(
eventType enumspb.EventType,
) bool {
switch eventType {
case // do not buffer for workflow state change
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED:
return false
case // workflow task event should not be buffered
enumspb.EVENT_TYPE_WORKFLOW_TASK_SCHEDULED,
enumspb.EVENT_TYPE_WORKFLOW_TASK_STARTED,
enumspb.EVENT_TYPE_WORKFLOW_TASK_COMPLETED,
enumspb.EVENT_TYPE_WORKFLOW_TASK_FAILED,
enumspb.EVENT_TYPE_WORKFLOW_TASK_TIMED_OUT:
return false
case // events generated directly from commands should not be buffered
// workflow completed, failed, canceled and continue-as-new events are duplicates of the above,
// just listed here for reference
// workflow.EventTypeWorkflowExecutionCompleted,
// workflow.EventTypeWorkflowExecutionFailed,
// workflow.EventTypeWorkflowExecutionCanceled,
// workflow.EventTypeWorkflowExecutionContinuedAsNew,
enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED,
enumspb.EVENT_TYPE_ACTIVITY_TASK_CANCEL_REQUESTED,
enumspb.EVENT_TYPE_TIMER_STARTED,
// CommandTypeCancelTimer is an exception. This command will be mapped
// to workflow.EventTypeTimerCanceled.
// This event should not be buffered. Ref: historyEngine, search for "workflow.CommandTypeCancelTimer"
enumspb.EVENT_TYPE_TIMER_CANCELED,
enumspb.EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED,
enumspb.EVENT_TYPE_MARKER_RECORDED,
enumspb.EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_INITIATED,
enumspb.EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED,
enumspb.EVENT_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES:
// do not buffer the event if it is directly generated from a corresponding command
// sanity check that there is no workflow task in flight
if e.HasInFlightWorkflowTask() {
msg := fmt.Sprintf("history mutable state is processing event: %v while there is workflow task pending. "+
"namespaceID: %v, workflow ID: %v, run ID: %v.", eventType, e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionInfo.ExecutionState.RunId)
panic(msg)
}
return false
default:
return true
}
}
func (e *mutableStateBuilder) GetWorkflowType() *commonpb.WorkflowType {
wType := &commonpb.WorkflowType{}
wType.Name = e.executionInfo.WorkflowTypeName
return wType
}
func (e *mutableStateBuilder) GetQueryRegistry() queryRegistry {
return e.queryRegistry
}
func (e *mutableStateBuilder) GetActivityScheduledEvent(
scheduleEventID int64,
) (*historypb.HistoryEvent, error) {
ai, ok := e.pendingActivityInfoIDs[scheduleEventID]
if !ok {
return nil, ErrMissingActivityInfo
}
currentBranchToken, err := e.GetCurrentBranchToken()
if err != nil {
return nil, err
}
scheduledEvent, err := e.eventsCache.getEvent(
e.executionInfo.NamespaceId,
e.executionInfo.WorkflowId,
e.executionInfo.ExecutionState.RunId,
ai.ScheduledEventBatchId,
ai.ScheduleId,
currentBranchToken,
)
if err != nil {
// do not return the original error
// since original error can be of type entity not exists
// which can cause task processing side to fail silently
return nil, ErrMissingActivityScheduledEvent
}
return scheduledEvent, nil
}
// GetActivityInfo gives details about an activity that is currently in progress.
func (e *mutableStateBuilder) GetActivityInfo(
scheduleEventID int64,
) (*persistenceblobs.ActivityInfo, bool) {
ai, ok := e.pendingActivityInfoIDs[scheduleEventID]
return ai, ok
}
// GetActivityInfoWithTimerHeartbeat gives details about an in-progress activity along with its timer heartbeat visibility time.
func (e *mutableStateBuilder) GetActivityInfoWithTimerHeartbeat(
scheduleEventID int64,
) (*persistenceblobs.ActivityInfo, time.Time, bool) {
ai, ok := e.pendingActivityInfoIDs[scheduleEventID]
if !ok {
return nil, time.Time{}, false
}
timerVis, ok := e.pendingActivityTimerHeartbeats[scheduleEventID]
return ai, timerVis, ok
}
// GetActivityByActivityID gives details about an activity that is currently in progress.
func (e *mutableStateBuilder) GetActivityByActivityID(
activityID string,
) (*persistenceblobs.ActivityInfo, bool) {
eventID, ok := e.pendingActivityIDToEventID[activityID]
if !ok {
return nil, false
}
return e.GetActivityInfo(eventID)
}
// GetChildExecutionInfo gives details about a child execution that is currently in progress.
func (e *mutableStateBuilder) GetChildExecutionInfo(
initiatedEventID int64,
) (*persistenceblobs.ChildExecutionInfo, bool) {
ci, ok := e.pendingChildExecutionInfoIDs[initiatedEventID]
return ci, ok
}
// GetChildExecutionInitiatedEvent reads out the ChildExecutionInitiatedEvent from mutable state for in-progress child
// executions
func (e *mutableStateBuilder) GetChildExecutionInitiatedEvent(
initiatedEventID int64,
) (*historypb.HistoryEvent, error) {
ci, ok := e.pendingChildExecutionInfoIDs[initiatedEventID]
if !ok {
return nil, ErrMissingChildWorkflowInfo
}
currentBranchToken, err := e.GetCurrentBranchToken()
if err != nil {
return nil, err
}
initiatedEvent, err := e.eventsCache.getEvent(
e.executionInfo.NamespaceId,
e.executionInfo.WorkflowId,
e.executionInfo.ExecutionState.RunId,
ci.InitiatedEventBatchId,
ci.InitiatedId,
currentBranchToken,
)
if err != nil {
// do not return the original error
// since original error can be of type entity not exists
// which can cause task processing side to fail silently
return nil, ErrMissingChildWorkflowInitiatedEvent
}
return initiatedEvent, nil
}
// GetRequestCancelInfo gives details about a request cancellation that is currently in progress.
func (e *mutableStateBuilder) GetRequestCancelInfo(
initiatedEventID int64,
) (*persistenceblobs.RequestCancelInfo, bool) {
ri, ok := e.pendingRequestCancelInfoIDs[initiatedEventID]
return ri, ok
}
func (e *mutableStateBuilder) GetRetryBackoffDuration(
failure *failurepb.Failure,
) (time.Duration, enumspb.RetryState) {
info := e.executionInfo
if !info.HasRetryPolicy {
return backoff.NoBackoff, enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET
}
return getBackoffInterval(
e.timeSource.Now(),
timestamp.TimeValue(info.WorkflowExpirationTime),
info.Attempt,
info.RetryMaximumAttempts,
info.RetryInitialInterval,
info.RetryMaximumInterval,
info.RetryBackoffCoefficient,
failure,
info.RetryNonRetryableErrorTypes,
)
}
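// GetCronBackoffDuration returns the backoff duration until the next scheduled cron run, computed from the workflow's execution time and cron schedule; it returns backoff.NoBackoff when no cron schedule is configured.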
func (e *mutableStateBuilder) GetCronBackoffDuration() (time.Duration, error) {
info := e.executionInfo
if len(info.CronSchedule) == 0 {
return backoff.NoBackoff, nil
}
// TODO: decide if we can add execution time in execution info.
executionTime := timestamp.TimeValue(e.executionInfo.StartTime)
// This is only called when doing ContinueAsNew. At this point, the workflow should have a start event.
workflowStartEvent, err := e.GetStartEvent()
if err != nil {
e.logError("unable to find workflow start event", tag.ErrorTypeInvalidHistoryAction)
return backoff.NoBackoff, err
}
firstWorkflowTaskBackoff := timestamp.DurationValue(workflowStartEvent.GetWorkflowExecutionStartedEventAttributes().GetFirstWorkflowTaskBackoff())
executionTime = executionTime.Add(firstWorkflowTaskBackoff)
return backoff.GetBackoffForNextSchedule(info.CronSchedule, executionTime, e.timeSource.Now()), nil
}
// GetSignalInfo gets details about a signal request that is currently in progress.
func (e *mutableStateBuilder) GetSignalInfo(
initiatedEventID int64,
) (*persistenceblobs.SignalInfo, bool) {
ri, ok := e.pendingSignalInfoIDs[initiatedEventID]
return ri, ok
}
// GetCompletionEvent retrieves the workflow completion event from mutable state
func (e *mutableStateBuilder) GetCompletionEvent() (*historypb.HistoryEvent, error) {
if e.executionInfo.ExecutionState.State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED {
return nil, ErrMissingWorkflowCompletionEvent
}
currentBranchToken, err := e.GetCurrentBranchToken()
if err != nil {
return nil, err
}
// Completion EventID is always one less than NextEventID after workflow is completed
completionEventID := e.executionInfo.NextEventId - 1
firstEventID := e.executionInfo.CompletionEventBatchId
completionEvent, err := e.eventsCache.getEvent(
e.executionInfo.NamespaceId,
e.executionInfo.WorkflowId,
e.executionInfo.ExecutionState.RunId,
firstEventID,
completionEventID,
currentBranchToken,
)
if err != nil {
// do not return the original error
// since original error can be of type entity not exists
// which can cause task processing side to fail silently
return nil, ErrMissingWorkflowCompletionEvent
}
return completionEvent, nil
}
// GetStartEvent retrieves the workflow start event from mutable state
func (e *mutableStateBuilder) GetStartEvent() (*historypb.HistoryEvent, error) {
currentBranchToken, err := e.GetCurrentBranchToken()
if err != nil {
return nil, err
}
startEvent, err := e.eventsCache.getEvent(
e.executionInfo.NamespaceId,
e.executionInfo.WorkflowId,
e.executionInfo.ExecutionState.RunId,
common.FirstEventID,
common.FirstEventID,
currentBranchToken,
)
if err != nil {
// do not return the original error
// since original error can be of type entity not exists
// which can cause task processing side to fail silently
return nil, ErrMissingWorkflowStartEvent
}
return startEvent, nil
}
// DeletePendingChildExecution deletes details about a ChildExecutionInfo.
func (e *mutableStateBuilder) DeletePendingChildExecution(
initiatedEventID int64,
) error {
if _, ok := e.pendingChildExecutionInfoIDs[initiatedEventID]; ok {
delete(e.pendingChildExecutionInfoIDs, initiatedEventID)
} else {
e.logError(
fmt.Sprintf("unable to find child workflow event ID: %v in mutable state", initiatedEventID),
tag.ErrorTypeInvalidMutableStateAction,
)
// log data inconsistency instead of returning an error
e.logDataInconsistency()
}
e.deleteChildExecutionInfo = &initiatedEventID
return nil
}
// DeletePendingRequestCancel deletes details about a RequestCancelInfo.
func (e *mutableStateBuilder) DeletePendingRequestCancel(
initiatedEventID int64,
) error {
if _, ok := e.pendingRequestCancelInfoIDs[initiatedEventID]; ok {
delete(e.pendingRequestCancelInfoIDs, initiatedEventID)
} else {
e.logError(
fmt.Sprintf("unable to find request cancel external workflow event ID: %v in mutable state", initiatedEventID),
tag.ErrorTypeInvalidMutableStateAction,
)
// log data inconsistency instead of returning an error
e.logDataInconsistency()
}
e.deleteRequestCancelInfo = &initiatedEventID
return nil
}
// DeletePendingSignal deletes details about a SignalInfo
func (e *mutableStateBuilder) DeletePendingSignal(
initiatedEventID int64,
) error {
if _, ok := e.pendingSignalInfoIDs[initiatedEventID]; ok {
delete(e.pendingSignalInfoIDs, initiatedEventID)
} else {
e.logError(
fmt.Sprintf("unable to find signal external workflow event ID: %v in mutable state", initiatedEventID),
tag.ErrorTypeInvalidMutableStateAction,
)
// log data inconsistency instead of returning an error
e.logDataInconsistency()
}
e.deleteSignalInfo = &initiatedEventID
return nil
}
func (e *mutableStateBuilder) writeEventToCache(
event *historypb.HistoryEvent,
) {
// For start event: store it within events cache so the recordWorkflowStarted transfer task doesn't need to
// load it from database
// For completion event: store it within events cache so we can communicate the result to parent execution
// during the processing of DeleteTransferTask without loading this event from database
e.eventsCache.putEvent(
e.executionInfo.NamespaceId,
e.executionInfo.WorkflowId,
e.executionInfo.ExecutionState.RunId,
event.GetEventId(),
event,
)
}
func (e *mutableStateBuilder) HasParentExecution() bool {
return e.executionInfo.ParentNamespaceId != "" && e.executionInfo.ParentWorkflowId != ""
}
func (e *mutableStateBuilder) UpdateActivityProgress(
ai *persistenceblobs.ActivityInfo,
request *workflowservice.RecordActivityTaskHeartbeatRequest,
) {
ai.Version = e.GetCurrentVersion()
ai.LastHeartbeatDetails = request.Details
now := e.timeSource.Now()
ai.LastHeartbeatUpdateTime = &now
e.updateActivityInfos[ai] = struct{}{}
e.syncActivityTasks[ai.ScheduleId] = struct{}{}
}
// ReplicateActivityInfo replicates the necessary activity information
func (e *mutableStateBuilder) ReplicateActivityInfo(
request *historyservice.SyncActivityRequest,
resetActivityTimerTaskStatus bool,
) error {
ai, ok := e.pendingActivityInfoIDs[request.GetScheduledId()]
if !ok {
e.logError(
fmt.Sprintf("unable to find activity event ID: %v in mutable state", request.GetScheduledId()),
tag.ErrorTypeInvalidMutableStateAction,
)
return ErrMissingActivityInfo
}
ai.Version = request.GetVersion()
ai.ScheduledTime = request.GetScheduledTime()
ai.StartedId = request.GetStartedId()
ai.LastHeartbeatUpdateTime = request.GetLastHeartbeatTime()
if ai.StartedId == common.EmptyEventID {
ai.StartedTime = timestamp.TimePtr(time.Time{})
} else {
ai.StartedTime = request.GetStartedTime()
}
ai.LastHeartbeatDetails = request.GetDetails()
ai.Attempt = request.GetAttempt()
ai.RetryLastWorkerIdentity = request.GetLastWorkerIdentity()
ai.RetryLastFailure = request.GetLastFailure()
if resetActivityTimerTaskStatus {
ai.TimerTaskStatus = timerTaskStatusNone
}
e.updateActivityInfos[ai] = struct{}{}
return nil
}
// UpdateActivity updates an activity
func (e *mutableStateBuilder) UpdateActivity(
ai *persistenceblobs.ActivityInfo,
) error {
if _, ok := e.pendingActivityInfoIDs[ai.ScheduleId]; !ok {
e.logError(
fmt.Sprintf("unable to find activity ID: %v in mutable state", ai.ActivityId),
tag.ErrorTypeInvalidMutableStateAction,
)
return ErrMissingActivityInfo
}
e.pendingActivityInfoIDs[ai.ScheduleId] = ai
e.updateActivityInfos[ai] = struct{}{}
return nil
}
// UpdateActivityWithTimerHeartbeat updates an activity and its timer heartbeat visibility time.
func (e *mutableStateBuilder) UpdateActivityWithTimerHeartbeat(
ai *persistenceblobs.ActivityInfo,
timerTimeoutVisibility time.Time,
) error {
err := e.UpdateActivity(ai)
if err != nil {
return err
}
e.pendingActivityTimerHeartbeats[ai.ScheduleId] = timerTimeoutVisibility
return nil
}
// DeleteActivity deletes details about an activity.
func (e *mutableStateBuilder) DeleteActivity(
scheduleEventID int64,
) error {
if activityInfo, ok := e.pendingActivityInfoIDs[scheduleEventID]; ok {
delete(e.pendingActivityInfoIDs, scheduleEventID)
delete(e.pendingActivityTimerHeartbeats, scheduleEventID)
if _, ok = e.pendingActivityIDToEventID[activityInfo.ActivityId]; ok {
delete(e.pendingActivityIDToEventID, activityInfo.ActivityId)
} else {
e.logError(
fmt.Sprintf("unable to find activity ID: %v in mutable state", activityInfo.ActivityId),
tag.ErrorTypeInvalidMutableStateAction,
)
// log data inconsistency instead of returning an error
e.logDataInconsistency()
}
} else {
e.logError(
fmt.Sprintf("unable to find activity event id: %v in mutable state", scheduleEventID),
tag.ErrorTypeInvalidMutableStateAction,
)
// log data inconsistency instead of returning an error
e.logDataInconsistency()
}
e.deleteActivityInfos[scheduleEventID] = struct{}{}
return nil
}
// GetUserTimerInfo gives details about a user timer.
func (e *mutableStateBuilder) GetUserTimerInfo(
timerID string,
) (*persistenceblobs.TimerInfo, bool) {
timerInfo, ok := e.pendingTimerInfoIDs[timerID]
return timerInfo, ok
}
// GetUserTimerInfoByEventID gives details about a user timer.
func (e *mutableStateBuilder) GetUserTimerInfoByEventID(
startEventID int64,
) (*persistenceblobs.TimerInfo, bool) {
timerID, ok := e.pendingTimerEventIDToID[startEventID]
if !ok {
return nil, false
}
return e.GetUserTimerInfo(timerID)
}
// UpdateUserTimer updates the user timer in progress.
func (e *mutableStateBuilder) UpdateUserTimer(
ti *persistenceblobs.TimerInfo,
) error {
timerID, ok := e.pendingTimerEventIDToID[ti.GetStartedId()]
if !ok {
e.logError(
fmt.Sprintf("unable to find timer event ID: %v in mutable state", ti.GetStartedId()),
tag.ErrorTypeInvalidMutableStateAction,
)
return ErrMissingTimerInfo
}
if _, ok := e.pendingTimerInfoIDs[timerID]; !ok {
e.logError(
fmt.Sprintf("unable to find timer ID: %v in mutable state", timerID),
tag.ErrorTypeInvalidMutableStateAction,
)
return ErrMissingTimerInfo
}
e.pendingTimerInfoIDs[timerID] = ti
e.updateTimerInfos[ti] = struct{}{}
return nil
}
// DeleteUserTimer deletes a user timer.
func (e *mutableStateBuilder) DeleteUserTimer(
timerID string,
) error {
if timerInfo, ok := e.pendingTimerInfoIDs[timerID]; ok {
delete(e.pendingTimerInfoIDs, timerID)
if _, ok = e.pendingTimerEventIDToID[timerInfo.GetStartedId()]; ok {
delete(e.pendingTimerEventIDToID, timerInfo.GetStartedId())
} else {
e.logError(
fmt.Sprintf("unable to find timer event ID: %v in mutable state", timerInfo.GetStartedId()),
tag.ErrorTypeInvalidMutableStateAction,
)
// log data inconsistency instead of returning an error
e.logDataInconsistency()
}
} else {
e.logError(
fmt.Sprintf("unable to find timer ID: %v in mutable state", timerID),
tag.ErrorTypeInvalidMutableStateAction,
)
// log data inconsistency instead of returning an error
e.logDataInconsistency()
}
e.deleteTimerInfos[timerID] = struct{}{}
return nil
}
// nolint:unused
func (e *mutableStateBuilder) getWorkflowTaskInfo() *workflowTaskInfo {
taskQueue := &taskqueuepb.TaskQueue{}
if e.IsStickyTaskQueueEnabled() {
taskQueue.Name = e.executionInfo.StickyTaskQueue
taskQueue.Kind = enumspb.TASK_QUEUE_KIND_STICKY
} else {
taskQueue.Name = e.executionInfo.TaskQueue
taskQueue.Kind = enumspb.TASK_QUEUE_KIND_NORMAL
}
return &workflowTaskInfo{
Version: e.executionInfo.WorkflowTaskVersion,
ScheduleID: e.executionInfo.WorkflowTaskScheduleId,
StartedID: e.executionInfo.WorkflowTaskStartedId,
RequestID: e.executionInfo.WorkflowTaskRequestId,
WorkflowTaskTimeout: e.executionInfo.WorkflowTaskTimeout,
Attempt: e.executionInfo.WorkflowTaskAttempt,
StartedTimestamp: e.executionInfo.WorkflowTaskStartedTimestamp,
ScheduledTimestamp: e.executionInfo.WorkflowTaskScheduledTimestamp,
TaskQueue: taskQueue,
OriginalScheduledTimestamp: e.executionInfo.WorkflowTaskOriginalScheduledTimestamp,
}
}
// GetWorkflowTaskInfo returns details about the in-progress workflow task
func (e *mutableStateBuilder) GetWorkflowTaskInfo(
scheduleEventID int64,
) (*workflowTaskInfo, bool) {
return e.workflowTaskManager.GetWorkflowTaskInfo(scheduleEventID)
}
func (e *mutableStateBuilder) GetPendingActivityInfos() map[int64]*persistenceblobs.ActivityInfo {
return e.pendingActivityInfoIDs
}
func (e *mutableStateBuilder) GetPendingTimerInfos() map[string]*persistenceblobs.TimerInfo {
return e.pendingTimerInfoIDs
}
func (e *mutableStateBuilder) GetPendingChildExecutionInfos() map[int64]*persistenceblobs.ChildExecutionInfo {
return e.pendingChildExecutionInfoIDs
}
func (e *mutableStateBuilder) GetPendingRequestCancelExternalInfos() map[int64]*persistenceblobs.RequestCancelInfo {
return e.pendingRequestCancelInfoIDs
}
func (e *mutableStateBuilder) GetPendingSignalExternalInfos() map[int64]*persistenceblobs.SignalInfo {
return e.pendingSignalInfoIDs
}
func (e *mutableStateBuilder) HasProcessedOrPendingWorkflowTask() bool {
return e.workflowTaskManager.HasProcessedOrPendingWorkflowTask()
}
func (e *mutableStateBuilder) HasPendingWorkflowTask() bool {
return e.workflowTaskManager.HasPendingWorkflowTask()
}
func (e *mutableStateBuilder) GetPendingWorkflowTask() (*workflowTaskInfo, bool) {
return e.workflowTaskManager.GetPendingWorkflowTask()
}
func (e *mutableStateBuilder) HasInFlightWorkflowTask() bool {
return e.workflowTaskManager.HasInFlightWorkflowTask()
}
func (e *mutableStateBuilder) GetInFlightWorkflowTask() (*workflowTaskInfo, bool) {
return e.workflowTaskManager.GetInFlightWorkflowTask()
}
func (e *mutableStateBuilder) HasBufferedEvents() bool {
if len(e.bufferedEvents) > 0 || len(e.updateBufferedEvents) > 0 {
return true
}
for _, event := range e.hBuilder.history {
if event.GetEventId() == common.BufferedEventID {
return true
}
}
return false
}
// UpdateWorkflowTask updates a workflow task.
func (e *mutableStateBuilder) UpdateWorkflowTask(
workflowTask *workflowTaskInfo,
) {
e.workflowTaskManager.UpdateWorkflowTask(workflowTask)
}
// DeleteWorkflowTask deletes a workflow task.
func (e *mutableStateBuilder) DeleteWorkflowTask() {
e.workflowTaskManager.DeleteWorkflowTask()
}
func (e *mutableStateBuilder) FailWorkflowTask(
incrementAttempt bool,
) {
e.workflowTaskManager.FailWorkflowTask(incrementAttempt)
}
func (e *mutableStateBuilder) ClearStickyness() {
e.executionInfo.StickyTaskQueue = ""
e.executionInfo.StickyScheduleToStartTimeout = timestamp.DurationFromSeconds(0)
e.executionInfo.ClientLibraryVersion = ""
e.executionInfo.ClientFeatureVersion = ""
e.executionInfo.ClientImpl = ""
}
// GetLastFirstEventID returns the last first event ID.
// A first event ID is the ID of the first event in a batch of events stored in a single history record.
func (e *mutableStateBuilder) GetLastFirstEventID() int64 {
return e.executionInfo.LastFirstEventId
}
// GetNextEventID returns next event ID
func (e *mutableStateBuilder) GetNextEventID() int64 {
return e.executionInfo.NextEventId
}
// GetPreviousStartedEventID returns last started workflow task event ID
func (e *mutableStateBuilder) GetPreviousStartedEventID() int64 {
return e.executionInfo.LastProcessedEvent
}
func (e *mutableStateBuilder) IsWorkflowExecutionRunning() bool {
switch e.executionInfo.ExecutionState.State {
case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED:
return true
case enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING:
return true
case enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED:
return false
case enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE:
return false
case enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED:
return false
default:
panic(fmt.Sprintf("unknown workflow state: %v", e.executionInfo.ExecutionState.State))
}
}
func (e *mutableStateBuilder) IsCancelRequested() (bool, string) {
if e.executionInfo.CancelRequested {
return e.executionInfo.CancelRequested, e.executionInfo.GetExecutionState().CreateRequestId
}
return false, ""
}
func (e *mutableStateBuilder) IsSignalRequested(
requestID string,
) bool {
if _, ok := e.pendingSignalRequestedIDs[requestID]; ok {
return true
}
return false
}
func (e *mutableStateBuilder) AddSignalRequested(
requestID string,
) {
if e.pendingSignalRequestedIDs == nil {
e.pendingSignalRequestedIDs = make(map[string]struct{})
}
if e.updateSignalRequestedIDs == nil {
e.updateSignalRequestedIDs = make(map[string]struct{})
}
e.pendingSignalRequestedIDs[requestID] = struct{}{} // add requestID to set
e.updateSignalRequestedIDs[requestID] = struct{}{}
}
func (e *mutableStateBuilder) DeleteSignalRequested(
requestID string,
) {
delete(e.pendingSignalRequestedIDs, requestID)
e.deleteSignalRequestedID = requestID
}
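// addWorkflowExecutionStartedEventForContinueAsNew builds the start request for the continued-as-new run from the previous execution and the ContinueAsNew attributes, adds the WorkflowExecutionStarted event, sets up the history tree, generates the workflow start tasks and schedules the first workflow task.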
func (e *mutableStateBuilder) addWorkflowExecutionStartedEventForContinueAsNew(
parentExecutionInfo *workflowspb.ParentExecutionInfo,
execution commonpb.WorkflowExecution,
previousExecutionState mutableState,
attributes *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes,
firstRunID string,
) (*historypb.HistoryEvent, error) {
previousExecutionInfo := previousExecutionState.GetExecutionInfo()
taskQueue := previousExecutionInfo.TaskQueue
if attributes.TaskQueue != nil {
taskQueue = attributes.TaskQueue.GetName()
}
tq := &taskqueuepb.TaskQueue{
Name: taskQueue,
Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
}
workflowType := previousExecutionInfo.WorkflowTypeName
if attributes.WorkflowType != nil {
workflowType = attributes.WorkflowType.GetName()
}
wType := &commonpb.WorkflowType{}
wType.Name = workflowType
var taskTimeout *time.Duration
if timestamp.DurationValue(attributes.GetWorkflowTaskTimeout()) == 0 {
taskTimeout = previousExecutionInfo.DefaultWorkflowTaskTimeout
} else {
taskTimeout = attributes.GetWorkflowTaskTimeout()
}
// Workflow runTimeout is already set to the correct value in
// validateContinueAsNewWorkflowExecutionAttributes
runTimeout := attributes.GetWorkflowRunTimeout()
createRequest := &workflowservice.StartWorkflowExecutionRequest{
RequestId: uuid.New(),
Namespace: e.namespaceEntry.GetInfo().Name,
WorkflowId: execution.WorkflowId,
TaskQueue: tq,
WorkflowType: wType,
WorkflowExecutionTimeout: previousExecutionState.GetExecutionInfo().WorkflowExecutionTimeout,
WorkflowRunTimeout: runTimeout,
WorkflowTaskTimeout: taskTimeout,
Input: attributes.Input,
Header: attributes.Header,
RetryPolicy: attributes.RetryPolicy,
CronSchedule: attributes.CronSchedule,
Memo: attributes.Memo,
SearchAttributes: attributes.SearchAttributes,
}
req := &historyservice.StartWorkflowExecutionRequest{
NamespaceId: e.namespaceEntry.GetInfo().Id,
StartRequest: createRequest,
ParentExecutionInfo: parentExecutionInfo,
LastCompletionResult: attributes.LastCompletionResult,
ContinuedFailure: attributes.GetFailure(),
ContinueAsNewInitiator: attributes.Initiator,
FirstWorkflowTaskBackoff: attributes.BackoffStartInterval,
}
if attributes.GetInitiator() == enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY {
req.Attempt = previousExecutionState.GetExecutionInfo().Attempt + 1
} else {
req.Attempt = 1
}
workflowTimeoutTime := timestamp.TimeValue(previousExecutionState.GetExecutionInfo().WorkflowExpirationTime)
if !workflowTimeoutTime.IsZero() {
req.WorkflowExecutionExpirationTime = &workflowTimeoutTime
}
// History event only has namespace so namespaceID has to be passed in explicitly to update the mutable state
var parentNamespaceID string
if parentExecutionInfo != nil {
parentNamespaceID = parentExecutionInfo.GetNamespaceId()
}
event := e.hBuilder.AddWorkflowExecutionStartedEvent(req, previousExecutionInfo, firstRunID, execution.GetRunId())
if err := e.ReplicateWorkflowExecutionStartedEvent(
parentNamespaceID,
execution,
createRequest.GetRequestId(),
event,
); err != nil {
return nil, err
}
if err := e.SetHistoryTree(e.GetExecutionInfo().GetRunId()); err != nil {
return nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowStartTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, err
}
if err := e.taskGenerator.generateRecordWorkflowStartedTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, err
}
if err := e.AddFirstWorkflowTaskScheduled(
event,
); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) AddWorkflowExecutionStartedEvent(
execution commonpb.WorkflowExecution,
startRequest *historyservice.StartWorkflowExecutionRequest,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowStarted
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
request := startRequest.StartRequest
eventID := e.GetNextEventID()
if eventID != common.FirstEventID {
e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(eventID),
tag.ErrorTypeInvalidHistoryAction)
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddWorkflowExecutionStartedEvent(startRequest, nil, execution.GetRunId(), execution.GetRunId())
var parentNamespaceID string
if startRequest.ParentExecutionInfo != nil {
parentNamespaceID = startRequest.ParentExecutionInfo.GetNamespaceId()
}
if err := e.ReplicateWorkflowExecutionStartedEvent(
parentNamespaceID,
execution,
request.GetRequestId(),
event); err != nil {
return nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowStartTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, err
}
if err := e.taskGenerator.generateRecordWorkflowStartedTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, err
}
return event, nil
}
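// ReplicateWorkflowExecutionStartedEvent applies a WorkflowExecutionStarted event to the mutable state, initializing execution info, retry policy, auto-reset points, memo and search attributes, and caches the start event.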
func (e *mutableStateBuilder) ReplicateWorkflowExecutionStartedEvent(
parentNamespaceID string,
execution commonpb.WorkflowExecution,
requestID string,
startEvent *historypb.HistoryEvent,
) error {
event := startEvent.GetWorkflowExecutionStartedEventAttributes()
e.executionInfo.GetExecutionState().CreateRequestId = requestID
e.executionInfo.NamespaceId = e.namespaceEntry.GetInfo().Id
e.executionInfo.WorkflowId = execution.GetWorkflowId()
e.executionInfo.ExecutionState.RunId = execution.GetRunId()
e.executionInfo.FirstExecutionRunId = event.GetFirstExecutionRunId()
e.executionInfo.TaskQueue = event.TaskQueue.GetName()
e.executionInfo.WorkflowTypeName = event.WorkflowType.GetName()
e.executionInfo.WorkflowRunTimeout = event.GetWorkflowRunTimeout()
e.executionInfo.WorkflowExecutionTimeout = event.GetWorkflowExecutionTimeout()
e.executionInfo.DefaultWorkflowTaskTimeout = event.GetWorkflowTaskTimeout()
if err := e.UpdateWorkflowStateStatus(
enumsspb.WORKFLOW_EXECUTION_STATE_CREATED,
enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING,
); err != nil {
return err
}
e.executionInfo.LastProcessedEvent = common.EmptyEventID
e.executionInfo.LastFirstEventId = startEvent.GetEventId()
e.executionInfo.WorkflowTaskVersion = common.EmptyVersion
e.executionInfo.WorkflowTaskScheduleId = common.EmptyEventID
e.executionInfo.WorkflowTaskStartedId = common.EmptyEventID
e.executionInfo.WorkflowTaskRequestId = emptyUUID
e.executionInfo.WorkflowTaskTimeout = timestamp.DurationFromSeconds(0)
e.executionInfo.CronSchedule = event.GetCronSchedule()
e.executionInfo.ParentNamespaceId = parentNamespaceID
if event.ParentWorkflowExecution != nil {
e.executionInfo.ParentWorkflowId = event.ParentWorkflowExecution.GetWorkflowId()
e.executionInfo.ParentRunId = event.ParentWorkflowExecution.GetRunId()
}
if event.ParentInitiatedEventId != 0 {
e.executionInfo.InitiatedId = event.GetParentInitiatedEventId()
} else {
e.executionInfo.InitiatedId = common.EmptyEventID
}
e.executionInfo.Attempt = event.GetAttempt()
if !timestamp.TimeValue(event.GetWorkflowExecutionExpirationTime()).IsZero() {
e.executionInfo.WorkflowExpirationTime = event.GetWorkflowExecutionExpirationTime()
}
if event.RetryPolicy != nil {
e.executionInfo.HasRetryPolicy = true
e.executionInfo.RetryBackoffCoefficient = event.RetryPolicy.GetBackoffCoefficient()
e.executionInfo.RetryInitialInterval = event.RetryPolicy.GetInitialInterval()
e.executionInfo.RetryMaximumAttempts = event.RetryPolicy.GetMaximumAttempts()
e.executionInfo.RetryMaximumInterval = event.RetryPolicy.GetMaximumInterval()
e.executionInfo.RetryNonRetryableErrorTypes = event.RetryPolicy.GetNonRetryableErrorTypes()
}
e.executionInfo.AutoResetPoints = rolloverAutoResetPointsWithExpiringTime(
event.GetPrevAutoResetPoints(),
event.GetContinuedExecutionRunId(),
timestamp.TimeValue(startEvent.GetEventTime()),
e.namespaceEntry.GetRetentionDays(e.executionInfo.WorkflowId),
)
if event.Memo != nil {
e.executionInfo.Memo = event.Memo.GetFields()
}
if event.SearchAttributes != nil {
e.executionInfo.SearchAttributes = event.SearchAttributes.GetIndexedFields()
}
e.writeEventToCache(startEvent)
return nil
}
func (e *mutableStateBuilder) AddFirstWorkflowTaskScheduled(
startEvent *historypb.HistoryEvent,
) error {
opTag := tag.WorkflowActionWorkflowTaskScheduled
if err := e.checkMutability(opTag); err != nil {
return err
}
return e.workflowTaskManager.AddFirstWorkflowTaskScheduled(startEvent)
}
func (e *mutableStateBuilder) AddWorkflowTaskScheduledEvent(
bypassTaskGeneration bool,
) (*workflowTaskInfo, error) {
opTag := tag.WorkflowActionWorkflowTaskScheduled
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
return e.workflowTaskManager.AddWorkflowTaskScheduledEvent(bypassTaskGeneration)
}
// originalScheduledTimestamp records the scheduled time of the first WorkflowTaskScheduledEvent during workflow task heartbeats.
func (e *mutableStateBuilder) AddWorkflowTaskScheduledEventAsHeartbeat(
bypassTaskGeneration bool,
originalScheduledTimestamp *time.Time,
) (*workflowTaskInfo, error) {
opTag := tag.WorkflowActionWorkflowTaskScheduled
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
return e.workflowTaskManager.AddWorkflowTaskScheduledEventAsHeartbeat(bypassTaskGeneration, originalScheduledTimestamp)
}
func (e *mutableStateBuilder) ReplicateTransientWorkflowTaskScheduled() (*workflowTaskInfo, error) {
return e.workflowTaskManager.ReplicateTransientWorkflowTaskScheduled()
}
func (e *mutableStateBuilder) ReplicateWorkflowTaskScheduledEvent(
version int64,
scheduleID int64,
taskQueue *taskqueuepb.TaskQueue,
startToCloseTimeoutSeconds int32,
attempt int32,
scheduleTimestamp *time.Time,
originalScheduledTimestamp *time.Time,
) (*workflowTaskInfo, error) {
return e.workflowTaskManager.ReplicateWorkflowTaskScheduledEvent(version, scheduleID, taskQueue, startToCloseTimeoutSeconds, attempt, scheduleTimestamp, originalScheduledTimestamp)
}
func (e *mutableStateBuilder) AddWorkflowTaskStartedEvent(
scheduleEventID int64,
requestID string,
request *workflowservice.PollWorkflowTaskQueueRequest,
) (*historypb.HistoryEvent, *workflowTaskInfo, error) {
opTag := tag.WorkflowActionWorkflowTaskStarted
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
return e.workflowTaskManager.AddWorkflowTaskStartedEvent(scheduleEventID, requestID, request)
}
func (e *mutableStateBuilder) ReplicateWorkflowTaskStartedEvent(
workflowTask *workflowTaskInfo,
version int64,
scheduleID int64,
startedID int64,
requestID string,
timestamp time.Time,
) (*workflowTaskInfo, error) {
return e.workflowTaskManager.ReplicateWorkflowTaskStartedEvent(workflowTask, version, scheduleID, startedID, requestID, timestamp)
}
func (e *mutableStateBuilder) CreateTransientWorkflowTaskEvents(
workflowTask *workflowTaskInfo,
identity string,
) (*historypb.HistoryEvent, *historypb.HistoryEvent) {
return e.workflowTaskManager.CreateTransientWorkflowTaskEvents(workflowTask, identity)
}
// addBinaryCheckSumIfNotExists records the binary checksum of the completed workflow task as an auto-reset point, keyed to the first WorkflowTaskCompleted event ID, if it is not already present.
func (e *mutableStateBuilder) addBinaryCheckSumIfNotExists(
event *historypb.HistoryEvent,
maxResetPoints int,
) error {
binChecksum := event.GetWorkflowTaskCompletedEventAttributes().GetBinaryChecksum()
if len(binChecksum) == 0 {
return nil
}
exeInfo := e.executionInfo
var currResetPoints []*workflowpb.ResetPointInfo
if exeInfo.AutoResetPoints != nil && exeInfo.AutoResetPoints.Points != nil {
currResetPoints = e.executionInfo.AutoResetPoints.Points
} else {
currResetPoints = make([]*workflowpb.ResetPointInfo, 0, 1)
}
// List of all recent binary checksums associated with the workflow.
var recentBinaryChecksums []string
for _, rp := range currResetPoints {
recentBinaryChecksums = append(recentBinaryChecksums, rp.GetBinaryChecksum())
if rp.GetBinaryChecksum() == binChecksum {
// this checksum already exists
return nil
}
}
if len(currResetPoints) == maxResetPoints {
// If exceeding the max limit, do rotation by taking the oldest one out.
currResetPoints = currResetPoints[1:]
recentBinaryChecksums = recentBinaryChecksums[1:]
}
// Adding current version of the binary checksum.
recentBinaryChecksums = append(recentBinaryChecksums, binChecksum)
resettable := true
err := e.CheckResettable()
if err != nil {
resettable = false
}
info := &workflowpb.ResetPointInfo{
BinaryChecksum: binChecksum,
RunId: exeInfo.GetRunId(),
FirstWorkflowTaskCompletedId: event.GetEventId(),
CreateTime: timestamp.TimePtr(e.timeSource.Now()),
Resettable: resettable,
}
currResetPoints = append(currResetPoints, info)
exeInfo.AutoResetPoints = &workflowpb.ResetPoints{
Points: currResetPoints,
}
bytes, err := payload.Encode(recentBinaryChecksums)
if err != nil {
return err
}
if exeInfo.SearchAttributes == nil {
exeInfo.SearchAttributes = make(map[string]*commonpb.Payload)
}
exeInfo.SearchAttributes[definition.BinaryChecksums] = bytes
if e.shard.GetConfig().AdvancedVisibilityWritingMode() != common.AdvancedVisibilityWritingModeOff {
return e.taskGenerator.generateWorkflowSearchAttrTasks(timestamp.TimeValue(event.GetEventTime()))
}
return nil
}
// TODO: we will relax this restriction once the reset API allows resetting with those pending operations
func (e *mutableStateBuilder) CheckResettable() error {
if len(e.GetPendingChildExecutionInfos()) > 0 {
return serviceerror.NewInvalidArgument("cannot reset the workflow to a point where it has pending child workflows")
}
if len(e.GetPendingRequestCancelExternalInfos()) > 0 {
return serviceerror.NewInvalidArgument("cannot reset the workflow to a point where it has pending requests to cancel external workflows")
}
if len(e.GetPendingSignalExternalInfos()) > 0 {
return serviceerror.NewInvalidArgument("cannot reset the workflow to a point where it has pending signals to send")
}
return nil
}
func (e *mutableStateBuilder) AddWorkflowTaskCompletedEvent(
scheduleEventID int64,
startedEventID int64,
request *workflowservice.RespondWorkflowTaskCompletedRequest,
maxResetPoints int,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowTaskCompleted
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
return e.workflowTaskManager.AddWorkflowTaskCompletedEvent(scheduleEventID, startedEventID, request, maxResetPoints)
}
func (e *mutableStateBuilder) ReplicateWorkflowTaskCompletedEvent(
event *historypb.HistoryEvent,
) error {
return e.workflowTaskManager.ReplicateWorkflowTaskCompletedEvent(event)
}
func (e *mutableStateBuilder) AddWorkflowTaskTimedOutEvent(
scheduleEventID int64,
startedEventID int64,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowTaskTimedOut
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
return e.workflowTaskManager.AddWorkflowTaskTimedOutEvent(scheduleEventID, startedEventID)
}
func (e *mutableStateBuilder) ReplicateWorkflowTaskTimedOutEvent(
timeoutType enumspb.TimeoutType,
) error {
return e.workflowTaskManager.ReplicateWorkflowTaskTimedOutEvent(timeoutType)
}
func (e *mutableStateBuilder) AddWorkflowTaskScheduleToStartTimeoutEvent(
scheduleEventID int64,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowTaskTimedOut
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
return e.workflowTaskManager.AddWorkflowTaskScheduleToStartTimeoutEvent(scheduleEventID)
}
func (e *mutableStateBuilder) AddWorkflowTaskFailedEvent(
scheduleEventID int64,
startedEventID int64,
cause enumspb.WorkflowTaskFailedCause,
failure *failurepb.Failure,
identity string,
binChecksum string,
baseRunID string,
newRunID string,
forkEventVersion int64,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowTaskFailed
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
return e.workflowTaskManager.AddWorkflowTaskFailedEvent(
scheduleEventID,
startedEventID,
cause,
failure,
identity,
binChecksum,
baseRunID,
newRunID,
forkEventVersion,
)
}
func (e *mutableStateBuilder) ReplicateWorkflowTaskFailedEvent() error {
return e.workflowTaskManager.ReplicateWorkflowTaskFailedEvent()
}
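// AddActivityTaskScheduledEvent adds an ActivityTaskScheduled event for the given command, records the corresponding activity info in mutable state and generates the activity transfer task.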
func (e *mutableStateBuilder) AddActivityTaskScheduledEvent(
workflowTaskCompletedEventID int64,
attributes *commandpb.ScheduleActivityTaskCommandAttributes,
) (*historypb.HistoryEvent, *persistenceblobs.ActivityInfo, error) {
opTag := tag.WorkflowActionActivityTaskScheduled
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
_, ok := e.GetActivityByActivityID(attributes.GetActivityId())
if ok {
e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction)
return nil, nil, e.createCallerError(opTag)
}
event := e.hBuilder.AddActivityTaskScheduledEvent(workflowTaskCompletedEventID, attributes)
// Write the event to the events cache only on the active cluster so it can be used when the activity is started or retried
e.eventsCache.putEvent(
e.executionInfo.NamespaceId,
e.executionInfo.WorkflowId,
e.executionInfo.ExecutionState.RunId,
event.GetEventId(),
event,
)
ai, err := e.ReplicateActivityTaskScheduledEvent(workflowTaskCompletedEventID, event)
if err != nil {
return nil, nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateActivityTransferTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, nil, err
}
return event, ai, err
}
func (e *mutableStateBuilder) ReplicateActivityTaskScheduledEvent(
firstEventID int64,
event *historypb.HistoryEvent,
) (*persistenceblobs.ActivityInfo, error) {
attributes := event.GetActivityTaskScheduledEventAttributes()
targetNamespaceID := e.executionInfo.NamespaceId
if attributes.GetNamespace() != "" {
targetNamespaceEntry, err := e.shard.GetNamespaceCache().GetNamespace(attributes.GetNamespace())
if err != nil {
return nil, err
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
scheduleEventID := event.GetEventId()
scheduleToCloseTimeout := attributes.GetScheduleToCloseTimeout()
ai := &persistenceblobs.ActivityInfo{
Version: event.GetVersion(),
ScheduleId: scheduleEventID,
ScheduledEventBatchId: firstEventID,
ScheduledTime: event.GetEventTime(),
StartedId: common.EmptyEventID,
StartedTime: timestamp.TimePtr(time.Time{}),
ActivityId: attributes.ActivityId,
NamespaceId: targetNamespaceID,
ScheduleToStartTimeout: attributes.GetScheduleToStartTimeout(),
ScheduleToCloseTimeout: scheduleToCloseTimeout,
StartToCloseTimeout: attributes.GetStartToCloseTimeout(),
HeartbeatTimeout: attributes.GetHeartbeatTimeout(),
CancelRequested: false,
CancelRequestId: common.EmptyEventID,
LastHeartbeatUpdateTime: timestamp.TimePtr(time.Time{}),
TimerTaskStatus: timerTaskStatusNone,
TaskQueue: attributes.TaskQueue.GetName(),
HasRetryPolicy: attributes.RetryPolicy != nil,
Attempt: 1,
}
retryTime := timestamp.TimeValue(ai.ScheduledTime).Add(timestamp.DurationValue(scheduleToCloseTimeout))
ai.RetryExpirationTime = &retryTime
if ai.HasRetryPolicy {
ai.RetryInitialInterval = attributes.RetryPolicy.GetInitialInterval()
ai.RetryBackoffCoefficient = attributes.RetryPolicy.GetBackoffCoefficient()
ai.RetryMaximumInterval = attributes.RetryPolicy.GetMaximumInterval()
ai.RetryMaximumAttempts = attributes.RetryPolicy.GetMaximumAttempts()
ai.RetryNonRetryableErrorTypes = attributes.RetryPolicy.NonRetryableErrorTypes
}
e.pendingActivityInfoIDs[scheduleEventID] = ai
e.pendingActivityIDToEventID[ai.ActivityId] = scheduleEventID
e.updateActivityInfos[ai] = struct{}{}
return ai, nil
}
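// addTransientActivityStartedEvent appends the real ActivityTaskStarted history event for an
// activity whose start was recorded only in mutable state (StartedId == common.TransientEventID).
// It is a no-op for activities that were not started transiently.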
func (e *mutableStateBuilder) addTransientActivityStartedEvent(
scheduleEventID int64,
) error {
ai, ok := e.GetActivityInfo(scheduleEventID)
if !ok || ai.StartedId != common.TransientEventID {
return nil
}
// The activity task was started as a transient event; add the real started event now.
event := e.hBuilder.AddActivityTaskStartedEvent(scheduleEventID, int32(ai.Attempt), ai.RequestId, ai.StartedIdentity,
ai.RetryLastFailure)
if !ai.StartedTime.IsZero() {
// overwrite started event time to the one recorded in ActivityInfo
event.EventTime = ai.StartedTime
}
return e.ReplicateActivityTaskStartedEvent(event)
}
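// AddActivityTaskStartedEvent records an activity start. Without a retry policy the started event
// is appended to history immediately; with a retry policy the start is kept only in mutable state
// (as a transient start) and the history event is written when the activity task is closed.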
func (e *mutableStateBuilder) AddActivityTaskStartedEvent(
ai *persistenceblobs.ActivityInfo,
scheduleEventID int64,
requestID string,
identity string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionActivityTaskStarted
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
if !ai.HasRetryPolicy {
event := e.hBuilder.AddActivityTaskStartedEvent(scheduleEventID, int32(ai.Attempt), requestID, identity, ai.RetryLastFailure)
if err := e.ReplicateActivityTaskStartedEvent(event); err != nil {
return nil, err
}
return event, nil
}
// We might need to retry, so do not append the started event just yet.
// Instead, update mutable state now and record the started event when the activity task is closed.
ai.Version = e.GetCurrentVersion()
ai.StartedId = common.TransientEventID
ai.RequestId = requestID
ai.StartedTime = timestamp.TimePtr(e.timeSource.Now())
ai.LastHeartbeatUpdateTime = ai.StartedTime
ai.StartedIdentity = identity
if err := e.UpdateActivity(ai); err != nil {
return nil, err
}
e.syncActivityTasks[ai.ScheduleId] = struct{}{}
return nil, nil
}
func (e *mutableStateBuilder) ReplicateActivityTaskStartedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetActivityTaskStartedEventAttributes()
scheduleID := attributes.GetScheduledEventId()
ai, ok := e.GetActivityInfo(scheduleID)
if !ok {
e.logError(
fmt.Sprintf("unable to find activity event id: %v in mutable state", scheduleID),
tag.ErrorTypeInvalidMutableStateAction,
)
return ErrMissingActivityInfo
}
ai.Version = event.GetVersion()
ai.StartedId = event.GetEventId()
ai.RequestId = attributes.GetRequestId()
ai.StartedTime = event.GetEventTime()
ai.LastHeartbeatUpdateTime = ai.StartedTime
e.updateActivityInfos[ai] = struct{}{}
return nil
}
func (e *mutableStateBuilder) AddActivityTaskCompletedEvent(
scheduleEventID int64,
startedEventID int64,
request *workflowservice.RespondActivityTaskCompletedRequest,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionActivityTaskCompleted
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
if ai, ok := e.GetActivityInfo(scheduleEventID); !ok || ai.StartedId != startedEventID {
e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowScheduleID(scheduleEventID),
tag.WorkflowStartedID(startedEventID))
return nil, e.createInternalServerError(opTag)
}
if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil {
return nil, err
}
event := e.hBuilder.AddActivityTaskCompletedEvent(scheduleEventID, startedEventID, request)
if err := e.ReplicateActivityTaskCompletedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateActivityTaskCompletedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetActivityTaskCompletedEventAttributes()
scheduleID := attributes.GetScheduledEventId()
return e.DeleteActivity(scheduleID)
}
func (e *mutableStateBuilder) AddActivityTaskFailedEvent(
scheduleEventID int64,
startedEventID int64,
failure *failurepb.Failure,
retryState enumspb.RetryState,
identity string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionActivityTaskFailed
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
if ai, ok := e.GetActivityInfo(scheduleEventID); !ok || ai.StartedId != startedEventID {
e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowScheduleID(scheduleEventID),
tag.WorkflowStartedID(startedEventID))
return nil, e.createInternalServerError(opTag)
}
if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil {
return nil, err
}
event := e.hBuilder.AddActivityTaskFailedEvent(scheduleEventID, startedEventID, failure, retryState, identity)
if err := e.ReplicateActivityTaskFailedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateActivityTaskFailedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetActivityTaskFailedEventAttributes()
scheduleID := attributes.GetScheduledEventId()
return e.DeleteActivity(scheduleID)
}
func (e *mutableStateBuilder) AddActivityTaskTimedOutEvent(
scheduleEventID int64,
startedEventID int64,
timeoutFailure *failurepb.Failure,
retryState enumspb.RetryState,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionActivityTaskTimedOut
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
timeoutType := timeoutFailure.GetTimeoutFailureInfo().GetTimeoutType()
ai, ok := e.GetActivityInfo(scheduleEventID)
if !ok || ai.StartedId != startedEventID || ((timeoutType == enumspb.TIMEOUT_TYPE_START_TO_CLOSE ||
timeoutType == enumspb.TIMEOUT_TYPE_HEARTBEAT) && ai.StartedId == common.EmptyEventID) {
// Log the caller-supplied IDs: ai is nil when the activity info was not found.
e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowScheduleID(scheduleEventID),
tag.WorkflowStartedID(startedEventID),
tag.WorkflowTimeoutType(timeoutType))
return nil, e.createInternalServerError(opTag)
}
timeoutFailure.Cause = ai.RetryLastFailure
if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil {
return nil, err
}
event := e.hBuilder.AddActivityTaskTimedOutEvent(scheduleEventID, startedEventID, timeoutFailure, retryState)
if err := e.ReplicateActivityTaskTimedOutEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateActivityTaskTimedOutEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetActivityTaskTimedOutEventAttributes()
scheduleID := attributes.GetScheduledEventId()
return e.DeleteActivity(scheduleID)
}
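// AddActivityTaskCancelRequestedEvent records a cancellation request for a pending activity.
// The request is still honored when the activity's started and completed events are buffered,
// while duplicate cancellation requests are rejected.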
func (e *mutableStateBuilder) AddActivityTaskCancelRequestedEvent(
workflowTaskCompletedEventID int64,
scheduleID int64,
identity string,
) (*historypb.HistoryEvent, *persistenceblobs.ActivityInfo, error) {
opTag := tag.WorkflowActionActivityTaskCancelRequested
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
ai, ok := e.GetActivityInfo(scheduleID)
if !ok {
// It is possible both started and completed events are buffered for this activity
completedEvent := e.scanForBufferedActivityCompletion(scheduleID)
if completedEvent == nil {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowScheduleID(scheduleID))
return nil, nil, e.createCallerError(opTag)
}
}
// Check for duplicate cancellation
if ok && ai.CancelRequested {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowScheduleID(scheduleID))
return nil, nil, e.createCallerError(opTag)
}
// At this point we know this is a valid activity cancellation request
actCancelReqEvent := e.hBuilder.AddActivityTaskCancelRequestedEvent(workflowTaskCompletedEventID, scheduleID)
if err := e.ReplicateActivityTaskCancelRequestedEvent(actCancelReqEvent); err != nil {
return nil, nil, err
}
return actCancelReqEvent, ai, nil
}
func (e *mutableStateBuilder) ReplicateActivityTaskCancelRequestedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetActivityTaskCancelRequestedEventAttributes()
scheduleID := attributes.GetScheduledEventId()
ai, ok := e.GetActivityInfo(scheduleID)
if !ok {
// On the active cluster this is only reached when the activity info is found in mutable state.
// Passive side logic should always have the activity info in mutable state when this is called; the only
// scenario where active side logic can see this event without activity info in mutable state is when the
// activity start and complete events are buffered.
return nil
}
ai.Version = event.GetVersion()
// - We have the activity dispatched to worker.
// - The activity might not be heartbeat'ing, but the activity can still call RecordActivityHeartBeat()
// to see cancellation while reporting progress of the activity.
ai.CancelRequested = true
ai.CancelRequestId = event.GetEventId()
e.updateActivityInfos[ai] = struct{}{}
return nil
}
func (e *mutableStateBuilder) AddActivityTaskCanceledEvent(
scheduleEventID int64,
startedEventID int64,
latestCancelRequestedEventID int64,
details *commonpb.Payloads,
identity string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionActivityTaskCanceled
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ai, ok := e.GetActivityInfo(scheduleEventID)
if !ok || ai.StartedId != startedEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowScheduleID(scheduleEventID))
return nil, e.createInternalServerError(opTag)
}
// Verify cancel request as well.
if !ai.CancelRequested {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowScheduleID(scheduleEventID),
tag.WorkflowActivityID(ai.ActivityId),
tag.WorkflowStartedID(ai.StartedId))
return nil, e.createInternalServerError(opTag)
}
if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil {
return nil, err
}
event := e.hBuilder.AddActivityTaskCanceledEvent(scheduleEventID, startedEventID, latestCancelRequestedEventID,
details, identity)
if err := e.ReplicateActivityTaskCanceledEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateActivityTaskCanceledEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetActivityTaskCanceledEventAttributes()
scheduleID := attributes.GetScheduledEventId()
return e.DeleteActivity(scheduleID)
}
func (e *mutableStateBuilder) AddCompletedWorkflowEvent(
workflowTaskCompletedEventID int64,
attributes *commandpb.CompleteWorkflowExecutionCommandAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowCompleted
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
event := e.hBuilder.AddCompletedWorkflowEvent(workflowTaskCompletedEventID, attributes)
if err := e.ReplicateWorkflowExecutionCompletedEvent(workflowTaskCompletedEventID, event); err != nil {
return nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowCloseTasks(
timestamp.TimeValue(event.GetEventTime()),
); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionCompletedEvent(
firstEventID int64,
event *historypb.HistoryEvent,
) error {
if err := e.UpdateWorkflowStateStatus(
enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED,
); err != nil {
return err
}
e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database
e.ClearStickyness()
e.writeEventToCache(event)
return nil
}
func (e *mutableStateBuilder) AddFailWorkflowEvent(
workflowTaskCompletedEventID int64,
retryState enumspb.RetryState,
attributes *commandpb.FailWorkflowExecutionCommandAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowFailed
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
event := e.hBuilder.AddFailWorkflowEvent(workflowTaskCompletedEventID, retryState, attributes)
if err := e.ReplicateWorkflowExecutionFailedEvent(workflowTaskCompletedEventID, event); err != nil {
return nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowCloseTasks(
timestamp.TimeValue(event.GetEventTime()),
); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionFailedEvent(
firstEventID int64,
event *historypb.HistoryEvent,
) error {
if err := e.UpdateWorkflowStateStatus(
enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
enumspb.WORKFLOW_EXECUTION_STATUS_FAILED,
); err != nil {
return err
}
e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database
e.ClearStickyness()
e.writeEventToCache(event)
return nil
}
func (e *mutableStateBuilder) AddTimeoutWorkflowEvent(
firstEventID int64,
retryState enumspb.RetryState,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowTimeout
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
event := e.hBuilder.AddTimeoutWorkflowEvent(retryState)
if err := e.ReplicateWorkflowExecutionTimedoutEvent(firstEventID, event); err != nil {
return nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowCloseTasks(
timestamp.TimeValue(event.GetEventTime()),
); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionTimedoutEvent(
firstEventID int64,
event *historypb.HistoryEvent,
) error {
if err := e.UpdateWorkflowStateStatus(
enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT,
); err != nil {
return err
}
e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database
e.ClearStickyness()
e.writeEventToCache(event)
return nil
}
func (e *mutableStateBuilder) AddWorkflowExecutionCancelRequestedEvent(
cause string,
request *historyservice.RequestCancelWorkflowExecutionRequest,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowCancelRequested
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
if e.executionInfo.CancelRequested {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowState(e.executionInfo.ExecutionState.State),
tag.Bool(e.executionInfo.CancelRequested),
tag.Key(e.executionInfo.CancelRequestId),
)
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddWorkflowExecutionCancelRequestedEvent(cause, request)
if err := e.ReplicateWorkflowExecutionCancelRequestedEvent(event); err != nil {
return nil, err
}
// Set the CancelRequestID on the active cluster. This information is not part of the history event.
e.executionInfo.CancelRequestId = request.CancelRequest.GetRequestId()
return event, nil
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionCancelRequestedEvent(
event *historypb.HistoryEvent,
) error {
e.executionInfo.CancelRequested = true
return nil
}
func (e *mutableStateBuilder) AddWorkflowExecutionCanceledEvent(
workflowTaskCompletedEventID int64,
attributes *commandpb.CancelWorkflowExecutionCommandAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowCanceled
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
event := e.hBuilder.AddWorkflowExecutionCanceledEvent(workflowTaskCompletedEventID, attributes)
if err := e.ReplicateWorkflowExecutionCanceledEvent(workflowTaskCompletedEventID, event); err != nil {
return nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowCloseTasks(
timestamp.TimeValue(event.GetEventTime()),
); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionCanceledEvent(
firstEventID int64,
event *historypb.HistoryEvent,
) error {
if err := e.UpdateWorkflowStateStatus(
enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED,
); err != nil {
return err
}
e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database
e.ClearStickyness()
e.writeEventToCache(event)
return nil
}
func (e *mutableStateBuilder) AddRequestCancelExternalWorkflowExecutionInitiatedEvent(
workflowTaskCompletedEventID int64,
cancelRequestID string,
request *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes,
) (*historypb.HistoryEvent, *persistenceblobs.RequestCancelInfo, error) {
opTag := tag.WorkflowActionExternalWorkflowCancelInitiated
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
event := e.hBuilder.AddRequestCancelExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, request)
rci, err := e.ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, event, cancelRequestID)
if err != nil {
return nil, nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateRequestCancelExternalTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, nil, err
}
return event, rci, nil
}
func (e *mutableStateBuilder) ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(
firstEventID int64,
event *historypb.HistoryEvent,
cancelRequestID string,
) (*persistenceblobs.RequestCancelInfo, error) {
// TODO: Evaluate whether cancelRequestID should also be part of the history event
initiatedEventID := event.GetEventId()
rci := &persistenceblobs.RequestCancelInfo{
Version: event.GetVersion(),
InitiatedEventBatchId: firstEventID,
InitiatedId: initiatedEventID,
CancelRequestId: cancelRequestID,
}
e.pendingRequestCancelInfoIDs[initiatedEventID] = rci
e.updateRequestCancelInfos[rci] = struct{}{}
return rci, nil
}
func (e *mutableStateBuilder) AddExternalWorkflowExecutionCancelRequested(
initiatedID int64,
namespace string,
workflowID string,
runID string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionExternalWorkflowCancelRequested
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
_, ok := e.GetRequestCancelInfo(initiatedID)
if !ok {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddExternalWorkflowExecutionCancelRequested(initiatedID, namespace, workflowID, runID)
if err := e.ReplicateExternalWorkflowExecutionCancelRequested(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateExternalWorkflowExecutionCancelRequested(
event *historypb.HistoryEvent,
) error {
initiatedID := event.GetExternalWorkflowExecutionCancelRequestedEventAttributes().GetInitiatedEventId()
return e.DeletePendingRequestCancel(initiatedID)
}
func (e *mutableStateBuilder) AddRequestCancelExternalWorkflowExecutionFailedEvent(
workflowTaskCompletedEventID int64,
initiatedID int64,
namespace string,
workflowID string,
runID string,
cause enumspb.CancelExternalWorkflowExecutionFailedCause,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionExternalWorkflowCancelFailed
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
_, ok := e.GetRequestCancelInfo(initiatedID)
if !ok {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddRequestCancelExternalWorkflowExecutionFailedEvent(workflowTaskCompletedEventID, initiatedID,
namespace, workflowID, runID, cause)
if err := e.ReplicateRequestCancelExternalWorkflowExecutionFailedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateRequestCancelExternalWorkflowExecutionFailedEvent(
event *historypb.HistoryEvent,
) error {
initiatedID := event.GetRequestCancelExternalWorkflowExecutionFailedEventAttributes().GetInitiatedEventId()
return e.DeletePendingRequestCancel(initiatedID)
}
func (e *mutableStateBuilder) AddSignalExternalWorkflowExecutionInitiatedEvent(
workflowTaskCompletedEventID int64,
signalRequestID string,
request *commandpb.SignalExternalWorkflowExecutionCommandAttributes,
) (*historypb.HistoryEvent, *persistenceblobs.SignalInfo, error) {
opTag := tag.WorkflowActionExternalWorkflowSignalInitiated
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
event := e.hBuilder.AddSignalExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, request)
si, err := e.ReplicateSignalExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, event, signalRequestID)
if err != nil {
return nil, nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateSignalExternalTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, nil, err
}
return event, si, nil
}
func (e *mutableStateBuilder) ReplicateSignalExternalWorkflowExecutionInitiatedEvent(
firstEventID int64,
event *historypb.HistoryEvent,
signalRequestID string,
) (*persistenceblobs.SignalInfo, error) {
// TODO: Consider also writing signalRequestID to history event
initiatedEventID := event.GetEventId()
attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes()
si := &persistenceblobs.SignalInfo{
Version: event.GetVersion(),
InitiatedEventBatchId: firstEventID,
InitiatedId: initiatedEventID,
RequestId: signalRequestID,
Name: attributes.GetSignalName(),
Input: attributes.Input,
Control: attributes.Control,
}
e.pendingSignalInfoIDs[initiatedEventID] = si
e.updateSignalInfos[si] = struct{}{}
return si, nil
}
func (e *mutableStateBuilder) AddUpsertWorkflowSearchAttributesEvent(
workflowTaskCompletedEventID int64,
request *commandpb.UpsertWorkflowSearchAttributesCommandAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionUpsertWorkflowSearchAttributes
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
event := e.hBuilder.AddUpsertWorkflowSearchAttributesEvent(workflowTaskCompletedEventID, request)
e.ReplicateUpsertWorkflowSearchAttributesEvent(event)
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowSearchAttrTasks(
timestamp.TimeValue(event.GetEventTime()),
); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateUpsertWorkflowSearchAttributesEvent(
event *historypb.HistoryEvent,
) {
upsertSearchAttr := event.GetUpsertWorkflowSearchAttributesEventAttributes().GetSearchAttributes().GetIndexedFields()
currentSearchAttr := e.GetExecutionInfo().SearchAttributes
e.executionInfo.SearchAttributes = mergeMapOfPayload(currentSearchAttr, upsertSearchAttr)
}
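// mergeMapOfPayload merges the upserted search attributes into the current set;
// keys present in upsert overwrite existing entries.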
func mergeMapOfPayload(
current map[string]*commonpb.Payload,
upsert map[string]*commonpb.Payload,
) map[string]*commonpb.Payload {
if current == nil {
current = make(map[string]*commonpb.Payload)
}
for k, v := range upsert {
current[k] = v
}
return current
}
func (e *mutableStateBuilder) AddExternalWorkflowExecutionSignaled(
initiatedID int64,
namespace string,
workflowID string,
runID string,
control string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionExternalWorkflowSignalRequested
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
_, ok := e.GetSignalInfo(initiatedID)
if !ok {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddExternalWorkflowExecutionSignaled(initiatedID, namespace, workflowID, runID, control)
if err := e.ReplicateExternalWorkflowExecutionSignaled(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateExternalWorkflowExecutionSignaled(
event *historypb.HistoryEvent,
) error {
initiatedID := event.GetExternalWorkflowExecutionSignaledEventAttributes().GetInitiatedEventId()
return e.DeletePendingSignal(initiatedID)
}
func (e *mutableStateBuilder) AddSignalExternalWorkflowExecutionFailedEvent(
workflowTaskCompletedEventID int64,
initiatedID int64,
namespace string,
workflowID string,
runID string,
control string,
cause enumspb.SignalExternalWorkflowExecutionFailedCause,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionExternalWorkflowSignalFailed
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
_, ok := e.GetSignalInfo(initiatedID)
if !ok {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddSignalExternalWorkflowExecutionFailedEvent(workflowTaskCompletedEventID, initiatedID, namespace,
workflowID, runID, control, cause)
if err := e.ReplicateSignalExternalWorkflowExecutionFailedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateSignalExternalWorkflowExecutionFailedEvent(
event *historypb.HistoryEvent,
) error {
initiatedID := event.GetSignalExternalWorkflowExecutionFailedEventAttributes().GetInitiatedEventId()
return e.DeletePendingSignal(initiatedID)
}
func (e *mutableStateBuilder) AddTimerStartedEvent(
workflowTaskCompletedEventID int64,
request *commandpb.StartTimerCommandAttributes,
) (*historypb.HistoryEvent, *persistenceblobs.TimerInfo, error) {
opTag := tag.WorkflowActionTimerStarted
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
timerID := request.GetTimerId()
_, ok := e.GetUserTimerInfo(timerID)
if ok {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowTimerID(timerID))
return nil, nil, e.createCallerError(opTag)
}
event := e.hBuilder.AddTimerStartedEvent(workflowTaskCompletedEventID, request)
ti, err := e.ReplicateTimerStartedEvent(event)
if err != nil {
return nil, nil, err
}
return event, ti, err
}
func (e *mutableStateBuilder) ReplicateTimerStartedEvent(
event *historypb.HistoryEvent,
) (*persistenceblobs.TimerInfo, error) {
attributes := event.GetTimerStartedEventAttributes()
timerID := attributes.GetTimerId()
startToFireTimeout := timestamp.DurationValue(attributes.GetStartToFireTimeout())
// TODO: Time skew needs to be taken into account.
expiryTime := timestamp.TimeValue(event.GetEventTime()).Add(startToFireTimeout) // should use the event time, not now
ti := &persistenceblobs.TimerInfo{
Version: event.GetVersion(),
TimerId: timerID,
ExpiryTime: &expiryTime,
StartedId: event.GetEventId(),
TaskStatus: timerTaskStatusNone,
}
e.pendingTimerInfoIDs[timerID] = ti
e.pendingTimerEventIDToID[event.GetEventId()] = timerID
e.updateTimerInfos[ti] = struct{}{}
return ti, nil
}
func (e *mutableStateBuilder) AddTimerFiredEvent(
timerID string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionTimerFired
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
timerInfo, ok := e.GetUserTimerInfo(timerID)
if !ok {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowTimerID(timerID))
return nil, e.createInternalServerError(opTag)
}
// Timer is running.
event := e.hBuilder.AddTimerFiredEvent(timerInfo.GetStartedId(), timerID)
if err := e.ReplicateTimerFiredEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateTimerFiredEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetTimerFiredEventAttributes()
timerID := attributes.GetTimerId()
return e.DeleteUserTimer(timerID)
}
func (e *mutableStateBuilder) AddTimerCanceledEvent(
workflowTaskCompletedEventID int64,
attributes *commandpb.CancelTimerCommandAttributes,
identity string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionTimerCanceled
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
var timerStartedID int64
timerID := attributes.GetTimerId()
ti, ok := e.GetUserTimerInfo(timerID)
if !ok {
// If the timer is not running, check whether it has already fired in the mutable state,
// and if so clear the fired event. Both the buffered events and the history builder
// need to be checked.
timerFiredEvent := e.checkAndClearTimerFiredEvent(timerID)
if timerFiredEvent == nil {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowTimerID(timerID))
return nil, e.createCallerError(opTag)
}
timerStartedID = timerFiredEvent.GetTimerFiredEventAttributes().GetStartedEventId()
} else {
timerStartedID = ti.GetStartedId()
}
// The timer is either still running or its fired event was cleared above.
event := e.hBuilder.AddTimerCanceledEvent(timerStartedID, workflowTaskCompletedEventID, timerID, identity)
if ok {
if err := e.ReplicateTimerCanceledEvent(event); err != nil {
return nil, err
}
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateTimerCanceledEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetTimerCanceledEventAttributes()
timerID := attributes.GetTimerId()
return e.DeleteUserTimer(timerID)
}
func (e *mutableStateBuilder) AddRecordMarkerEvent(
workflowTaskCompletedEventID int64,
attributes *commandpb.RecordMarkerCommandAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowRecordMarker
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
return e.hBuilder.AddMarkerRecordedEvent(workflowTaskCompletedEventID, attributes), nil
}
func (e *mutableStateBuilder) AddWorkflowExecutionTerminatedEvent(
firstEventID int64,
reason string,
details *commonpb.Payloads,
identity string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowTerminated
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
event := e.hBuilder.AddWorkflowExecutionTerminatedEvent(reason, details, identity)
if err := e.ReplicateWorkflowExecutionTerminatedEvent(firstEventID, event); err != nil {
return nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowCloseTasks(
timestamp.TimeValue(event.GetEventTime()),
); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionTerminatedEvent(
firstEventID int64,
event *historypb.HistoryEvent,
) error {
if err := e.UpdateWorkflowStateStatus(
enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED,
); err != nil {
return err
}
e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database
e.ClearStickyness()
e.writeEventToCache(event)
return nil
}
func (e *mutableStateBuilder) AddWorkflowExecutionSignaled(
signalName string,
input *commonpb.Payloads,
identity string,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionWorkflowSignaled
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
event := e.hBuilder.AddWorkflowExecutionSignaledEvent(signalName, input, identity)
if err := e.ReplicateWorkflowExecutionSignaled(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionSignaled(
event *historypb.HistoryEvent,
) error {
// Increment signal count in mutable state for this workflow execution
e.executionInfo.SignalCount++
return nil
}
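// AddContinueAsNewEvent closes the current run as continued-as-new and builds the mutable state
// for the next run, carrying over the parent execution info and the first execution run ID.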
func (e *mutableStateBuilder) AddContinueAsNewEvent(
firstEventID int64,
workflowTaskCompletedEventID int64,
parentNamespace string,
attributes *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes,
) (*historypb.HistoryEvent, mutableState, error) {
opTag := tag.WorkflowActionWorkflowContinueAsNew
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
var err error
newRunID := uuid.New()
newExecution := commonpb.WorkflowExecution{
WorkflowId: e.executionInfo.WorkflowId,
RunId: newRunID,
}
// Extract ParentExecutionInfo from the current run so it can be passed down to the next run
var parentInfo *workflowspb.ParentExecutionInfo
if e.HasParentExecution() {
parentInfo = &workflowspb.ParentExecutionInfo{
NamespaceId: e.executionInfo.ParentNamespaceId,
Namespace: parentNamespace,
Execution: &commonpb.WorkflowExecution{
WorkflowId: e.executionInfo.ParentWorkflowId,
RunId: e.executionInfo.ParentRunId,
},
InitiatedId: e.executionInfo.InitiatedId,
}
}
continueAsNewEvent := e.hBuilder.AddContinuedAsNewEvent(workflowTaskCompletedEventID, newRunID, attributes)
firstRunID := e.executionInfo.FirstExecutionRunId
// This is needed for backwards compatibility. Workflow executions created with Temporal release v0.28.0 or earlier
// do not have FirstExecutionRunID stored as part of mutable state. If it is not set, load it from the
// workflow execution started event.
if len(firstRunID) == 0 {
currentStartEvent, err := e.GetStartEvent()
if err != nil {
return nil, nil, err
}
firstRunID = currentStartEvent.GetWorkflowExecutionStartedEventAttributes().GetFirstExecutionRunId()
}
namespaceID := e.namespaceEntry.GetInfo().Id
newStateBuilder := newMutableStateBuilderWithVersionHistories(
e.shard,
e.shard.GetEventsCache(),
e.logger,
e.namespaceEntry,
)
if _, err = newStateBuilder.addWorkflowExecutionStartedEventForContinueAsNew(
parentInfo,
newExecution,
e,
attributes,
firstRunID,
); err != nil {
return nil, nil, serviceerror.NewInternal("Failed to add workflow execution started event.")
}
if err = e.ReplicateWorkflowExecutionContinuedAsNewEvent(
firstEventID,
namespaceID,
continueAsNewEvent,
); err != nil {
return nil, nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateWorkflowCloseTasks(
timestamp.TimeValue(continueAsNewEvent.GetEventTime()),
); err != nil {
return nil, nil, err
}
return continueAsNewEvent, newStateBuilder, nil
}
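// rolloverAutoResetPointsWithExpiringTime stamps an expiration time (now plus the namespace
// retention period) on the reset point belonging to the previous run; all other points are
// carried over unchanged.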
func rolloverAutoResetPointsWithExpiringTime(
resetPoints *workflowpb.ResetPoints,
prevRunID string,
now time.Time,
namespaceRetentionDays int32,
) *workflowpb.ResetPoints {
if resetPoints == nil || resetPoints.Points == nil {
return resetPoints
}
newPoints := make([]*workflowpb.ResetPointInfo, 0, len(resetPoints.Points))
expireTime := now.Add(time.Duration(namespaceRetentionDays) * time.Hour * 24)
for _, rp := range resetPoints.Points {
if rp.GetRunId() == prevRunID {
rp.ExpireTime = &expireTime
}
newPoints = append(newPoints, rp)
}
return &workflowpb.ResetPoints{
Points: newPoints,
}
}
func (e *mutableStateBuilder) ReplicateWorkflowExecutionContinuedAsNewEvent(
firstEventID int64,
namespaceID string,
continueAsNewEvent *historypb.HistoryEvent,
) error {
if err := e.UpdateWorkflowStateStatus(
enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW,
); err != nil {
return err
}
e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database
e.ClearStickyness()
e.writeEventToCache(continueAsNewEvent)
return nil
}
func (e *mutableStateBuilder) AddStartChildWorkflowExecutionInitiatedEvent(
workflowTaskCompletedEventID int64,
createRequestID string,
attributes *commandpb.StartChildWorkflowExecutionCommandAttributes,
) (*historypb.HistoryEvent, *persistenceblobs.ChildExecutionInfo, error) {
opTag := tag.WorkflowActionChildWorkflowInitiated
if err := e.checkMutability(opTag); err != nil {
return nil, nil, err
}
event := e.hBuilder.AddStartChildWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, attributes)
// Write the event to cache only on active cluster
e.eventsCache.putEvent(e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionInfo.ExecutionState.RunId,
event.GetEventId(), event)
ci, err := e.ReplicateStartChildWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, event, createRequestID)
if err != nil {
return nil, nil, err
}
// TODO merge active & passive task generation
if err := e.taskGenerator.generateChildWorkflowTasks(
timestamp.TimeValue(event.GetEventTime()),
event,
); err != nil {
return nil, nil, err
}
return event, ci, nil
}
func (e *mutableStateBuilder) ReplicateStartChildWorkflowExecutionInitiatedEvent(
firstEventID int64,
event *historypb.HistoryEvent,
createRequestID string,
) (*persistenceblobs.ChildExecutionInfo, error) {
initiatedEventID := event.GetEventId()
attributes := event.GetStartChildWorkflowExecutionInitiatedEventAttributes()
ci := &persistenceblobs.ChildExecutionInfo{
Version: event.GetVersion(),
InitiatedId: initiatedEventID,
InitiatedEventBatchId: firstEventID,
StartedId: common.EmptyEventID,
StartedWorkflowId: attributes.GetWorkflowId(),
CreateRequestId: createRequestID,
Namespace: attributes.GetNamespace(),
WorkflowTypeName: attributes.GetWorkflowType().GetName(),
ParentClosePolicy: attributes.GetParentClosePolicy(),
}
e.pendingChildExecutionInfoIDs[initiatedEventID] = ci
e.updateChildExecutionInfos[ci] = struct{}{}
return ci, nil
}
func (e *mutableStateBuilder) AddChildWorkflowExecutionStartedEvent(
namespace string,
execution *commonpb.WorkflowExecution,
workflowType *commonpb.WorkflowType,
initiatedID int64,
header *commonpb.Header,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionChildWorkflowStarted
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ci, ok := e.GetChildExecutionInfo(initiatedID)
if !ok || ci.StartedId != common.EmptyEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddChildWorkflowExecutionStartedEvent(namespace, execution, workflowType, initiatedID, header)
if err := e.ReplicateChildWorkflowExecutionStartedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionStartedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetChildWorkflowExecutionStartedEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
ci, _ := e.GetChildExecutionInfo(initiatedID)
ci.StartedId = event.GetEventId()
ci.StartedRunId = attributes.GetWorkflowExecution().GetRunId()
e.updateChildExecutionInfos[ci] = struct{}{}
return nil
}
func (e *mutableStateBuilder) AddStartChildWorkflowExecutionFailedEvent(
initiatedID int64,
cause enumspb.StartChildWorkflowExecutionFailedCause,
initiatedEventAttributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionChildWorkflowInitiationFailed
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ci, ok := e.GetChildExecutionInfo(initiatedID)
if !ok || ci.StartedId != common.EmptyEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
event := e.hBuilder.AddStartChildWorkflowExecutionFailedEvent(initiatedID, cause, initiatedEventAttributes)
if err := e.ReplicateStartChildWorkflowExecutionFailedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateStartChildWorkflowExecutionFailedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetStartChildWorkflowExecutionFailedEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
return e.DeletePendingChildExecution(initiatedID)
}
func (e *mutableStateBuilder) AddChildWorkflowExecutionCompletedEvent(
initiatedID int64,
childExecution *commonpb.WorkflowExecution,
attributes *historypb.WorkflowExecutionCompletedEventAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionChildWorkflowCompleted
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ci, ok := e.GetChildExecutionInfo(initiatedID)
if !ok || ci.StartedId == common.EmptyEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
workflowType := &commonpb.WorkflowType{
Name: ci.WorkflowTypeName,
}
event := e.hBuilder.AddChildWorkflowExecutionCompletedEvent(ci.Namespace, childExecution, workflowType, ci.InitiatedId,
ci.StartedId, attributes)
if err := e.ReplicateChildWorkflowExecutionCompletedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionCompletedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetChildWorkflowExecutionCompletedEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
return e.DeletePendingChildExecution(initiatedID)
}
func (e *mutableStateBuilder) AddChildWorkflowExecutionFailedEvent(
initiatedID int64,
childExecution *commonpb.WorkflowExecution,
attributes *historypb.WorkflowExecutionFailedEventAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionChildWorkflowFailed
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ci, ok := e.GetChildExecutionInfo(initiatedID)
if !ok || ci.StartedId == common.EmptyEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
workflowType := &commonpb.WorkflowType{
Name: ci.WorkflowTypeName,
}
event := e.hBuilder.AddChildWorkflowExecutionFailedEvent(ci.Namespace, childExecution, workflowType, ci.InitiatedId,
ci.StartedId, attributes)
if err := e.ReplicateChildWorkflowExecutionFailedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionFailedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetChildWorkflowExecutionFailedEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
return e.DeletePendingChildExecution(initiatedID)
}
func (e *mutableStateBuilder) AddChildWorkflowExecutionCanceledEvent(
initiatedID int64,
childExecution *commonpb.WorkflowExecution,
attributes *historypb.WorkflowExecutionCanceledEventAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionChildWorkflowCanceled
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ci, ok := e.GetChildExecutionInfo(initiatedID)
if !ok || ci.StartedId == common.EmptyEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
workflowType := &commonpb.WorkflowType{
Name: ci.WorkflowTypeName,
}
event := e.hBuilder.AddChildWorkflowExecutionCanceledEvent(ci.Namespace, childExecution, workflowType, ci.InitiatedId,
ci.StartedId, attributes)
if err := e.ReplicateChildWorkflowExecutionCanceledEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionCanceledEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetChildWorkflowExecutionCanceledEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
return e.DeletePendingChildExecution(initiatedID)
}
func (e *mutableStateBuilder) AddChildWorkflowExecutionTerminatedEvent(
initiatedID int64,
childExecution *commonpb.WorkflowExecution,
attributes *historypb.WorkflowExecutionTerminatedEventAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionChildWorkflowTerminated
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ci, ok := e.GetChildExecutionInfo(initiatedID)
if !ok || ci.StartedId == common.EmptyEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
workflowType := &commonpb.WorkflowType{
Name: ci.WorkflowTypeName,
}
event := e.hBuilder.AddChildWorkflowExecutionTerminatedEvent(ci.Namespace, childExecution, workflowType, ci.InitiatedId,
ci.StartedId, attributes)
if err := e.ReplicateChildWorkflowExecutionTerminatedEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionTerminatedEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetChildWorkflowExecutionTerminatedEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
return e.DeletePendingChildExecution(initiatedID)
}
func (e *mutableStateBuilder) AddChildWorkflowExecutionTimedOutEvent(
initiatedID int64,
childExecution *commonpb.WorkflowExecution,
attributes *historypb.WorkflowExecutionTimedOutEventAttributes,
) (*historypb.HistoryEvent, error) {
opTag := tag.WorkflowActionChildWorkflowTimedOut
if err := e.checkMutability(opTag); err != nil {
return nil, err
}
ci, ok := e.GetChildExecutionInfo(initiatedID)
if !ok || ci.StartedId == common.EmptyEventID {
e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.Bool(ok),
tag.WorkflowInitiatedID(initiatedID))
return nil, e.createInternalServerError(opTag)
}
workflowType := &commonpb.WorkflowType{
Name: ci.WorkflowTypeName,
}
event := e.hBuilder.AddChildWorkflowExecutionTimedOutEvent(ci.Namespace, childExecution, workflowType, ci.InitiatedId,
ci.StartedId, attributes)
if err := e.ReplicateChildWorkflowExecutionTimedOutEvent(event); err != nil {
return nil, err
}
return event, nil
}
func (e *mutableStateBuilder) ReplicateChildWorkflowExecutionTimedOutEvent(
event *historypb.HistoryEvent,
) error {
attributes := event.GetChildWorkflowExecutionTimedOutEventAttributes()
initiatedID := attributes.GetInitiatedEventId()
return e.DeletePendingChildExecution(initiatedID)
}
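// RetryActivity decides whether a failed activity should be retried. When the retry policy allows
// another attempt, the ActivityInfo is updated in place (next scheduled time, attempt count, last
// failure) and an activity retry timer task is generated; no history event is appended here.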
func (e *mutableStateBuilder) RetryActivity(
ai *persistenceblobs.ActivityInfo,
failure *failurepb.Failure,
) (enumspb.RetryState, error) {
opTag := tag.WorkflowActionActivityTaskRetry
if err := e.checkMutability(opTag); err != nil {
return enumspb.RETRY_STATE_INTERNAL_SERVER_ERROR, err
}
if !ai.HasRetryPolicy {
return enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, nil
}
if ai.CancelRequested {
return enumspb.RETRY_STATE_CANCEL_REQUESTED, nil
}
now := e.timeSource.Now()
backoffInterval, retryState := getBackoffInterval(
now,
timestamp.TimeValue(ai.RetryExpirationTime),
ai.Attempt,
ai.RetryMaximumAttempts,
ai.RetryInitialInterval,
ai.RetryMaximumInterval,
ai.RetryBackoffCoefficient,
failure,
ai.RetryNonRetryableErrorTypes,
)
if retryState != enumspb.RETRY_STATE_IN_PROGRESS {
return retryState, nil
}
// a retry is needed, update activity info for next retry
ai.Version = e.GetCurrentVersion()
ai.Attempt++
ai.ScheduledTime = timestamp.TimePtr(now.Add(backoffInterval)) // update to next schedule time
ai.StartedId = common.EmptyEventID
ai.RequestId = ""
ai.StartedTime = timestamp.TimePtr(time.Time{})
ai.TimerTaskStatus = timerTaskStatusNone
ai.RetryLastWorkerIdentity = ai.StartedIdentity
ai.RetryLastFailure = failure
if err := e.taskGenerator.generateActivityRetryTasks(
ai.ScheduleId,
); err != nil {
return enumspb.RETRY_STATE_INTERNAL_SERVER_ERROR, err
}
e.updateActivityInfos[ai] = struct{}{}
e.syncActivityTasks[ai.ScheduleId] = struct{}{}
return enumspb.RETRY_STATE_IN_PROGRESS, nil
}
// TODO mutable state should generate corresponding transfer / timer tasks according to
// updates accumulated, while currently all transfer / timer tasks are managed manually
// TODO convert AddTransferTasks to prepareTransferTasks
func (e *mutableStateBuilder) AddTransferTasks(
transferTasks ...persistence.Task,
) {
e.insertTransferTasks = append(e.insertTransferTasks, transferTasks...)
}
// TODO convert AddTransferTasks to prepareTimerTasks
func (e *mutableStateBuilder) AddTimerTasks(
timerTasks ...persistence.Task,
) {
e.insertTimerTasks = append(e.insertTimerTasks, timerTasks...)
}
func (e *mutableStateBuilder) SetUpdateCondition(
nextEventIDInDB int64,
) {
e.nextEventIDInDB = nextEventIDInDB
}
func (e *mutableStateBuilder) GetUpdateCondition() int64 {
return e.nextEventIDInDB
}
func (e *mutableStateBuilder) GetWorkflowStateStatus() (enumsspb.WorkflowExecutionState, enumspb.WorkflowExecutionStatus) {
executionInfo := e.executionInfo
return executionInfo.ExecutionState.State, executionInfo.ExecutionState.Status
}
func (e *mutableStateBuilder) UpdateWorkflowStateStatus(
state enumsspb.WorkflowExecutionState,
status enumspb.WorkflowExecutionStatus,
) error {
return e.executionInfo.UpdateWorkflowStateStatus(state, status)
}
func (e *mutableStateBuilder) StartTransaction(
namespaceEntry *cache.NamespaceCacheEntry,
) (bool, error) {
e.namespaceEntry = namespaceEntry
if err := e.UpdateCurrentVersion(namespaceEntry.GetFailoverVersion(), false); err != nil {
return false, err
}
flushBeforeReady, err := e.startTransactionHandleWorkflowTaskFailover(false)
if err != nil {
return false, err
}
return flushBeforeReady, nil
}
func (e *mutableStateBuilder) StartTransactionSkipWorkflowTaskFail(
namespaceEntry *cache.NamespaceCacheEntry,
) error {
e.namespaceEntry = namespaceEntry
if err := e.UpdateCurrentVersion(namespaceEntry.GetFailoverVersion(), false); err != nil {
return err
}
_, err := e.startTransactionHandleWorkflowTaskFailover(true)
return err
}
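// CloseTransactionAsMutation finalizes the current update session and returns a WorkflowMutation
// carrying only the changes accumulated since the last persistence write, together with the
// history event batches to append.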
func (e *mutableStateBuilder) CloseTransactionAsMutation(
now time.Time,
transactionPolicy transactionPolicy,
) (*persistence.WorkflowMutation, []*persistence.WorkflowEvents, error) {
if err := e.prepareCloseTransaction(
now,
transactionPolicy,
); err != nil {
return nil, nil, err
}
workflowEventsSeq, err := e.prepareEventsAndReplicationTasks(transactionPolicy)
if err != nil {
return nil, nil, err
}
if len(workflowEventsSeq) > 0 {
lastEvents := workflowEventsSeq[len(workflowEventsSeq)-1].Events
firstEvent := lastEvents[0]
lastEvent := lastEvents[len(lastEvents)-1]
e.updateWithLastFirstEvent(firstEvent)
if err := e.updateWithLastWriteEvent(
lastEvent,
transactionPolicy,
); err != nil {
return nil, nil, err
}
}
setTaskInfo(e.GetCurrentVersion(), now, e.insertTransferTasks, e.insertTimerTasks)
// update last update time
e.executionInfo.LastUpdatedTime = &now
// We generate the checksum here based on the assumption that the returned
// mutation object is treated as immutable. As of this writing, the only
// code that modifies the returned object lives inside workflowExecutionContext.resetWorkflowExecution,
// and the updates done there do not impact the checksum calculation.
checksum := e.generateChecksum()
workflowMutation := &persistence.WorkflowMutation{
ExecutionInfo: e.executionInfo,
VersionHistories: e.versionHistories,
UpsertActivityInfos: convertUpdateActivityInfos(e.updateActivityInfos),
DeleteActivityInfos: convertDeleteActivityInfos(e.deleteActivityInfos),
UpsertTimerInfos: convertUpdateTimerInfos(e.updateTimerInfos),
DeleteTimerInfos: convertDeleteTimerInfos(e.deleteTimerInfos),
UpsertChildExecutionInfos: convertUpdateChildExecutionInfos(e.updateChildExecutionInfos),
DeleteChildExecutionInfo: e.deleteChildExecutionInfo,
UpsertRequestCancelInfos: convertUpdateRequestCancelInfos(e.updateRequestCancelInfos),
DeleteRequestCancelInfo: e.deleteRequestCancelInfo,
UpsertSignalInfos: convertUpdateSignalInfos(e.updateSignalInfos),
DeleteSignalInfo: e.deleteSignalInfo,
UpsertSignalRequestedIDs: convertSignalRequestedIDs(e.updateSignalRequestedIDs),
DeleteSignalRequestedID: e.deleteSignalRequestedID,
NewBufferedEvents: e.updateBufferedEvents,
ClearBufferedEvents: e.clearBufferedEvents,
TransferTasks: e.insertTransferTasks,
ReplicationTasks: e.insertReplicationTasks,
TimerTasks: e.insertTimerTasks,
Condition: e.nextEventIDInDB,
Checksum: checksum,
}
e.checksum = checksum
if err := e.cleanupTransaction(transactionPolicy); err != nil {
return nil, nil, err
}
return workflowMutation, workflowEventsSeq, nil
}
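// CloseTransactionAsSnapshot finalizes the current update session and returns a full
// WorkflowSnapshot of all pending state. Snapshots cannot be produced while transient or
// buffered events are outstanding.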
func (e *mutableStateBuilder) CloseTransactionAsSnapshot(
now time.Time,
transactionPolicy transactionPolicy,
) (*persistence.WorkflowSnapshot, []*persistence.WorkflowEvents, error) {
if err := e.prepareCloseTransaction(
now,
transactionPolicy,
); err != nil {
return nil, nil, err
}
workflowEventsSeq, err := e.prepareEventsAndReplicationTasks(transactionPolicy)
if err != nil {
return nil, nil, err
}
if len(workflowEventsSeq) > 1 {
return nil, nil, serviceerror.NewInternal("cannot generate workflow snapshot with transient events")
}
if len(e.bufferedEvents) > 0 {
// TODO do we need the functionality to generate snapshot with buffered events?
return nil, nil, serviceerror.NewInternal("cannot generate workflow snapshot with buffered events")
}
if len(workflowEventsSeq) > 0 {
lastEvents := workflowEventsSeq[len(workflowEventsSeq)-1].Events
firstEvent := lastEvents[0]
lastEvent := lastEvents[len(lastEvents)-1]
e.updateWithLastFirstEvent(firstEvent)
if err := e.updateWithLastWriteEvent(
lastEvent,
transactionPolicy,
); err != nil {
return nil, nil, err
}
}
setTaskInfo(e.GetCurrentVersion(), now, e.insertTransferTasks, e.insertTimerTasks)
// update last update time
e.executionInfo.LastUpdatedTime = &now
// We generate the checksum here based on the assumption that the returned
// snapshot object is treated as immutable. As of this writing, the only
// code that modifies the returned object lives inside workflowExecutionContext.resetWorkflowExecution,
// and the updates done there do not impact the checksum calculation.
checksum := e.generateChecksum()
workflowSnapshot := &persistence.WorkflowSnapshot{
ExecutionInfo: e.executionInfo,
VersionHistories: e.versionHistories,
ActivityInfos: convertPendingActivityInfos(e.pendingActivityInfoIDs),
TimerInfos: convertPendingTimerInfos(e.pendingTimerInfoIDs),
ChildExecutionInfos: convertPendingChildExecutionInfos(e.pendingChildExecutionInfoIDs),
RequestCancelInfos: convertPendingRequestCancelInfos(e.pendingRequestCancelInfoIDs),
SignalInfos: convertPendingSignalInfos(e.pendingSignalInfoIDs),
SignalRequestedIDs: convertSignalRequestedIDs(e.pendingSignalRequestedIDs),
TransferTasks: e.insertTransferTasks,
ReplicationTasks: e.insertReplicationTasks,
TimerTasks: e.insertTimerTasks,
Condition: e.nextEventIDInDB,
Checksum: checksum,
}
e.checksum = checksum
if err := e.cleanupTransaction(transactionPolicy); err != nil {
return nil, nil, err
}
return workflowSnapshot, workflowEventsSeq, nil
}
func (e *mutableStateBuilder) IsResourceDuplicated(
resourceDedupKey definition.DeduplicationID,
) bool {
id := definition.GenerateDeduplicationKey(resourceDedupKey)
_, duplicated := e.appliedEvents[id]
return duplicated
}
func (e *mutableStateBuilder) UpdateDuplicatedResource(
resourceDedupKey definition.DeduplicationID,
) {
id := definition.GenerateDeduplicationKey(resourceDedupKey)
e.appliedEvents[id] = struct{}{}
}
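// prepareCloseTransaction runs the pre-persistence steps in order: transaction policy check,
// buffered events limit enforcement, workflow reset handling, flushing of buffered events
// (active policy only), and finally activity / user timer task generation.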
func (e *mutableStateBuilder) prepareCloseTransaction(
now time.Time,
transactionPolicy transactionPolicy,
) error {
if err := e.closeTransactionWithPolicyCheck(
transactionPolicy,
); err != nil {
return err
}
if err := e.closeTransactionHandleBufferedEventsLimit(
transactionPolicy,
); err != nil {
return err
}
if err := e.closeTransactionHandleWorkflowReset(
now,
transactionPolicy,
); err != nil {
return err
}
// Flushing buffered events must happen last.
if transactionPolicy == transactionPolicyActive {
if err := e.FlushBufferedEvents(); err != nil {
return err
}
}
// TODO merge active & passive task generation
// NOTE: this function must be the last call
// since we only generate at most one activity & user timer task,
// regardless of how many activities & user timers are created,
// so the calculation must be at the very end
return e.closeTransactionHandleActivityUserTimerTasks(
now,
transactionPolicy,
)
}
func (e *mutableStateBuilder) cleanupTransaction(
transactionPolicy transactionPolicy,
) error {
// Clear all updates to prepare for the next session
e.hBuilder = newHistoryBuilder(e, e.logger)
e.updateActivityInfos = make(map[*persistenceblobs.ActivityInfo]struct{})
e.deleteActivityInfos = make(map[int64]struct{})
e.syncActivityTasks = make(map[int64]struct{})
e.updateTimerInfos = make(map[*persistenceblobs.TimerInfo]struct{})
e.deleteTimerInfos = make(map[string]struct{})
e.updateChildExecutionInfos = make(map[*persistenceblobs.ChildExecutionInfo]struct{})
e.deleteChildExecutionInfo = nil
e.updateRequestCancelInfos = make(map[*persistenceblobs.RequestCancelInfo]struct{})
e.deleteRequestCancelInfo = nil
e.updateSignalInfos = make(map[*persistenceblobs.SignalInfo]struct{})
e.deleteSignalInfo = nil
e.updateSignalRequestedIDs = make(map[string]struct{})
e.deleteSignalRequestedID = ""
e.clearBufferedEvents = false
if e.updateBufferedEvents != nil {
e.bufferedEvents = append(e.bufferedEvents, e.updateBufferedEvents...)
e.updateBufferedEvents = nil
}
e.hasBufferedEventsInDB = len(e.bufferedEvents) > 0
e.stateInDB = e.executionInfo.ExecutionState.State
e.nextEventIDInDB = e.GetNextEventID()
e.insertTransferTasks = nil
e.insertReplicationTasks = nil
e.insertTimerTasks = nil
return nil
}
func (e *mutableStateBuilder) prepareEventsAndReplicationTasks(
transactionPolicy transactionPolicy,
) ([]*persistence.WorkflowEvents, error) {
currentBranchToken, err := e.GetCurrentBranchToken()
if err != nil {
return nil, err
}
var workflowEventsSeq []*persistence.WorkflowEvents
if len(e.hBuilder.transientHistory) != 0 {
workflowEventsSeq = append(workflowEventsSeq, &persistence.WorkflowEvents{
NamespaceID: e.executionInfo.NamespaceId,
WorkflowID: e.executionInfo.WorkflowId,
RunID: e.executionInfo.ExecutionState.RunId,
BranchToken: currentBranchToken,
Events: e.hBuilder.transientHistory,
})
}
if len(e.hBuilder.history) != 0 {
workflowEventsSeq = append(workflowEventsSeq, &persistence.WorkflowEvents{
NamespaceID: e.executionInfo.NamespaceId,
WorkflowID: e.executionInfo.WorkflowId,
RunID: e.executionInfo.ExecutionState.RunId,
BranchToken: currentBranchToken,
Events: e.hBuilder.history,
})
}
if err := e.validateNoEventsAfterWorkflowFinish(
transactionPolicy,
e.hBuilder.history,
); err != nil {
return nil, err
}
for _, workflowEvents := range workflowEventsSeq {
replicationTasks, err := e.eventsToReplicationTask(transactionPolicy, workflowEvents.Events)
if err != nil {
return nil, err
}
e.insertReplicationTasks = append(
e.insertReplicationTasks,
replicationTasks...,
)
}
e.insertReplicationTasks = append(
e.insertReplicationTasks,
e.syncActivityToReplicationTask(transactionPolicy)...,
)
if transactionPolicy == transactionPolicyPassive && len(e.insertReplicationTasks) > 0 {
return nil, serviceerror.NewInternal("should not generate replication task when close transaction as passive")
}
return workflowEventsSeq, nil
}
func (e *mutableStateBuilder) eventsToReplicationTask(
transactionPolicy transactionPolicy,
events []*historypb.HistoryEvent,
) ([]persistence.Task, error) {
if transactionPolicy == transactionPolicyPassive ||
!e.canReplicateEvents() ||
len(events) == 0 {
return emptyTasks, nil
}
firstEvent := events[0]
lastEvent := events[len(events)-1]
version := firstEvent.GetVersion()
sourceCluster := e.clusterMetadata.ClusterNameForFailoverVersion(version)
currentCluster := e.clusterMetadata.GetCurrentClusterName()
if currentCluster != sourceCluster {
return nil, serviceerror.NewInternal("mutableStateBuilder encounter contradicting version & transaction policy")
}
currentBranchToken, err := e.GetCurrentBranchToken()
if err != nil {
return nil, err
}
replicationTask := &persistence.HistoryReplicationTask{
FirstEventID: firstEvent.GetEventId(),
NextEventID: lastEvent.GetEventId() + 1,
Version: firstEvent.GetVersion(),
BranchToken: currentBranchToken,
NewRunBranchToken: nil,
}
if e.GetVersionHistories() == nil {
return nil, serviceerror.NewInternal("should not generate replication task when missing replication state & version history")
}
return []persistence.Task{replicationTask}, nil
}
func (e *mutableStateBuilder) syncActivityToReplicationTask(
transactionPolicy transactionPolicy,
) []persistence.Task {
if transactionPolicy == transactionPolicyPassive ||
!e.canReplicateEvents() {
return emptyTasks
}
return convertSyncActivityInfos(
e.pendingActivityInfoIDs,
e.syncActivityTasks,
)
}
func (e *mutableStateBuilder) updateWithLastWriteEvent(
lastEvent *historypb.HistoryEvent,
transactionPolicy transactionPolicy,
) error {
if transactionPolicy == transactionPolicyPassive {
// already handled in state builder
return nil
}
e.GetExecutionInfo().LastEventTaskId = lastEvent.GetTaskId()
if e.versionHistories != nil {
currentVersionHistory, err := e.versionHistories.GetCurrentVersionHistory()
if err != nil {
return err
}
if err := currentVersionHistory.AddOrUpdateItem(persistence.NewVersionHistoryItem(
lastEvent.GetEventId(), lastEvent.GetVersion(),
)); err != nil {
return err
}
}
return nil
}
func (e *mutableStateBuilder) updateWithLastFirstEvent(
lastFirstEvent *historypb.HistoryEvent,
) {
e.GetExecutionInfo().SetLastFirstEventID(lastFirstEvent.GetEventId())
}
func (e *mutableStateBuilder) canReplicateEvents() bool {
return e.namespaceEntry.GetReplicationPolicy() == cache.ReplicationPolicyMultiCluster
}
// validateNoEventsAfterWorkflowFinish performs a check on the history event batch
// NOTE: do not apply this check on every batch, since transient
// workflow task && workflow finish will be broken (the first batch)
func (e *mutableStateBuilder) validateNoEventsAfterWorkflowFinish(
transactionPolicy transactionPolicy,
events []*historypb.HistoryEvent,
) error {
if transactionPolicy == transactionPolicyPassive ||
len(events) == 0 {
return nil
}
// only do check if workflow is finished
if e.GetExecutionInfo().GetExecutionState().State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED {
return nil
}
// workflow close
// this will perform check on the last event of last batch
// NOTE: do not apply this check on every batch, since transient
// workflow task && workflow finish will be broken (the first batch)
lastEvent := events[len(events)-1]
switch lastEvent.GetEventType() {
case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW,
enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED:
return nil
default:
executionInfo := e.GetExecutionInfo()
e.logError(
"encounter case where events appears after workflow finish.",
tag.WorkflowNamespaceID(executionInfo.NamespaceId),
tag.WorkflowID(executionInfo.WorkflowId),
tag.WorkflowRunID(executionInfo.ExecutionState.RunId),
)
return ErrEventsAterWorkflowFinish
}
}
func (e *mutableStateBuilder) startTransactionHandleWorkflowTaskFailover(
skipWorkflowTaskFailed bool,
) (bool, error) {
if !e.IsWorkflowExecutionRunning() ||
!e.canReplicateEvents() {
return false, nil
}
// NOTE:
// the main idea here is to guarantee that once there is a workflow task started
// all events ending up in the buffer should have the same version
// Handle mutable state turning from standby to active while having a workflow task on the fly
workflowTask, ok := e.GetInFlightWorkflowTask()
if !ok || workflowTask.Version >= e.GetCurrentVersion() {
// no pending workflow tasks, no buffered events
// or workflow task has higher / equal version
return false, nil
}
currentVersion := e.GetCurrentVersion()
lastWriteVersion, err := e.GetLastWriteVersion()
if err != nil {
return false, err
}
if lastWriteVersion != workflowTask.Version {
return false, serviceerror.NewInternal(fmt.Sprintf("mutableStateBuilder encounter mismatch version, workflow task: %v, last write version %v", workflowTask.Version, lastWriteVersion))
}
lastWriteSourceCluster := e.clusterMetadata.ClusterNameForFailoverVersion(lastWriteVersion)
currentVersionCluster := e.clusterMetadata.ClusterNameForFailoverVersion(currentVersion)
currentCluster := e.clusterMetadata.GetCurrentClusterName()
// there are 4 cases for version changes (based on version from namespace cache)
// NOTE: namespace cache version change may occur after seeing events with higher version
// meaning that the flush buffer logic in NDC branch manager should be kept.
//
// 1. active -> passive => fail workflow task & flush buffer using last write version
// 2. active -> active => fail workflow task & flush buffer using last write version
// 3. passive -> active => fail workflow task using current version, no buffered events
// 4. passive -> passive => no buffered events, since always passive, nothing to be done
// handle case 4
if lastWriteSourceCluster != currentCluster && currentVersionCluster != currentCluster {
// do a sanity check on buffered events
if e.HasBufferedEvents() {
return false, serviceerror.NewInternal("mutableStateBuilder encounter previous passive workflow with buffered events")
}
return false, nil
}
// handle case 1 & 2
var flushBufferVersion = lastWriteVersion
// handle case 3
if lastWriteSourceCluster != currentCluster && currentVersionCluster == currentCluster {
// do a sanity check on buffered events
if e.HasBufferedEvents() {
return false, serviceerror.NewInternal("mutableStateBuilder encounter previous passive workflow with buffered events")
}
flushBufferVersion = currentVersion
}
// this workflow was previously active (whether it has buffered events or not),
// so the in-flight workflow task must be failed to guarantee that all events within the same
// event batch share the same version
if err := e.UpdateCurrentVersion(flushBufferVersion, true); err != nil {
return false, err
}
if skipWorkflowTaskFailed {
return false, nil
}
// we have a workflow task with buffered events on the fly with a lower version, fail it
if err := failWorkflowTask(
e,
workflowTask,
enumspb.WORKFLOW_TASK_FAILED_CAUSE_FAILOVER_CLOSE_COMMAND,
); err != nil {
return false, err
}
err = scheduleWorkflowTask(e)
if err != nil {
return false, err
}
return true, nil
}
func (e *mutableStateBuilder) closeTransactionWithPolicyCheck(
transactionPolicy transactionPolicy,
) error {
if transactionPolicy == transactionPolicyPassive ||
!e.canReplicateEvents() {
return nil
}
activeCluster := e.clusterMetadata.ClusterNameForFailoverVersion(e.GetCurrentVersion())
currentCluster := e.clusterMetadata.GetCurrentClusterName()
if activeCluster != currentCluster {
namespaceID := e.GetExecutionInfo().NamespaceId
return serviceerror.NewNamespaceNotActive(namespaceID, currentCluster, activeCluster)
}
return nil
}
func (e *mutableStateBuilder) closeTransactionHandleBufferedEventsLimit(
transactionPolicy transactionPolicy,
) error {
if transactionPolicy == transactionPolicyPassive ||
!e.IsWorkflowExecutionRunning() {
return nil
}
if len(e.bufferedEvents) < e.config.MaximumBufferedEventsBatch() {
return nil
}
// Handling buffered events size issue
if workflowTask, ok := e.GetInFlightWorkflowTask(); ok {
// buffered events have exceeded the size limit; fail the in-flight workflow task so the buffer can be flushed
if err := failWorkflowTask(
e,
workflowTask,
enumspb.WORKFLOW_TASK_FAILED_CAUSE_FORCE_CLOSE_COMMAND,
); err != nil {
return err
}
err := scheduleWorkflowTask(e)
if err != nil {
return err
}
}
return nil
}
func (e *mutableStateBuilder) closeTransactionHandleWorkflowReset(
now time.Time,
transactionPolicy transactionPolicy,
) error {
if transactionPolicy == transactionPolicyPassive ||
!e.IsWorkflowExecutionRunning() {
return nil
}
// compare with bad client binary checksum and schedule a reset task
// only schedule reset task if current doesn't have childWFs.
// TODO: This will be removed once our reset allows childWFs
if len(e.GetPendingChildExecutionInfos()) != 0 {
return nil
}
executionInfo := e.GetExecutionInfo()
namespaceEntry, err := e.shard.GetNamespaceCache().GetNamespaceByID(executionInfo.NamespaceId)
if err != nil {
return err
}
if _, pt := FindAutoResetPoint(
e.timeSource,
namespaceEntry.GetConfig().BadBinaries,
e.GetExecutionInfo().AutoResetPoints,
); pt != nil {
if err := e.taskGenerator.generateWorkflowResetTasks(
e.unixNanoToTime(now.UnixNano()),
); err != nil {
return err
}
e.logInfo("Auto-Reset task is scheduled",
tag.WorkflowNamespace(namespaceEntry.GetInfo().Name),
tag.WorkflowID(executionInfo.WorkflowId),
tag.WorkflowRunID(executionInfo.ExecutionState.RunId),
tag.WorkflowResetBaseRunID(pt.GetRunId()),
tag.WorkflowEventID(pt.GetFirstWorkflowTaskCompletedId()),
tag.WorkflowBinaryChecksum(pt.GetBinaryChecksum()),
)
}
return nil
}
func (e *mutableStateBuilder) closeTransactionHandleActivityUserTimerTasks(
now time.Time,
transactionPolicy transactionPolicy,
) error {
if transactionPolicy == transactionPolicyPassive ||
!e.IsWorkflowExecutionRunning() {
return nil
}
if err := e.taskGenerator.generateActivityTimerTasks(
e.unixNanoToTime(now.UnixNano()),
); err != nil {
return err
}
return e.taskGenerator.generateUserTimerTasks(
e.unixNanoToTime(now.UnixNano()),
)
}
func (e *mutableStateBuilder) checkMutability(
actionTag tag.Tag,
) error {
if !e.IsWorkflowExecutionRunning() {
e.logWarn(
mutableStateInvalidHistoryActionMsg,
tag.WorkflowEventID(e.GetNextEventID()),
tag.ErrorTypeInvalidHistoryAction,
tag.WorkflowState(e.executionInfo.ExecutionState.State),
actionTag,
)
return ErrWorkflowFinished
}
return nil
}
func (e *mutableStateBuilder) generateChecksum() checksum.Checksum {
if !e.shouldGenerateChecksum() {
return checksum.Checksum{}
}
csum, err := generateMutableStateChecksum(e)
if err != nil {
e.logWarn("error generating mutableState checksum", tag.Error(err))
return checksum.Checksum{}
}
return csum
}
func (e *mutableStateBuilder) shouldGenerateChecksum() bool {
if e.namespaceEntry == nil {
return false
}
return rand.Intn(100) < e.config.MutableStateChecksumGenProbability(e.namespaceEntry.GetInfo().Name)
}
func (e *mutableStateBuilder) shouldVerifyChecksum() bool {
if e.namespaceEntry == nil {
return false
}
return rand.Intn(100) < e.config.MutableStateChecksumVerifyProbability(e.namespaceEntry.GetInfo().Name)
}
func (e *mutableStateBuilder) shouldInvalidateCheckum() bool {
invalidateBeforeEpochSecs := int64(e.config.MutableStateChecksumInvalidateBefore())
if invalidateBeforeEpochSecs > 0 {
invalidateBefore := time.Unix(invalidateBeforeEpochSecs, 0).UTC()
return e.executionInfo.LastUpdatedTime.Before(invalidateBefore)
}
return false
}
func (e *mutableStateBuilder) createInternalServerError(
actionTag tag.Tag,
) error {
return serviceerror.NewInternal(actionTag.Field().String + " operation failed")
}
func (e *mutableStateBuilder) createCallerError(
actionTag tag.Tag,
) error {
return serviceerror.NewInvalidArgument(fmt.Sprintf(mutableStateInvalidHistoryActionMsgTemplate, actionTag.Field().String))
}
func (_ *mutableStateBuilder) unixNanoToTime(
timestampNanos int64,
) time.Time {
return time.Unix(0, timestampNanos).UTC()
}
func (e *mutableStateBuilder) logInfo(msg string, tags ...tag.Tag) {
tags = append(tags, tag.WorkflowID(e.executionInfo.WorkflowId))
tags = append(tags, tag.WorkflowRunID(e.executionInfo.ExecutionState.RunId))
tags = append(tags, tag.WorkflowNamespaceID(e.executionInfo.NamespaceId))
e.logger.Info(msg, tags...)
}
func (e *mutableStateBuilder) logWarn(msg string, tags ...tag.Tag) {
tags = append(tags, tag.WorkflowID(e.executionInfo.WorkflowId))
tags = append(tags, tag.WorkflowRunID(e.executionInfo.ExecutionState.RunId))
tags = append(tags, tag.WorkflowNamespaceID(e.executionInfo.NamespaceId))
e.logger.Warn(msg, tags...)
}
func (e *mutableStateBuilder) logError(msg string, tags ...tag.Tag) {
tags = append(tags, tag.WorkflowID(e.executionInfo.WorkflowId))
tags = append(tags, tag.WorkflowRunID(e.executionInfo.ExecutionState.RunId))
tags = append(tags, tag.WorkflowNamespaceID(e.executionInfo.NamespaceId))
e.logger.Error(msg, tags...)
}
func (e *mutableStateBuilder) logDataInconsistency() {
namespaceID := e.executionInfo.NamespaceId
workflowID := e.executionInfo.WorkflowId
runID := e.executionInfo.ExecutionState.RunId
e.logger.Error("encounter cassandra data inconsistency",
tag.WorkflowNamespaceID(namespaceID),
tag.WorkflowID(workflowID),
tag.WorkflowRunID(runID),
)
}
| 1 | 10,233 | Please move this to `common/enums/defaults.go`. | temporalio-temporal | go |
@@ -2,6 +2,7 @@
import time
import json
import re
+import listenbrainz.db.user as db_user
from collections import defaultdict
from yattag import Doc
import yattag | 1 |
import time
import json
import re
from collections import defaultdict
from yattag import Doc
import yattag
from flask import Blueprint, request, render_template
from flask_login import login_required, current_user
from listenbrainz.webserver.external import messybrainz
from listenbrainz.webserver.rate_limiter import ratelimit
from listenbrainz.webserver.errors import InvalidAPIUsage, CompatError
import xmltodict
from listenbrainz.webserver.views.api_tools import insert_payload, validate_listen
from listenbrainz.db.lastfm_user import User
from listenbrainz.db.lastfm_session import Session
from listenbrainz.db.lastfm_token import Token
import calendar
from datetime import datetime
from listenbrainz.webserver.influx_connection import _influx
api_bp = Blueprint('api_compat', __name__)
@api_bp.route('/api/auth/', methods=['GET'])
@ratelimit()
@login_required
def api_auth():
""" Renders the token activation page.
"""
token = request.args['token']
return render_template(
"user/auth.html",
user_id=current_user.musicbrainz_id,
token=token
)
@api_bp.route('/api/auth/', methods=['POST'])
@ratelimit()
@login_required
def api_auth_approve():
""" Authenticate the user token provided.
"""
user = User.load_by_name(current_user.musicbrainz_id)
if "token" not in request.form:
return render_template(
"user/auth.html",
user_id=current_user.musicbrainz_id,
msg="Missing required parameters. Please provide correct parameters and try again."
)
token = Token.load(request.form['token'])
if not token:
return render_template(
"user/auth.html",
user_id=current_user.musicbrainz_id,
msg="Either this token is already used or invalid. Please try again."
)
if token.user:
return render_template(
"user/auth.html",
user_id=current_user.musicbrainz_id,
msg="This token is already approved. Please check the token and try again."
)
if token.has_expired():
return render_template(
"user/auth.html",
user_id=current_user.musicbrainz_id,
msg="This token has expired. Please create a new token and try again."
)
token.approve(user.name)
return render_template(
"user/auth.html",
user_id=current_user.musicbrainz_id,
msg="Token %s approved for user %s, press continue in client." % (token.token, current_user.musicbrainz_id)
)
@api_bp.route('/2.0/', methods=['POST', 'GET'])
@ratelimit()
def api_methods():
""" Receives both (GET & POST)-API calls and redirects them to appropriate methods.
"""
data = request.args if request.method == 'GET' else request.form
method = data['method'].lower()
if method in ('track.updatenowplaying', 'track.scrobble'):
return record_listens(request, data)
elif method == 'auth.getsession':
return get_session(request, data)
elif method == 'auth.gettoken':
return get_token(request, data)
elif method == 'user.getinfo':
return user_info(request, data)
elif method == 'auth.getsessioninfo':
return session_info(request, data)
else:
# Invalid Method
raise InvalidAPIUsage(CompatError.INVALID_METHOD, output_format=data.get('format', "xml"))
def session_info(request, data):
output_format = data.get('format', 'xml')
try:
sk = data['sk']
api_key = data['api_key']
username = data['username']
except KeyError:
raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format) # Missing Required Params
session = Session.load(sk)
if (not session) or User.load_by_name(username).id != session.user.id:
raise InvalidAPIUsage(CompatError.INVALID_SESSION_KEY, output_format=output_format) # Invalid Session KEY
print("SESSION INFO for session %s, user %s" % (session.id, session.user.name))
doc, tag, text = Doc().tagtext()
with tag('lfm', status='ok'):
with tag('application'):
with tag('session'):
with tag('name'):
text(session.user.name)
with tag('key'):
text(session.id)
with tag('subscriber'):
text('0')
with tag('country'):
text('US')
return format_response('<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue()),
output_format)
def get_token(request, data):
""" Issue a token to user after verying his API_KEY
"""
output_format = data.get('format', 'xml')
api_key = data.get('api_key')
if not api_key:
raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format) # Missing required params
if not Token.is_valid_api_key(api_key):
raise InvalidAPIUsage(CompatError.INVALID_API_KEY, output_format=output_format) # Invalid API_KEY
token = Token.generate(api_key)
doc, tag, text = Doc().tagtext()
with tag('lfm', status='ok'):
with tag('token'):
text(token.token)
return format_response('<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue()),
output_format)
def get_session(request, data):
""" Create new session after validating the API_key and token.
"""
output_format = data.get('format', 'xml')
try:
api_key = data['api_key']
token = Token.load(data['token'], api_key)
except KeyError:
raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format) # Missing Required Params
if not token:
if not Token.is_valid_api_key(api_key):
raise InvalidAPIUsage(CompatError.INVALID_API_KEY, output_format=output_format) # Invalid API_key
raise InvalidAPIUsage(CompatError.INVALID_TOKEN, output_format=output_format) # Invalid token
if token.has_expired():
raise InvalidAPIUsage(CompatError.TOKEN_EXPIRED, output_format=output_format) # Token expired
if not token.user:
raise InvalidAPIUsage(CompatError.UNAUTHORIZED_TOKEN, output_format=output_format) # Unauthorized token
session = Session.create(token)
doc, tag, text = Doc().tagtext()
with tag('lfm', status='ok'):
with tag('session'):
with tag('name'):
text(session.user.name)
with tag('key'):
text(session.sid)
with tag('subscriber'):
text('0')
return format_response('<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue()),
data.get('format', "xml"))
def _to_native_api(lookup, method="track.scrobble", output_format="xml"):
""" Converts the list of listens received in the new Last.fm submission format
to the native ListenBrainz API format.
Returns: type_of_listen and listen_payload
"""
listen_type = 'listens'
if method == 'track.updateNowPlaying':
listen_type = 'playing_now'
if len(list(lookup.keys())) != 1:
raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format) # Invalid parameters
listens = []
for ind, data in lookup.items():
listen = {
'track_metadata': {
'additional_info': {}
}
}
if 'artist' in data:
listen['track_metadata']['artist_name'] = data['artist']
if 'track' in data:
listen['track_metadata']['track_name'] = data['track']
if 'timestamp' in data:
listen['listened_at'] = data['timestamp']
if 'album' in data:
listen['track_metadata']['release_name'] = data['album']
if 'context' in data:
listen['track_metadata']['additional_info']['context'] = data['context']
if 'streamId' in data:
listen['track_metadata']['additional_info']['stream_id'] = data['streamId']
if 'trackNumber' in data:
listen['track_metadata']['additional_info']['tracknumber'] = data['trackNumber']
if 'mbid' in data:
listen['track_metadata']['release_mbid'] = data['mbid']
if 'duration' in data:
listen['track_metadata']['additional_info']['duration'] = data['duration']
# Choosen_by_user is 1 by default
listen['track_metadata']['additional_info']['choosen_by_user'] = data.get('choosenByUser', 1)
listens.append(listen)
return listen_type, listens
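# Illustrative sketch (not part of the original module, values invented):
# roughly how one Last.fm-style "lookup" entry maps onto the native
# ListenBrainz listen produced by _to_native_api() above.
_example_lookup = {
    "0": {
        "artist": "Some Artist",
        "track": "Some Track",
        "album": "Some Album",
        "timestamp": 1514764800,
    }
}
# _to_native_api(_example_lookup) would return ("listens", [listen]) with the
# listen shaped roughly like:
_example_listen = {
    "listened_at": 1514764800,
    "track_metadata": {
        "artist_name": "Some Artist",
        "track_name": "Some Track",
        "release_name": "Some Album",
        "additional_info": {"choosen_by_user": 1},
    },
}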
def record_listens(request, data):
""" Submit the listen in the lastfm format to be inserted in db.
Accepts listens for both track.updateNowPlaying and track.scrobble methods.
"""
output_format = data.get('format', 'xml')
try:
sk, api_key = data['sk'], data['api_key']
except KeyError:
raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format) # Invalid parameters
session = Session.load(sk)
if not session:
if not Token.is_valid_api_key(api_key):
raise InvalidAPIUsage(CompatError.INVALID_API_KEY, output_format=output_format) # Invalid API_KEY
raise InvalidAPIUsage(CompatError.INVALID_SESSION_KEY, output_format=output_format) # Invalid Session KEY
lookup = defaultdict(dict)
for key, value in data.items():
if key in ["sk", "token", "api_key", "method", "api_sig"]:
continue
matches = re.match(r'(.*)\[(\d+)\]', key)
if matches:
key = matches.group(1)
number = matches.group(2)
else:
number = 0
lookup[number][key] = value
if request.form['method'].lower() == 'track.updatenowplaying':
for i, listen in lookup.items():
if 'timestamp' not in listen:
listen['timestamp'] = calendar.timegm(datetime.now().utctimetuple())
# Convert to native payload then submit 'em after validation.
listen_type, native_payload = _to_native_api(lookup, data['method'], output_format)
for listen in native_payload:
validate_listen(listen, listen_type)
augmented_listens = insert_payload(native_payload, session.user, listen_type=listen_type)
# augmented_listens may contain corrections to the originally submitted listens.
doc, tag, text = Doc().tagtext()
with tag('lfm', status='ok'):
with tag('nowplaying' if listen_type == 'playing_now' else 'scrobbles'):
for origL, augL in zip(list(lookup.values()), augmented_listens):
corr = defaultdict(lambda: '0')
track = augL['track_metadata']['track_name']
if origL['track'] != augL['track_metadata']['track_name']:
corr['track'] = '1'
artist = augL['track_metadata']['artist_name']
if origL['artist'] != augL['track_metadata']['artist_name']:
corr['artist'] = '1'
ts = augL['listened_at']
albumArtist = artist
if origL.get('albumArtist', origL['artist']) != artist:
corr['albumArtist'] = '1'
album = augL['track_metadata'].get('release_name', '')
if origL.get('album', '') != album:
corr['album'] = '1'
with tag('scrobble'):
with tag('track', corrected=corr['track']):
text(track)
with tag('artist', corrected=corr['artist']):
text(artist)
with tag('album', corrected=corr['album']):
text(album)
with tag('albumArtist', corrected=corr['albumArtist']):
text(albumArtist)
with tag('timestamp'):
text(ts)
with tag('ignoredMessage', code="0"):
text('')
return format_response('<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue()),
output_format)
def format_response(data, format="xml"):
""" Convert the XML response to required format.
NOTE: The order of attributes may change while converting from XML to other formats.
NOTE: The rendering format for the error does not follow these rules and has been managed separately
in the error handlers.
The response is a translation of the XML response format, converted according to the
following rules:
1. Attributes are expressed as string member values with the attribute name as key.
2. Element child nodes are expressed as object members values with the node name as key.
3. Text child nodes are expressed as string values, unless the element also contains
attributes, in which case the text node is expressed as a string member value with the
key #text.
4. Repeated child nodes will be grouped as an array member with the shared node name as key.
(The #text notation is rarely used in XML responses.)
"""
if format == 'xml':
return data
elif format == 'json':
# Remove the <lfm> tag and its attributes
jsonData = xmltodict.parse(data)['lfm']
for k in list(jsonData.keys()):
if k[0] == '@':
jsonData.pop(k)
def remove_attrib_prefix(data):
""" Filter the JSON response to merge some attributes and clean dict.
NOTE: This won't keep the dict ordered !!
"""
if not isinstance(data, dict):
return data
for k in list(data.keys()):
if k[0] == "@":
data[k[1:]] = data.pop(k)
elif isinstance(data[k], str):
continue
elif isinstance(data[k], list):
for ind, item in enumerate(data[k]):
data[k][ind] = remove_attrib_prefix(item)
elif isinstance(data[k], dict):
data[k] = remove_attrib_prefix(data[k])
else:
print(type(data[k]))
return data
return json.dumps(remove_attrib_prefix(jsonData), indent=4)
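# Sketch (not part of the original module): a small worked example of the
# XML -> JSON translation rules documented in format_response() above; the
# sample XML below is invented.
def _format_response_example():
    sample = ('<?xml version="1.0" encoding="utf-8"?>\n'
              '<lfm status="ok"><token>abc123</token></lfm>')
    # Rule 1: the status="ok" attribute surfaces as "@status" (and is dropped
    # here, just as format_response() strips the <lfm> attributes).
    parsed = xmltodict.parse(sample)["lfm"]
    parsed = {k: v for k, v in parsed.items() if not k.startswith("@")}
    # Rules 2-3: child elements become members, text nodes become strings.
    return json.dumps(parsed, indent=4)  # -> '{\n    "token": "abc123"\n}'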
def user_info(request, data):
""" Gives information about the user specified in the parameters.
"""
output_format = data.get('format', 'xml')
try:
api_key = data['api_key']
sk = data.get('sk')
username = data.get('user')
if not (sk or username):
raise KeyError
if not Token.is_valid_api_key(api_key):
raise InvalidAPIUsage(CompatError.INVALID_API_KEY, output_format=output_format) # Invalid API key
user = User.load_by_sessionkey(sk, api_key)
if not user:
raise InvalidAPIUsage(CompatError.INVALID_SESSION_KEY, output_format=output_format) # Invalid Session key
query_user = User.load_by_name(username) if (username and username != user.name) else user
if not query_user:
raise InvalidAPIUsage(CompatError.INVALID_RESOURCE, output_format=output_format) # Invalid resource specified
except KeyError:
raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format) # Missing required params
doc, tag, text = Doc().tagtext()
with tag('lfm', status='ok'):
with tag('user'):
with tag('name'):
text(query_user.name)
with tag('realname'):
text(query_user.name)
with tag('url'):
text('http://listenbrainz.org/user/' + query_user.name)
with tag('playcount'):
text(User.get_play_count(query_user.id, _influx))
with tag('registered', unixtime=str(query_user.created.strftime("%s"))):
text(str(query_user.created))
return format_response('<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue()),
data.get('format', "xml"))
| 1 | 14,745 | In general, if you find unalphabetized imports, you should alphabetize them. Fine for now though. | metabrainz-listenbrainz-server | py |
@@ -39,5 +39,5 @@ class InputDevice(object):
def clear_actions(self):
self.actions = []
- def create_pause(self, duraton=0):
+ def create_pause(self, duration=0):
pass | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import uuid
class InputDevice(object):
"""
Describes the input device being used for the action.
"""
def __init__(self, name=None):
if name is None:
self.name = uuid.uuid4()
else:
self.name = name
self.actions = []
def add_action(self, action):
"""
"""
self.actions.append(action)
def clear_actions(self):
self.actions = []
def create_pause(self, duraton=0):
pass
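# Sketch only (names and signature are assumptions, not Selenium's API): the
# review below suggests deprecating the misspelled keyword instead of removing
# it. A common pattern is to keep accepting the old name, warn, and forward it
# to the new name:
import warnings

def _create_pause_deprecation_sketch(duration=0, duraton=None):
    """Hypothetical illustration of renaming a keyword without breaking callers."""
    if duraton is not None:
        warnings.warn(
            "'duraton' is deprecated, use 'duration' instead",
            DeprecationWarning, stacklevel=2)
        duration = duraton
    return duration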
| 1 | 14,870 | we should probably deprecate (and display a warning) the misspelled keyword arg here rather than removing it... and then add the new one. This changes a public API and will break any code that is currently using the misspelled version. | SeleniumHQ-selenium | java |
@@ -49,7 +49,12 @@ namespace pwiz.Skyline.Model
SLens = explicitSLens;
ConeVoltage = explicitConeVoltage;
DeclusteringPotential = explicitDeclusteringPotential;
- CompensationVoltage = explicitCompensationVoltage;
+ if (explicitCompensationVoltage.HasValue &&
+ (explicitIonMobilityUnits != MsDataFileImpl.eIonMobilityUnits.compensation_V ||
+ !Equals(explicitIonMobility, explicitCompensationVoltage)))
+ {
+ CompensationVoltage = explicitCompensationVoltage;
+ }
}
public ExplicitTransitionGroupValues(ExplicitTransitionGroupValues other) | 1 | /*
* Original author: Brian Pratt <bspratt .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2014 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using pwiz.Common.SystemUtil;
using pwiz.ProteowizardWrapper;
namespace pwiz.Skyline.Model
{
public class ExplicitTransitionGroupValues : Immutable
{
/// <summary>
/// Helper class of attributes we normally calculate or get from a library, but which may
/// be specified in an imported transition list or by some other means.
/// </summary>
public static readonly ExplicitTransitionGroupValues EMPTY = new ExplicitTransitionGroupValues(null);
public ExplicitTransitionGroupValues(double? explicitCollisionEnergy,
double? explicitIonMobility,
double? explicitIonMobilityHighEnergyOffset,
MsDataFileImpl.eIonMobilityUnits explicitIonMobilityUnits,
double? explicitCollisionalCrossSectionSqA,
double? explicitSLens,
double? explicitConeVoltage,
double? explicitDeclusteringPotential,
double? explicitCompensationVoltage)
{
CollisionEnergy = explicitCollisionEnergy;
IonMobility = explicitIonMobility;
IonMobilityHighEnergyOffset = explicitIonMobilityHighEnergyOffset;
IonMobilityUnits = explicitIonMobilityUnits;
CollisionalCrossSectionSqA = explicitCollisionalCrossSectionSqA;
SLens = explicitSLens;
ConeVoltage = explicitConeVoltage;
DeclusteringPotential = explicitDeclusteringPotential;
CompensationVoltage = explicitCompensationVoltage;
}
public ExplicitTransitionGroupValues(ExplicitTransitionGroupValues other)
: this(
(other == null) ? null : other.CollisionEnergy,
(other == null) ? null : other.IonMobility,
(other == null) ? null : other.IonMobilityHighEnergyOffset,
(other == null) ? MsDataFileImpl.eIonMobilityUnits.none : other.IonMobilityUnits,
(other == null) ? null : other.CollisionalCrossSectionSqA,
(other == null) ? null : other.SLens,
(other == null) ? null : other.ConeVoltage,
(other == null) ? null : other.DeclusteringPotential,
(other == null) ? null : other.CompensationVoltage)
{
}
public double? CollisionEnergy { get; private set; } // For import formats with explicit values for CE
public double? IonMobility { get; private set; } // For import formats with explicit values for DT
public double? IonMobilityHighEnergyOffset { get; private set; } // For import formats with explicit values for DT
public MsDataFileImpl.eIonMobilityUnits IonMobilityUnits { get; private set; } // For import formats with explicit values for DT
public double? CollisionalCrossSectionSqA { get; private set; } // For import formats with explicit values for CCS
public double? SLens { get; private set; } // For Thermo
public double? ConeVoltage { get; private set; } // For Waters
public double? DeclusteringPotential { get; private set; } // For import formats with explicit values for DP
public double? CompensationVoltage { get; private set; } // For import formats with explicit values for CV
public ExplicitTransitionGroupValues ChangeCollisionEnergy(double? ce)
{
return ChangeProp(ImClone(this), (im, v) => im.CollisionEnergy = v, ce);
}
public ExplicitTransitionGroupValues ChangeIonMobilityHighEnergyOffset(double? dtOffset)
{
return ChangeProp(ImClone(this), (im, v) => im.IonMobilityHighEnergyOffset = v, dtOffset);
}
public ExplicitTransitionGroupValues ChangeIonMobility(double? imNew, MsDataFileImpl.eIonMobilityUnits unitsNew)
{
var explicitTransitionGroupValues = ChangeProp(ImClone(this), (im, v) => im.IonMobility = v, imNew);
return ChangeProp(ImClone(explicitTransitionGroupValues), (im, v) => im.IonMobilityUnits = v, unitsNew);
}
public ExplicitTransitionGroupValues ChangeCollisionalCrossSection(double? ccs)
{
return ChangeProp(ImClone(this), (im, v) => im.CollisionalCrossSectionSqA = v, ccs);
}
public ExplicitTransitionGroupValues ChangeSLens(double? slens)
{
return ChangeProp(ImClone(this), (im, v) => im.SLens = v, slens);
}
public ExplicitTransitionGroupValues ChangeConeVoltage(double? coneVoltage)
{
return ChangeProp(ImClone(this), (im, v) => im.ConeVoltage = v, coneVoltage);
}
public ExplicitTransitionGroupValues ChangeDeclusteringPotential(double? dp)
{
return ChangeProp(ImClone(this), (im, v) => im.DeclusteringPotential = v, dp);
}
public ExplicitTransitionGroupValues ChangeCompensationVoltage(double? cv)
{
return ChangeProp(ImClone(this), (im, v) => im.CompensationVoltage = v, cv);
}
protected bool Equals(ExplicitTransitionGroupValues other)
{
return Equals(CollisionEnergy, other.CollisionEnergy) &&
Equals(IonMobility, other.IonMobility) &&
Equals(IonMobilityHighEnergyOffset, other.IonMobilityHighEnergyOffset) &&
Equals(IonMobilityUnits, other.IonMobilityUnits) &&
Equals(CollisionalCrossSectionSqA, other.CollisionalCrossSectionSqA) &&
Equals(SLens, other.SLens) &&
Equals(ConeVoltage, other.ConeVoltage) &&
CompensationVoltage.Equals(other.CompensationVoltage) &&
DeclusteringPotential.Equals(other.DeclusteringPotential);
}
public override bool Equals(object obj)
{
if (ReferenceEquals(null, obj)) return false;
if (ReferenceEquals(this, obj)) return true;
if (obj.GetType() != GetType()) return false;
return Equals((ExplicitTransitionGroupValues)obj);
}
public override int GetHashCode()
{
unchecked
{
int hashCode = CollisionEnergy.GetHashCode();
hashCode = (hashCode * 397) ^ IonMobility.GetHashCode();
hashCode = (hashCode * 397) ^ IonMobilityHighEnergyOffset.GetHashCode();
hashCode = (hashCode * 397) ^ IonMobilityUnits.GetHashCode();
hashCode = (hashCode * 397) ^ CollisionalCrossSectionSqA.GetHashCode();
hashCode = (hashCode * 397) ^ SLens.GetHashCode();
hashCode = (hashCode * 397) ^ ConeVoltage.GetHashCode();
hashCode = (hashCode * 397) ^ DeclusteringPotential.GetHashCode();
hashCode = (hashCode * 397) ^ CompensationVoltage.GetHashCode();
return hashCode;
}
}
}
} | 1 | 12,205 | Is this if statement here necessary? It looks like this is the same logic that is taken care of in the setter for the property "CompensationVoltage". | ProteoWizard-pwiz | .cs |
@@ -68,6 +68,14 @@ def bulk_send(elastic, list_):
raise_on_exception=False
)
+def key_to_parts(key):
+ """make a string for fulltext indexing of file name"""
+ base, ext = os.path.splitext(key)
+ key_parts = base.split("/")
+ key_parts.append(ext[1:])
+
+ return f"{key} {' '.join(key_parts)}"
+
class DocumentQueue:
"""transient in-memory queue for documents to be indexed"""
def __init__(self, context): | 1 | """
phone data into elastic for supported file extensions.
note: we truncate outbound document text to DOC_LIMIT_BYTES bytes
(to bound memory pressure and request size to elastic)
"""
from datetime import datetime
from math import floor
import json
import os
from urllib.parse import unquote, unquote_plus
from aws_requests_auth.aws_auth import AWSRequestsAuth
import boto3
import botocore
from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch.helpers import bulk
import nbformat
from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential
CONTENT_INDEX_EXTS = [
".csv",
".html",
".htm",
".ipynb",
".json",
".md",
".rmd",
".tsv",
".txt",
".xml"
]
# 10 MB, see https://amzn.to/2xJpngN
CHUNK_LIMIT_BYTES = 20_000_000
DOC_LIMIT_BYTES = 2_000
ELASTIC_TIMEOUT = 30
MAX_RETRY = 4 # prevent long-running lambdas due to malformed calls
NB_VERSION = 4 # default notebook version for nbformat
# signifies that the object is truly deleted, not to be confused with
# s3:ObjectRemoved:DeleteMarkerCreated, which we may see in versioned buckets
# see https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
OBJECT_DELETE = "ObjectRemoved:Delete"
QUEUE_LIMIT_BYTES = 100_000_000# 100MB
RETRY_429 = 5
TEST_EVENT = "s3:TestEvent"
# we need to filter out GetObject and HeadObject calls generated by the present
# lambda in order to display accurate analytics in the Quilt catalog
# a custom user agent enables said filtration
USER_AGENT_EXTRA = " quilt3-lambdas-es-indexer"
def bulk_send(elastic, list_):
"""make a bulk() call to elastic"""
return bulk(
elastic,
list_,
# Some magic numbers to reduce memory pressure
# e.g. see https://github.com/wagtail/wagtail/issues/4554
chunk_size=100,# max number of documents sent in one chunk
# The stated default is max_chunk_bytes=10485760, but with default
# ES will still return an exception stating that the very
# same request size limit has been exceeded
max_chunk_bytes=CHUNK_LIMIT_BYTES,
# number of retries for 429 (too many requests only)
# all other errors handled by our code
max_retries=RETRY_429,
# we'll process errors on our own
raise_on_error=False,
raise_on_exception=False
)
class DocumentQueue:
"""transient in-memory queue for documents to be indexed"""
def __init__(self, context):
"""constructor"""
self.queue = []
self.size = 0
self.context = context
def append(
self,
event_type,
size=0,
meta=None,
*,
last_modified,
bucket,
ext,
key,
text,
etag,
version_id
):
"""format event as a document and then queue the document"""
# On types and fields, see
# https://www.elastic.co/guide/en/elasticsearch/reference/master/mapping.html
body = {
# Elastic native keys
"_id": f"{key}:{version_id}",
"_index": bucket,
# index will upsert (and clobber existing equivalent _ids)
"_op_type": "delete" if event_type == OBJECT_DELETE else "index",
"_type": "_doc",
# Quilt keys
# Be VERY CAREFUL changing these values, as a type change can cause a
# mapper_parsing_exception that below code won't handle
"etag": etag,
"ext": ext,
"event": event_type,
"size": size,
"text": text,
"key": key,
"last_modified": last_modified.isoformat(),
"updated": datetime.utcnow().isoformat(),
"version_id": version_id
}
body = {**body, **transform_meta(meta or {})}
body["meta_text"] = " ".join([body["meta_text"], key])
self.append_document(body)
if self.size >= QUEUE_LIMIT_BYTES:
self.send_all()
def append_document(self, doc):
"""append well-formed documents (used for retry or by append())"""
if doc["text"]:
# document text dominates memory footprint; OK to neglect the
# small fixed size for the JSON metadata
self.size += min(doc["size"], DOC_LIMIT_BYTES)
self.queue.append(doc)
def send_all(self):
"""flush self.queue in 1-2 bulk calls"""
if not self.queue:
return
elastic_host = os.environ["ES_HOST"]
session = boto3.session.Session()
credentials = session.get_credentials().get_frozen_credentials()
awsauth = AWSRequestsAuth(
# These environment variables are automatically set by Lambda
aws_access_key=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
aws_token=credentials.token,
aws_host=elastic_host,
aws_region=session.region_name,
aws_service="es"
)
elastic = Elasticsearch(
hosts=[{"host": elastic_host, "port": 443}],
http_auth=awsauth,
max_backoff=get_time_remaining(self.context),
# Give ES time to respond when under load
timeout=ELASTIC_TIMEOUT,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
_, errors = bulk_send(elastic, self.queue)
if errors:
id_to_doc = {d["_id"]: d for d in self.queue}
send_again = []
for error in errors:
# only retry index call errors, not delete errors
if "index" in error:
inner = error["index"]
info = inner.get("error")
doc = id_to_doc[inner["_id"]]
# because error.error might be a string *sigh*
if isinstance(info, dict):
if "mapper_parsing_exception" in info.get("type", ""):
print("mapper_parsing_exception", error, inner)
# clear out structured metadata and try again
doc["user_meta"] = doc["system"] = {}
else:
print("unhandled indexer error:", error)
# Always retry, regardless of whether we know to handle and clean the request
# or not. This can catch temporary 403 on index write blocks and other
# transient issues.
send_again.append(doc)
else:
# If index not in error, then retry the whole batch. Unclear what would cause
# that, but if there's an error without an id we need to assume it applies to
# the batch.
send_again = self.queue
print("unhandled indexer error (missing index field):", error)
# we won't retry after this (elasticsearch might retry 429s tho)
if send_again:
_, errors = bulk_send(elastic, send_again)
if errors:
raise Exception("Failed to load messages into Elastic on second retry.")
# empty the queue
self.size = 0
self.queue = []
def get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):
"""get the byte contents of a file"""
content = ""
if ext in CONTENT_INDEX_EXTS:
if ext == ".ipynb":
content = trim_to_bytes(
# we have no choice but to fetch the entire notebook, because we
# are going to parse it
# warning: huge notebooks could spike memory here
get_notebook_cells(
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
)
content = trim_to_bytes(content)
else:
content = get_plain_text(
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
return content
def extract_text(notebook_str):
""" Extract code and markdown
Args:
* nb - notebook as a string
Returns:
* str - select code and markdown source (and outputs)
Pre:
* notebook is well-formed per notebook version 4
* "cell_type" is defined for all cells
* "source" defined for all "code" and "markdown" cells
Throws:
* Anything nbformat.reads() can throw :( which is diverse and poorly
documented, hence the `except Exception` in handler()
Notes:
* Deliberately decided not to index output streams and display strings
because they were noisy and low value
* Tested this code against ~6400 Jupyter notebooks in
s3://alpha-quilt-storage/tree/notebook-search/
* Might be useful to index "cell_type" : "raw" in the future
See also:
* Format reference https://nbformat.readthedocs.io/en/latest/format_description.html
"""
formatted = nbformat.reads(notebook_str, as_version=NB_VERSION)
text = []
for cell in formatted.get("cells", []):
if "source" in cell and cell.get("cell_type") in ("code", "markdown"):
text.append(cell["source"])
return "\n".join(text)
def get_notebook_cells(bucket, key, size, *, etag, s3_client, version_id):
"""extract cells for ipynb notebooks for indexing"""
text = ""
try:
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
notebook = obj["Body"].read().decode("utf-8")
text = extract_text(notebook)
except UnicodeDecodeError as uni:
print(f"Unicode decode error in {key}: {uni}")
except (json.JSONDecodeError, nbformat.reader.NotJSONError):
print(f"Invalid JSON in {key}.")
except (KeyError, AttributeError) as err:
print(f"Missing key in {key}: {err}")
# there might be more errors than covered by test_read_notebook
# better not to fail altogether
except Exception as exc:#pylint: disable=broad-except
print(f"Exception in file {key}: {exc}")
return text
def get_plain_text(bucket, key, size, *, etag, s3_client, version_id):
"""get plain text object contents"""
text = ""
try:
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
limit=DOC_LIMIT_BYTES,
version_id=version_id
)
# decode with "ignore" because the byte limit might split a multi-byte character midstream
text = obj["Body"].read().decode("utf-8", "ignore")
except UnicodeDecodeError as ex:
print(f"Unicode decode error in {key}", ex)
return text
def get_time_remaining(context):
"""returns time remaining in seconds before lambda context is shut down"""
time_remaining = floor(context.get_remaining_time_in_millis()/1000)
if time_remaining < 30:
print(
f"Warning: Lambda function has less than {time_remaining} seconds."
" Consider reducing bulk batch size."
)
return time_remaining
def make_s3_client():
"""make a client with a custom user agent string so that we can
filter the present lambda's requests to S3 from object analytics"""
configuration = botocore.config.Config(user_agent_extra=USER_AGENT_EXTRA)
return boto3.client("s3", config=configuration)
def transform_meta(meta):
""" Reshapes metadata for indexing in ES """
helium = meta.get("helium", {})
user_meta = helium.pop("user_meta", {}) or {}
comment = helium.pop("comment", "") or ""
target = helium.pop("target", "") or ""
meta_text_parts = [comment, target]
if helium:
meta_text_parts.append(json.dumps(helium))
if user_meta:
meta_text_parts.append(json.dumps(user_meta))
return {
"system_meta": helium,
"user_meta": user_meta,
"comment": comment,
"target": target,
"meta_text": " ".join(meta_text_parts)
}
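# Sketch (not part of the original lambda): what transform_meta() produces for
# a typical Quilt "helium" metadata blob; the field values below are invented.
def _transform_meta_example():
    meta = {
        "helium": {
            "user_meta": {"species": "setosa"},
            "comment": "first upload",
            "target": "file",
        }
    }
    out = transform_meta(meta)
    # out == {"system_meta": {}, "user_meta": {"species": "setosa"},
    #         "comment": "first upload", "target": "file",
    #         "meta_text": 'first upload file {"species": "setosa"}'}
    return out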
def handler(event, context):
"""enumerate S3 keys in event, extract relevant data and metadata,
queue events, send to elastic via bulk() API
"""
# message is a proper SQS message, which either contains a single event
# (from the bucket notification system) or batch-many events as determined
# by enterprise/**/bulk_loader.py
for message in event["Records"]:
body = json.loads(message["body"])
body_message = json.loads(body["Message"])
if "Records" not in body_message:
if body_message.get("Event") == TEST_EVENT:
# Consume and ignore this event, which is an initial message from
# SQS; see https://forums.aws.amazon.com/thread.jspa?threadID=84331
continue
else:
print("Unexpected message['body']. No 'Records' key.", message)
raise Exception("Unexpected message['body']. No 'Records' key.")
batch_processor = DocumentQueue(context)
events = body_message.get("Records", [])
s3_client = make_s3_client()
# event is a single S3 event
for event_ in events:
try:
event_name = event_["eventName"]
bucket = unquote(event_["s3"]["bucket"]["name"])
# In the grand tradition of IE6, S3 events turn spaces into '+'
key = unquote_plus(event_["s3"]["object"]["key"])
version_id = event_["s3"]["object"].get("versionId")
version_id = unquote(version_id) if version_id else None
etag = unquote(event_["s3"]["object"]["eTag"])
_, ext = os.path.splitext(key)
ext = ext.lower()
head = retry_s3(
"head",
bucket,
key,
s3_client=s3_client,
version_id=version_id,
etag=etag
)
size = head["ContentLength"]
last_modified = head["LastModified"]
meta = head["Metadata"]
text = ""
if event_name == OBJECT_DELETE:
batch_processor.append(
event_name,
bucket=bucket,
ext=ext,
etag=etag,
key=key,
last_modified=last_modified,
text=text,
version_id=version_id
)
continue
_, ext = os.path.splitext(key)
ext = ext.lower()
text = get_contents(
bucket,
key,
ext,
etag=etag,
version_id=version_id,
s3_client=s3_client,
size=size
)
# decode Quilt-specific metadata
if meta and "helium" in meta:
try:
decoded_helium = json.loads(meta["helium"])
meta["helium"] = decoded_helium or {}
except (KeyError, json.JSONDecodeError):
print("Unable to parse Quilt 'helium' metadata", meta)
batch_processor.append(
event_name,
bucket=bucket,
key=key,
ext=ext,
meta=meta,
etag=etag,
version_id=version_id,
last_modified=last_modified,
size=size,
text=text
)
except Exception as exc:# pylint: disable=broad-except
print("Fatal exception for record", event_, exc)
import traceback
traceback.print_tb(exc.__traceback__)
raise exc
# flush the queue
batch_processor.send_all()
def retry_s3(
operation,
bucket,
key,
size=None,
limit=None,
*,
etag,
version_id,
s3_client
):
"""retry head or get operation to S3 with; stop before we run out of time.
retry is necessary since, due to eventual consistency, we may not
always get the required version of the object.
"""
if operation == "head":
function_ = s3_client.head_object
elif operation == "get":
function_ = s3_client.get_object
else:
raise ValueError(f"unexpected operation: {operation}")
# Keyword arguments to function_
arguments = {
"Bucket": bucket,
"Key": key
}
if operation == 'get' and size:
# can only request range if file is not empty
arguments['Range'] = f"bytes=0-{limit}"
if version_id:
arguments['VersionId'] = version_id
else:
arguments['IfMatch'] = etag
def not_known_exception(exception):
error_code = exception.response.get('Error', {}).get('Code', 218)
return error_code not in ["402", "403", "404"]
@retry(
# debug
reraise=True,
stop=stop_after_attempt(MAX_RETRY),
wait=wait_exponential(multiplier=2, min=4, max=30),
retry=(retry_if_exception(not_known_exception))
)
def call():
"""local function so we can set stop_after_delay dynamically"""
# TODO: remove all this, stop_after_delay is not dynamically loaded anymore
return function_(**arguments)
return call()
def trim_to_bytes(string, limit=DOC_LIMIT_BYTES):
"""trim string to specified number of bytes"""
encoded = string.encode("utf-8")
size = len(encoded)
if size <= limit:
return string
return encoded[:limit].decode("utf-8", "ignore")
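# Sketch (not part of the original lambda): trim_to_bytes() enforces a byte
# budget and silently drops any partially truncated multi-byte UTF-8 character.
def _trim_to_bytes_example():
    assert trim_to_bytes("abcdef", limit=4) == "abcd"
    # "é" is 2 bytes in UTF-8; a 2-byte limit cuts it in half and the partial
    # byte is dropped rather than raising a decode error
    assert trim_to_bytes("aé", limit=2) == "a"
    return True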
| 1 | 17,502 | Eliminate this function; handled by mappings and analyzer | quiltdata-quilt | py |
@@ -249,7 +249,14 @@ class JSTree extends AbstractBase
'recordID' => '__record_id__'
]
];
- $cache[$route] = $this->router->fromRoute($route, $params, $options);
+ $routeName = $route;
+ $datasource = $this->getDataSource();
+ if ($route === 'collection') {
+ $routeName = $datasource->getCollectionRoute();
+ } elseif ($route === 'record') {
+ $routeName = $datasource->getRecordRoute();
+ }
+ $cache[$route] = $this->router->fromRoute($routeName, $params, $options);
}
return str_replace('__record_id__', urlencode($id), $cache[$route]);
} | 1 | <?php
/**
* Hierarchy Tree Renderer for the JS_Tree plugin
*
* PHP version 7
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package HierarchyTree_Renderer
* @author Luke O'Sullivan <l.osullivan@swansea.ac.uk>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:hierarchy_components Wiki
*/
namespace VuFind\Hierarchy\TreeRenderer;
/**
* Hierarchy Tree Renderer
*
* This is a helper class for producing hierarchy trees.
*
* @category VuFind
* @package HierarchyTree_Renderer
* @author Luke O'Sullivan <l.osullivan@swansea.ac.uk>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:hierarchy_components Wiki
*/
class JSTree extends AbstractBase
implements \VuFind\I18n\Translator\TranslatorAwareInterface
{
use \VuFind\I18n\Translator\TranslatorAwareTrait;
/**
* Router plugin
*
* @var \Zend\Mvc\Controller\Plugin\Url
*/
protected $router = null;
/**
* Whether the collections functionality is enabled
*
* @var bool
*/
protected $collectionsEnabled;
/**
* Constructor
*
* @param \Zend\Mvc\Controller\Plugin\Url $router Router plugin for
* urls
* @param bool $collectionsEnabled Whether the
* collections functionality is enabled
*/
public function __construct(\Zend\Mvc\Controller\Plugin\Url $router,
$collectionsEnabled
) {
$this->router = $router;
$this->collectionsEnabled = $collectionsEnabled;
}
/**
* Get a list of trees containing the item represented by the stored record
* driver.
*
* @param string $hierarchyID Optional filter: specific hierarchy ID to retrieve
*
* @return mixed An array of hierarchy IDS if an archive tree exists,
* false if it does not
*/
public function getTreeList($hierarchyID = false)
{
$record = $this->getRecordDriver();
$inHierarchies = $record->getHierarchyTopID();
$inHierarchiesTitle = $record->getHierarchyTopTitle();
if ($hierarchyID) {
// Specific Hierarchy Supplied
if (in_array($hierarchyID, $inHierarchies)
&& $this->getDataSource()->supports($hierarchyID)
) {
return [
$hierarchyID => $this->getHierarchyName(
$hierarchyID, $inHierarchies, $inHierarchiesTitle
)
];
}
} else {
// Return All Hierarchies
$i = 0;
$hierarchies = [];
foreach ($inHierarchies as $hierarchyTopID) {
if ($this->getDataSource()->supports($hierarchyTopID)) {
$hierarchies[$hierarchyTopID] = $inHierarchiesTitle[$i] ?? '';
}
$i++;
}
if (!empty($hierarchies)) {
return $hierarchies;
}
}
// If we got this far, we couldn't find valid match(es).
return false;
}
/**
* Render the Hierarchy Tree
*
* @param string $context The context from which the call has been made
* @param string $mode The mode in which the tree should be generated
* @param string $hierarchyID The hierarchy ID of the tree to fetch (optional)
* @param string $recordID The current record ID (optional)
*
* @return mixed The desired hierarchy tree output (or false on error)
*/
public function render($context, $mode, $hierarchyID, $recordID = false)
{
if (!empty($context) && !empty($mode)) {
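            // 'List' mode renders HTML from the data source's JSON tree; any
            // other mode is produced by an XSLT transformation of the XML.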
if ($mode == 'List') {
$json = $this->getDataSource()->getJSON($hierarchyID);
if (!empty($json)) {
return $this->jsonToHTML(
json_decode($json),
$context,
$hierarchyID,
$this->recordDriver->getUniqueId()
);
}
} else {
return $this->transformCollectionXML(
$context, $mode, $hierarchyID, $recordID
);
}
}
return false;
}
/**
     * Get the hierarchy tree for the given hierarchy ID as a JSON string
*
* @param string $hierarchyID The hierarchy ID of the tree to fetch
* @param string $context Record or Collection
*
* @return mixed The desired hierarchy tree output (or false on error)
*/
public function getJSON($hierarchyID, $context = 'Record')
{
$json = $this->getDataSource()->getJSON($hierarchyID);
if ($json == null) {
return false;
}
return json_encode(
$this->buildNodeArray(json_decode($json), $context, $hierarchyID)
);
}
/**
* Recursive function to convert the json to the right format
*
* @param object $node JSON object of a node/top node
* @param string $context Record or Collection
* @param string $hierarchyID Collection ID
*
* @return array
*/
protected function buildNodeArray($node, $context, $hierarchyID)
{
$escaper = new \Zend\Escaper\Escaper('utf-8');
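        // Build a jsTree-compatible node: make the id DOM-safe, escape the
        // title, keep the raw record id in li_attr and link based on context.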
$ret = [
'id' => preg_replace('/\W/', '-', $node->id),
'text' => $escaper->escapeHtml($node->title),
'li_attr' => [
'recordid' => $node->id
],
'a_attr' => [
'href' => $this->getContextualUrl($node, $context),
'title' => $node->title
],
'type' => $node->type
];
if (isset($node->children)) {
$ret['children'] = [];
            for ($i = 0; $i < count($node->children); $i++) {
$ret['children'][$i] = $this
->buildNodeArray($node->children[$i], $context, $hierarchyID);
}
}
return $ret;
}
/**
* Use the router to build the appropriate URL based on context
*
* @param object $node JSON object of a node/top node
* @param string $context Record or Collection
*
* @return string
*/
protected function getContextualUrl($node, $context)
{
if ($context == 'Collection') {
return $this->getUrlFromRouteCache('collection', $node->id)
. '#tabnav';
} else {
$type = $node->type;
if ('collection' === $type && !$this->collectionsEnabled) {
$type = 'record';
}
$url = $this->getUrlFromRouteCache($type, $node->id);
return $type === 'collection'
? $url . '#tabnav'
: $url . '#tree-' . preg_replace('/\W/', '-', $node->id);
}
}
/**
* Get the URL for a record and cache it to avoid the relatively slow routing
* calls.
*
* @param string $route Route
* @param string $id Record ID
*
* @return string URL
*/
protected function getUrlFromRouteCache($route, $id)
{
static $cache = [];
if (!isset($cache[$route])) {
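            // Generate the route once with a placeholder ID; later calls only
            // perform the cheap string replacement below instead of routing.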
$params = [
'id' => '__record_id__',
'tab' => 'HierarchyTree'
];
$options = [
'query' => [
'recordID' => '__record_id__'
]
];
$cache[$route] = $this->router->fromRoute($route, $params, $options);
}
return str_replace('__record_id__', urlencode($id), $cache[$route]);
}
/**
* Convert JSTree JSON structure to HTML
*
     * @param object $node JSON object of the JSTree
* @param string $context Record or Collection
* @param string $hierarchyID Collection ID
* @param string $recordID The currently active record
*
* @return string
*/
protected function jsonToHTML($node, $context, $hierarchyID, $recordID = false)
{
$escaper = new \Zend\Escaper\Escaper('utf-8');
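        // Truncate very long titles to 100 characters for display; the full
        // title remains available through the link's title attribute.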
$name = strlen($node->title) > 100
? substr($node->title, 0, 100) . '...'
: $node->title;
$href = $this->getContextualUrl($node, $context);
$icon = $node->type == 'record' ? 'file-o' : 'folder-open';
$html = '<li';
if ($node->type == 'collection') {
$html .= ' class="hierarchy';
if ($recordID && $recordID == $node->id) {
$html .= ' currentHierarchy';
}
$html .= '"';
} elseif ($recordID && $recordID == $node->id) {
$html .= ' class="currentRecord"';
}
$html .= '><i class="fa fa-li fa-' . $icon . '"></i> '
. '<a name="tree-' . $escaper->escapeHtmlAttr($node->id) . '" href="'
. $escaper->escapeHtmlAttr($href) . '" title="'
. $escaper->escapeHtml($node->title) . '">'
. $escaper->escapeHtml($name) . '</a>';
if (isset($node->children)) {
$html .= '<ul class="fa-ul">';
foreach ($node->children as $child) {
$html .= $this->jsonToHTML(
$child, $context, $hierarchyID, $recordID
);
}
$html .= '</ul>';
}
return $html . '</li>';
}
/**
* Transforms Collection XML to Desired Format
*
* @param string $context The Context in which the tree is being displayed
* @param string $mode The Mode in which the tree is being displayed
* @param string $hierarchyID The hierarchy to get the tree for
* @param string $recordID The currently selected Record (false for none)
*
     * @return string An HTML list
*/
protected function transformCollectionXML(
$context, $mode, $hierarchyID, $recordID
) {
$record = $this->getRecordDriver();
$inHierarchies = $record->getHierarchyTopID();
$inHierarchiesTitle = $record->getHierarchyTopTitle();
$hierarchyTitle = $this->getHierarchyName(
$hierarchyID, $inHierarchies, $inHierarchiesTitle
);
// Set up parameters for XSL transformation
$params = [
'titleText' => $this->translate('collection_view_record'),
'collectionID' => $hierarchyID,
'collectionTitle' => $hierarchyTitle,
'baseURL' => rtrim($this->router->fromRoute('home'), '/'),
'context' => $context,
'recordID' => $recordID
];
// Transform the XML
$xmlFile = $this->getDataSource()->getXML($hierarchyID);
$transformation = ucfirst($context) . ucfirst($mode);
$xslFile = "Hierarchy/{$transformation}.xsl";
return \VuFind\XSLT\Processor::process($xslFile, $xmlFile, $params);
}
}
| 1 | 28,380 | I wonder if this new logic would actually fit better as a support method, both for readability and overriding... e.g. <pre> protected function getRouteNameFromDataSource($route) { if ($route === 'collection') { return $this->getDataSource()->getCollectionRoute(); } elseif ($route === 'record') { return $this->getDataSource()->getRecordRoute(); } return $route; } </pre> That's extremely nitpicky, and you can feel free to leave this as-is if you prefer... but that feels marginally better to me. | vufind-org-vufind | php |
@@ -35,13 +35,13 @@ public class BaseSuite {
public static ExternalResource testEnvironment = new ExternalResource() {
@Override
protected void before() {
- log.info("Preparing test environment");
+ log.finest("Preparing test environment");
GlobalTestEnvironment.get(SeleniumTestEnvironment.class);
System.setProperty("webdriver.remote.shorten_log_messages", "true");
}
@Override
protected void after() {
- log.info("Cleaning test environment");
+ log.finest("Cleaning test environment");
TestEnvironment environment = GlobalTestEnvironment.get();
if (environment != null) {
environment.stop(); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.thoughtworks.selenium;
import com.thoughtworks.selenium.testing.SeleniumTestEnvironment;
import org.junit.ClassRule;
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import org.openqa.selenium.environment.GlobalTestEnvironment;
import org.openqa.selenium.environment.TestEnvironment;
import java.util.logging.Logger;
public class BaseSuite {
private static final Logger log = Logger.getLogger(BaseSuite.class.getName());
public static ExternalResource testEnvironment = new ExternalResource() {
@Override
protected void before() {
log.info("Preparing test environment");
GlobalTestEnvironment.get(SeleniumTestEnvironment.class);
System.setProperty("webdriver.remote.shorten_log_messages", "true");
}
@Override
protected void after() {
log.info("Cleaning test environment");
TestEnvironment environment = GlobalTestEnvironment.get();
if (environment != null) {
environment.stop();
GlobalTestEnvironment.set(null);
}
}
};
public static ExternalResource browser = new ExternalResource() {
@Override
protected void after() {
log.info("Stopping browser");
try {
InternalSelenseTestBase.destroyDriver();
} catch (SeleniumException ignored) {
// Nothing sane to do
}
}
};
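  // The environment rule wraps the browser rule, so the test environment is
  // prepared first and cleaned up last.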
@ClassRule
public static TestRule chain =
RuleChain.outerRule(testEnvironment).around(browser);
}
| 1 | 16,446 | This is in test code: understanding what we're doing is important in this context. | SeleniumHQ-selenium | js |
@@ -16,7 +16,7 @@ import (
"github.com/tinygo-org/tinygo/ir"
"github.com/tinygo-org/tinygo/loader"
"golang.org/x/tools/go/ssa"
- "tinygo.org/x/go-llvm"
+ llvm "tinygo.org/x/go-llvm"
)
func init() { | 1 | package compiler
import (
"errors"
"fmt"
"go/ast"
"go/build"
"go/constant"
"go/token"
"go/types"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/tinygo-org/tinygo/ir"
"github.com/tinygo-org/tinygo/loader"
"golang.org/x/tools/go/ssa"
"tinygo.org/x/go-llvm"
)
func init() {
llvm.InitializeAllTargets()
llvm.InitializeAllTargetMCs()
llvm.InitializeAllTargetInfos()
llvm.InitializeAllAsmParsers()
llvm.InitializeAllAsmPrinters()
}
// The TinyGo import path.
const tinygoPath = "github.com/tinygo-org/tinygo"
// functionsUsedInTransforms is a list of function symbols that may be used
// during TinyGo optimization passes, so they have to be given external
// linkage until all TinyGo passes have finished.
var functionsUsedInTransforms = []string{
"runtime.alloc",
"runtime.free",
"runtime.sleepTask",
"runtime.sleepCurrentTask",
"runtime.setTaskStatePtr",
"runtime.getTaskStatePtr",
"runtime.activateTask",
"runtime.scheduler",
"runtime.startGoroutine",
}
// Config contains the settings used to configure the compiler.
type Config struct {
Triple string // LLVM target triple, e.g. x86_64-unknown-linux-gnu (empty string means default)
CPU string // LLVM CPU name, e.g. atmega328p (empty string means default)
Features []string // LLVM CPU features
GOOS string //
GOARCH string //
GC string // garbage collection strategy
Scheduler string // scheduler implementation ("coroutines" or "tasks")
PanicStrategy string // panic strategy ("print" or "trap")
CFlags []string // cflags to pass to cgo
LDFlags []string // ldflags to pass to cgo
ClangHeaders string // Clang built-in header include path
DumpSSA bool // dump Go SSA, for compiler debugging
Debug bool // add debug symbols for gdb
GOROOT string // GOROOT
TINYGOROOT string // GOROOT for TinyGo
GOPATH string // GOPATH, like `go env GOPATH`
BuildTags []string // build tags for TinyGo (empty means {Config.GOOS/Config.GOARCH})
TestConfig TestConfig
}
type TestConfig struct {
CompileTestBinary bool
// TODO: Filter the test functions to run, include verbose flag, etc
}
type Compiler struct {
Config
mod llvm.Module
ctx llvm.Context
builder llvm.Builder
dibuilder *llvm.DIBuilder
cu llvm.Metadata
difiles map[string]llvm.Metadata
machine llvm.TargetMachine
targetData llvm.TargetData
intType llvm.Type
i8ptrType llvm.Type // for convenience
funcPtrAddrSpace int
uintptrType llvm.Type
initFuncs []llvm.Value
interfaceInvokeWrappers []interfaceInvokeWrapper
ir *ir.Program
diagnostics []error
astComments map[string]*ast.CommentGroup
}
type Frame struct {
fn *ir.Function
locals map[ssa.Value]llvm.Value // local variables
blockEntries map[*ssa.BasicBlock]llvm.BasicBlock // a *ssa.BasicBlock may be split up
blockExits map[*ssa.BasicBlock]llvm.BasicBlock // these are the exit blocks
currentBlock *ssa.BasicBlock
phis []Phi
taskHandle llvm.Value
deferPtr llvm.Value
difunc llvm.Metadata
allDeferFuncs []interface{}
deferFuncs map[*ir.Function]int
deferInvokeFuncs map[string]int
deferClosureFuncs map[*ir.Function]int
selectRecvBuf map[*ssa.Select]llvm.Value
}
type Phi struct {
ssa *ssa.Phi
llvm llvm.Value
}
func NewCompiler(pkgName string, config Config) (*Compiler, error) {
if config.Triple == "" {
config.Triple = llvm.DefaultTargetTriple()
}
if len(config.BuildTags) == 0 {
config.BuildTags = []string{config.GOOS, config.GOARCH}
}
c := &Compiler{
Config: config,
difiles: make(map[string]llvm.Metadata),
}
target, err := llvm.GetTargetFromTriple(config.Triple)
if err != nil {
return nil, err
}
features := ""
if len(config.Features) > 0 {
features = strings.Join(config.Features, `,`)
}
c.machine = target.CreateTargetMachine(config.Triple, config.CPU, features, llvm.CodeGenLevelDefault, llvm.RelocStatic, llvm.CodeModelDefault)
c.targetData = c.machine.CreateTargetData()
c.ctx = llvm.NewContext()
c.mod = c.ctx.NewModule(pkgName)
c.mod.SetTarget(config.Triple)
c.mod.SetDataLayout(c.targetData.String())
c.builder = c.ctx.NewBuilder()
if c.Debug {
c.dibuilder = llvm.NewDIBuilder(c.mod)
}
c.uintptrType = c.ctx.IntType(c.targetData.PointerSize() * 8)
if c.targetData.PointerSize() <= 4 {
// 8, 16, 32 bits targets
c.intType = c.ctx.Int32Type()
} else if c.targetData.PointerSize() == 8 {
// 64 bits target
c.intType = c.ctx.Int64Type()
} else {
panic("unknown pointer size")
}
c.i8ptrType = llvm.PointerType(c.ctx.Int8Type(), 0)
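	// Determine the address space used for function pointers on this target
	// by creating a throwaway function and immediately removing it again.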
dummyFuncType := llvm.FunctionType(c.ctx.VoidType(), nil, false)
dummyFunc := llvm.AddFunction(c.mod, "tinygo.dummy", dummyFuncType)
c.funcPtrAddrSpace = dummyFunc.Type().PointerAddressSpace()
dummyFunc.EraseFromParentAsFunction()
return c, nil
}
func (c *Compiler) Packages() []*loader.Package {
return c.ir.LoaderProgram.Sorted()
}
// Return the LLVM module. Only valid after a successful compile.
func (c *Compiler) Module() llvm.Module {
return c.mod
}
// Return the LLVM target data object. Only valid after a successful compile.
func (c *Compiler) TargetData() llvm.TargetData {
return c.targetData
}
// selectGC picks an appropriate GC strategy if none was provided.
func (c *Compiler) selectGC() string {
if c.GC != "" {
return c.GC
}
return "conservative"
}
// selectScheduler picks an appropriate scheduler for the target if none was
// given.
func (c *Compiler) selectScheduler() string {
if c.Scheduler != "" {
// A scheduler was specified in the target description.
return c.Scheduler
}
// Fall back to coroutines, which are supported everywhere.
return "coroutines"
}
// Compile the given package path or .go file path. Return an error when this
// fails (in any stage).
func (c *Compiler) Compile(mainPath string) []error {
// Prefix the GOPATH with the system GOROOT, as GOROOT is already set to
// the TinyGo root.
overlayGopath := c.GOPATH
if overlayGopath == "" {
overlayGopath = c.GOROOT
} else {
overlayGopath = c.GOROOT + string(filepath.ListSeparator) + overlayGopath
}
wd, err := os.Getwd()
if err != nil {
return []error{err}
}
buildTags := append([]string{"tinygo", "gc." + c.selectGC(), "scheduler." + c.selectScheduler()}, c.BuildTags...)
lprogram := &loader.Program{
Build: &build.Context{
GOARCH: c.GOARCH,
GOOS: c.GOOS,
GOROOT: c.GOROOT,
GOPATH: c.GOPATH,
CgoEnabled: true,
UseAllFiles: false,
Compiler: "gc", // must be one of the recognized compilers
BuildTags: buildTags,
},
OverlayBuild: &build.Context{
GOARCH: c.GOARCH,
GOOS: c.GOOS,
GOROOT: c.TINYGOROOT,
GOPATH: overlayGopath,
CgoEnabled: true,
UseAllFiles: false,
Compiler: "gc", // must be one of the recognized compilers
BuildTags: buildTags,
},
OverlayPath: func(path string) string {
// Return the (overlay) import path when it should be overlaid, and
// "" if it should not.
if strings.HasPrefix(path, tinygoPath+"/src/") {
// Avoid issues with packages that are imported twice, one from
// GOPATH and one from TINYGOPATH.
path = path[len(tinygoPath+"/src/"):]
}
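			// Packages listed below are provided by TinyGo itself and are
			// loaded from the overlay (TINYGOROOT) instead of the regular
			// GOROOT/GOPATH.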
switch path {
case "machine", "os", "reflect", "runtime", "runtime/volatile", "sync", "testing":
return path
default:
if strings.HasPrefix(path, "device/") || strings.HasPrefix(path, "examples/") {
return path
} else if path == "syscall" {
for _, tag := range c.BuildTags {
if tag == "baremetal" || tag == "darwin" {
return path
}
}
}
}
return ""
},
TypeChecker: types.Config{
Sizes: &StdSizes{
IntSize: int64(c.targetData.TypeAllocSize(c.intType)),
PtrSize: int64(c.targetData.PointerSize()),
MaxAlign: int64(c.targetData.PrefTypeAlignment(c.i8ptrType)),
},
},
Dir: wd,
TINYGOROOT: c.TINYGOROOT,
CFlags: c.CFlags,
ClangHeaders: c.ClangHeaders,
}
if strings.HasSuffix(mainPath, ".go") {
_, err = lprogram.ImportFile(mainPath)
if err != nil {
return []error{err}
}
} else {
_, err = lprogram.Import(mainPath, wd)
if err != nil {
return []error{err}
}
}
_, err = lprogram.Import("runtime", "")
if err != nil {
return []error{err}
}
err = lprogram.Parse(c.TestConfig.CompileTestBinary)
if err != nil {
return []error{err}
}
c.ir = ir.NewProgram(lprogram, mainPath)
// Run a simple dead code elimination pass.
c.ir.SimpleDCE()
// Initialize debug information.
if c.Debug {
c.cu = c.dibuilder.CreateCompileUnit(llvm.DICompileUnit{
Language: 0xb, // DW_LANG_C99 (0xc, off-by-one?)
File: mainPath,
Dir: "",
Producer: "TinyGo",
Optimized: true,
})
}
var frames []*Frame
c.loadASTComments(lprogram)
// Declare all functions.
for _, f := range c.ir.Functions {
frames = append(frames, c.parseFuncDecl(f))
}
// Add definitions to declarations.
for _, frame := range frames {
if frame.fn.Synthetic == "package initializer" {
c.initFuncs = append(c.initFuncs, frame.fn.LLVMFn)
}
if frame.fn.CName() != "" {
continue
}
if frame.fn.Blocks == nil {
continue // external function
}
c.parseFunc(frame)
}
// Define the already declared functions that wrap methods for use in
// interfaces.
for _, state := range c.interfaceInvokeWrappers {
c.createInterfaceInvokeWrapper(state)
}
// After all packages are imported, add a synthetic initializer function
// that calls the initializer of each package.
initFn := c.ir.GetFunction(c.ir.Program.ImportedPackage("runtime").Members["initAll"].(*ssa.Function))
initFn.LLVMFn.SetLinkage(llvm.InternalLinkage)
initFn.LLVMFn.SetUnnamedAddr(true)
if c.Debug {
difunc := c.attachDebugInfo(initFn)
pos := c.ir.Program.Fset.Position(initFn.Pos())
c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{})
}
block := c.ctx.AddBasicBlock(initFn.LLVMFn, "entry")
c.builder.SetInsertPointAtEnd(block)
for _, fn := range c.initFuncs {
c.builder.CreateCall(fn, []llvm.Value{llvm.Undef(c.i8ptrType), llvm.Undef(c.i8ptrType)}, "")
}
c.builder.CreateRetVoid()
	// Preserve for goroutine lowering. Without marking these as external, they
// would be optimized away.
realMain := c.mod.NamedFunction(c.ir.MainPkg().Pkg.Path() + ".main")
realMain.SetLinkage(llvm.ExternalLinkage) // keep alive until goroutine lowering
	// Make sure these functions are kept intact during TinyGo transformation passes.
for _, name := range functionsUsedInTransforms {
fn := c.mod.NamedFunction(name)
if fn.IsNil() {
continue
}
fn.SetLinkage(llvm.ExternalLinkage)
}
// Load some attributes
getAttr := func(attrName string) llvm.Attribute {
attrKind := llvm.AttributeKindID(attrName)
return c.ctx.CreateEnumAttribute(attrKind, 0)
}
nocapture := getAttr("nocapture")
writeonly := getAttr("writeonly")
readonly := getAttr("readonly")
// Tell the optimizer that runtime.alloc is an allocator, meaning that it
// returns values that are never null and never alias to an existing value.
for _, attrName := range []string{"noalias", "nonnull"} {
c.mod.NamedFunction("runtime.alloc").AddAttributeAtIndex(0, getAttr(attrName))
}
// See emitNilCheck in asserts.go.
c.mod.NamedFunction("runtime.isnil").AddAttributeAtIndex(1, nocapture)
// This function is necessary for tracking pointers on the stack in a
// portable way (see gc.go). Indicate to the optimizer that the only thing
// we'll do is read the pointer.
trackPointer := c.mod.NamedFunction("runtime.trackPointer")
if !trackPointer.IsNil() {
trackPointer.AddAttributeAtIndex(1, nocapture)
trackPointer.AddAttributeAtIndex(1, readonly)
}
// Memory copy operations do not capture pointers, even though some weird
// pointer arithmetic is happening in the Go implementation.
for _, fnName := range []string{"runtime.memcpy", "runtime.memmove"} {
fn := c.mod.NamedFunction(fnName)
fn.AddAttributeAtIndex(1, nocapture)
fn.AddAttributeAtIndex(1, writeonly)
fn.AddAttributeAtIndex(2, nocapture)
fn.AddAttributeAtIndex(2, readonly)
}
// see: https://reviews.llvm.org/D18355
if c.Debug {
c.mod.AddNamedMetadataOperand("llvm.module.flags",
c.ctx.MDNode([]llvm.Metadata{
llvm.ConstInt(c.ctx.Int32Type(), 1, false).ConstantAsMetadata(), // Error on mismatch
llvm.GlobalContext().MDString("Debug Info Version"),
				llvm.ConstInt(c.ctx.Int32Type(), 3, false).ConstantAsMetadata(), // Debug Info Version
}),
)
c.mod.AddNamedMetadataOperand("llvm.module.flags",
c.ctx.MDNode([]llvm.Metadata{
llvm.ConstInt(c.ctx.Int32Type(), 1, false).ConstantAsMetadata(),
llvm.GlobalContext().MDString("Dwarf Version"),
llvm.ConstInt(c.ctx.Int32Type(), 4, false).ConstantAsMetadata(),
}),
)
c.dibuilder.Finalize()
}
return c.diagnostics
}
// getRuntimeType obtains a named type from the runtime package and returns it
// as a Go type.
func (c *Compiler) getRuntimeType(name string) types.Type {
return c.ir.Program.ImportedPackage("runtime").Type(name).Type()
}
// getLLVMRuntimeType obtains a named type from the runtime package and returns
// it as a LLVM type, creating it if necessary. It is a shorthand for
// getLLVMType(getRuntimeType(name)).
func (c *Compiler) getLLVMRuntimeType(name string) llvm.Type {
return c.getLLVMType(c.getRuntimeType(name))
}
// getLLVMType creates and returns a LLVM type for a Go type. In the case of
// named struct types (or Go types implemented as named LLVM structs such as
// strings) it also creates it first if necessary.
func (c *Compiler) getLLVMType(goType types.Type) llvm.Type {
switch typ := goType.(type) {
case *types.Array:
elemType := c.getLLVMType(typ.Elem())
return llvm.ArrayType(elemType, int(typ.Len()))
case *types.Basic:
switch typ.Kind() {
case types.Bool, types.UntypedBool:
return c.ctx.Int1Type()
case types.Int8, types.Uint8:
return c.ctx.Int8Type()
case types.Int16, types.Uint16:
return c.ctx.Int16Type()
case types.Int32, types.Uint32:
return c.ctx.Int32Type()
case types.Int, types.Uint:
return c.intType
case types.Int64, types.Uint64:
return c.ctx.Int64Type()
case types.Float32:
return c.ctx.FloatType()
case types.Float64:
return c.ctx.DoubleType()
case types.Complex64:
return c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false)
case types.Complex128:
return c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false)
case types.String, types.UntypedString:
return c.getLLVMRuntimeType("_string")
case types.Uintptr:
return c.uintptrType
case types.UnsafePointer:
return c.i8ptrType
default:
panic("unknown basic type: " + typ.String())
}
case *types.Chan:
return llvm.PointerType(c.getLLVMRuntimeType("channel"), 0)
case *types.Interface:
return c.getLLVMRuntimeType("_interface")
case *types.Map:
return llvm.PointerType(c.getLLVMRuntimeType("hashmap"), 0)
case *types.Named:
if st, ok := typ.Underlying().(*types.Struct); ok {
// Structs are a special case. While other named types are ignored
// in LLVM IR, named structs are implemented as named structs in
// LLVM. This is because it is otherwise impossible to create
// self-referencing types such as linked lists.
llvmName := typ.Obj().Pkg().Path() + "." + typ.Obj().Name()
llvmType := c.mod.GetTypeByName(llvmName)
if llvmType.IsNil() {
llvmType = c.ctx.StructCreateNamed(llvmName)
underlying := c.getLLVMType(st)
llvmType.StructSetBody(underlying.StructElementTypes(), false)
}
return llvmType
}
return c.getLLVMType(typ.Underlying())
case *types.Pointer:
ptrTo := c.getLLVMType(typ.Elem())
return llvm.PointerType(ptrTo, 0)
case *types.Signature: // function value
return c.getFuncType(typ)
case *types.Slice:
elemType := c.getLLVMType(typ.Elem())
members := []llvm.Type{
llvm.PointerType(elemType, 0),
c.uintptrType, // len
c.uintptrType, // cap
}
return c.ctx.StructType(members, false)
case *types.Struct:
members := make([]llvm.Type, typ.NumFields())
for i := 0; i < typ.NumFields(); i++ {
members[i] = c.getLLVMType(typ.Field(i).Type())
}
if len(members) > 2 && typ.Field(0).Name() == "C union" {
			// Not a normal struct but a C union emitted by cgo.
			// Such a field name cannot be entered in regular Go code; it must
			// have been inserted into the AST manually, so this is safe.
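			// Lower the union to a struct that contains only the member with
			// the strictest alignment, padded with extra bytes whenever some
			// other member is larger than that representative member.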
maxAlign := 0
maxSize := uint64(0)
mainType := members[0]
for _, member := range members {
align := c.targetData.ABITypeAlignment(member)
size := c.targetData.TypeAllocSize(member)
if align > maxAlign {
maxAlign = align
mainType = member
} else if align == maxAlign && size > maxSize {
maxAlign = align
maxSize = size
mainType = member
} else if size > maxSize {
maxSize = size
}
}
members = []llvm.Type{mainType}
mainTypeSize := c.targetData.TypeAllocSize(mainType)
if mainTypeSize < maxSize {
members = append(members, llvm.ArrayType(c.ctx.Int8Type(), int(maxSize-mainTypeSize)))
}
}
return c.ctx.StructType(members, false)
case *types.Tuple:
members := make([]llvm.Type, typ.Len())
for i := 0; i < typ.Len(); i++ {
members[i] = c.getLLVMType(typ.At(i).Type())
}
return c.ctx.StructType(members, false)
default:
panic("unknown type: " + goType.String())
}
}
// Return a zero LLVM value for any LLVM type. Setting this value as an
// initializer has the same effect as setting 'zeroinitializer' on a value.
// Sadly, I haven't found a way to do it directly with the Go API but this works
// just fine.
func (c *Compiler) getZeroValue(typ llvm.Type) llvm.Value {
switch typ.TypeKind() {
case llvm.ArrayTypeKind:
subTyp := typ.ElementType()
subVal := c.getZeroValue(subTyp)
vals := make([]llvm.Value, typ.ArrayLength())
for i := range vals {
vals[i] = subVal
}
return llvm.ConstArray(subTyp, vals)
case llvm.FloatTypeKind, llvm.DoubleTypeKind:
return llvm.ConstFloat(typ, 0.0)
case llvm.IntegerTypeKind:
return llvm.ConstInt(typ, 0, false)
case llvm.PointerTypeKind:
return llvm.ConstPointerNull(typ)
case llvm.StructTypeKind:
types := typ.StructElementTypes()
vals := make([]llvm.Value, len(types))
for i, subTyp := range types {
vals[i] = c.getZeroValue(subTyp)
}
if typ.StructName() != "" {
return llvm.ConstNamedStruct(typ, vals)
} else {
return c.ctx.ConstStruct(vals, false)
}
default:
panic("unknown LLVM zero inititializer: " + typ.String())
}
}
// Is this a pointer type of some sort? Can be unsafe.Pointer or any *T pointer.
func isPointer(typ types.Type) bool {
if _, ok := typ.(*types.Pointer); ok {
return true
} else if typ, ok := typ.(*types.Basic); ok && typ.Kind() == types.UnsafePointer {
return true
} else {
return false
}
}
// Get the DWARF type for this Go type.
func (c *Compiler) getDIType(typ types.Type) llvm.Metadata {
llvmType := c.getLLVMType(typ)
sizeInBytes := c.targetData.TypeAllocSize(llvmType)
switch typ := typ.(type) {
case *types.Array:
return c.dibuilder.CreateArrayType(llvm.DIArrayType{
SizeInBits: sizeInBytes * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8,
ElementType: c.getDIType(typ.Elem()),
Subscripts: []llvm.DISubrange{
llvm.DISubrange{
Lo: 0,
Count: typ.Len(),
},
},
})
case *types.Basic:
var encoding llvm.DwarfTypeEncoding
if typ.Info()&types.IsBoolean != 0 {
encoding = llvm.DW_ATE_boolean
} else if typ.Info()&types.IsFloat != 0 {
encoding = llvm.DW_ATE_float
} else if typ.Info()&types.IsComplex != 0 {
encoding = llvm.DW_ATE_complex_float
} else if typ.Info()&types.IsUnsigned != 0 {
encoding = llvm.DW_ATE_unsigned
} else if typ.Info()&types.IsInteger != 0 {
encoding = llvm.DW_ATE_signed
} else if typ.Kind() == types.UnsafePointer {
return c.dibuilder.CreatePointerType(llvm.DIPointerType{
Name: "unsafe.Pointer",
SizeInBits: c.targetData.TypeAllocSize(llvmType) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8,
AddressSpace: 0,
})
} else if typ.Info()&types.IsString != 0 {
return c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{
Name: "string",
SizeInBits: sizeInBytes * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8,
Elements: []llvm.Metadata{
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: "ptr",
SizeInBits: c.targetData.TypeAllocSize(c.i8ptrType) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(c.i8ptrType)) * 8,
OffsetInBits: 0,
Type: c.getDIType(types.NewPointer(types.Typ[types.Byte])),
}),
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: "len",
SizeInBits: c.targetData.TypeAllocSize(c.uintptrType) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(c.uintptrType)) * 8,
OffsetInBits: c.targetData.ElementOffset(llvmType, 1) * 8,
Type: c.getDIType(types.Typ[types.Uintptr]),
}),
},
})
} else {
panic("unknown basic type")
}
return c.dibuilder.CreateBasicType(llvm.DIBasicType{
Name: typ.String(),
SizeInBits: sizeInBytes * 8,
Encoding: encoding,
})
case *types.Chan:
return c.getDIType(types.NewPointer(c.ir.Program.ImportedPackage("runtime").Members["channel"].(*ssa.Type).Type()))
case *types.Interface:
return c.getDIType(c.ir.Program.ImportedPackage("runtime").Members["_interface"].(*ssa.Type).Type())
case *types.Map:
return c.getDIType(types.NewPointer(c.ir.Program.ImportedPackage("runtime").Members["hashmap"].(*ssa.Type).Type()))
case *types.Named:
return c.dibuilder.CreateTypedef(llvm.DITypedef{
Type: c.getDIType(typ.Underlying()),
Name: typ.String(),
})
case *types.Pointer:
return c.dibuilder.CreatePointerType(llvm.DIPointerType{
Pointee: c.getDIType(typ.Elem()),
SizeInBits: c.targetData.TypeAllocSize(llvmType) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8,
AddressSpace: 0,
})
case *types.Signature:
// actually a closure
fields := llvmType.StructElementTypes()
return c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{
SizeInBits: sizeInBytes * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8,
Elements: []llvm.Metadata{
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: "context",
SizeInBits: c.targetData.TypeAllocSize(fields[1]) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(fields[1])) * 8,
OffsetInBits: 0,
Type: c.getDIType(types.Typ[types.UnsafePointer]),
}),
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: "fn",
SizeInBits: c.targetData.TypeAllocSize(fields[0]) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(fields[0])) * 8,
OffsetInBits: c.targetData.ElementOffset(llvmType, 1) * 8,
Type: c.getDIType(types.Typ[types.UnsafePointer]),
}),
},
})
case *types.Slice:
fields := llvmType.StructElementTypes()
return c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{
Name: typ.String(),
SizeInBits: sizeInBytes * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8,
Elements: []llvm.Metadata{
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: "ptr",
SizeInBits: c.targetData.TypeAllocSize(fields[0]) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(fields[0])) * 8,
OffsetInBits: 0,
Type: c.getDIType(types.NewPointer(typ.Elem())),
}),
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: "len",
SizeInBits: c.targetData.TypeAllocSize(c.uintptrType) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(c.uintptrType)) * 8,
OffsetInBits: c.targetData.ElementOffset(llvmType, 1) * 8,
Type: c.getDIType(types.Typ[types.Uintptr]),
}),
c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: "cap",
SizeInBits: c.targetData.TypeAllocSize(c.uintptrType) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(c.uintptrType)) * 8,
OffsetInBits: c.targetData.ElementOffset(llvmType, 2) * 8,
Type: c.getDIType(types.Typ[types.Uintptr]),
}),
},
})
case *types.Struct:
elements := make([]llvm.Metadata, typ.NumFields())
for i := range elements {
field := typ.Field(i)
fieldType := field.Type()
if _, ok := fieldType.Underlying().(*types.Pointer); ok {
// XXX hack to avoid recursive types
fieldType = types.Typ[types.UnsafePointer]
}
llvmField := c.getLLVMType(fieldType)
elements[i] = c.dibuilder.CreateMemberType(llvm.Metadata{}, llvm.DIMemberType{
Name: field.Name(),
SizeInBits: c.targetData.TypeAllocSize(llvmField) * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmField)) * 8,
OffsetInBits: c.targetData.ElementOffset(llvmType, i) * 8,
Type: c.getDIType(fieldType),
})
}
return c.dibuilder.CreateStructType(llvm.Metadata{}, llvm.DIStructType{
SizeInBits: sizeInBytes * 8,
AlignInBits: uint32(c.targetData.ABITypeAlignment(llvmType)) * 8,
Elements: elements,
})
default:
panic("unknown type while generating DWARF debug type: " + typ.String())
}
}
func (c *Compiler) parseFuncDecl(f *ir.Function) *Frame {
frame := &Frame{
fn: f,
locals: make(map[ssa.Value]llvm.Value),
blockEntries: make(map[*ssa.BasicBlock]llvm.BasicBlock),
blockExits: make(map[*ssa.BasicBlock]llvm.BasicBlock),
}
var retType llvm.Type
if f.Signature.Results() == nil {
retType = c.ctx.VoidType()
} else if f.Signature.Results().Len() == 1 {
retType = c.getLLVMType(f.Signature.Results().At(0).Type())
} else {
results := make([]llvm.Type, 0, f.Signature.Results().Len())
for i := 0; i < f.Signature.Results().Len(); i++ {
results = append(results, c.getLLVMType(f.Signature.Results().At(i).Type()))
}
retType = c.ctx.StructType(results, false)
}
var paramTypes []llvm.Type
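	// A single Go parameter may be lowered to several LLVM parameters: each
	// one is expanded into one or more type fragments by expandFormalParamType.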
for _, param := range f.Params {
paramType := c.getLLVMType(param.Type())
paramTypeFragments := c.expandFormalParamType(paramType)
paramTypes = append(paramTypes, paramTypeFragments...)
}
// Add an extra parameter as the function context. This context is used in
// closures and bound methods, but should be optimized away when not used.
if !f.IsExported() {
paramTypes = append(paramTypes, c.i8ptrType) // context
paramTypes = append(paramTypes, c.i8ptrType) // parent coroutine
}
fnType := llvm.FunctionType(retType, paramTypes, false)
name := f.LinkName()
frame.fn.LLVMFn = c.mod.NamedFunction(name)
if frame.fn.LLVMFn.IsNil() {
frame.fn.LLVMFn = llvm.AddFunction(c.mod, name, fnType)
}
// External/exported functions may not retain pointer values.
// https://golang.org/cmd/cgo/#hdr-Passing_pointers
if f.IsExported() {
nocaptureKind := llvm.AttributeKindID("nocapture")
nocapture := c.ctx.CreateEnumAttribute(nocaptureKind, 0)
for i, typ := range paramTypes {
if typ.TypeKind() == llvm.PointerTypeKind {
frame.fn.LLVMFn.AddAttributeAtIndex(i+1, nocapture)
}
}
}
return frame
}
func (c *Compiler) attachDebugInfo(f *ir.Function) llvm.Metadata {
pos := c.ir.Program.Fset.Position(f.Syntax().Pos())
return c.attachDebugInfoRaw(f, f.LLVMFn, "", pos.Filename, pos.Line)
}
func (c *Compiler) attachDebugInfoRaw(f *ir.Function, llvmFn llvm.Value, suffix, filename string, line int) llvm.Metadata {
if _, ok := c.difiles[filename]; !ok {
dir, file := filepath.Split(filename)
if dir != "" {
dir = dir[:len(dir)-1]
}
c.difiles[filename] = c.dibuilder.CreateFile(file, dir)
}
// Debug info for this function.
diparams := make([]llvm.Metadata, 0, len(f.Params))
for _, param := range f.Params {
diparams = append(diparams, c.getDIType(param.Type()))
}
diFuncType := c.dibuilder.CreateSubroutineType(llvm.DISubroutineType{
File: c.difiles[filename],
Parameters: diparams,
Flags: 0, // ?
})
difunc := c.dibuilder.CreateFunction(c.difiles[filename], llvm.DIFunction{
Name: f.RelString(nil) + suffix,
LinkageName: f.LinkName() + suffix,
File: c.difiles[filename],
Line: line,
Type: diFuncType,
LocalToUnit: true,
IsDefinition: true,
ScopeLine: 0,
Flags: llvm.FlagPrototyped,
Optimized: true,
})
llvmFn.SetSubprogram(difunc)
return difunc
}
func (c *Compiler) parseFunc(frame *Frame) {
if c.DumpSSA {
fmt.Printf("\nfunc %s:\n", frame.fn.Function)
}
if !frame.fn.LLVMFn.IsDeclaration() {
c.addError(frame.fn.Pos(), "function is already defined:"+frame.fn.LLVMFn.Name())
return
}
if !frame.fn.IsExported() {
frame.fn.LLVMFn.SetLinkage(llvm.InternalLinkage)
frame.fn.LLVMFn.SetUnnamedAddr(true)
}
if frame.fn.IsInterrupt() && strings.HasPrefix(c.Triple, "avr") {
frame.fn.LLVMFn.SetFunctionCallConv(85) // CallingConv::AVR_SIGNAL
}
// Some functions have a pragma controlling the inlining level.
switch frame.fn.Inline() {
case ir.InlineHint:
// Add LLVM inline hint to functions with //go:inline pragma.
inline := c.ctx.CreateEnumAttribute(llvm.AttributeKindID("inlinehint"), 0)
frame.fn.LLVMFn.AddFunctionAttr(inline)
case ir.InlineNone:
// Add LLVM attribute to always avoid inlining this function.
noinline := c.ctx.CreateEnumAttribute(llvm.AttributeKindID("noinline"), 0)
frame.fn.LLVMFn.AddFunctionAttr(noinline)
}
// Add debug info, if needed.
if c.Debug {
if frame.fn.Synthetic == "package initializer" {
// Package initializers have no debug info. Create some fake debug
// info to at least have *something*.
frame.difunc = c.attachDebugInfoRaw(frame.fn, frame.fn.LLVMFn, "", "", 0)
} else if frame.fn.Syntax() != nil {
// Create debug info file if needed.
frame.difunc = c.attachDebugInfo(frame.fn)
}
pos := c.ir.Program.Fset.Position(frame.fn.Pos())
c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), frame.difunc, llvm.Metadata{})
}
// Pre-create all basic blocks in the function.
for _, block := range frame.fn.DomPreorder() {
llvmBlock := c.ctx.AddBasicBlock(frame.fn.LLVMFn, block.Comment)
frame.blockEntries[block] = llvmBlock
frame.blockExits[block] = llvmBlock
}
entryBlock := frame.blockEntries[frame.fn.Blocks[0]]
c.builder.SetInsertPointAtEnd(entryBlock)
// Load function parameters
llvmParamIndex := 0
for i, param := range frame.fn.Params {
llvmType := c.getLLVMType(param.Type())
fields := make([]llvm.Value, 0, 1)
for range c.expandFormalParamType(llvmType) {
fields = append(fields, frame.fn.LLVMFn.Param(llvmParamIndex))
llvmParamIndex++
}
frame.locals[param] = c.collapseFormalParam(llvmType, fields)
// Add debug information to this parameter (if available)
if c.Debug && frame.fn.Syntax() != nil {
pos := c.ir.Program.Fset.Position(frame.fn.Syntax().Pos())
diType := c.getDIType(param.Type())
dbgParam := c.dibuilder.CreateParameterVariable(frame.difunc, llvm.DIParameterVariable{
Name: param.Name(),
File: c.difiles[pos.Filename],
Line: pos.Line,
Type: diType,
AlwaysPreserve: true,
ArgNo: i + 1,
})
loc := c.builder.GetCurrentDebugLocation()
if len(fields) == 1 {
expr := c.dibuilder.CreateExpression(nil)
c.dibuilder.InsertValueAtEnd(fields[0], dbgParam, expr, loc, entryBlock)
} else {
fieldOffsets := c.expandFormalParamOffsets(llvmType)
for i, field := range fields {
expr := c.dibuilder.CreateExpression([]int64{
0x1000, // DW_OP_LLVM_fragment
int64(fieldOffsets[i]) * 8, // offset in bits
int64(c.targetData.TypeAllocSize(field.Type())) * 8, // size in bits
})
c.dibuilder.InsertValueAtEnd(field, dbgParam, expr, loc, entryBlock)
}
}
}
}
// Load free variables from the context. This is a closure (or bound
// method).
var context llvm.Value
if !frame.fn.IsExported() {
parentHandle := frame.fn.LLVMFn.LastParam()
parentHandle.SetName("parentHandle")
context = llvm.PrevParam(parentHandle)
context.SetName("context")
}
if len(frame.fn.FreeVars) != 0 {
// Get a list of all variable types in the context.
freeVarTypes := make([]llvm.Type, len(frame.fn.FreeVars))
for i, freeVar := range frame.fn.FreeVars {
freeVarTypes[i] = c.getLLVMType(freeVar.Type())
}
// Load each free variable from the context pointer.
// A free variable is always a pointer when this is a closure, but it
// can be another type when it is a wrapper for a bound method (these
// wrappers are generated by the ssa package).
for i, val := range c.emitPointerUnpack(context, freeVarTypes) {
frame.locals[frame.fn.FreeVars[i]] = val
}
}
if frame.fn.Recover != nil {
// This function has deferred function calls. Set some things up for
// them.
c.deferInitFunc(frame)
}
// Fill blocks with instructions.
for _, block := range frame.fn.DomPreorder() {
if c.DumpSSA {
fmt.Printf("%d: %s:\n", block.Index, block.Comment)
}
c.builder.SetInsertPointAtEnd(frame.blockEntries[block])
frame.currentBlock = block
for _, instr := range block.Instrs {
if _, ok := instr.(*ssa.DebugRef); ok {
continue
}
if c.DumpSSA {
if val, ok := instr.(ssa.Value); ok && val.Name() != "" {
fmt.Printf("\t%s = %s\n", val.Name(), val.String())
} else {
fmt.Printf("\t%s\n", instr.String())
}
}
c.parseInstr(frame, instr)
}
if frame.fn.Name() == "init" && len(block.Instrs) == 0 {
c.builder.CreateRetVoid()
}
}
// Resolve phi nodes
for _, phi := range frame.phis {
block := phi.ssa.Block()
for i, edge := range phi.ssa.Edges {
llvmVal := c.getValue(frame, edge)
llvmBlock := frame.blockExits[block.Preds[i]]
phi.llvm.AddIncoming([]llvm.Value{llvmVal}, []llvm.BasicBlock{llvmBlock})
}
}
}
func (c *Compiler) parseInstr(frame *Frame, instr ssa.Instruction) {
if c.Debug {
pos := c.ir.Program.Fset.Position(instr.Pos())
c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), frame.difunc, llvm.Metadata{})
}
switch instr := instr.(type) {
case ssa.Value:
if value, err := c.parseExpr(frame, instr); err != nil {
// This expression could not be parsed. Add the error to the list
// of diagnostics and continue with an undef value.
// The resulting IR will be incorrect (but valid). However,
// compilation can proceed which is useful because there may be
// more compilation errors which can then all be shown together to
// the user.
c.diagnostics = append(c.diagnostics, err)
frame.locals[instr] = llvm.Undef(c.getLLVMType(instr.Type()))
} else {
frame.locals[instr] = value
if len(*instr.Referrers()) != 0 && c.needsStackObjects() {
c.trackExpr(frame, instr, value)
}
}
case *ssa.DebugRef:
// ignore
case *ssa.Defer:
c.emitDefer(frame, instr)
case *ssa.Go:
// Get all function parameters to pass to the goroutine.
var params []llvm.Value
for _, param := range instr.Call.Args {
params = append(params, c.getValue(frame, param))
}
// Start a new goroutine.
if callee := instr.Call.StaticCallee(); callee != nil {
// Static callee is known. This makes it easier to start a new
// goroutine.
calleeFn := c.ir.GetFunction(callee)
if !calleeFn.IsExported() && c.selectScheduler() != "tasks" {
// For coroutine scheduling, this is only required when calling
// an external function.
// For tasks, because all params are stored in a single object,
// no unnecessary parameters should be stored anyway.
params = append(params, llvm.Undef(c.i8ptrType)) // context parameter
params = append(params, llvm.ConstPointerNull(c.i8ptrType)) // parent coroutine handle
}
c.emitStartGoroutine(calleeFn.LLVMFn, params)
} else if !instr.Call.IsInvoke() {
// This is a function pointer.
// At the moment, two extra params are passed to the newly started
// goroutine:
// * The function context, for closures.
// * The parent handle (for coroutines) or the function pointer
// itself (for tasks).
funcPtr, context := c.decodeFuncValue(c.getValue(frame, instr.Call.Value), instr.Call.Value.Type().(*types.Signature))
params = append(params, context) // context parameter
switch c.selectScheduler() {
case "coroutines":
params = append(params, llvm.ConstPointerNull(c.i8ptrType)) // parent coroutine handle
case "tasks":
params = append(params, funcPtr)
default:
panic("unknown scheduler type")
}
c.emitStartGoroutine(funcPtr, params)
} else {
c.addError(instr.Pos(), "todo: go on interface call")
}
case *ssa.If:
cond := c.getValue(frame, instr.Cond)
block := instr.Block()
blockThen := frame.blockEntries[block.Succs[0]]
blockElse := frame.blockEntries[block.Succs[1]]
c.builder.CreateCondBr(cond, blockThen, blockElse)
case *ssa.Jump:
blockJump := frame.blockEntries[instr.Block().Succs[0]]
c.builder.CreateBr(blockJump)
case *ssa.MapUpdate:
m := c.getValue(frame, instr.Map)
key := c.getValue(frame, instr.Key)
value := c.getValue(frame, instr.Value)
mapType := instr.Map.Type().Underlying().(*types.Map)
c.emitMapUpdate(mapType.Key(), m, key, value, instr.Pos())
case *ssa.Panic:
value := c.getValue(frame, instr.X)
c.createRuntimeCall("_panic", []llvm.Value{value}, "")
c.builder.CreateUnreachable()
case *ssa.Return:
if len(instr.Results) == 0 {
c.builder.CreateRetVoid()
} else if len(instr.Results) == 1 {
c.builder.CreateRet(c.getValue(frame, instr.Results[0]))
} else {
// Multiple return values. Put them all in a struct.
retVal := c.getZeroValue(frame.fn.LLVMFn.Type().ElementType().ReturnType())
for i, result := range instr.Results {
val := c.getValue(frame, result)
retVal = c.builder.CreateInsertValue(retVal, val, i, "")
}
c.builder.CreateRet(retVal)
}
case *ssa.RunDefers:
c.emitRunDefers(frame)
case *ssa.Send:
c.emitChanSend(frame, instr)
case *ssa.Store:
llvmAddr := c.getValue(frame, instr.Addr)
llvmVal := c.getValue(frame, instr.Val)
c.emitNilCheck(frame, llvmAddr, "store")
if c.targetData.TypeAllocSize(llvmVal.Type()) == 0 {
// nothing to store
return
}
c.builder.CreateStore(llvmVal, llvmAddr)
default:
c.addError(instr.Pos(), "unknown instruction: "+instr.String())
}
}
func (c *Compiler) parseBuiltin(frame *Frame, args []ssa.Value, callName string, pos token.Pos) (llvm.Value, error) {
switch callName {
case "append":
src := c.getValue(frame, args[0])
elems := c.getValue(frame, args[1])
srcBuf := c.builder.CreateExtractValue(src, 0, "append.srcBuf")
srcPtr := c.builder.CreateBitCast(srcBuf, c.i8ptrType, "append.srcPtr")
srcLen := c.builder.CreateExtractValue(src, 1, "append.srcLen")
srcCap := c.builder.CreateExtractValue(src, 2, "append.srcCap")
elemsBuf := c.builder.CreateExtractValue(elems, 0, "append.elemsBuf")
elemsPtr := c.builder.CreateBitCast(elemsBuf, c.i8ptrType, "append.srcPtr")
elemsLen := c.builder.CreateExtractValue(elems, 1, "append.elemsLen")
elemType := srcBuf.Type().ElementType()
elemSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(elemType), false)
result := c.createRuntimeCall("sliceAppend", []llvm.Value{srcPtr, elemsPtr, srcLen, srcCap, elemsLen, elemSize}, "append.new")
newPtr := c.builder.CreateExtractValue(result, 0, "append.newPtr")
newBuf := c.builder.CreateBitCast(newPtr, srcBuf.Type(), "append.newBuf")
newLen := c.builder.CreateExtractValue(result, 1, "append.newLen")
newCap := c.builder.CreateExtractValue(result, 2, "append.newCap")
newSlice := llvm.Undef(src.Type())
newSlice = c.builder.CreateInsertValue(newSlice, newBuf, 0, "")
newSlice = c.builder.CreateInsertValue(newSlice, newLen, 1, "")
newSlice = c.builder.CreateInsertValue(newSlice, newCap, 2, "")
return newSlice, nil
case "cap":
value := c.getValue(frame, args[0])
var llvmCap llvm.Value
switch args[0].Type().(type) {
case *types.Chan:
// Channel. Buffered channels haven't been implemented yet so always
// return 0.
llvmCap = llvm.ConstInt(c.intType, 0, false)
case *types.Slice:
llvmCap = c.builder.CreateExtractValue(value, 2, "cap")
default:
return llvm.Value{}, c.makeError(pos, "todo: cap: unknown type")
}
if c.targetData.TypeAllocSize(llvmCap.Type()) < c.targetData.TypeAllocSize(c.intType) {
llvmCap = c.builder.CreateZExt(llvmCap, c.intType, "len.int")
}
return llvmCap, nil
case "close":
c.emitChanClose(frame, args[0])
return llvm.Value{}, nil
case "complex":
r := c.getValue(frame, args[0])
i := c.getValue(frame, args[1])
t := args[0].Type().Underlying().(*types.Basic)
var cplx llvm.Value
switch t.Kind() {
case types.Float32:
cplx = llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false))
case types.Float64:
cplx = llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false))
default:
return llvm.Value{}, c.makeError(pos, "unsupported type in complex builtin: "+t.String())
}
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
case "copy":
dst := c.getValue(frame, args[0])
src := c.getValue(frame, args[1])
dstLen := c.builder.CreateExtractValue(dst, 1, "copy.dstLen")
srcLen := c.builder.CreateExtractValue(src, 1, "copy.srcLen")
dstBuf := c.builder.CreateExtractValue(dst, 0, "copy.dstArray")
srcBuf := c.builder.CreateExtractValue(src, 0, "copy.srcArray")
elemType := dstBuf.Type().ElementType()
dstBuf = c.builder.CreateBitCast(dstBuf, c.i8ptrType, "copy.dstPtr")
srcBuf = c.builder.CreateBitCast(srcBuf, c.i8ptrType, "copy.srcPtr")
elemSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(elemType), false)
return c.createRuntimeCall("sliceCopy", []llvm.Value{dstBuf, srcBuf, dstLen, srcLen, elemSize}, "copy.n"), nil
case "delete":
m := c.getValue(frame, args[0])
key := c.getValue(frame, args[1])
return llvm.Value{}, c.emitMapDelete(args[1].Type(), m, key, pos)
case "imag":
cplx := c.getValue(frame, args[0])
return c.builder.CreateExtractValue(cplx, 1, "imag"), nil
case "len":
value := c.getValue(frame, args[0])
var llvmLen llvm.Value
switch args[0].Type().Underlying().(type) {
case *types.Basic, *types.Slice:
// string or slice
llvmLen = c.builder.CreateExtractValue(value, 1, "len")
case *types.Chan:
// Channel. Buffered channels haven't been implemented yet so always
// return 0.
llvmLen = llvm.ConstInt(c.intType, 0, false)
case *types.Map:
llvmLen = c.createRuntimeCall("hashmapLen", []llvm.Value{value}, "len")
default:
return llvm.Value{}, c.makeError(pos, "todo: len: unknown type")
}
if c.targetData.TypeAllocSize(llvmLen.Type()) < c.targetData.TypeAllocSize(c.intType) {
llvmLen = c.builder.CreateZExt(llvmLen, c.intType, "len.int")
}
return llvmLen, nil
case "print", "println":
for i, arg := range args {
if i >= 1 && callName == "println" {
c.createRuntimeCall("printspace", nil, "")
}
value := c.getValue(frame, arg)
typ := arg.Type().Underlying()
switch typ := typ.(type) {
case *types.Basic:
switch typ.Kind() {
case types.String, types.UntypedString:
c.createRuntimeCall("printstring", []llvm.Value{value}, "")
case types.Uintptr:
c.createRuntimeCall("printptr", []llvm.Value{value}, "")
case types.UnsafePointer:
ptrValue := c.builder.CreatePtrToInt(value, c.uintptrType, "")
c.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "")
default:
// runtime.print{int,uint}{8,16,32,64}
if typ.Info()&types.IsInteger != 0 {
name := "print"
if typ.Info()&types.IsUnsigned != 0 {
name += "uint"
} else {
name += "int"
}
name += strconv.FormatUint(c.targetData.TypeAllocSize(value.Type())*8, 10)
c.createRuntimeCall(name, []llvm.Value{value}, "")
} else if typ.Kind() == types.Bool {
c.createRuntimeCall("printbool", []llvm.Value{value}, "")
} else if typ.Kind() == types.Float32 {
c.createRuntimeCall("printfloat32", []llvm.Value{value}, "")
} else if typ.Kind() == types.Float64 {
c.createRuntimeCall("printfloat64", []llvm.Value{value}, "")
} else if typ.Kind() == types.Complex64 {
c.createRuntimeCall("printcomplex64", []llvm.Value{value}, "")
} else if typ.Kind() == types.Complex128 {
c.createRuntimeCall("printcomplex128", []llvm.Value{value}, "")
} else {
return llvm.Value{}, c.makeError(pos, "unknown basic arg type: "+typ.String())
}
}
case *types.Interface:
c.createRuntimeCall("printitf", []llvm.Value{value}, "")
case *types.Map:
c.createRuntimeCall("printmap", []llvm.Value{value}, "")
case *types.Pointer:
ptrValue := c.builder.CreatePtrToInt(value, c.uintptrType, "")
c.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "")
default:
return llvm.Value{}, c.makeError(pos, "unknown arg type: "+typ.String())
}
}
if callName == "println" {
c.createRuntimeCall("printnl", nil, "")
}
return llvm.Value{}, nil // print() or println() returns void
case "real":
cplx := c.getValue(frame, args[0])
return c.builder.CreateExtractValue(cplx, 0, "real"), nil
case "recover":
return c.createRuntimeCall("_recover", nil, ""), nil
case "ssa:wrapnilchk":
// TODO: do an actual nil check?
return c.getValue(frame, args[0]), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: builtin: "+callName)
}
}
func (c *Compiler) parseFunctionCall(frame *Frame, args []ssa.Value, llvmFn, context llvm.Value, exported bool) llvm.Value {
var params []llvm.Value
for _, param := range args {
params = append(params, c.getValue(frame, param))
}
if !exported {
// This function takes a context parameter.
// Add it to the end of the parameter list.
params = append(params, context)
// Parent coroutine handle.
params = append(params, llvm.Undef(c.i8ptrType))
}
return c.createCall(llvmFn, params, "")
}
func (c *Compiler) parseCall(frame *Frame, instr *ssa.CallCommon) (llvm.Value, error) {
if instr.IsInvoke() {
fnCast, args := c.getInvokeCall(frame, instr)
return c.createCall(fnCast, args, ""), nil
}
// Try to call the function directly for trivially static calls.
if fn := instr.StaticCallee(); fn != nil {
name := fn.RelString(nil)
switch {
case name == "device/arm.ReadRegister" || name == "device/riscv.ReadRegister":
return c.emitReadRegister(name, instr.Args)
case name == "device/arm.Asm" || name == "device/avr.Asm" || name == "device/riscv.Asm":
return c.emitAsm(instr.Args)
case name == "device/arm.AsmFull" || name == "device/avr.AsmFull" || name == "device/riscv.AsmFull":
return c.emitAsmFull(frame, instr)
case strings.HasPrefix(name, "device/arm.SVCall"):
return c.emitSVCall(frame, instr.Args)
case strings.HasPrefix(name, "syscall.Syscall"):
return c.emitSyscall(frame, instr)
case strings.HasPrefix(name, "runtime/volatile.Load"):
return c.emitVolatileLoad(frame, instr)
case strings.HasPrefix(name, "runtime/volatile.Store"):
return c.emitVolatileStore(frame, instr)
}
targetFunc := c.ir.GetFunction(fn)
if targetFunc.LLVMFn.IsNil() {
return llvm.Value{}, c.makeError(instr.Pos(), "undefined function: "+targetFunc.LinkName())
}
var context llvm.Value
switch value := instr.Value.(type) {
case *ssa.Function:
// Regular function call. No context is necessary.
context = llvm.Undef(c.i8ptrType)
case *ssa.MakeClosure:
// A call on a func value, but the callee is trivial to find. For
// example: immediately applied functions.
funcValue := c.getValue(frame, value)
context = c.extractFuncContext(funcValue)
default:
panic("StaticCallee returned an unexpected value")
}
return c.parseFunctionCall(frame, instr.Args, targetFunc.LLVMFn, context, targetFunc.IsExported()), nil
}
// Builtin or function pointer.
switch call := instr.Value.(type) {
case *ssa.Builtin:
return c.parseBuiltin(frame, instr.Args, call.Name(), instr.Pos())
default: // function pointer
value := c.getValue(frame, instr.Value)
// This is a func value, which cannot be called directly. We have to
// extract the function pointer and context first from the func value.
funcPtr, context := c.decodeFuncValue(value, instr.Value.Type().Underlying().(*types.Signature))
c.emitNilCheck(frame, funcPtr, "fpcall")
return c.parseFunctionCall(frame, instr.Args, funcPtr, context, false), nil
}
}
// getValue returns the LLVM value of a constant, function value, global, or
// already processed SSA expression.
func (c *Compiler) getValue(frame *Frame, expr ssa.Value) llvm.Value {
switch expr := expr.(type) {
case *ssa.Const:
return c.parseConst(frame.fn.LinkName(), expr)
case *ssa.Function:
fn := c.ir.GetFunction(expr)
if fn.IsExported() {
c.addError(expr.Pos(), "cannot use an exported function as value: "+expr.String())
return llvm.Undef(c.getLLVMType(expr.Type()))
}
return c.createFuncValue(fn.LLVMFn, llvm.Undef(c.i8ptrType), fn.Signature)
case *ssa.Global:
value := c.getGlobal(expr)
if value.IsNil() {
c.addError(expr.Pos(), "global not found: "+expr.RelString(nil))
return llvm.Undef(c.getLLVMType(expr.Type()))
}
return value
default:
// other (local) SSA value
if value, ok := frame.locals[expr]; ok {
return value
} else {
// indicates a compiler bug
panic("local has not been parsed: " + expr.String())
}
}
}
// parseExpr translates a Go SSA expression to a LLVM instruction.
func (c *Compiler) parseExpr(frame *Frame, expr ssa.Value) (llvm.Value, error) {
if _, ok := frame.locals[expr]; ok {
// sanity check
panic("local has already been parsed: " + expr.String())
}
switch expr := expr.(type) {
case *ssa.Alloc:
typ := c.getLLVMType(expr.Type().Underlying().(*types.Pointer).Elem())
if expr.Heap {
size := c.targetData.TypeAllocSize(typ)
// Calculate ^uintptr(0)
maxSize := llvm.ConstNot(llvm.ConstInt(c.uintptrType, 0, false)).ZExtValue()
if size > maxSize {
				// The size would be truncated if converted to uintptr, so report an error.
return llvm.Value{}, c.makeError(expr.Pos(), fmt.Sprintf("value is too big (%v bytes)", size))
}
sizeValue := llvm.ConstInt(c.uintptrType, size, false)
buf := c.createRuntimeCall("alloc", []llvm.Value{sizeValue}, expr.Comment)
buf = c.builder.CreateBitCast(buf, llvm.PointerType(typ, 0), "")
return buf, nil
} else {
buf := c.createEntryBlockAlloca(typ, expr.Comment)
if c.targetData.TypeAllocSize(typ) != 0 {
c.builder.CreateStore(c.getZeroValue(typ), buf) // zero-initialize var
}
return buf, nil
}
case *ssa.BinOp:
x := c.getValue(frame, expr.X)
y := c.getValue(frame, expr.Y)
return c.parseBinOp(expr.Op, expr.X.Type(), x, y, expr.Pos())
case *ssa.Call:
// Passing the current task here to the subroutine. It is only used when
// the subroutine is blocking.
return c.parseCall(frame, expr.Common())
case *ssa.ChangeInterface:
// Do not change between interface types: always use the underlying
// (concrete) type in the type number of the interface. Every method
// call on an interface will do a lookup which method to call.
// This is different from how the official Go compiler works, because of
// heap allocation and because it's easier to implement, see:
// https://research.swtch.com/interfaces
return c.getValue(frame, expr.X), nil
case *ssa.ChangeType:
// This instruction changes the type, but the underlying value remains
// the same. This is often a no-op, but sometimes we have to change the
// LLVM type as well.
x := c.getValue(frame, expr.X)
llvmType := c.getLLVMType(expr.Type())
if x.Type() == llvmType {
// Different Go type but same LLVM type (for example, named int).
// This is the common case.
return x, nil
}
// Figure out what kind of type we need to cast.
switch llvmType.TypeKind() {
case llvm.StructTypeKind:
// Unfortunately, we can't just bitcast structs. We have to
// actually create a new struct of the correct type and insert the
// values from the previous struct in there.
value := llvm.Undef(llvmType)
for i := 0; i < llvmType.StructElementTypesCount(); i++ {
field := c.builder.CreateExtractValue(x, i, "changetype.field")
value = c.builder.CreateInsertValue(value, field, i, "changetype.struct")
}
return value, nil
case llvm.PointerTypeKind:
// This can happen with pointers to structs. This case is easy:
// simply bitcast the pointer to the destination type.
return c.builder.CreateBitCast(x, llvmType, "changetype.pointer"), nil
default:
return llvm.Value{}, errors.New("todo: unknown ChangeType type: " + expr.X.Type().String())
}
case *ssa.Const:
panic("const is not an expression")
case *ssa.Convert:
x := c.getValue(frame, expr.X)
return c.parseConvert(expr.X.Type(), expr.Type(), x, expr.Pos())
case *ssa.Extract:
if _, ok := expr.Tuple.(*ssa.Select); ok {
return c.getChanSelectResult(frame, expr), nil
}
value := c.getValue(frame, expr.Tuple)
return c.builder.CreateExtractValue(value, expr.Index, ""), nil
case *ssa.Field:
value := c.getValue(frame, expr.X)
if s := expr.X.Type().Underlying().(*types.Struct); s.NumFields() > 2 && s.Field(0).Name() == "C union" {
// Extract a field from a CGo union.
// This could be done directly, but as this is a very infrequent
// operation it's much easier to bitcast it through an alloca.
resultType := c.getLLVMType(expr.Type())
alloca, allocaPtr, allocaSize := c.createTemporaryAlloca(value.Type(), "union.alloca")
c.builder.CreateStore(value, alloca)
bitcast := c.builder.CreateBitCast(alloca, llvm.PointerType(resultType, 0), "union.bitcast")
result := c.builder.CreateLoad(bitcast, "union.result")
c.emitLifetimeEnd(allocaPtr, allocaSize)
return result, nil
}
result := c.builder.CreateExtractValue(value, expr.Field, "")
return result, nil
case *ssa.FieldAddr:
val := c.getValue(frame, expr.X)
// Check for nil pointer before calculating the address, from the spec:
// > For an operand x of type T, the address operation &x generates a
// > pointer of type *T to x. [...] If the evaluation of x would cause a
// > run-time panic, then the evaluation of &x does too.
c.emitNilCheck(frame, val, "gep")
if s := expr.X.Type().(*types.Pointer).Elem().Underlying().(*types.Struct); s.NumFields() > 2 && s.Field(0).Name() == "C union" {
// This is not a regular struct but actually a union.
// That simplifies things, as we can just bitcast the pointer to the
// right type.
ptrType := c.getLLVMType(expr.Type())
return c.builder.CreateBitCast(val, ptrType, ""), nil
} else {
// Do a GEP on the pointer to get the field address.
indices := []llvm.Value{
llvm.ConstInt(c.ctx.Int32Type(), 0, false),
llvm.ConstInt(c.ctx.Int32Type(), uint64(expr.Field), false),
}
return c.builder.CreateInBoundsGEP(val, indices, ""), nil
}
case *ssa.Function:
panic("function is not an expression")
case *ssa.Global:
panic("global is not an expression")
case *ssa.Index:
array := c.getValue(frame, expr.X)
index := c.getValue(frame, expr.Index)
// Check bounds.
arrayLen := expr.X.Type().(*types.Array).Len()
arrayLenLLVM := llvm.ConstInt(c.uintptrType, uint64(arrayLen), false)
c.emitLookupBoundsCheck(frame, arrayLenLLVM, index, expr.Index.Type())
// Can't load directly from array (as index is non-constant), so have to
// do it using an alloca+gep+load.
alloca, allocaPtr, allocaSize := c.createTemporaryAlloca(array.Type(), "index.alloca")
c.builder.CreateStore(array, alloca)
zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false)
ptr := c.builder.CreateInBoundsGEP(alloca, []llvm.Value{zero, index}, "index.gep")
result := c.builder.CreateLoad(ptr, "index.load")
c.emitLifetimeEnd(allocaPtr, allocaSize)
return result, nil
case *ssa.IndexAddr:
val := c.getValue(frame, expr.X)
index := c.getValue(frame, expr.Index)
// Get buffer pointer and length
var bufptr, buflen llvm.Value
switch ptrTyp := expr.X.Type().Underlying().(type) {
case *types.Pointer:
typ := expr.X.Type().Underlying().(*types.Pointer).Elem().Underlying()
switch typ := typ.(type) {
case *types.Array:
bufptr = val
buflen = llvm.ConstInt(c.uintptrType, uint64(typ.Len()), false)
// Check for nil pointer before calculating the address, from
// the spec:
// > For an operand x of type T, the address operation &x
// > generates a pointer of type *T to x. [...] If the
// > evaluation of x would cause a run-time panic, then the
// > evaluation of &x does too.
c.emitNilCheck(frame, bufptr, "gep")
default:
return llvm.Value{}, c.makeError(expr.Pos(), "todo: indexaddr: "+typ.String())
}
case *types.Slice:
bufptr = c.builder.CreateExtractValue(val, 0, "indexaddr.ptr")
buflen = c.builder.CreateExtractValue(val, 1, "indexaddr.len")
default:
return llvm.Value{}, c.makeError(expr.Pos(), "todo: indexaddr: "+ptrTyp.String())
}
// Bounds check.
c.emitLookupBoundsCheck(frame, buflen, index, expr.Index.Type())
switch expr.X.Type().Underlying().(type) {
case *types.Pointer:
indices := []llvm.Value{
llvm.ConstInt(c.ctx.Int32Type(), 0, false),
index,
}
return c.builder.CreateInBoundsGEP(bufptr, indices, ""), nil
case *types.Slice:
return c.builder.CreateInBoundsGEP(bufptr, []llvm.Value{index}, ""), nil
default:
panic("unreachable")
}
case *ssa.Lookup:
value := c.getValue(frame, expr.X)
index := c.getValue(frame, expr.Index)
switch xType := expr.X.Type().Underlying().(type) {
case *types.Basic:
// Value type must be a string, which is a basic type.
if xType.Info()&types.IsString == 0 {
panic("lookup on non-string?")
}
// Bounds check.
length := c.builder.CreateExtractValue(value, 1, "len")
c.emitLookupBoundsCheck(frame, length, index, expr.Index.Type())
// Lookup byte
buf := c.builder.CreateExtractValue(value, 0, "")
bufPtr := c.builder.CreateInBoundsGEP(buf, []llvm.Value{index}, "")
return c.builder.CreateLoad(bufPtr, ""), nil
case *types.Map:
valueType := expr.Type()
if expr.CommaOk {
valueType = valueType.(*types.Tuple).At(0).Type()
}
return c.emitMapLookup(xType.Key(), valueType, value, index, expr.CommaOk, expr.Pos())
default:
panic("unknown lookup type: " + expr.String())
}
case *ssa.MakeChan:
return c.emitMakeChan(expr)
case *ssa.MakeClosure:
return c.parseMakeClosure(frame, expr)
case *ssa.MakeInterface:
val := c.getValue(frame, expr.X)
return c.parseMakeInterface(val, expr.X.Type(), expr.Pos()), nil
case *ssa.MakeMap:
mapType := expr.Type().Underlying().(*types.Map)
llvmKeyType := c.getLLVMType(mapType.Key().Underlying())
llvmValueType := c.getLLVMType(mapType.Elem().Underlying())
keySize := c.targetData.TypeAllocSize(llvmKeyType)
valueSize := c.targetData.TypeAllocSize(llvmValueType)
llvmKeySize := llvm.ConstInt(c.ctx.Int8Type(), keySize, false)
llvmValueSize := llvm.ConstInt(c.ctx.Int8Type(), valueSize, false)
sizeHint := llvm.ConstInt(c.uintptrType, 8, false)
if expr.Reserve != nil {
sizeHint = c.getValue(frame, expr.Reserve)
var err error
sizeHint, err = c.parseConvert(expr.Reserve.Type(), types.Typ[types.Uintptr], sizeHint, expr.Pos())
if err != nil {
return llvm.Value{}, err
}
}
hashmap := c.createRuntimeCall("hashmapMake", []llvm.Value{llvmKeySize, llvmValueSize, sizeHint}, "")
return hashmap, nil
case *ssa.MakeSlice:
sliceLen := c.getValue(frame, expr.Len)
sliceCap := c.getValue(frame, expr.Cap)
sliceType := expr.Type().Underlying().(*types.Slice)
llvmElemType := c.getLLVMType(sliceType.Elem())
elemSize := c.targetData.TypeAllocSize(llvmElemType)
elemSizeValue := llvm.ConstInt(c.uintptrType, elemSize, false)
// Calculate (^uintptr(0)) >> 1, which is the max value that fits in
// uintptr if uintptr were signed.
maxSize := llvm.ConstLShr(llvm.ConstNot(llvm.ConstInt(c.uintptrType, 0, false)), llvm.ConstInt(c.uintptrType, 1, false))
if elemSize > maxSize.ZExtValue() {
// This seems to be checked by the typechecker already, but let's
// check it again just to be sure.
return llvm.Value{}, c.makeError(expr.Pos(), fmt.Sprintf("slice element type is too big (%v bytes)", elemSize))
}
// Bounds checking.
lenType := expr.Len.Type().(*types.Basic)
capType := expr.Cap.Type().(*types.Basic)
c.emitSliceBoundsCheck(frame, maxSize, sliceLen, sliceCap, sliceCap, lenType, capType, capType)
// Allocate the backing array.
sliceCapCast, err := c.parseConvert(expr.Cap.Type(), types.Typ[types.Uintptr], sliceCap, expr.Pos())
if err != nil {
return llvm.Value{}, err
}
sliceSize := c.builder.CreateBinOp(llvm.Mul, elemSizeValue, sliceCapCast, "makeslice.cap")
slicePtr := c.createRuntimeCall("alloc", []llvm.Value{sliceSize}, "makeslice.buf")
slicePtr = c.builder.CreateBitCast(slicePtr, llvm.PointerType(llvmElemType, 0), "makeslice.array")
// Extend or truncate if necessary. This is safe as we've already done
// the bounds check.
sliceLen, err = c.parseConvert(expr.Len.Type(), types.Typ[types.Uintptr], sliceLen, expr.Pos())
if err != nil {
return llvm.Value{}, err
}
sliceCap, err = c.parseConvert(expr.Cap.Type(), types.Typ[types.Uintptr], sliceCap, expr.Pos())
if err != nil {
return llvm.Value{}, err
}
// Create the slice.
slice := c.ctx.ConstStruct([]llvm.Value{
llvm.Undef(slicePtr.Type()),
llvm.Undef(c.uintptrType),
llvm.Undef(c.uintptrType),
}, false)
slice = c.builder.CreateInsertValue(slice, slicePtr, 0, "")
slice = c.builder.CreateInsertValue(slice, sliceLen, 1, "")
slice = c.builder.CreateInsertValue(slice, sliceCap, 2, "")
return slice, nil
case *ssa.Next:
rangeVal := expr.Iter.(*ssa.Range).X
llvmRangeVal := c.getValue(frame, rangeVal)
it := c.getValue(frame, expr.Iter)
if expr.IsString {
return c.createRuntimeCall("stringNext", []llvm.Value{llvmRangeVal, it}, "range.next"), nil
} else { // map
llvmKeyType := c.getLLVMType(rangeVal.Type().Underlying().(*types.Map).Key())
llvmValueType := c.getLLVMType(rangeVal.Type().Underlying().(*types.Map).Elem())
mapKeyAlloca, mapKeyPtr, mapKeySize := c.createTemporaryAlloca(llvmKeyType, "range.key")
mapValueAlloca, mapValuePtr, mapValueSize := c.createTemporaryAlloca(llvmValueType, "range.value")
ok := c.createRuntimeCall("hashmapNext", []llvm.Value{llvmRangeVal, it, mapKeyPtr, mapValuePtr}, "range.next")
tuple := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.Int1Type(), llvmKeyType, llvmValueType}, false))
tuple = c.builder.CreateInsertValue(tuple, ok, 0, "")
tuple = c.builder.CreateInsertValue(tuple, c.builder.CreateLoad(mapKeyAlloca, ""), 1, "")
tuple = c.builder.CreateInsertValue(tuple, c.builder.CreateLoad(mapValueAlloca, ""), 2, "")
c.emitLifetimeEnd(mapKeyPtr, mapKeySize)
c.emitLifetimeEnd(mapValuePtr, mapValueSize)
return tuple, nil
}
case *ssa.Phi:
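// Create the PHI node now; its incoming values are not all known yet, so
// remember it in frame.phis and add the incoming edges after all basic
// blocks have been generated.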
phi := c.builder.CreatePHI(c.getLLVMType(expr.Type()), "")
frame.phis = append(frame.phis, Phi{expr, phi})
return phi, nil
case *ssa.Range:
var iteratorType llvm.Type
switch typ := expr.X.Type().Underlying().(type) {
case *types.Basic: // string
iteratorType = c.getLLVMRuntimeType("stringIterator")
case *types.Map:
iteratorType = c.getLLVMRuntimeType("hashmapIterator")
default:
panic("unknown type in range: " + typ.String())
}
it, _, _ := c.createTemporaryAlloca(iteratorType, "range.it")
c.builder.CreateStore(c.getZeroValue(iteratorType), it)
return it, nil
case *ssa.Select:
return c.emitSelect(frame, expr), nil
case *ssa.Slice:
value := c.getValue(frame, expr.X)
var lowType, highType, maxType *types.Basic
var low, high, max llvm.Value
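// Extend the low, high and max indices to uintptr width where needed, using
// zero-extension for unsigned index types and sign-extension for signed ones.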
if expr.Low != nil {
lowType = expr.Low.Type().Underlying().(*types.Basic)
low = c.getValue(frame, expr.Low)
if low.Type().IntTypeWidth() < c.uintptrType.IntTypeWidth() {
if lowType.Info()&types.IsUnsigned != 0 {
low = c.builder.CreateZExt(low, c.uintptrType, "")
} else {
low = c.builder.CreateSExt(low, c.uintptrType, "")
}
}
} else {
lowType = types.Typ[types.Uintptr]
low = llvm.ConstInt(c.uintptrType, 0, false)
}
if expr.High != nil {
highType = expr.High.Type().Underlying().(*types.Basic)
high = c.getValue(frame, expr.High)
if high.Type().IntTypeWidth() < c.uintptrType.IntTypeWidth() {
if highType.Info()&types.IsUnsigned != 0 {
high = c.builder.CreateZExt(high, c.uintptrType, "")
} else {
high = c.builder.CreateSExt(high, c.uintptrType, "")
}
}
} else {
highType = types.Typ[types.Uintptr]
}
if expr.Max != nil {
maxType = expr.Max.Type().Underlying().(*types.Basic)
max = c.getValue(frame, expr.Max)
if max.Type().IntTypeWidth() < c.uintptrType.IntTypeWidth() {
if maxType.Info()&types.IsUnsigned != 0 {
max = c.builder.CreateZExt(max, c.uintptrType, "")
} else {
max = c.builder.CreateSExt(max, c.uintptrType, "")
}
}
} else {
maxType = types.Typ[types.Uintptr]
}
switch typ := expr.X.Type().Underlying().(type) {
case *types.Pointer: // pointer to array
// slice an array
length := typ.Elem().Underlying().(*types.Array).Len()
llvmLen := llvm.ConstInt(c.uintptrType, uint64(length), false)
if high.IsNil() {
high = llvmLen
}
if max.IsNil() {
max = llvmLen
}
indices := []llvm.Value{
llvm.ConstInt(c.ctx.Int32Type(), 0, false),
low,
}
c.emitSliceBoundsCheck(frame, llvmLen, low, high, max, lowType, highType, maxType)
// Truncate ints bigger than uintptr. This is after the bounds
// check so it's safe.
if c.targetData.TypeAllocSize(low.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
low = c.builder.CreateTrunc(low, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(high.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
high = c.builder.CreateTrunc(high, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(max.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
max = c.builder.CreateTrunc(max, c.uintptrType, "")
}
sliceLen := c.builder.CreateSub(high, low, "slice.len")
slicePtr := c.builder.CreateInBoundsGEP(value, indices, "slice.ptr")
sliceCap := c.builder.CreateSub(max, low, "slice.cap")
slice := c.ctx.ConstStruct([]llvm.Value{
llvm.Undef(slicePtr.Type()),
llvm.Undef(c.uintptrType),
llvm.Undef(c.uintptrType),
}, false)
slice = c.builder.CreateInsertValue(slice, slicePtr, 0, "")
slice = c.builder.CreateInsertValue(slice, sliceLen, 1, "")
slice = c.builder.CreateInsertValue(slice, sliceCap, 2, "")
return slice, nil
case *types.Slice:
// slice a slice
oldPtr := c.builder.CreateExtractValue(value, 0, "")
oldLen := c.builder.CreateExtractValue(value, 1, "")
oldCap := c.builder.CreateExtractValue(value, 2, "")
if high.IsNil() {
high = oldLen
}
if max.IsNil() {
max = oldCap
}
c.emitSliceBoundsCheck(frame, oldCap, low, high, max, lowType, highType, maxType)
// Truncate ints bigger than uintptr. This is after the bounds
// check so it's safe.
if c.targetData.TypeAllocSize(low.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
low = c.builder.CreateTrunc(low, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(high.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
high = c.builder.CreateTrunc(high, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(max.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
max = c.builder.CreateTrunc(max, c.uintptrType, "")
}
newPtr := c.builder.CreateInBoundsGEP(oldPtr, []llvm.Value{low}, "")
newLen := c.builder.CreateSub(high, low, "")
newCap := c.builder.CreateSub(max, low, "")
slice := c.ctx.ConstStruct([]llvm.Value{
llvm.Undef(newPtr.Type()),
llvm.Undef(c.uintptrType),
llvm.Undef(c.uintptrType),
}, false)
slice = c.builder.CreateInsertValue(slice, newPtr, 0, "")
slice = c.builder.CreateInsertValue(slice, newLen, 1, "")
slice = c.builder.CreateInsertValue(slice, newCap, 2, "")
return slice, nil
case *types.Basic:
if typ.Info()&types.IsString == 0 {
return llvm.Value{}, c.makeError(expr.Pos(), "unknown slice type: "+typ.String())
}
// slice a string
if expr.Max != nil {
// This might as well be a panic, as the frontend should have
// handled this already.
return llvm.Value{}, c.makeError(expr.Pos(), "slicing a string with a max parameter is not allowed by the spec")
}
oldPtr := c.builder.CreateExtractValue(value, 0, "")
oldLen := c.builder.CreateExtractValue(value, 1, "")
if high.IsNil() {
high = oldLen
}
c.emitSliceBoundsCheck(frame, oldLen, low, high, high, lowType, highType, maxType)
// Truncate ints bigger than uintptr. This is after the bounds
// check so it's safe.
if c.targetData.TypeAllocSize(low.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
low = c.builder.CreateTrunc(low, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(high.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
high = c.builder.CreateTrunc(high, c.uintptrType, "")
}
newPtr := c.builder.CreateInBoundsGEP(oldPtr, []llvm.Value{low}, "")
newLen := c.builder.CreateSub(high, low, "")
str := llvm.Undef(c.getLLVMRuntimeType("_string"))
str = c.builder.CreateInsertValue(str, newPtr, 0, "")
str = c.builder.CreateInsertValue(str, newLen, 1, "")
return str, nil
default:
return llvm.Value{}, c.makeError(expr.Pos(), "unknown slice type: "+typ.String())
}
case *ssa.TypeAssert:
return c.parseTypeAssert(frame, expr), nil
case *ssa.UnOp:
return c.parseUnOp(frame, expr)
default:
return llvm.Value{}, c.makeError(expr.Pos(), "todo: unknown expression: "+expr.String())
}
}
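// parseBinOp emits the LLVM instructions (or runtime calls, for example for
// string operations) implementing a Go binary operator on the operands x and y
// of the given type.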
func (c *Compiler) parseBinOp(op token.Token, typ types.Type, x, y llvm.Value, pos token.Pos) (llvm.Value, error) {
switch typ := typ.Underlying().(type) {
case *types.Basic:
if typ.Info()&types.IsInteger != 0 {
// Operations on integers
signed := typ.Info()&types.IsUnsigned == 0
switch op {
case token.ADD: // +
return c.builder.CreateAdd(x, y, ""), nil
case token.SUB: // -
return c.builder.CreateSub(x, y, ""), nil
case token.MUL: // *
return c.builder.CreateMul(x, y, ""), nil
case token.QUO: // /
if signed {
return c.builder.CreateSDiv(x, y, ""), nil
} else {
return c.builder.CreateUDiv(x, y, ""), nil
}
case token.REM: // %
if signed {
return c.builder.CreateSRem(x, y, ""), nil
} else {
return c.builder.CreateURem(x, y, ""), nil
}
case token.AND: // &
return c.builder.CreateAnd(x, y, ""), nil
case token.OR: // |
return c.builder.CreateOr(x, y, ""), nil
case token.XOR: // ^
return c.builder.CreateXor(x, y, ""), nil
case token.SHL, token.SHR:
sizeX := c.targetData.TypeAllocSize(x.Type())
sizeY := c.targetData.TypeAllocSize(y.Type())
if sizeX > sizeY {
// x and y must have equal sizes, make Y bigger in this case.
// y is unsigned, this has been checked by the Go type checker.
y = c.builder.CreateZExt(y, x.Type(), "")
} else if sizeX < sizeY {
// What about shifting more than the integer width?
// I'm not entirely sure what the Go spec is on that, but as
// Intel CPUs have undefined behavior when shifting more
// than the integer width I'm assuming it is also undefined
// in Go.
y = c.builder.CreateTrunc(y, x.Type(), "")
}
switch op {
case token.SHL: // <<
return c.builder.CreateShl(x, y, ""), nil
case token.SHR: // >>
if signed {
return c.builder.CreateAShr(x, y, ""), nil
} else {
return c.builder.CreateLShr(x, y, ""), nil
}
default:
panic("unreachable")
}
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
case token.AND_NOT: // &^
// Go specific. Calculate "and not" with x & (~y)
inv := c.builder.CreateNot(y, "") // ~y
return c.builder.CreateAnd(x, inv, ""), nil
case token.LSS: // <
if signed {
return c.builder.CreateICmp(llvm.IntSLT, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntULT, x, y, ""), nil
}
case token.LEQ: // <=
if signed {
return c.builder.CreateICmp(llvm.IntSLE, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntULE, x, y, ""), nil
}
case token.GTR: // >
if signed {
return c.builder.CreateICmp(llvm.IntSGT, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntUGT, x, y, ""), nil
}
case token.GEQ: // >=
if signed {
return c.builder.CreateICmp(llvm.IntSGE, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntUGE, x, y, ""), nil
}
default:
panic("binop on integer: " + op.String())
}
} else if typ.Info()&types.IsFloat != 0 {
// Operations on floats
switch op {
case token.ADD: // +
return c.builder.CreateFAdd(x, y, ""), nil
case token.SUB: // -
return c.builder.CreateFSub(x, y, ""), nil
case token.MUL: // *
return c.builder.CreateFMul(x, y, ""), nil
case token.QUO: // /
return c.builder.CreateFDiv(x, y, ""), nil
case token.EQL: // ==
return c.builder.CreateFCmp(llvm.FloatUEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateFCmp(llvm.FloatUNE, x, y, ""), nil
case token.LSS: // <
return c.builder.CreateFCmp(llvm.FloatULT, x, y, ""), nil
case token.LEQ: // <=
return c.builder.CreateFCmp(llvm.FloatULE, x, y, ""), nil
case token.GTR: // >
return c.builder.CreateFCmp(llvm.FloatUGT, x, y, ""), nil
case token.GEQ: // >=
return c.builder.CreateFCmp(llvm.FloatUGE, x, y, ""), nil
default:
panic("binop on float: " + op.String())
}
} else if typ.Info()&types.IsComplex != 0 {
r1 := c.builder.CreateExtractValue(x, 0, "r1")
r2 := c.builder.CreateExtractValue(y, 0, "r2")
i1 := c.builder.CreateExtractValue(x, 1, "i1")
i2 := c.builder.CreateExtractValue(y, 1, "i2")
switch op {
case token.EQL: // ==
req := c.builder.CreateFCmp(llvm.FloatOEQ, r1, r2, "")
ieq := c.builder.CreateFCmp(llvm.FloatOEQ, i1, i2, "")
return c.builder.CreateAnd(req, ieq, ""), nil
case token.NEQ: // !=
req := c.builder.CreateFCmp(llvm.FloatOEQ, r1, r2, "")
ieq := c.builder.CreateFCmp(llvm.FloatOEQ, i1, i2, "")
neq := c.builder.CreateAnd(req, ieq, "")
return c.builder.CreateNot(neq, ""), nil
case token.ADD, token.SUB:
var r, i llvm.Value
switch op {
case token.ADD:
r = c.builder.CreateFAdd(r1, r2, "")
i = c.builder.CreateFAdd(i1, i2, "")
case token.SUB:
r = c.builder.CreateFSub(r1, r2, "")
i = c.builder.CreateFSub(i1, i2, "")
default:
panic("unreachable")
}
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{r.Type(), i.Type()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
case token.MUL:
// Complex multiplication follows the current implementation in
// the Go compiler, with the difference that complex64
// components are not first scaled up to float64 for increased
// precision.
// https://github.com/golang/go/blob/170b8b4b12be50eeccbcdadb8523fb4fc670ca72/src/cmd/compile/internal/gc/ssa.go#L2089-L2127
// The implementation is as follows:
// r := real(a) * real(b) - imag(a) * imag(b)
// i := real(a) * imag(b) + imag(a) * real(b)
// Note: this does NOT follow the C11 specification (annex G):
// http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1548.pdf#page=549
// See https://github.com/golang/go/issues/29846 for a related
// discussion.
r := c.builder.CreateFSub(c.builder.CreateFMul(r1, r2, ""), c.builder.CreateFMul(i1, i2, ""), "")
i := c.builder.CreateFAdd(c.builder.CreateFMul(r1, i2, ""), c.builder.CreateFMul(i1, r2, ""), "")
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{r.Type(), i.Type()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
case token.QUO:
// Complex division.
// Do this in a library call because it's too difficult to do
// inline.
switch r1.Type().TypeKind() {
case llvm.FloatTypeKind:
return c.createRuntimeCall("complex64div", []llvm.Value{x, y}, ""), nil
case llvm.DoubleTypeKind:
return c.createRuntimeCall("complex128div", []llvm.Value{x, y}, ""), nil
default:
panic("unexpected complex type")
}
default:
panic("binop on complex: " + op.String())
}
} else if typ.Info()&types.IsBoolean != 0 {
// Operations on booleans
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
panic("binop on bool: " + op.String())
}
} else if typ.Kind() == types.UnsafePointer {
// Operations on pointers
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
panic("binop on pointer: " + op.String())
}
} else if typ.Info()&types.IsString != 0 {
// Operations on strings
switch op {
case token.ADD: // +
return c.createRuntimeCall("stringConcat", []llvm.Value{x, y}, ""), nil
case token.EQL: // ==
return c.createRuntimeCall("stringEqual", []llvm.Value{x, y}, ""), nil
case token.NEQ: // !=
result := c.createRuntimeCall("stringEqual", []llvm.Value{x, y}, "")
return c.builder.CreateNot(result, ""), nil
case token.LSS: // <
return c.createRuntimeCall("stringLess", []llvm.Value{x, y}, ""), nil
case token.LEQ: // <=
result := c.createRuntimeCall("stringLess", []llvm.Value{y, x}, "")
return c.builder.CreateNot(result, ""), nil
case token.GTR: // >
result := c.createRuntimeCall("stringLess", []llvm.Value{x, y}, "")
return c.builder.CreateNot(result, ""), nil
case token.GEQ: // >=
return c.createRuntimeCall("stringLess", []llvm.Value{y, x}, ""), nil
default:
panic("binop on string: " + op.String())
}
} else {
return llvm.Value{}, c.makeError(pos, "todo: unknown basic type in binop: "+typ.String())
}
case *types.Signature:
// Get raw scalars from the function value and compare those.
// Function values may be implemented in multiple ways, but they all
// have some way of getting a scalar value identifying the function.
// This is safe: function pointers are generally not comparable
// against each other, only against nil. So one of these has to be nil.
x = c.extractFuncScalar(x)
y = c.extractFuncScalar(y)
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "binop on signature: "+op.String())
}
case *types.Interface:
switch op {
case token.EQL, token.NEQ: // ==, !=
result := c.createRuntimeCall("interfaceEqual", []llvm.Value{x, y}, "")
if op == token.NEQ {
result = c.builder.CreateNot(result, "")
}
return result, nil
default:
return llvm.Value{}, c.makeError(pos, "binop on interface: "+op.String())
}
case *types.Chan, *types.Map, *types.Pointer:
// Maps are in general not comparable, but can be compared against nil
// (which is a nil pointer). This means they can be trivially compared
// by treating them as a pointer.
// Channels behave as pointers in that they are equal as long as they
// are created with the same call to make or if both are nil.
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: binop on pointer: "+op.String())
}
case *types.Slice:
// Slices are in general not comparable, but can be compared against
// nil. Assume at least one of them is nil to make the code easier.
xPtr := c.builder.CreateExtractValue(x, 0, "")
yPtr := c.builder.CreateExtractValue(y, 0, "")
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, xPtr, yPtr, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, xPtr, yPtr, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: binop on slice: "+op.String())
}
case *types.Array:
// Compare each array element and combine the result. From the spec:
// Array values are comparable if values of the array element type
// are comparable. Two array values are equal if their corresponding
// elements are equal.
result := llvm.ConstInt(c.ctx.Int1Type(), 1, true)
for i := 0; i < int(typ.Len()); i++ {
xField := c.builder.CreateExtractValue(x, i, "")
yField := c.builder.CreateExtractValue(y, i, "")
fieldEqual, err := c.parseBinOp(token.EQL, typ.Elem(), xField, yField, pos)
if err != nil {
return llvm.Value{}, err
}
result = c.builder.CreateAnd(result, fieldEqual, "")
}
switch op {
case token.EQL: // ==
return result, nil
case token.NEQ: // !=
return c.builder.CreateNot(result, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "unknown: binop on struct: "+op.String())
}
case *types.Struct:
// Compare each struct field and combine the result. From the spec:
// Struct values are comparable if all their fields are comparable.
// Two struct values are equal if their corresponding non-blank
// fields are equal.
result := llvm.ConstInt(c.ctx.Int1Type(), 1, true)
for i := 0; i < typ.NumFields(); i++ {
if typ.Field(i).Name() == "_" {
// skip blank fields
continue
}
fieldType := typ.Field(i).Type()
xField := c.builder.CreateExtractValue(x, i, "")
yField := c.builder.CreateExtractValue(y, i, "")
fieldEqual, err := c.parseBinOp(token.EQL, fieldType, xField, yField, pos)
if err != nil {
return llvm.Value{}, err
}
result = c.builder.CreateAnd(result, fieldEqual, "")
}
switch op {
case token.EQL: // ==
return result, nil
case token.NEQ: // !=
return c.builder.CreateNot(result, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "unknown: binop on struct: "+op.String())
}
default:
return llvm.Value{}, c.makeError(pos, "todo: binop type: "+typ.String())
}
}
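// parseConst converts a Go SSA constant into an LLVM constant value. The
// prefix is used to name the private global created for a string constant.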
func (c *Compiler) parseConst(prefix string, expr *ssa.Const) llvm.Value {
switch typ := expr.Type().Underlying().(type) {
case *types.Basic:
llvmType := c.getLLVMType(typ)
if typ.Info()&types.IsBoolean != 0 {
b := constant.BoolVal(expr.Value)
n := uint64(0)
if b {
n = 1
}
return llvm.ConstInt(llvmType, n, false)
} else if typ.Info()&types.IsString != 0 {
str := constant.StringVal(expr.Value)
strLen := llvm.ConstInt(c.uintptrType, uint64(len(str)), false)
objname := prefix + "$string"
global := llvm.AddGlobal(c.mod, llvm.ArrayType(c.ctx.Int8Type(), len(str)), objname)
global.SetInitializer(c.ctx.ConstString(str, false))
global.SetLinkage(llvm.InternalLinkage)
global.SetGlobalConstant(true)
global.SetUnnamedAddr(true)
zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false)
strPtr := c.builder.CreateInBoundsGEP(global, []llvm.Value{zero, zero}, "")
strObj := llvm.ConstNamedStruct(c.getLLVMRuntimeType("_string"), []llvm.Value{strPtr, strLen})
return strObj
} else if typ.Kind() == types.UnsafePointer {
if !expr.IsNil() {
value, _ := constant.Uint64Val(expr.Value)
return llvm.ConstIntToPtr(llvm.ConstInt(c.uintptrType, value, false), c.i8ptrType)
}
return llvm.ConstNull(c.i8ptrType)
} else if typ.Info()&types.IsUnsigned != 0 {
n, _ := constant.Uint64Val(expr.Value)
return llvm.ConstInt(llvmType, n, false)
} else if typ.Info()&types.IsInteger != 0 { // signed
n, _ := constant.Int64Val(expr.Value)
return llvm.ConstInt(llvmType, uint64(n), true)
} else if typ.Info()&types.IsFloat != 0 {
n, _ := constant.Float64Val(expr.Value)
return llvm.ConstFloat(llvmType, n)
} else if typ.Kind() == types.Complex64 {
r := c.parseConst(prefix, ssa.NewConst(constant.Real(expr.Value), types.Typ[types.Float32]))
i := c.parseConst(prefix, ssa.NewConst(constant.Imag(expr.Value), types.Typ[types.Float32]))
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx
} else if typ.Kind() == types.Complex128 {
r := c.parseConst(prefix, ssa.NewConst(constant.Real(expr.Value), types.Typ[types.Float64]))
i := c.parseConst(prefix, ssa.NewConst(constant.Imag(expr.Value), types.Typ[types.Float64]))
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx
} else {
panic("unknown constant of basic type: " + expr.String())
}
case *types.Chan:
if expr.Value != nil {
panic("expected nil chan constant")
}
return c.getZeroValue(c.getLLVMType(expr.Type()))
case *types.Signature:
if expr.Value != nil {
panic("expected nil signature constant")
}
return c.getZeroValue(c.getLLVMType(expr.Type()))
case *types.Interface:
if expr.Value != nil {
panic("expected nil interface constant")
}
// Create a generic nil interface with no dynamic type (typecode=0).
fields := []llvm.Value{
llvm.ConstInt(c.uintptrType, 0, false),
llvm.ConstPointerNull(c.i8ptrType),
}
return llvm.ConstNamedStruct(c.getLLVMRuntimeType("_interface"), fields)
case *types.Pointer:
if expr.Value != nil {
panic("expected nil pointer constant")
}
return llvm.ConstPointerNull(c.getLLVMType(typ))
case *types.Slice:
if expr.Value != nil {
panic("expected nil slice constant")
}
elemType := c.getLLVMType(typ.Elem())
llvmPtr := llvm.ConstPointerNull(llvm.PointerType(elemType, 0))
llvmLen := llvm.ConstInt(c.uintptrType, 0, false)
slice := c.ctx.ConstStruct([]llvm.Value{
llvmPtr, // backing array
llvmLen, // len
llvmLen, // cap
}, false)
return slice
case *types.Map:
if !expr.IsNil() {
// I believe this is not allowed by the Go spec.
panic("non-nil map constant")
}
llvmType := c.getLLVMType(typ)
return c.getZeroValue(llvmType)
default:
panic("unknown constant: " + expr.String())
}
}
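// parseConvert lowers a Go value conversion: between numeric types, between
// pointers and uintptr/unsafe.Pointer, and between strings and byte or rune
// slices.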
func (c *Compiler) parseConvert(typeFrom, typeTo types.Type, value llvm.Value, pos token.Pos) (llvm.Value, error) {
llvmTypeFrom := value.Type()
llvmTypeTo := c.getLLVMType(typeTo)
// Conversion between unsafe.Pointer and uintptr.
isPtrFrom := isPointer(typeFrom.Underlying())
isPtrTo := isPointer(typeTo.Underlying())
if isPtrFrom && !isPtrTo {
return c.builder.CreatePtrToInt(value, llvmTypeTo, ""), nil
} else if !isPtrFrom && isPtrTo {
if !value.IsABinaryOperator().IsNil() && value.InstructionOpcode() == llvm.Add {
// This is probably a pattern like the following:
// unsafe.Pointer(uintptr(ptr) + index)
// Used in functions like memmove etc. for lack of pointer
// arithmetic. Convert it to real pointer arithmetic here.
ptr := value.Operand(0)
index := value.Operand(1)
if !index.IsAPtrToIntInst().IsNil() {
// Swap if necessary, if ptr and index are reversed.
ptr, index = index, ptr
}
if !ptr.IsAPtrToIntInst().IsNil() {
origptr := ptr.Operand(0)
if origptr.Type() == c.i8ptrType {
// This pointer can be calculated from the original
// ptrtoint instruction with a GEP. The leftover inttoptr
// instruction is trivial to optimize away.
// Making it an in bounds GEP even though it's easy to
// create a GEP that is not in bounds. However, we're
// talking about unsafe code here so the programmer has to
// be careful anyway.
return c.builder.CreateInBoundsGEP(origptr, []llvm.Value{index}, ""), nil
}
}
}
return c.builder.CreateIntToPtr(value, llvmTypeTo, ""), nil
}
// Conversion between pointers and unsafe.Pointer.
if isPtrFrom && isPtrTo {
return c.builder.CreateBitCast(value, llvmTypeTo, ""), nil
}
switch typeTo := typeTo.Underlying().(type) {
case *types.Basic:
sizeFrom := c.targetData.TypeAllocSize(llvmTypeFrom)
if typeTo.Info()&types.IsString != 0 {
switch typeFrom := typeFrom.Underlying().(type) {
case *types.Basic:
// Assume a Unicode code point, as that is the only possible
// value here.
// Cast to an i32 value as expected by
// runtime.stringFromUnicode.
if sizeFrom > 4 {
value = c.builder.CreateTrunc(value, c.ctx.Int32Type(), "")
} else if sizeFrom < 4 && typeFrom.Info()&types.IsUnsigned != 0 {
value = c.builder.CreateZExt(value, c.ctx.Int32Type(), "")
} else if sizeFrom < 4 {
value = c.builder.CreateSExt(value, c.ctx.Int32Type(), "")
}
return c.createRuntimeCall("stringFromUnicode", []llvm.Value{value}, ""), nil
case *types.Slice:
switch typeFrom.Elem().(*types.Basic).Kind() {
case types.Byte:
return c.createRuntimeCall("stringFromBytes", []llvm.Value{value}, ""), nil
case types.Rune:
return c.createRuntimeCall("stringFromRunes", []llvm.Value{value}, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: convert to string: "+typeFrom.String())
}
default:
return llvm.Value{}, c.makeError(pos, "todo: convert to string: "+typeFrom.String())
}
}
typeFrom := typeFrom.Underlying().(*types.Basic)
sizeTo := c.targetData.TypeAllocSize(llvmTypeTo)
if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsInteger != 0 {
// Conversion between two integers.
if sizeFrom > sizeTo {
return c.builder.CreateTrunc(value, llvmTypeTo, ""), nil
} else if typeFrom.Info()&types.IsUnsigned != 0 { // if unsigned
return c.builder.CreateZExt(value, llvmTypeTo, ""), nil
} else { // if signed
return c.builder.CreateSExt(value, llvmTypeTo, ""), nil
}
}
if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsFloat != 0 {
// Conversion between two floats.
if sizeFrom > sizeTo {
return c.builder.CreateFPTrunc(value, llvmTypeTo, ""), nil
} else if sizeFrom < sizeTo {
return c.builder.CreateFPExt(value, llvmTypeTo, ""), nil
} else {
return value, nil
}
}
if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsInteger != 0 {
// Conversion from float to int.
if typeTo.Info()&types.IsUnsigned != 0 { // if unsigned
return c.builder.CreateFPToUI(value, llvmTypeTo, ""), nil
} else { // if signed
return c.builder.CreateFPToSI(value, llvmTypeTo, ""), nil
}
}
if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsFloat != 0 {
// Conversion from int to float.
if typeFrom.Info()&types.IsUnsigned != 0 { // if unsigned
return c.builder.CreateUIToFP(value, llvmTypeTo, ""), nil
} else { // if signed
return c.builder.CreateSIToFP(value, llvmTypeTo, ""), nil
}
}
if typeFrom.Kind() == types.Complex128 && typeTo.Kind() == types.Complex64 {
// Conversion from complex128 to complex64.
r := c.builder.CreateExtractValue(value, 0, "real.f64")
i := c.builder.CreateExtractValue(value, 1, "imag.f64")
r = c.builder.CreateFPTrunc(r, c.ctx.FloatType(), "real.f32")
i = c.builder.CreateFPTrunc(i, c.ctx.FloatType(), "imag.f32")
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
}
if typeFrom.Kind() == types.Complex64 && typeTo.Kind() == types.Complex128 {
// Conversion from complex64 to complex128.
r := c.builder.CreateExtractValue(value, 0, "real.f32")
i := c.builder.CreateExtractValue(value, 1, "imag.f32")
r = c.builder.CreateFPExt(r, c.ctx.DoubleType(), "real.f64")
i = c.builder.CreateFPExt(i, c.ctx.DoubleType(), "imag.f64")
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
}
return llvm.Value{}, c.makeError(pos, "todo: convert: basic non-integer type: "+typeFrom.String()+" -> "+typeTo.String())
case *types.Slice:
if basic, ok := typeFrom.(*types.Basic); !ok || basic.Info()&types.IsString == 0 {
panic("can only convert from a string to a slice")
}
elemType := typeTo.Elem().Underlying().(*types.Basic) // must be byte or rune
switch elemType.Kind() {
case types.Byte:
return c.createRuntimeCall("stringToBytes", []llvm.Value{value}, ""), nil
case types.Rune:
return c.createRuntimeCall("stringToRunes", []llvm.Value{value}, ""), nil
default:
panic("unexpected type in string to slice conversion")
}
default:
return llvm.Value{}, c.makeError(pos, "todo: convert "+typeTo.String()+" <- "+typeFrom.String())
}
}
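// parseUnOp lowers a unary operator: ! (not), - (negate), * (pointer
// dereference), ^ (bitwise complement), and <- (channel receive).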
func (c *Compiler) parseUnOp(frame *Frame, unop *ssa.UnOp) (llvm.Value, error) {
x := c.getValue(frame, unop.X)
switch unop.Op {
case token.NOT: // !x
return c.builder.CreateNot(x, ""), nil
case token.SUB: // -x
if typ, ok := unop.X.Type().Underlying().(*types.Basic); ok {
if typ.Info()&types.IsInteger != 0 {
return c.builder.CreateSub(llvm.ConstInt(x.Type(), 0, false), x, ""), nil
} else if typ.Info()&types.IsFloat != 0 {
return c.builder.CreateFSub(llvm.ConstFloat(x.Type(), 0.0), x, ""), nil
} else {
return llvm.Value{}, c.makeError(unop.Pos(), "todo: unknown basic type for negate: "+typ.String())
}
} else {
return llvm.Value{}, c.makeError(unop.Pos(), "todo: unknown type for negate: "+unop.X.Type().Underlying().String())
}
case token.MUL: // *x, dereference pointer
unop.X.Type().Underlying().(*types.Pointer).Elem()
if c.targetData.TypeAllocSize(x.Type().ElementType()) == 0 {
// zero-length data
return c.getZeroValue(x.Type().ElementType()), nil
} else if strings.HasSuffix(unop.X.String(), "$funcaddr") {
// CGo function pointer. The cgo part has rewritten CGo function
// pointers as stub global variables of the form:
// var C.add unsafe.Pointer
// Instead of a load from the global, create a bitcast of the
// function pointer itself.
globalName := c.getGlobalInfo(unop.X.(*ssa.Global)).linkName
name := globalName[:len(globalName)-len("$funcaddr")]
fn := c.mod.NamedFunction(name)
if fn.IsNil() {
return llvm.Value{}, c.makeError(unop.Pos(), "cgo function not found: "+name)
}
return c.builder.CreateBitCast(fn, c.i8ptrType, ""), nil
} else {
c.emitNilCheck(frame, x, "deref")
load := c.builder.CreateLoad(x, "")
return load, nil
}
case token.XOR: // ^x, toggle all bits in integer
return c.builder.CreateXor(x, llvm.ConstInt(x.Type(), ^uint64(0), false), ""), nil
case token.ARROW: // <-x, receive from channel
return c.emitChanRecv(frame, unop), nil
default:
return llvm.Value{}, c.makeError(unop.Pos(), "todo: unknown unop")
}
}
// IR returns the whole IR as a human-readable string.
func (c *Compiler) IR() string {
return c.mod.String()
}
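// Verify runs the LLVM module verifier and returns an error when the generated
// IR is invalid.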
func (c *Compiler) Verify() error {
return llvm.VerifyModule(c.mod, llvm.PrintMessageAction)
}
func (c *Compiler) ApplyFunctionSections() {
// Put every function in a separate section. This makes it possible for the
// linker to remove dead code (-ffunction-sections).
llvmFn := c.mod.FirstFunction()
for !llvmFn.IsNil() {
if !llvmFn.IsDeclaration() {
name := llvmFn.Name()
llvmFn.SetSection(".text." + name)
}
llvmFn = llvm.NextFunction(llvmFn)
}
}
// Turn all global constants into global variables. This works around a
// limitation on Harvard architectures (e.g. AVR), where constant and
// non-constant pointers point to a different address space.
func (c *Compiler) NonConstGlobals() {
global := c.mod.FirstGlobal()
for !global.IsNil() {
global.SetGlobalConstant(false)
global = llvm.NextGlobal(global)
}
}
// When the -wasm-abi flag is set to "js" (the default), replace i64 parameters
// and return values of external functions with stack-allocated i64* pointers,
// to work around the lack of 64-bit integers in JavaScript (which is commonly
// used together with WebAssembly). Once that limitation is resolved, this pass
// can be dropped. See also the -wasm-abi= flag and
// https://github.com/WebAssembly/design/issues/1172
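// For example (illustrative only), an imported function taking an int64
// parameter, such as
//   func setValue(x int64)
// is given an external counterpart that takes a pointer instead:
//   declare void @setValue(i64*)
// and every call site stores the i64 into a stack slot and passes its address.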
func (c *Compiler) ExternalInt64AsPtr() error {
int64Type := c.ctx.Int64Type()
int64PtrType := llvm.PointerType(int64Type, 0)
for fn := c.mod.FirstFunction(); !fn.IsNil(); fn = llvm.NextFunction(fn) {
if fn.Linkage() != llvm.ExternalLinkage {
// Only change externally visible functions (exports and imports).
continue
}
if strings.HasPrefix(fn.Name(), "llvm.") || strings.HasPrefix(fn.Name(), "runtime.") {
// Do not try to modify the signature of internal LLVM functions and
// assume that runtime functions are only temporarily exported for
// coroutine lowering.
continue
}
hasInt64 := false
paramTypes := []llvm.Type{}
// Check return type for 64-bit integer.
fnType := fn.Type().ElementType()
returnType := fnType.ReturnType()
if returnType == int64Type {
hasInt64 = true
paramTypes = append(paramTypes, int64PtrType)
returnType = c.ctx.VoidType()
}
// Check param types for 64-bit integers.
for param := fn.FirstParam(); !param.IsNil(); param = llvm.NextParam(param) {
if param.Type() == int64Type {
hasInt64 = true
paramTypes = append(paramTypes, int64PtrType)
} else {
paramTypes = append(paramTypes, param.Type())
}
}
if !hasInt64 {
// No i64 in the parameter list.
continue
}
// Add the $i64wrap suffix to the real function name, as it is only used
// internally.
// Add a new function with the correct signature that is exported.
name := fn.Name()
fn.SetName(name + "$i64wrap")
externalFnType := llvm.FunctionType(returnType, paramTypes, fnType.IsFunctionVarArg())
externalFn := llvm.AddFunction(c.mod, name, externalFnType)
if fn.IsDeclaration() {
// Just a declaration: the definition doesn't exist on the Go side
// so it cannot be called from external code.
// Update all users to call the external function.
// The old $i64wrap function could be removed, but it may as well
// be left in place.
for use := fn.FirstUse(); !use.IsNil(); use = use.NextUse() {
call := use.User()
c.builder.SetInsertPointBefore(call)
callParams := []llvm.Value{}
var retvalAlloca llvm.Value
if fnType.ReturnType() == int64Type {
retvalAlloca = c.builder.CreateAlloca(int64Type, "i64asptr")
callParams = append(callParams, retvalAlloca)
}
for i := 0; i < call.OperandsCount()-1; i++ {
operand := call.Operand(i)
if operand.Type() == int64Type {
// Pass a stack-allocated pointer instead of the value
// itself.
alloca := c.builder.CreateAlloca(int64Type, "i64asptr")
c.builder.CreateStore(operand, alloca)
callParams = append(callParams, alloca)
} else {
// Unchanged parameter.
callParams = append(callParams, operand)
}
}
if fnType.ReturnType() == int64Type {
// Pass a stack-allocated pointer as the first parameter
// where the return value should be stored, instead of using
// the regular return value.
c.builder.CreateCall(externalFn, callParams, call.Name())
returnValue := c.builder.CreateLoad(retvalAlloca, "retval")
call.ReplaceAllUsesWith(returnValue)
call.EraseFromParentAsInstruction()
} else {
newCall := c.builder.CreateCall(externalFn, callParams, call.Name())
call.ReplaceAllUsesWith(newCall)
call.EraseFromParentAsInstruction()
}
}
} else {
// The function has a definition in Go. This means that it may still
// be called both from Go and from external code.
// Keep existing calls with the existing convention in place (for
// better performance), but export a new wrapper function with the
// correct calling convention.
fn.SetLinkage(llvm.InternalLinkage)
fn.SetUnnamedAddr(true)
entryBlock := llvm.AddBasicBlock(externalFn, "entry")
c.builder.SetInsertPointAtEnd(entryBlock)
var callParams []llvm.Value
if fnType.ReturnType() == int64Type {
return errors.New("not yet implemented: exported function returns i64 with -wasm-abi=js; " +
"see https://tinygo.org/compiler-internals/calling-convention/")
}
for i, origParam := range fn.Params() {
paramValue := externalFn.Param(i)
if origParam.Type() == int64Type {
paramValue = c.builder.CreateLoad(paramValue, "i64")
}
callParams = append(callParams, paramValue)
}
retval := c.builder.CreateCall(fn, callParams, "")
if retval.Type().TypeKind() == llvm.VoidTypeKind {
c.builder.CreateRetVoid()
} else {
c.builder.CreateRet(retval)
}
}
}
return nil
}
// Emit object file (.o).
func (c *Compiler) EmitObject(path string) error {
llvmBuf, err := c.machine.EmitToMemoryBuffer(c.mod, llvm.ObjectFile)
if err != nil {
return err
}
return c.writeFile(llvmBuf.Bytes(), path)
}
// Emit LLVM bitcode file (.bc).
func (c *Compiler) EmitBitcode(path string) error {
data := llvm.WriteBitcodeToMemoryBuffer(c.mod).Bytes()
return c.writeFile(data, path)
}
// Emit LLVM IR source file (.ll).
func (c *Compiler) EmitText(path string) error {
data := []byte(c.mod.String())
return c.writeFile(data, path)
}
// Write the data to the file specified by path.
func (c *Compiler) writeFile(data []byte, path string) error {
// Write output to file
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
_, err = f.Write(data)
if err != nil {
return err
}
return f.Close()
}
| 1 | 7,675 | This change (and a few similar ones below) are not related to rpi3 support, and should be removed. | tinygo-org-tinygo | go |
@@ -382,7 +382,7 @@ class KoalasBoxPlot(BoxPlot):
showcaps=None,
showbox=None,
showfliers=None,
- **kwargs
+ **kwargs,
):
# Missing arguments default to rcParams.
if whis is None: | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_integer, is_list_like
from pandas.io.formats.printing import pprint_thing
from pandas.core.base import PandasObject
from pyspark.ml.feature import Bucketizer
from pyspark.mllib.stat import KernelDensity
from pyspark.sql import functions as F
from databricks.koalas.missing import unsupported_function
from databricks.koalas.config import get_option
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot,
BoxPlot,
HistPlot,
MPLPlot,
PiePlot,
AreaPlot,
LinePlot,
BarhPlot,
ScatterPlot,
KdePlot,
)
else:
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib import (
BarPlot,
BoxPlot,
HistPlot,
PiePlot,
AreaPlot,
LinePlot,
BarhPlot,
ScatterPlot,
KdePlot,
)
from pandas.plotting._matplotlib.core import MPLPlot
_all_kinds = PlotAccessor._all_kinds
class TopNPlot:
def get_top_n(self, data):
from databricks.koalas import DataFrame, Series
max_rows = get_option("plotting.max_rows")
# Simply use the first max_rows elements and make it into a pandas dataframe
# For categorical variables, it is likely called from df.x.value_counts().plot.xxx().
if isinstance(data, (Series, DataFrame)):
data = data.head(max_rows + 1).to_pandas()
else:
raise ValueError("Only DataFrame and Series are supported for plotting.")
self.partial = False
if len(data) > max_rows:
self.partial = True
data = data.iloc[:max_rows]
return data
def set_result_text(self, ax):
max_rows = get_option("plotting.max_rows")
assert hasattr(self, "partial")
if self.partial:
ax.text(
1,
1,
"showing top {} elements only".format(max_rows),
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class SampledPlot:
def get_sampled(self, data):
from databricks.koalas import DataFrame, Series
fraction = get_option("plotting.sample_ratio")
if fraction is None:
fraction = 1 / (len(data) / get_option("plotting.max_rows"))
fraction = min(1.0, fraction)
self.fraction = fraction
if isinstance(data, (DataFrame, Series)):
if isinstance(data, Series):
data = data.to_frame()
sampled = data._internal.resolved_copy.spark_frame.sample(fraction=self.fraction)
return DataFrame(data._internal.with_new_sdf(sampled)).to_pandas()
else:
raise ValueError("Only DataFrame and Series are supported for plotting.")
def set_result_text(self, ax):
assert hasattr(self, "fraction")
if self.fraction < 1:
ax.text(
1,
1,
"showing the sampled result by fraction %s" % self.fraction,
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class KoalasBarPlot(BarPlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasBarPlot, self).__init__(self.get_top_n(data), **kwargs)
def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
self.set_result_text(ax)
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class KoalasBoxPlot(BoxPlot):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_xticks=True,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
""" Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(
prop_dict, matplotlib.rcParams[rc_str.format(rc_name, prop_dict)]
)
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False', but handle it nonetheless to keep backward
# compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
# assume that if a color is passed in, the user wants a
# filled symbol; if the user wants more control, use
# flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
manage_xticks=manage_xticks,
zorder=zorder,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
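# Unlike pandas' BoxPlot._plot, draw from the precomputed bxpstats dict
# (via Axes.bxp) instead of from the raw column data.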
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(KoalasBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is Koalas specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# # Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = KoalasBoxPlot._compute_stats(data, colname, whis, precision)
# # Creates a column to flag rows as outliers or not
outliers = KoalasBoxPlot._outliers(data, colname, *col_fences)
# # Computes min and max values of non-outliers - the whiskers
whiskers = KoalasBoxPlot._calc_whiskers(colname, outliers)
if showfliers:
fliers = KoalasBoxPlot._get_fliers(colname, outliers)
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = matplotlib.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = matplotlib.rcParams["boxplot.bootstrap"]
if notch is None:
notch = matplotlib.rcParams["boxplot.notch"]
if vert is None:
vert = matplotlib.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = matplotlib.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = matplotlib.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = matplotlib.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = matplotlib.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = matplotlib.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = matplotlib.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
@staticmethod
def _compute_stats(data, colname, whis, precision):
# Computes mean, median, Q1 and Q3 with approx_percentile and precision
pdf = data._kdf._internal.resolved_copy.spark_frame.agg(
*[
F.expr(
"approx_percentile({}, {}, {})".format(colname, q, int(1.0 / precision))
).alias("{}_{}%".format(colname, int(q * 100)))
for q in [0.25, 0.50, 0.75]
],
F.mean(colname).alias("{}_mean".format(colname))
).toPandas()
# Computes IQR and Tukey's fences
iqr = "{}_iqr".format(colname)
p75 = "{}_75%".format(colname)
p25 = "{}_25%".format(colname)
pdf.loc[:, iqr] = pdf.loc[:, p75] - pdf.loc[:, p25]
pdf.loc[:, "{}_lfence".format(colname)] = pdf.loc[:, p25] - whis * pdf.loc[:, iqr]
pdf.loc[:, "{}_ufence".format(colname)] = pdf.loc[:, p75] + whis * pdf.loc[:, iqr]
qnames = ["25%", "50%", "75%", "mean", "lfence", "ufence"]
col_summ = pdf[["{}_{}".format(colname, q) for q in qnames]]
col_summ.columns = qnames
lfence, ufence = col_summ["lfence"], col_summ["ufence"]
stats = {
"mean": col_summ["mean"].values[0],
"med": col_summ["50%"].values[0],
"q1": col_summ["25%"].values[0],
"q3": col_summ["75%"].values[0],
}
return stats, (lfence.values[0], ufence.values[0])
@staticmethod
def _outliers(data, colname, lfence, ufence):
# Builds expression to identify outliers
expression = F.col(colname).between(lfence, ufence)
# Creates a column to flag rows as outliers or not
return data._kdf._internal.resolved_copy.spark_frame.withColumn(
"__{}_outlier".format(colname), ~expression
)
@staticmethod
def _calc_whiskers(colname, outliers):
# Computes min and max values of non-outliers - the whiskers
minmax = (
outliers.filter("not __{}_outlier".format(colname))
.agg(F.min(colname).alias("min"), F.max(colname).alias("max"))
.toPandas()
)
return minmax.iloc[0][["min", "max"]].values
@staticmethod
def _get_fliers(colname, outliers):
# Filters only the outliers, should "showfliers" be True
fliers_df = outliers.filter("__{}_outlier".format(colname))
        # If showing fliers, take the top 1k with the highest absolute values
fliers = (
fliers_df.select(F.abs(F.col("`{}`".format(colname))).alias(colname))
.orderBy(F.desc("`{}`".format(colname)))
.limit(1001)
.toPandas()[colname]
.values
)
return fliers
class KoalasHistPlot(HistPlot):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
# TODO: this logic is same with KdePlot. Might have to deduplicate it.
from databricks.koalas.series import Series
data = self.data
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
if is_integer(self.bins):
# computes boundaries for the column
self.bins = self._get_bins(data.to_spark(), self.bins)
self.data = numeric_data
def _make_plot(self):
# TODO: this logic is similar with KdePlot. Might have to deduplicate it.
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
# 'y' is a Spark DataFrame that selects one column.
            # here, we manually calculate the weights separately via Spark
            # and assign them directly to the histogram plot.
y = KoalasHistPlot._compute_hist(y, self.bins) # now y is a pandas Series.
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
@staticmethod
def _get_bins(sdf, bins):
# 'data' is a Spark DataFrame that selects all columns.
if len(sdf.columns) > 1:
min_col = F.least(*map(F.min, sdf))
max_col = F.greatest(*map(F.max, sdf))
else:
min_col = F.min(sdf.columns[-1])
max_col = F.max(sdf.columns[-1])
boundaries = sdf.select(min_col, max_col).first()
# divides the boundaries into bins
if boundaries[0] == boundaries[1]:
boundaries = (boundaries[0] - 0.5, boundaries[1] + 0.5)
return np.linspace(boundaries[0], boundaries[1], bins + 1)
@staticmethod
def _compute_hist(sdf, bins):
# 'data' is a Spark DataFrame that selects one column.
assert isinstance(bins, (np.ndarray, np.generic))
colname = sdf.columns[-1]
bucket_name = "__{}_bucket".format(colname)
# creates a Bucketizer to get corresponding bin of each value
bucketizer = Bucketizer(
splits=bins, inputCol=colname, outputCol=bucket_name, handleInvalid="skip"
)
# after bucketing values, groups and counts them
result = (
bucketizer.transform(sdf)
.select(bucket_name)
.groupby(bucket_name)
.agg(F.count("*").alias("count"))
.toPandas()
.sort_values(by=bucket_name)
)
# generates a pandas DF with one row for each bin
# we need this as some of the bins may be empty
indexes = pd.DataFrame({bucket_name: np.arange(0, len(bins) - 1), "bucket": bins[:-1]})
# merges the bins with counts on it and fills remaining ones with zeros
pdf = indexes.merge(result, how="left", on=[bucket_name]).fillna(0)[["count"]]
pdf.columns = [bucket_name]
return pdf[bucket_name]
class KoalasPiePlot(PiePlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasPiePlot, self).__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasPiePlot, self)._make_plot()
class KoalasAreaPlot(AreaPlot, SampledPlot):
def __init__(self, data, **kwargs):
super(KoalasAreaPlot, self).__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasAreaPlot, self)._make_plot()
class KoalasLinePlot(LinePlot, SampledPlot):
def __init__(self, data, **kwargs):
super(KoalasLinePlot, self).__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasLinePlot, self)._make_plot()
class KoalasBarhPlot(BarhPlot, TopNPlot):
def __init__(self, data, **kwargs):
super(KoalasBarhPlot, self).__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasBarhPlot, self)._make_plot()
class KoalasScatterPlot(ScatterPlot, TopNPlot):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super(KoalasScatterPlot, self)._make_plot()
class KoalasKdePlot(KdePlot):
def _compute_plot_data(self):
from databricks.koalas.series import Series
data = self.data
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
self.data = numeric_data
def _make_plot(self):
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
# 'y' is a Spark DataFrame that selects one column.
if self.ind is None:
min_val, max_val = y.select(F.min(y.columns[-1]), F.max(y.columns[-1])).first()
sample_range = max_val - min_val
ind = np.linspace(min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, 1000,)
elif is_integer(self.ind):
min_val, max_val = y.select(F.min(y.columns[-1]), F.max(y.columns[-1])).first()
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, self.ind,)
else:
ind = self.ind
return ind
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
# 'y' is a Spark DataFrame that selects one column.
# Using RDD is slow so we might have to change it to Dataset based implementation
# once Spark has that implementation.
sample = y.rdd.map(lambda x: float(x[0]))
kd = KernelDensity()
kd.setSample(sample)
assert isinstance(bw_method, (int, float)), "'bw_method' must be set as a scalar number."
if bw_method is not None:
# Match the bandwidth with Spark.
kd.setBandwidth(float(bw_method))
y = kd.estimate(list(map(float, ind)))
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
_klasses = [
KoalasHistPlot,
KoalasBarPlot,
KoalasBoxPlot,
KoalasPiePlot,
KoalasAreaPlot,
KoalasLinePlot,
KoalasBarhPlot,
KoalasScatterPlot,
KoalasKdePlot,
]
_plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses}
def plot_series(
data,
kind="line",
ax=None, # Series unique
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False, # Series unique
**kwds
):
"""
Make plots of Series using matplotlib / pylab.
Each plot kind has a corresponding method on the
``Series.plot`` accessor:
``s.plot(kind='line')`` is equivalent to
``s.plot.line()``.
Parameters
----------
data : Series
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
ax : matplotlib axes object
If not passed, uses gca()
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
# function copied from pandas.plotting._core
# so it calls modified _plot below
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
ax = None
with plt.rc_context():
ax = plt.gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(
data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds
)
def plot_frame(
data,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
"""
Make plots of DataFrames using matplotlib / pylab.
Each plot kind has a corresponding method on the
``DataFrame.plot`` accessor:
``kdf.plot(kind='line')`` is equivalent to
``kdf.plot.line()``.
Parameters
----------
data : DataFrame
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
ax : matplotlib axes object
If not passed, uses gca()
x : label or position, default None
y : label, position or list of label, positions, default None
Allows plotting of one column versus another.
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
sharex: bool or None, default is None
Whether to share x axis or not.
sharey: bool, default is False
Whether to share y axis or not.
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
sort_columns: bool, default is False
When True, will sort values on plots.
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
return _plot(
data,
kind=kind,
x=x,
y=y,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
subplots=subplots,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
sharex=sharex,
sharey=sharey,
secondary_y=secondary_y,
layout=layout,
sort_columns=sort_columns,
**kwds
)
def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds):
from databricks.koalas import DataFrame
# function copied from pandas.plotting._core
# and adapted to handle Koalas DataFrame and Series
kind = kind.lower().strip()
kind = {"density": "kde"}.get(kind, kind)
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
# scatter and hexbin are inherited from PlanePlot which require x and y
if kind in ("scatter", "hexbin"):
plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
# check data type and do preprocess before applying plot
if isinstance(data, DataFrame):
if x is not None:
data = data.set_index(x)
# TODO: check if value of y is plottable
if y is not None:
data = data[y]
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
class KoalasSeriesPlotMethods(PandasObject):
"""
Series plotting accessor and method.
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``s.plot(kind='hist')`` is equivalent to ``s.plot.hist()``
"""
def __init__(self, data):
self.data = data
def __call__(
self,
kind="line",
ax=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False,
**kwds
):
return plot_series(
self.data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds
)
__call__.__doc__ = plot_series.__doc__
def line(self, x=None, y=None, **kwargs):
"""
Plot Series as lines.
This function is useful to plot lines using Series's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`Series.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.line()
"""
return self(kind="line", x=x, y=y, **kwargs)
def bar(self, **kwds):
"""
Vertical bar plot.
Parameters
----------
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.bar()
"""
return self(kind="bar", **kwds)
def barh(self, **kwds):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> plot = df.val.plot.barh()
"""
return self(kind="barh", **kwds)
def box(self, **kwds):
"""
Make a box plot of the DataFrame columns.
Parameters
----------
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
precision: scalar, default = 0.01
This argument is used by Koalas to compute approximate statistics
for building a boxplot. Use *smaller* values to get more precise
statistics.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
There are behavior differences between Koalas and pandas.
* Koalas computes approximate statistics - expect differences between
pandas and Koalas boxplots, especially regarding 1st and 3rd quartiles.
* The `whis` argument is only supported as a single number.
* Koalas doesn't support the following argument(s).
* `bootstrap` argument is not supported
* `autorange` argument is not supported
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = ks.DataFrame(data, columns=list('ABCD'))
>>> ax = df['A'].plot.box()
"""
return self(kind="box", **kwds)
def hist(self, bins=10, **kwds):
"""
Draw one histogram of the DataFrame’s columns.
Parameters
----------
bins : integer, default 10
Number of histogram bins to be used
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.Series.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> s = ks.Series([1, 3, 2])
>>> ax = s.plot.hist()
"""
return self(kind="hist", bins=bins, **kwds)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
Parameters
----------
bw_method : scalar
The method used to calculate the estimator bandwidth.
See KernelDensity in PySpark for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs : optional
Keyword arguments to pass on to :meth:`Koalas.Series.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
Examples
--------
A scalar bandwidth should be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> s = ks.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
The `ind` parameter determines the evaluation points for the
        plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5], bw_method=0.3)
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
            Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> plot = df.sales.plot.area()
"""
return self(kind="area", **kwds)
def pie(self, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwds
Keyword arguments to pass on to :meth:`Koalas.Series.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.mass.plot.pie(figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.mass.plot.pie(subplots=True, figsize=(6, 3))
"""
return self(kind="pie", **kwds)
class KoalasFramePlotMethods(PandasObject):
# TODO: not sure if Koalas wants to combine plot method for Series and DataFrame
"""
DataFrame plotting accessor and method.
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``df.plot(kind='hist')`` is equivalent to ``df.plot.hist()``
"""
def __init__(self, data):
self.data = data
def __call__(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
return plot_frame(
self.data,
x=x,
y=y,
kind=kind,
ax=ax,
subplots=subplots,
sharex=sharex,
sharey=sharey,
layout=layout,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
secondary_y=secondary_y,
sort_columns=sort_columns,
**kwds
)
def line(self, x=None, y=None, **kwargs):
"""
Plot DataFrame as lines.
Parameters
----------
x: int or str, optional
Columns to use for the horizontal axis.
y : int, str, or list of them, optional
The values to be plotted.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = ks.DataFrame({'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]},
... index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
return self(kind="line", x=x, y=y, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
Parameters
----------
bw_method : scalar
The method used to calculate the estimator bandwidth.
See KernelDensity in PySpark for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs : optional
Keyword arguments to pass on to :meth:`Koalas.DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
Examples
--------
For DataFrame, it works in the same way as Series:
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6], bw_method=0.3)
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def pie(self, y=None, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwds
Keyword arguments to pass on to :meth:`Koalas.DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
Examples
--------
In the example below we have a DataFrame with the information about
        planet's mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(6, 3))
"""
from databricks.koalas import DataFrame
        # pandas will raise an error if y is None and subplots is not True
if isinstance(self.data, DataFrame) and y is None and not kwds.get("subplots", False):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", y=y, **kwds)
def area(self, x=None, y=None, stacked=True, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
            Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
Examples
--------
.. plot::
:context: close-figs
>>> df = ks.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> plot = df.plot.area()
"""
return self(kind="area", x=x, y=y, stacked=stacked, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another.
If not specified, the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another.
If not specified, all numerical columns are used.
**kwds : optional
Additional keyword arguments are documented in
:meth:`Koalas.DataFrame.plot`.
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind="bar", x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with rectangular
bars with lengths proportional to the values that they represent. A bar plot shows
comparisons among discrete categories. One axis of the plot shows the specific
categories being compared, and the other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds:
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = ks.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ks.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
return self(kind="barh", x=x, y=y, **kwargs)
def hexbin(self, **kwds):
return unsupported_function(class_name="pd.DataFrame", method_name="hexbin")()
def box(self, **kwds):
return unsupported_function(class_name="pd.DataFrame", method_name="box")()
def hist(self, bins=10, **kwds):
"""
        Make a histogram of the DataFrame's columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist`, on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
**kwds
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
        When we roll a die 6000 times, we expect to get each value around 1000
        times. But when we roll two dice and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns=['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> df = ks.from_pandas(df)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", bins=bins, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
c : str, int or array_like, optional
**kwds: Optional
Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = ks.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwds)
| 1 | 15,712 | Hmm, why did we come to need the `,` at the end? | databricks-koalas | py |
@@ -13,7 +13,8 @@ module.exports = function selectPopulatedFields(query) {
var userProvidedFields = query._userProvidedFields || {};
if (query.selectedInclusively()) {
for (i = 0; i < paths.length; ++i) {
- if (!isPathInFields(userProvidedFields, paths[i])) {
+ var hasPath = query._fields[paths[i]];
+ if (!isPathInFields(userProvidedFields, paths[i]) && hasPath) {
query.select(paths[i]);
}
} | 1 | 'use strict';
/*!
* ignore
*/
module.exports = function selectPopulatedFields(query) {
var opts = query._mongooseOptions;
if (opts.populate != null) {
var paths = Object.keys(opts.populate);
var i;
var userProvidedFields = query._userProvidedFields || {};
if (query.selectedInclusively()) {
for (i = 0; i < paths.length; ++i) {
if (!isPathInFields(userProvidedFields, paths[i])) {
query.select(paths[i]);
}
}
} else if (query.selectedExclusively()) {
for (i = 0; i < paths.length; ++i) {
if (userProvidedFields[paths[i]] == null) {
delete query._fields[paths[i]];
}
}
}
}
};
/*!
* ignore
*/
function isPathInFields(userProvidedFields, path) {
var pieces = path.split('.');
var len = pieces.length;
var cur = pieces[0];
for (var i = 1; i < len; ++i) {
if (userProvidedFields[cur] != null) {
return true;
}
cur += '.' + pieces[i];
}
return userProvidedFields[cur] != null;
}
 | 1 | 13,816 | I'm suspicious of this. For one thing, `query._fields[paths[i]]` may be `0`, `false`, etc., so checking for falsiness will catch both the case where the field isn't in the projection and the case where the field is explicitly excluded from the projection. For another, I'm not so sure that #6546 is a bug. Let's discuss that more. | Automattic-mongoose | js
@@ -32,6 +32,8 @@ type Parser struct {
interpreter *interpreter
// Stashed set of source code for builtin rules.
builtins map[string][]byte
+
+ statements []*Statement
}
// NewParser creates a new parser instance. One is normally sufficient for a process lifetime. | 1 | // Package asp implements an experimental BUILD-language parser.
// Parsing is done using Participle (github.com/alecthomas/participle) in native Go,
// with a custom and also native partial Python interpreter.
package asp
import (
"bytes"
"encoding/gob"
"io"
"os"
"reflect"
"strings"
"gopkg.in/op/go-logging.v1"
"core"
)
var log = logging.MustGetLogger("asp")
func init() {
// gob needs to know how to encode and decode our types.
gob.Register(None)
gob.Register(pyInt(0))
gob.Register(pyString(""))
gob.Register(pyList{})
gob.Register(pyDict{})
}
// A Parser implements parsing of BUILD files.
type Parser struct {
interpreter *interpreter
// Stashed set of source code for builtin rules.
builtins map[string][]byte
}
// NewParser creates a new parser instance. One is normally sufficient for a process lifetime.
func NewParser(state *core.BuildState) *Parser {
p := newParser()
p.interpreter = newInterpreter(state, p)
return p
}
// newParser creates just the parser with no interpreter.
func newParser() *Parser {
return &Parser{builtins: map[string][]byte{}}
}
// LoadBuiltins instructs the parser to load rules from this file as built-ins.
// Optionally the file contents can be supplied directly.
// Also optionally a previously parsed form (acquired from ParseToFile) can be supplied.
func (p *Parser) LoadBuiltins(filename string, contents, encoded []byte) error {
var statements []*Statement
if len(encoded) != 0 {
decoder := gob.NewDecoder(bytes.NewReader(encoded))
if err := decoder.Decode(&statements); err != nil {
log.Fatalf("Failed to decode pre-parsed rules: %s", err)
}
}
if len(contents) != 0 {
p.builtins[filename] = contents
}
if err := p.interpreter.LoadBuiltins(filename, contents, statements); err != nil {
return p.annotate(err, nil)
}
return nil
}
// MustLoadBuiltins calls LoadBuiltins, and dies on any errors.
func (p *Parser) MustLoadBuiltins(filename string, contents, encoded []byte) {
if err := p.LoadBuiltins(filename, contents, encoded); err != nil {
log.Fatalf("Error loading builtin rules: %s", err)
}
}
// ParseFile parses the contents of a single file in the BUILD language.
// It returns true if the call was deferred at some point awaiting target to build,
// along with any error encountered.
func (p *Parser) ParseFile(pkg *core.Package, filename string) error {
statements, err := p.parse(filename)
if err != nil {
return err
}
_, err = p.interpreter.interpretAll(pkg, statements)
if err != nil {
f, _ := os.Open(filename)
p.annotate(err, f)
}
return err
}
// ParseReader parses the contents of the given ReadSeeker as a BUILD file.
// The first return value is true if parsing succeeds - if the error is still non-nil
// that indicates that interpretation failed.
func (p *Parser) ParseReader(pkg *core.Package, r io.ReadSeeker) (bool, error) {
stmts, err := p.parseAndHandleErrors(r, "")
if err != nil {
return false, err
}
_, err = p.interpreter.interpretAll(pkg, stmts)
return true, err
}
// ParseToFile parses the given file and writes a binary form of the result to the output file.
func (p *Parser) ParseToFile(input, output string) error {
stmts, err := p.parse(input)
if err != nil {
return err
}
stmts = p.optimise(stmts)
p.interpreter.optimiseExpressions(reflect.ValueOf(stmts))
for _, stmt := range stmts {
if stmt.FuncDef != nil {
stmt.FuncDef.KeywordsOnly = !whitelistedKwargs(stmt.FuncDef.Name, input)
}
}
f, err := os.Create(output)
if err != nil {
return err
}
encoder := gob.NewEncoder(f)
if err := encoder.Encode(stmts); err != nil {
return err
}
return f.Close()
}
// ParseFileOnly parses the given file but does not interpret it.
func (p *Parser) ParseFileOnly(filename string) ([]*Statement, error) {
return p.parse(filename)
}
// parse reads the given file and parses it into a set of statements.
func (p *Parser) parse(filename string) ([]*Statement, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
stmts, err := p.parseAndHandleErrors(f, filename)
if err == nil {
// This appears a bit weird, but the error will still use the file if it's open
// to print additional information about it.
f.Close()
}
return stmts, err
}
// ParseData reads the given byteslice and parses it into a set of statements.
// The 'filename' argument is only used in case of errors so doesn't necessarily have to correspond to a real file.
func (p *Parser) ParseData(data []byte, filename string) ([]*Statement, error) {
r := &namedReader{r: bytes.NewReader(data), name: filename}
return p.parseAndHandleErrors(r, filename)
}
// parseAndHandleErrors handles errors nicely if the given input fails to parse.
func (p *Parser) parseAndHandleErrors(r io.ReadSeeker, filename string) ([]*Statement, error) {
input, err := parseFileInput(r)
if err == nil {
return input.Statements, nil
}
// If we get here, something went wrong. Try to give some nice feedback about it.
return nil, p.annotate(err, r)
}
// annotate annotates the given error with whatever source information we have.
func (p *Parser) annotate(err error, r io.ReadSeeker) error {
err = AddReader(err, r)
// Now annotate with any builtin rules we might have loaded.
for filename, contents := range p.builtins {
err = AddReader(err, &namedReader{r: bytes.NewReader(contents), name: filename})
}
return err
}
// optimise implements some (very) mild optimisations on the given set of statements to translate them
// into a form we find slightly more useful.
// This also sneaks in some rewrites to .append and .extend which are very troublesome otherwise
// (technically that changes the meaning of the code, #dealwithit)
func (p *Parser) optimise(statements []*Statement) []*Statement {
ret := make([]*Statement, 0, len(statements))
for _, stmt := range statements {
if stmt.Literal != nil || stmt.Pass {
continue // Neither statement has any effect.
} else if stmt.FuncDef != nil {
stmt.FuncDef.Statements = p.optimise(stmt.FuncDef.Statements)
} else if stmt.For != nil {
stmt.For.Statements = p.optimise(stmt.For.Statements)
} else if stmt.If != nil {
stmt.If.Statements = p.optimise(stmt.If.Statements)
for i, elif := range stmt.If.Elif {
stmt.If.Elif[i].Statements = p.optimise(elif.Statements)
}
stmt.If.ElseStatements = p.optimise(stmt.If.ElseStatements)
} else if stmt.Ident != nil && stmt.Ident.Action != nil && stmt.Ident.Action.Property != nil && len(stmt.Ident.Action.Property.Action) == 1 {
call := stmt.Ident.Action.Property.Action[0].Call
name := stmt.Ident.Action.Property.Name
if (name == "append" || name == "extend") && call != nil && len(call.Arguments) == 1 {
stmt = &Statement{
Pos: stmt.Pos,
Ident: &IdentStatement{
Name: stmt.Ident.Name,
Action: &IdentStatementAction{
AugAssign: &call.Arguments[0].Value,
},
},
}
if name == "append" {
stmt.Ident.Action.AugAssign = &Expression{Val: &ValueExpression{
List: &List{
Values: []*Expression{&call.Arguments[0].Value},
},
}}
}
}
}
ret = append(ret, stmt)
}
return ret
}
// whitelistedKwargs returns true if the given built-in function name is allowed to
// be called as non-kwargs.
// TODO(peterebden): Come up with a syntax that exposes this directly in the file.
func whitelistedKwargs(name, filename string) bool {
if name[0] == '_' || (strings.HasSuffix(filename, "builtins.build_defs") && name != "build_rule") {
return true // Don't care about anything private, or non-rule builtins.
}
return map[string]bool{
"workspace": true,
"decompose": true,
"check_config": true,
"select": true,
}[name]
}
| 1 | 8,433 | What is this? I'm a bit unclear why the parser would have a list of statements in it. | thought-machine-please | go |
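For context on the stashed-statements question above: the file already round-trips parsed statements through encoding/gob (ParseToFile encodes them, LoadBuiltins decodes them). The following is only a minimal, self-contained sketch of that round-trip using a hypothetical, simplified Statement type; it is not the real asp AST.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// Statement stands in for the real asp AST node, which is far richer
// (FuncDef, If, For, Ident, ...).
type Statement struct {
	Pos  int
	Name string
}

func main() {
	stmts := []*Statement{{Pos: 1, Name: "build_rule"}, {Pos: 2, Name: "filegroup"}}
	// Encode, as ParseToFile does when writing pre-parsed builtin rules.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(stmts); err != nil {
		log.Fatalf("Failed to encode statements: %s", err)
	}
	// Decode, as LoadBuiltins does when handed the pre-parsed form.
	var decoded []*Statement
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&decoded); err != nil {
		log.Fatalf("Failed to decode pre-parsed rules: %s", err)
	}
	fmt.Println(len(decoded), decoded[0].Name)
}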
@@ -29,6 +29,14 @@ var (
"name",
}, nil,
)
+ descPrometheusEnforcedSampleLimit = prometheus.NewDesc(
+ "prometheus_operator_enforced_sample_limit",
+ "Global limit on the number of scraped samples per scrape target.",
+ []string{
+ "namespace",
+ "name",
+ }, nil,
+ )
)
type prometheusCollector struct { | 1 | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/client-go/tools/cache"
)
var (
descPrometheusSpecReplicas = prometheus.NewDesc(
"prometheus_operator_spec_replicas",
"Number of expected replicas for the object.",
[]string{
"namespace",
"name",
}, nil,
)
)
type prometheusCollector struct {
stores []cache.Store
}
func NewPrometheusCollector(s cache.Store) *prometheusCollector {
return &prometheusCollector{stores: []cache.Store{s}}
}
func NewPrometheusCollectorForStores(s ...cache.Store) *prometheusCollector {
return &prometheusCollector{stores: s}
}
// Describe implements the prometheus.Collector interface.
func (c *prometheusCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- descPrometheusSpecReplicas
}
// Collect implements the prometheus.Collector interface.
func (c *prometheusCollector) Collect(ch chan<- prometheus.Metric) {
for _, s := range c.stores {
for _, p := range s.List() {
c.collectPrometheus(ch, p.(*v1.Prometheus))
}
}
}
func (c *prometheusCollector) collectPrometheus(ch chan<- prometheus.Metric, p *v1.Prometheus) {
replicas := float64(minReplicas)
if p.Spec.Replicas != nil {
replicas = float64(*p.Spec.Replicas)
}
ch <- prometheus.MustNewConstMetric(descPrometheusSpecReplicas, prometheus.GaugeValue, replicas, p.Namespace, p.Name)
}
 | 1 | 15,129 | I believe this is the Prometheus name; I wonder if this is descriptive enough for a label name? @nrchakradhar @simonpasquier wdyt? | prometheus-operator-prometheus-operator | go
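The patch above only declares the new descriptor. To actually surface the metric, the collector's Describe and Collect paths would also have to reference it, mirroring how descPrometheusSpecReplicas is handled in the file above. The sketch below shows that wiring; it assumes the Prometheus spec exposes the limit as a *uint64 field called EnforcedSampleLimit, which is an assumption rather than something shown in this file.

// Sketch only: extends the Describe/collectPrometheus methods shown above.
func (c *prometheusCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- descPrometheusSpecReplicas
	ch <- descPrometheusEnforcedSampleLimit
}

func (c *prometheusCollector) collectPrometheus(ch chan<- prometheus.Metric, p *v1.Prometheus) {
	replicas := float64(minReplicas)
	if p.Spec.Replicas != nil {
		replicas = float64(*p.Spec.Replicas)
	}
	ch <- prometheus.MustNewConstMetric(descPrometheusSpecReplicas, prometheus.GaugeValue, replicas, p.Namespace, p.Name)
	// EnforcedSampleLimit is the assumed spec field; report 0 when no global limit is enforced.
	var limit float64
	if p.Spec.EnforcedSampleLimit != nil {
		limit = float64(*p.Spec.EnforcedSampleLimit)
	}
	ch <- prometheus.MustNewConstMetric(descPrometheusEnforcedSampleLimit, prometheus.GaugeValue, limit, p.Namespace, p.Name)
}

Emitting 0 rather than omitting the series keeps the metric present for dashboards even when no limit is configured; dropping the sample entirely when the field is nil would be an equally reasonable choice.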
@@ -213,6 +213,11 @@ type Config struct {
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
+ // Knobs provided to explicitly control whether we add rules to drop encap traffic
+ // from workloads. We always add them unless explicitly disabled.
+ DropVXLANPacketsFromWorkloads bool `config:"bool;true"`
+ DropIPIPPacketsFromWorkloads bool `config:"bool;true"`
+
AWSSrcDstCheck string `config:"oneof(DoNothing,Enable,Disable);DoNothing;non-zero"`
ReportingIntervalSecs time.Duration `config:"seconds;30"` | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/numorstring"
"github.com/projectcalico/felix/idalloc"
)
var (
// RegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value represents a regular expression and is marked by '/' at the start
// and end and cannot have spaces
RegexpIfaceElemRegexp = regexp.MustCompile(`^\/[^\s]+\/$`)
// NonRegexpIfaceElemRegexp matches an individual element in the overall interface list;
	// assumes the value is between 1-15 chars long and can only be alphanumeric or - or _
NonRegexpIfaceElemRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}$`)
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
IfaceParamRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,15}$`)
	// Hostnames have to be valid ipv4 or ipv6 addresses, or strings up to 64 characters.
HostAddressRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,64}$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
InternalOverride
)
var SourcesInDescendingOrder = []Source{InternalOverride, EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
case InternalOverride:
return "internal override"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable, InternalOverride:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
// Wireguard configuration
WireguardEnabled bool `config:"bool;false"`
WireguardListeningPort int `config:"int;51820"`
WireguardRoutingRulePriority int `config:"int;99"`
WireguardInterfaceName string `config:"iface-param;wireguard.cali;non-zero"`
WireguardMTU int `config:"int;1420;non-zero"`
BPFEnabled bool `config:"bool;false"`
BPFDisableUnprivileged bool `config:"bool;true"`
BPFLogLevel string `config:"oneof(off,info,debug);off;non-zero"`
BPFDataIfacePattern *regexp.Regexp `config:"regexp;^(en[opsx].*|eth.*|tunl0$|wireguard.cali$)"`
BPFConnectTimeLoadBalancingEnabled bool `config:"bool;true"`
BPFExternalServiceMode string `config:"oneof(tunnel,dsr);tunnel;non-zero"`
BPFKubeProxyIptablesCleanupEnabled bool `config:"bool;true"`
BPFKubeProxyMinSyncPeriod time.Duration `config:"seconds;1"`
BPFKubeProxyEndpointSlicesEnabled bool `config:"bool;false"`
// DebugBPFCgroupV2 controls the cgroup v2 path that we apply the connect-time load balancer to. Most distros
// are configured for cgroup v1, which prevents all but the root cgroup v2 from working, so this is only useful
// for development right now.
DebugBPFCgroupV2 string `config:"string;;local"`
// DebugBPFMapRepinEnabled can be used to prevent Felix from repinning its BPF maps at startup. This is useful for
// testing with multiple Felix instances running on one host.
DebugBPFMapRepinEnabled bool `config:"bool;true;local"`
DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;local"`
TyphaK8sServiceName string `config:"string;;local"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"`
TyphaReadTimeout time.Duration `config:"seconds;30;local"`
TyphaWriteTimeout time.Duration `config:"seconds;10;local"`
// Client-side TLS config for Felix's communication with Typha. If any of these are
// specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left
// unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present
// a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN
// matching TyphaURISAN.
TyphaKeyFile string `config:"file(must-exist);;local"`
TyphaCertFile string `config:"file(must-exist);;local"`
TyphaCAFile string `config:"file(must-exist);;local"`
TyphaCN string `config:"string;;local"`
TyphaURISAN string `config:"string;;local"`
Ipv6Support bool `config:"bool;true"`
IptablesBackend string `config:"oneof(legacy,nft,auto);auto"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
InterfaceRefreshInterval time.Duration `config:"seconds;90"`
DeviceRouteSourceAddress net.IP `config:"ipv4;"`
DeviceRouteProtocol int `config:"int;3"`
RemoveExternalRoutes bool `config:"bool;true"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
FeatureDetectOverride map[string]string `config:"keyvaluelist;;"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
XDPRefreshInterval time.Duration `config:"seconds;90"`
PolicySyncPathPrefix string `config:"file;;"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
OpenstackRegion string `config:"region;;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude []*regexp.Regexp `config:"iface-list-regexp;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
VXLANEnabled bool `config:"bool;false"`
VXLANPort int `config:"int;4789"`
VXLANVNI int `config:"int;4096"`
VXLANMTU int `config:"int;1410;non-zero"`
IPv4VXLANTunnelAddr net.IP `config:"ipv4;"`
VXLANTunnelMACAddr string `config:"string;"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;1440;non-zero"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
AWSSrcDstCheck string `config:"oneof(DoNothing,Enable,Disable);DoNothing;non-zero"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
HealthHost string `config:"host-address;localhost"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsHost string `config:"host-address;"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:5473,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:5473,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"`
NATOutgoingAddress net.IP `config:"ipv4;"`
UsageReportingEnabled bool `config:"bool;true"`
UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"`
DebugMemoryProfilePath string `config:"file;;"`
DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
DebugPanicAfter time.Duration `config:"seconds;0"`
DebugSimulateDataRace bool `config:"bool;false"`
// Configure where Felix gets its routing information.
// - workloadIPs: use workload endpoints to construct routes.
// - calicoIPAM: use IPAM data to construct routes.
RouteSource string `config:"oneof(WorkloadIPs,CalicoIPAM);CalicoIPAM"`
RouteTableRange idalloc.IndexRange `config:"route-table-range;1-250;die-on-fail"`
IptablesNATOutgoingInterfaceFilter string `config:"iface-param;"`
SidecarAccelerationEnabled bool `config:"bool;false"`
XDPEnabled bool `config:"bool;true"`
GenericXDPEnabled bool `config:"bool;false"`
Variant string `config:"string;Calico"`
// State tracking.
// internalOverrides contains our highest priority config source, generated from internal constraints
// such as kernel version support.
internalOverrides map[string]string
// sourceToRawConfig maps each source to the set of config that was given to us via UpdateFrom.
sourceToRawConfig map[Source]map[string]string
// rawValues maps keys to the current highest-priority raw value.
rawValues map[string]string
// Err holds the most recent error from a config update.
Err error
loadClientConfigFromEnvironment func() (*apiconfig.CalicoAPIConfig, error)
useNodeResourceUpdates bool
}
// Copy makes a copy of the object. Internal state is deep copied but config parameters are only shallow copied.
// This saves work since updates to the copy will trigger the config params to be recalculated.
func (config *Config) Copy() *Config {
// Start by shallow-copying the object.
cp := *config
// Copy the internal state over as a deep copy.
cp.internalOverrides = map[string]string{}
for k, v := range config.internalOverrides {
cp.internalOverrides[k] = v
}
cp.sourceToRawConfig = map[Source]map[string]string{}
for k, v := range config.sourceToRawConfig {
cp.sourceToRawConfig[k] = map[string]string{}
for k2, v2 := range v {
cp.sourceToRawConfig[k][k2] = v2
}
}
cp.rawValues = map[string]string{}
for k, v := range config.rawValues {
cp.rawValues[k] = v
}
return &cp
}
type ProtoPort struct {
Protocol string
Port uint16
}
// UpdateFrom parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
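// IsLeader reports whether the configured product variant is "Calico".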
func (config *Config) IsLeader() bool {
return config.Variant == "Calico"
}
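// InterfacePrefixes returns the comma-separated InterfacePrefix setting split into its individual prefixes.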
func (config *Config) InterfacePrefixes() []string {
return strings.Split(config.InterfacePrefix, ",")
}
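// OpenstackActive returns true if the ClusterType mentions OpenStack or, failing that, if the
// OpenStack-related settings (non-default metadata address/port or a "tap" interface prefix)
// suggest an OpenStack deployment.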
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
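// resolve recalculates the effective configuration by applying the raw config from each source in
// descending priority order, parsing and validating each value before storing it on the struct.
// It reports whether the set of effective raw values changed.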
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
// Map from lower-case version of name to the highest-priority source found so far.
// We use the lower-case version of the name since we can calculate it both for
// expected and "raw" parameters, which may be used by plugins.
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
lowerCaseName := strings.ToLower(rawName)
currentSource := nameToSource[lowerCaseName]
param, ok := knownParams[lowerCaseName]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for an external
// dataplane driver. Use the raw name since the driver may
// want it.
newRawValues[rawName] = rawValue
nameToSource[lowerCaseName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[lowerCaseName] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
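// setBy reports whether the named parameter was explicitly set by the given source.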
func (config *Config) setBy(name string, source Source) bool {
_, set := config.sourceToRawConfig[source][name]
return set
}
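// setByConfigFileOrEnvironment reports whether the named parameter was set via the config file or an
// environment variable.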
func (config *Config) setByConfigFileOrEnvironment(name string) bool {
return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable)
}
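// DatastoreConfig builds the libcalico-go API config used for Felix's datastore connection, starting
// from the standard environment-based client config and then applying any Felix-specific config file
// or FELIX_XXXYYY overrides.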
func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig {
// We want Felix's datastore connection to be fully configurable using the same
// CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go
// client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a
// long time supported FELIX_XXXYYY environment variables, and we want those to keep working
// too.
// To achieve that, first build a CalicoAPIConfig using libcalico-go's
// LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY
// and XXX_YYY variables.
cfg, err := config.loadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the
// etcd case. Note that the etcd options are set even if the DatastoreType isn't etcdv3.
// This allows the user to rely on the default DatastoreType being etcdv3 and still be able
// to configure the other etcdv3 options. As of the time of this code change, the etcd options
// have no effect if the DatastoreType is not etcdv3.
// Datastore type, either etcdv3 or kubernetes
if config.setByConfigFileOrEnvironment("DatastoreType") {
log.Infof("Overriding DatastoreType from felix config to %s", config.DatastoreType)
if config.DatastoreType == string(apiconfig.EtcdV3) {
cfg.Spec.DatastoreType = apiconfig.EtcdV3
} else if config.DatastoreType == string(apiconfig.Kubernetes) {
cfg.Spec.DatastoreType = apiconfig.Kubernetes
}
}
// Endpoints.
if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 {
log.Infof("Overriding EtcdEndpoints from felix config to %s", config.EtcdEndpoints)
cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
} else if config.setByConfigFileOrEnvironment("EtcdAddr") {
etcdEndpoints := config.EtcdScheme + "://" + config.EtcdAddr
log.Infof("Overriding EtcdEndpoints from felix config to %s", etcdEndpoints)
cfg.Spec.EtcdEndpoints = etcdEndpoints
}
// TLS.
if config.setByConfigFileOrEnvironment("EtcdKeyFile") {
log.Infof("Overriding EtcdKeyFile from felix config to %s", config.EtcdKeyFile)
cfg.Spec.EtcdKeyFile = config.EtcdKeyFile
}
if config.setByConfigFileOrEnvironment("EtcdCertFile") {
log.Infof("Overriding EtcdCertFile from felix config to %s", config.EtcdCertFile)
cfg.Spec.EtcdCertFile = config.EtcdCertFile
}
if config.setByConfigFileOrEnvironment("EtcdCaFile") {
log.Infof("Overriding EtcdCaFile from felix config to %s", config.EtcdCaFile)
cfg.Spec.EtcdCACertFile = config.EtcdCaFile
}
if !(config.IpInIpEnabled || config.VXLANEnabled || config.BPFEnabled) {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("Encap disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
// If any client-side TLS config parameters are specified, they _all_ must be - except that
// either TyphaCN or TyphaURISAN may be left unset.
if config.TyphaCAFile != "" ||
config.TyphaCertFile != "" ||
config.TyphaKeyFile != "" ||
config.TyphaCN != "" ||
config.TyphaURISAN != "" {
// Some TLS config specified.
if config.TyphaKeyFile == "" ||
config.TyphaCertFile == "" ||
config.TyphaCAFile == "" ||
(config.TyphaCN == "" && config.TyphaURISAN == "") {
err = errors.New("If any Felix-Typha TLS config parameters are specified," +
" they _all_ must be" +
" - except that either TyphaCN or TyphaURISAN may be left unset.")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
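// loadParams populates knownParams by reflecting over the Config struct's `config` tags, building a
// typed parser for each field and pre-parsing its default value so that invalid defaults are caught
// at startup.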
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "iface-list-regexp":
param = &RegexpPatternListParam{
NonRegexpElemRegexp: NonRegexpIfaceElemRegexp,
RegexpElemRegexp: RegexpIfaceElemRegexp,
Delimiter: ",",
Msg: "list contains invalid Linux interface name or regex pattern",
}
case "regexp":
param = &RegexpPatternParam{}
case "iface-param":
param = &RegexpParam{Regexp: IfaceParamRegexp,
Msg: "invalid Linux interface parameter"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "portrange":
param = &PortRangeParam{}
case "portrange-list":
param = &PortRangeListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "host-address":
param = &RegexpParam{Regexp: HostAddressRegexp,
Msg: "invalid host address"}
case "region":
param = &RegionParam{}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
case "cidr-list":
param = &CIDRListParam{}
case "route-table-range":
param = &RouteTableRangeParam{}
case "keyvaluelist":
param = &KeyValueListParam{}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Contains(flags, "non-zero") {
metadata.NonZero = true
}
if strings.Contains(flags, "die-on-fail") {
metadata.DieOnParseFailure = true
}
if strings.Contains(flags, "local") {
metadata.Local = true
}
if defaultStr != "" {
if strings.Contains(flags, "skip-default-validation") {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) SetUseNodeResourceUpdates(b bool) {
config.useNodeResourceUpdates = b
}
func (config *Config) UseNodeResourceUpdates() bool {
return config.useNodeResourceUpdates
}
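// RawValues returns the current effective raw (string) values, keyed by parameter name.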
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func (config *Config) SetLoadClientConfigFromEnvironmentFunction(fnc func() (*apiconfig.CalicoAPIConfig, error)) {
config.loadClientConfigFromEnvironment = fnc
}
// OverrideParam installs a maximum priority parameter override for the given parameter. This is useful for
// disabling features that are found to be unsupported, for example. By using an extra priority class, the
// override will persist even if the host/global config is updated.
func (config *Config) OverrideParam(name, value string) (bool, error) {
config.internalOverrides[name] = value
return config.UpdateFrom(config.internalOverrides, InternalOverride)
}
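// New returns a Config with every known parameter set to its default, the hostname detected from the
// kernel (falling back to the HOSTNAME environment variable) and the standard libcalico-go client
// config loader installed.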
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: map[string]string{},
sourceToRawConfig: map[Source]map[string]string{},
internalOverrides: map[string]string{},
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := names.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = strings.ToLower(os.Getenv("HOSTNAME"))
}
p.FelixHostname = hostname
p.loadClientConfigFromEnvironment = apiconfig.LoadClientConfigFromEnvironment
return p
}
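// param is implemented by each typed parameter parser; it exposes the field's metadata, parses raw
// string values and knows how to apply the default to a Config.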
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 18,308 | Are most of our other fields default-zero-value? Would `AllowVXLANPacketsFromWorkloads` be a better formulation of this? I think it would make it easier for golang users of the API (default value matches the type zero-value) | projectcalico-felix | go |
@@ -35,7 +35,7 @@ namespace Nethermind.TxPool
void AddPeer(ITxPoolPeer peer);
void RemovePeer(PublicKey nodeId);
AddTxResult AddTransaction(Transaction tx, TxHandlingOptions handlingOptions);
- void RemoveTransaction(Keccak hash, long blockNumber);
+ void RemoveTransaction(Keccak hash, long blockNumber, bool removeSmallerNonces);
bool TryGetPendingTransaction(Keccak hash, out Transaction transaction);
UInt256 ReserveOwnTransactionNonce(Address address);
event EventHandler<TxEventArgs> NewDiscovered; | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Int256;
namespace Nethermind.TxPool
{
public interface ITxPool
{
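/// <summary>
/// Returns all transactions currently pending in the pool.
/// </summary>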
Transaction[] GetPendingTransactions();
Transaction[] GetOwnPendingTransactions();
/// <summary>
/// Pending transactions grouped by sender address; each group is sorted by nonce and then by the tx pool's ordering.
/// </summary>
/// <returns></returns>
IDictionary<Address, Transaction[]> GetPendingTransactionsBySender();
void AddPeer(ITxPoolPeer peer);
void RemovePeer(PublicKey nodeId);
AddTxResult AddTransaction(Transaction tx, TxHandlingOptions handlingOptions);
void RemoveTransaction(Keccak hash, long blockNumber);
bool TryGetPendingTransaction(Keccak hash, out Transaction transaction);
UInt256 ReserveOwnTransactionNonce(Address address);
event EventHandler<TxEventArgs> NewDiscovered;
event EventHandler<TxEventArgs> NewPending;
event EventHandler<TxEventArgs> RemovedPending;
}
}
| 1 | 24,718 | void RemoveTransactions(Address sander, long removeBelowThisNonce) and separate these two calls | NethermindEth-nethermind | .cs |
@@ -4365,6 +4365,8 @@ func TestJetStreamSnapshotsAPI(t *testing.T) {
// Now connect through a cluster server and make sure we can get things to work this way as well.
nc2 := clientConnectToServer(t, ls)
defer nc2.Close()
+ // Wait a bit for interest to propagate.
+ time.Sleep(100 * time.Millisecond)
snapshot = snapshot[:0]
| 1 | // Copyright 2019-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net/url"
"os"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/nats-io/nats-server/v2/server"
"github.com/nats-io/nats-server/v2/server/sysmem"
"github.com/nats-io/nats.go"
"github.com/nats-io/nuid"
)
func TestJetStreamBasicNilConfig(t *testing.T) {
s := RunRandClientPortServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
if err := s.EnableJetStream(nil); err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if !s.JetStreamEnabled() {
t.Fatalf("Expected JetStream to be enabled")
}
if s.SystemAccount() == nil {
t.Fatalf("Expected system account to be created automatically")
}
// Grab our config since it was dynamically generated.
config := s.JetStreamConfig()
if config == nil {
t.Fatalf("Expected non-nil config")
}
// Check dynamic max memory.
hwMem := sysmem.Memory()
if hwMem != 0 {
// Make sure it's about 75% of system memory.
est := hwMem / 4 * 3
if config.MaxMemory != est {
t.Fatalf("Expected memory to be 80 percent of system memory, got %v vs %v", config.MaxMemory, est)
}
}
// Make sure it was created.
stat, err := os.Stat(config.StoreDir)
if err != nil {
t.Fatalf("Expected the store directory to be present, %v", err)
}
if stat == nil || !stat.IsDir() {
t.Fatalf("Expected a directory")
}
}
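// RunBasicJetStreamServer starts a server on a random port with JetStream enabled and a fresh
// temporary store directory.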
func RunBasicJetStreamServer() *server.Server {
opts := DefaultTestOptions
opts.Port = -1
opts.JetStream = true
tdir, _ := ioutil.TempDir(os.TempDir(), "jstests-storedir-")
opts.StoreDir = tdir
return RunServer(&opts)
}
func RunJetStreamServerOnPort(port int, sd string) *server.Server {
opts := DefaultTestOptions
opts.Port = port
opts.JetStream = true
opts.StoreDir = filepath.Dir(sd)
return RunServer(&opts)
}
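// clientConnectToServer connects a test client to the given server with fast, unlimited reconnects,
// failing the test if the connection cannot be established.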
func clientConnectToServer(t *testing.T, s *server.Server) *nats.Conn {
t.Helper()
nc, err := nats.Connect(s.ClientURL(),
nats.Name("JS-TEST"),
nats.ReconnectWait(5*time.Millisecond),
nats.MaxReconnects(-1))
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
return nc
}
func clientConnectWithOldRequest(t *testing.T, s *server.Server) *nats.Conn {
nc, err := nats.Connect(s.ClientURL(), nats.UseOldRequestStyle())
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
return nc
}
func TestJetStreamEnableAndDisableAccount(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
// Global in simple setup should be enabled already.
if !s.GlobalAccount().JetStreamEnabled() {
t.Fatalf("Expected to have jetstream enabled on global account")
}
if na := s.JetStreamNumAccounts(); na != 1 {
t.Fatalf("Expected 1 account, got %d", na)
}
if err := s.GlobalAccount().DisableJetStream(); err != nil {
t.Fatalf("Did not expect error on disabling account: %v", err)
}
if na := s.JetStreamNumAccounts(); na != 0 {
t.Fatalf("Expected no accounts, got %d", na)
}
// Make sure we unreserved resources.
if rm, rd, err := s.JetStreamReservedResources(); err != nil {
t.Fatalf("Unexpected error requesting jetstream reserved resources: %v", err)
} else if rm != 0 || rd != 0 {
t.Fatalf("Expected reserved memory and store to be 0, got %v and %v", server.FriendlyBytes(rm), server.FriendlyBytes(rd))
}
acc, _ := s.LookupOrRegisterAccount("$FOO")
if err := acc.EnableJetStream(nil); err != nil {
t.Fatalf("Did not expect error on enabling account: %v", err)
}
if na := s.JetStreamNumAccounts(); na != 1 {
t.Fatalf("Expected 1 account, got %d", na)
}
if err := acc.DisableJetStream(); err != nil {
t.Fatalf("Did not expect error on disabling account: %v", err)
}
if na := s.JetStreamNumAccounts(); na != 0 {
t.Fatalf("Expected no accounts, got %d", na)
}
// We should get error if disabling something not enabled.
acc, _ = s.LookupOrRegisterAccount("$BAR")
if err := acc.DisableJetStream(); err == nil {
t.Fatalf("Expected error on disabling account that was not enabled")
}
// Should get an error for trying to enable a non-registered account.
acc = server.NewAccount("$BAZ")
if err := acc.EnableJetStream(nil); err == nil {
t.Fatalf("Expected error on enabling account that was not registered")
}
}
func TestJetStreamAddStream(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.MemoryStorage,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.FileStorage,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
nc.Publish("foo", []byte("Hello World!"))
nc.Flush()
state := mset.State()
if state.Msgs != 1 {
t.Fatalf("Expected 1 message, got %d", state.Msgs)
}
if state.Bytes == 0 {
t.Fatalf("Expected non-zero bytes")
}
nc.Publish("foo", []byte("Hello World Again!"))
nc.Flush()
state = mset.State()
if state.Msgs != 2 {
t.Fatalf("Expected 2 messages, got %d", state.Msgs)
}
if err := mset.Delete(); err != nil {
t.Fatalf("Got an error deleting the stream: %v", err)
}
})
}
}
func TestJetStreamAddStreamDiscardNew(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
MaxMsgs: 10,
MaxBytes: 4096,
Discard: server.DiscardNew,
Storage: server.MemoryStorage,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
MaxMsgs: 10,
MaxBytes: 4096,
Discard: server.DiscardNew,
Storage: server.FileStorage,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
subj := "foo"
toSend := 10
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, subj, fmt.Sprintf("MSG: %d", i+1))
}
// We expect this one to fail due to discard policy.
resp, _ := nc.Request(subj, []byte("discard me"), 100*time.Millisecond)
if resp == nil {
t.Fatalf("No response, possible timeout?")
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error.Description != "maximum messages exceeded" || pa.Stream != "foo" {
t.Fatalf("Expected to get an error about maximum messages, got %q", resp.Data)
}
// Now do bytes.
mset.Purge()
big := make([]byte, 8192)
resp, _ = nc.Request(subj, big, 100*time.Millisecond)
if resp == nil {
t.Fatalf("No response, possible timeout?")
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error.Description != "maximum bytes exceeded" || pa.Stream != "foo" {
t.Fatalf("Expected to get an error about maximum bytes, got %q", resp.Data)
}
})
}
}
func TestJetStreamAutoTuneFSConfig(t *testing.T) {
s := RunRandClientPortServer()
defer s.Shutdown()
jsconfig := &server.JetStreamConfig{MaxMemory: -1, MaxStore: 128 * 1024 * 1024 * 1024 * 1024}
if err := s.EnableJetStream(jsconfig); err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
maxMsgSize := int32(512)
streamConfig := func(name string, maxMsgs, maxBytes int64) *server.StreamConfig {
t.Helper()
cfg := &server.StreamConfig{Name: name, MaxMsgSize: maxMsgSize, Storage: server.FileStorage}
if maxMsgs > 0 {
cfg.MaxMsgs = maxMsgs
}
if maxBytes > 0 {
cfg.MaxBytes = maxBytes
}
return cfg
}
acc := s.GlobalAccount()
testBlkSize := func(subject string, maxMsgs, maxBytes int64, expectedBlkSize uint64) {
t.Helper()
mset, err := acc.AddStream(streamConfig(subject, maxMsgs, maxBytes))
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
fsCfg, err := mset.FileStoreConfig()
if err != nil {
t.Fatalf("Unexpected error retrieving file store: %v", err)
}
if fsCfg.BlockSize != expectedBlkSize {
t.Fatalf("Expected auto tuned block size to be %d, got %d", expectedBlkSize, fsCfg.BlockSize)
}
}
testBlkSize("foo", 1, 0, server.FileStoreMinBlkSize)
testBlkSize("foo", 1, 512, server.FileStoreMinBlkSize)
testBlkSize("foo", 1, 1024*1024, 262200)
testBlkSize("foo", 1, 8*1024*1024, 2097200)
testBlkSize("foo_bar_baz", -1, 32*1024*1024*1024*1024, server.FileStoreMaxBlkSize)
}
func TestJetStreamPubAck(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
sname := "PUBACK"
acc := s.GlobalAccount()
mconfig := &server.StreamConfig{Name: sname, Subjects: []string{"foo"}, Storage: server.MemoryStorage}
mset, err := acc.AddStream(mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
checkRespDetails := func(resp *nats.Msg, err error, seq uint64) {
if err != nil {
t.Fatalf("Unexpected error from send stream msg: %v", err)
}
if resp == nil {
t.Fatalf("No response from send stream msg")
}
pa := getPubAckResponse(resp.Data)
if pa == nil || pa.Error != nil {
t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data)
}
if pa.Stream != sname {
t.Fatalf("Expected %q for stream name, got %q", sname, pa.Stream)
}
if pa.Sequence != seq {
t.Fatalf("Expected %d for sequence, got %d", seq, pa.Sequence)
}
}
// Send messages and make sure pubAck details are correct.
for i := uint64(1); i <= 1000; i++ {
resp, err := nc.Request("foo", []byte("HELLO"), 100*time.Millisecond)
checkRespDetails(resp, err, i)
}
}
func TestJetStreamConsumerWithStartTime(t *testing.T) {
subj := "my_stream"
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: subj, Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: subj, Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
fsCfg := &server.FileStoreConfig{BlockSize: 100}
mset, err := s.GlobalAccount().AddStreamWithStore(c.mconfig, fsCfg)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 250
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, subj, fmt.Sprintf("MSG: %d", i+1))
}
time.Sleep(10 * time.Millisecond)
startTime := time.Now()
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, subj, fmt.Sprintf("MSG: %d", i+1))
}
if msgs := mset.State().Msgs; msgs != uint64(toSend*2) {
t.Fatalf("Expected %d messages, got %d", toSend*2, msgs)
}
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d",
DeliverPolicy: server.DeliverByStartTime,
OptStartTime: &startTime,
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
msg, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sseq, dseq, _, _, _ := o.ReplyInfo(msg.Reply)
if dseq != 1 {
t.Fatalf("Expected delivered seq of 1, got %d", dseq)
}
if sseq != uint64(toSend+1) {
t.Fatalf("Expected to get store seq of %d, got %d", toSend+1, sseq)
}
})
}
}
// Test for https://github.com/nats-io/jetstream/issues/143
func TestJetStreamConsumerWithMultipleStartOptions(t *testing.T) {
subj := "my_stream"
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: subj, Subjects: []string{"foo.>"}, Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: subj, Subjects: []string{"foo.>"}, Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
obsReq := server.CreateConsumerRequest{
Stream: subj,
Config: server.ConsumerConfig{
Durable: "d",
DeliverPolicy: server.DeliverLast,
FilterSubject: "foo.22",
AckPolicy: server.AckExplicit,
},
}
req, err := json.Marshal(obsReq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, err = nc.Request(fmt.Sprintf(server.JSApiConsumerCreateT, subj), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
nc.Close()
s.Shutdown()
})
}
}
func TestJetStreamConsumerMaxDeliveries(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Queue up our work item.
sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!")
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
maxDeliver := 5
ackWait := 10 * time.Millisecond
o, err := mset.AddConsumer(&server.ConsumerConfig{
DeliverSubject: sub.Subject,
AckPolicy: server.AckExplicit,
AckWait: ackWait,
MaxDeliver: maxDeliver,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
// Wait for redeliveries to pile up.
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxDeliver {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, maxDeliver)
}
return nil
})
// Now wait a bit longer and make sure we do not have more than maxDeliveries.
time.Sleep(2 * ackWait)
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxDeliver {
t.Fatalf("Did not receive correct number of messages: %d vs %d", nmsgs, maxDeliver)
}
})
}
}
func TestJetStreamPullConsumerDelayedFirstPullWithReplayOriginal(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Queue up our work item.
sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!")
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d",
AckPolicy: server.AckExplicit,
ReplayPolicy: server.ReplayOriginal,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
// Force delay here which triggers the bug.
time.Sleep(250 * time.Millisecond)
if _, err = nc.Request(o.RequestNextMsgSubject(), nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
})
}
}
func TestJetStreamConsumerAckFloorFill(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MQ", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "MQ", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
for i := 1; i <= 4; i++ {
sendStreamMsg(t, nc, c.mconfig.Name, fmt.Sprintf("msg-%d", i))
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d",
DeliverSubject: sub.Subject,
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
var first *nats.Msg
for i := 1; i <= 3; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error receiving message %d: %v", i, err)
}
// Don't ack 1 or 4.
if i == 1 {
first = m
} else if i == 2 || i == 3 {
m.Respond(nil)
}
}
nc.Flush()
if info := o.Info(); info.AckFloor.Consumer != 0 {
t.Fatalf("Expected the ack floor to be 0, got %d", info.AckFloor.Consumer)
}
// Now ack first, should move ack floor to 3.
first.Respond(nil)
nc.Flush()
if info := o.Info(); info.AckFloor.Consumer != 3 {
t.Fatalf("Expected the ack floor to be 3, got %d", info.AckFloor.Consumer)
}
})
}
}
func TestJetStreamNoPanicOnRaceBetweenShutdownAndConsumerDelete(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_STREAM", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "MY_STREAM", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
var cons []*server.Consumer
for i := 0; i < 100; i++ {
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: fmt.Sprintf("d%d", i),
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
cons = append(cons, o)
}
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
for _, c := range cons {
c.Delete()
}
}()
time.Sleep(10 * time.Millisecond)
s.Shutdown()
})
}
}
func TestJetStreamAddStreamMaxMsgSize(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.MemoryStorage,
MaxMsgSize: 22,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.FileStorage,
MaxMsgSize: 22,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
if _, err := nc.Request("foo", []byte("Hello World!"), time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
tooBig := []byte("1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ")
resp, err := nc.Request("foo", tooBig, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error.Description != "message size exceeds maximum allowed" {
t.Fatalf("Expected to get an error for maximum message size, got %q", pa.Error)
}
})
}
}
func TestJetStreamAddStreamCanonicalNames(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
acc := s.GlobalAccount()
expectErr := func(_ *server.Stream, err error) {
t.Helper()
if err == nil || !strings.Contains(err.Error(), "can not contain") {
t.Fatalf("Expected error but got none")
}
}
expectErr(acc.AddStream(&server.StreamConfig{Name: "foo.bar"}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "foo.bar."}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "foo.*"}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "foo.>"}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "*"}))
expectErr(acc.AddStream(&server.StreamConfig{Name: ">"}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "*>"}))
}
func TestJetStreamAddStreamBadSubjects(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
// Client for API requests.
nc := clientConnectToServer(t, s)
defer nc.Close()
expectAPIErr := func(cfg server.StreamConfig) {
t.Helper()
req, err := json.Marshal(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, _ := nc.Request(fmt.Sprintf(server.JSApiStreamCreateT, cfg.Name), req, time.Second)
var scResp server.JSApiStreamCreateResponse
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
e := scResp.Error
if e == nil || e.Code != 500 || e.Description != server.ErrMalformedSubject.Error() {
t.Fatalf("Did not get proper error response: %+v", e)
}
}
expectAPIErr(server.StreamConfig{Name: "MyStream", Storage: server.MemoryStorage, Subjects: []string{"foo.bar."}})
expectAPIErr(server.StreamConfig{Name: "MyStream", Storage: server.MemoryStorage, Subjects: []string{".."}})
expectAPIErr(server.StreamConfig{Name: "MyStream", Storage: server.MemoryStorage, Subjects: []string{".*"}})
expectAPIErr(server.StreamConfig{Name: "MyStream", Storage: server.MemoryStorage, Subjects: []string{".>"}})
}
func TestJetStreamAddStreamMaxConsumers(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
cfg := &server.StreamConfig{
Name: "MAXC",
Storage: server.MemoryStorage,
Subjects: []string{"in.maxc.>"},
MaxConsumers: 1,
}
acc := s.GlobalAccount()
mset, err := acc.AddStream(cfg)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
if mset.Config().MaxConsumers != 1 {
t.Fatalf("Expected 1 MaxConsumers, got %d", mset.Config().MaxConsumers)
}
}
func TestJetStreamAddStreamOverlappingSubjects(t *testing.T) {
mconfig := &server.StreamConfig{
Name: "ok",
Storage: server.MemoryStorage,
Subjects: []string{"foo", "bar", "baz.*", "foo.bar.baz.>"},
}
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
mset, err := acc.AddStream(mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
expectErr := func(_ *server.Stream, err error) {
t.Helper()
if err == nil || !strings.Contains(err.Error(), "subjects overlap") {
t.Fatalf("Expected error but got none")
}
}
// Test that any overlapping subjects will fail.
expectErr(acc.AddStream(&server.StreamConfig{Name: "foo"}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "a", Subjects: []string{"baz", "bar"}}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "b", Subjects: []string{">"}}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "c", Subjects: []string{"baz.33"}}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "d", Subjects: []string{"*.33"}}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "e", Subjects: []string{"*.>"}}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "f", Subjects: []string{"foo.bar", "*.bar.>"}}))
}
func TestJetStreamAddStreamOverlapWithJSAPISubjects(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
expectErr := func(_ *server.Stream, err error) {
t.Helper()
if err == nil || !strings.Contains(err.Error(), "subjects overlap") {
t.Fatalf("Expected error but got none")
}
}
// Test that any overlapping subjects with our JSAPI should fail.
expectErr(acc.AddStream(&server.StreamConfig{Name: "a", Subjects: []string{"$JS.API.foo", "$JS.API.bar"}}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "b", Subjects: []string{"$JS.API.>"}}))
expectErr(acc.AddStream(&server.StreamConfig{Name: "c", Subjects: []string{"$JS.API.*"}}))
// Events and Advisories etc should be ok.
if _, err := acc.AddStream(&server.StreamConfig{Name: "a", Subjects: []string{"$JS.EVENT.>"}}); err != nil {
t.Fatalf("Expected this to work: %v", err)
}
}
func TestJetStreamAddStreamSameConfigOK(t *testing.T) {
mconfig := &server.StreamConfig{
Name: "ok",
Subjects: []string{"foo", "bar", "baz.*", "foo.bar.baz.>"},
Storage: server.MemoryStorage,
}
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
mset, err := acc.AddStream(mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Adding again with same config should be idempotent.
if _, err = acc.AddStream(mconfig); err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
}
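// sendStreamMsg publishes msg to subject as a request and fails the test unless a valid JetStream
// pub ack comes back; it returns the ack.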
func sendStreamMsg(t *testing.T, nc *nats.Conn, subject, msg string) *server.PubAck {
t.Helper()
resp, _ := nc.Request(subject, []byte(msg), 500*time.Millisecond)
if resp == nil {
t.Fatalf("No response for %q, possible timeout?", msg)
}
pa := getPubAckResponse(resp.Data)
if pa == nil || pa.Error != nil {
t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data)
}
return pa.PubAck
}
func TestJetStreamBasicAckPublish(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "foo", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "foo", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
for i := 0; i < 50; i++ {
sendStreamMsg(t, nc, "foo.bar", "Hello World!")
}
state := mset.State()
if state.Msgs != 50 {
t.Fatalf("Expected 50 messages, got %d", state.Msgs)
}
})
}
}
func TestJetStreamStateTimestamps(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "foo", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "foo", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
start := time.Now()
delay := 250 * time.Millisecond
sendStreamMsg(t, nc, "foo.bar", "Hello World!")
time.Sleep(delay)
sendStreamMsg(t, nc, "foo.bar", "Hello World Again!")
state := mset.State()
if state.FirstTime.Before(start) {
t.Fatalf("Unexpected first message timestamp: %v", state.FirstTime)
}
if state.LastTime.Before(start.Add(delay)) {
t.Fatalf("Unexpected last message timestamp: %v", state.LastTime)
}
})
}
}
func TestJetStreamNoAckStream(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "foo", Storage: server.MemoryStorage, NoAck: true}},
{"FileStore", &server.StreamConfig{Name: "foo", Storage: server.FileStorage, NoAck: true}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
// We can use NoAck to suppress acks even when reply subjects are present.
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
if _, err := nc.Request("foo", []byte("Hello World!"), 25*time.Millisecond); err != nats.ErrTimeout {
t.Fatalf("Expected a timeout error and no response with acks suppressed")
}
state := mset.State()
if state.Msgs != 1 {
t.Fatalf("Expected 1 message, got %d", state.Msgs)
}
})
}
}
func TestJetStreamCreateConsumer(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "foo", Storage: server.MemoryStorage, Subjects: []string{"foo", "bar"}}},
{"FileStore", &server.StreamConfig{Name: "foo", Storage: server.FileStorage, Subjects: []string{"foo", "bar"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Check for basic errors.
if _, err := mset.AddConsumer(nil); err == nil {
t.Fatalf("Expected an error for no config")
}
// No deliver subject means it is in pull mode; work queue / pull mode
// requires an explicit ack policy.
if _, err := mset.AddConsumer(&server.ConsumerConfig{}); err == nil {
t.Fatalf("Expected an error on work queue / pull mode without explicit ack mode")
}
// Check for delivery subject errors.
// Literal delivery subject required.
if _, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: "foo.*"}); err == nil {
t.Fatalf("Expected an error on bad delivery subject")
}
// Check for cycles
if _, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: "foo"}); err == nil {
t.Fatalf("Expected an error on delivery subject that forms a cycle")
}
if _, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: "bar"}); err == nil {
t.Fatalf("Expected an error on delivery subject that forms a cycle")
}
if _, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: "*"}); err == nil {
t.Fatalf("Expected an error on delivery subject that forms a cycle")
}
// StartPosition conflicts
now := time.Now()
if _, err := mset.AddConsumer(&server.ConsumerConfig{
DeliverSubject: "A",
OptStartSeq: 1,
OptStartTime: &now,
}); err == nil {
t.Fatalf("Expected an error on start position conflicts")
}
if _, err := mset.AddConsumer(&server.ConsumerConfig{
DeliverSubject: "A",
OptStartTime: &now,
}); err == nil {
t.Fatalf("Expected an error on start position conflicts")
}
// Non-Durables need to have subscription to delivery subject.
delivery := nats.NewInbox()
if _, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery}); err == nil {
t.Fatalf("Expected an error on unsubscribed delivery subject")
}
// Pull-based consumers are required to be durable since we do not know when they should
// be cleaned up.
if _, err := mset.AddConsumer(&server.ConsumerConfig{AckPolicy: server.AckExplicit}); err == nil {
t.Fatalf("Expected an error on pull-based that is non-durable.")
}
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync(delivery)
defer sub.Unsubscribe()
nc.Flush()
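// Flush so the subscription interest is registered on the server before we add the consumer.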
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
if err := mset.DeleteConsumer(o); err != nil {
t.Fatalf("Expected no error on delete, got %v", err)
}
// Now let's check that durables can be created and a duplicate call to add will be ok.
dcfg := &server.ConsumerConfig{
Durable: "ddd",
DeliverSubject: delivery,
AckPolicy: server.AckAll,
}
if _, err = mset.AddConsumer(dcfg); err != nil {
t.Fatalf("Unexpected error creating consumer: %v", err)
}
if _, err = mset.AddConsumer(dcfg); err != nil {
t.Fatalf("Unexpected error creating second identical consumer: %v", err)
}
// Now test that we can change the delivery subject if that is the only thing
// that has changed and we are not active.
sub.Unsubscribe()
sub, _ = nc.SubscribeSync("d.d.d")
nc.Flush()
defer sub.Unsubscribe()
dcfg.DeliverSubject = "d.d.d"
if _, err = mset.AddConsumer(dcfg); err != nil {
t.Fatalf("Unexpected error creating third consumer with just deliver subject changed: %v", err)
}
})
}
}
func TestJetStreamBasicDeliverSubject(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MSET", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "MSET", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 100
sendSubj := "foo.bar"
for i := 1; i <= toSend; i++ {
sendStreamMsg(t, nc, sendSubj, strconv.Itoa(i))
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
// Now create a consumer. Use a different connection.
nc2 := clientConnectToServer(t, s)
defer nc2.Close()
sub, _ := nc2.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc2.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
// Check for our messages.
checkMsgs := func(seqOff int) {
t.Helper()
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
// Now let's check the messages
for i := 0; i < toSend; i++ {
m, _ := sub.NextMsg(time.Second)
// JetStream will have the subject match the stream subject, not delivery subject.
if m.Subject != sendSubj {
t.Fatalf("Expected original subject of %q, but got %q", sendSubj, m.Subject)
}
// Now check that reply subject exists and has a sequence as the last token.
if seq := o.SeqFromReply(m.Reply); seq != uint64(i+seqOff) {
t.Fatalf("Expected sequence of %d , got %d", i+seqOff, seq)
}
// Ack the message here.
m.Respond(nil)
}
}
checkMsgs(1)
// Now send more and make sure delivery picks back up.
for i := toSend + 1; i <= toSend*2; i++ {
sendStreamMsg(t, nc, sendSubj, strconv.Itoa(i))
}
state = mset.State()
if state.Msgs != uint64(toSend*2) {
t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs)
}
checkMsgs(101)
checkSubEmpty := func() {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 {
t.Fatalf("Expected sub to have no pending")
}
}
checkSubEmpty()
o.Delete()
// Now check for deliver last, deliver new and deliver by seq.
o, err = mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject, DeliverPolicy: server.DeliverLast})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Did not get expected message, got %v", err)
}
// All Consumers start with sequence #1.
if seq := o.SeqFromReply(m.Reply); seq != 1 {
t.Fatalf("Expected sequence to be 1, but got %d", seq)
}
// Check that it is the last msg we sent though.
if mseq, _ := strconv.Atoi(string(m.Data)); mseq != 200 {
t.Fatalf("Expected message sequence to be 200, but got %d", mseq)
}
checkSubEmpty()
o.Delete()
// Make sure we only got one message.
if m, err := sub.NextMsg(5 * time.Millisecond); err == nil {
t.Fatalf("Expected no msg, got %+v", m)
}
checkSubEmpty()
o.Delete()
// Now try by sequence number.
o, err = mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject, DeliverPolicy: server.DeliverByStartSequence, OptStartSeq: 101})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
checkMsgs(1)
// Now do push based queue-subscribers
sub, _ = nc2.QueueSubscribeSync("_qg_", "dev")
defer sub.Unsubscribe()
nc2.Flush()
o, err = mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
// Since we sent another batch, the check needs to look for 2x.
toSend *= 2
checkMsgs(1)
})
}
}
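// workerModeConfig returns a durable, pull-based consumer config (no deliver subject) with explicit acks, i.e. work queue semantics.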
func workerModeConfig(name string) *server.ConsumerConfig {
return &server.ConsumerConfig{Durable: name, AckPolicy: server.AckExplicit}
}
func TestJetStreamBasicWorkQueue(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.MemoryStorage, Subjects: []string{"foo", "bar"}}},
{"FileStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.FileStorage, Subjects: []string{"foo", "bar"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Create basic work queue mode consumer.
oname := "WQ"
o, err := mset.AddConsumer(workerModeConfig(oname))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
if o.NextSeq() != 1 {
t.Fatalf("Expected to be starting at sequence 1")
}
nc := clientConnectWithOldRequest(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 100
sendSubj := "bar"
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, sendSubj, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
getNext := func(seqno int) {
t.Helper()
nextMsg, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error for seq %d: %v", seqno, err)
}
if nextMsg.Subject != "bar" {
t.Fatalf("Expected subject of %q, got %q", "bar", nextMsg.Subject)
}
if seq := o.SeqFromReply(nextMsg.Reply); seq != uint64(seqno) {
t.Fatalf("Expected sequence of %d , got %d", seqno, seq)
}
}
// Make sure we can get the messages already there.
for i := 1; i <= toSend; i++ {
getNext(i)
}
// Now we want to make sure we can get a message that is published to the stream
// as we are waiting for it.
nextDelay := 50 * time.Millisecond
go func() {
time.Sleep(nextDelay)
sendStreamMsg(t, nc, sendSubj, "Hello World!")
}()
start := time.Now()
getNext(toSend + 1)
if time.Since(start) < nextDelay {
t.Fatalf("Received message too quickly")
}
// Now do same thing but combine waiting for new ones with sending.
go func() {
time.Sleep(nextDelay)
for i := 0; i < toSend; i++ {
nc.Request(sendSubj, []byte("Hello World!"), 50*time.Millisecond)
}
}()
for i := toSend + 2; i < toSend*2+2; i++ {
getNext(i)
}
})
}
}
func TestJetStreamWorkQueueMaxWaiting(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.MemoryStorage, Subjects: []string{"foo", "bar"}}},
{"FileStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.FileStorage, Subjects: []string{"foo", "bar"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Make sure these cases fail
cfg := &server.ConsumerConfig{Durable: "foo", AckPolicy: server.AckExplicit, MaxWaiting: 10, DeliverSubject: "_INBOX.22"}
if _, err := mset.AddConsumer(cfg); err == nil {
t.Fatalf("Expected an error with MaxWaiting set on non-pull based consumer")
}
cfg = &server.ConsumerConfig{Durable: "foo", AckPolicy: server.AckExplicit, MaxWaiting: -1}
if _, err := mset.AddConsumer(cfg); err == nil {
t.Fatalf("Expected an error with MaxWaiting being negative")
}
// Create basic work queue mode consumer.
wcfg := workerModeConfig("MAXWQ")
o, err := mset.AddConsumer(wcfg)
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
// Make sure we set default correctly.
if cfg := o.Config(); cfg.MaxWaiting != server.JSWaitQueueDefaultMax {
t.Fatalf("Expected default max waiting to have been set to %d, got %d", server.JSWaitQueueDefaultMax, cfg.MaxWaiting)
}
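// expectWaiting polls the consumer info until the number of waiting pull requests matches expected.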
expectWaiting := func(expected int) {
t.Helper()
checkFor(t, time.Second, 25*time.Millisecond, func() error {
if oi := o.Info(); oi.NumWaiting != expected {
return fmt.Errorf("Expected %d waiting, got %d", expected, oi.NumWaiting)
}
return nil
})
}
nc := clientConnectWithOldRequest(t, s)
defer nc.Close()
// Subscribe with a wildcard so one subscription catches all reply subjects, like a muxed new-style INBOX.
sub, _ := nc.SubscribeSync("req.*")
defer sub.Unsubscribe()
nc.Flush()
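// checkSubPending waits until the subscription reports exactly numExpected pending messages.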
checkSubPending := func(numExpected int) {
t.Helper()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
getSubj := o.RequestNextMsgSubject()
// Queue up JSWaitQueueDefaultMax requests.
for i := 0; i < server.JSWaitQueueDefaultMax; i++ {
nc.PublishRequest(getSubj, fmt.Sprintf("req.%d", i), nil)
}
expectWaiting(server.JSWaitQueueDefaultMax)
// When we submit one more request it is accepted into the wait queue and the oldest
// waiting request is dropped, so our client-side request simply times out. The dropped
// request's reply subject should be notified that it is now stale.
if _, err := nc.Request(getSubj, nil, 10*time.Millisecond); err != nats.ErrTimeout {
t.Fatalf("Expected timeout error, got: %v", err)
}
checkSubPending(1)
m, _ := sub.NextMsg(0)
// Make sure this is an alert that tells us our request is now stale.
if m.Header.Get("Status") != "408" {
t.Fatalf("Expected a 408 status code, got %q", m.Header.Get("Status"))
}
sendStreamMsg(t, nc, "foo", "Hello World!")
sendStreamMsg(t, nc, "bar", "Hello World!")
expectWaiting(server.JSWaitQueueDefaultMax - 2)
})
}
}
func TestJetStreamWorkQueueWrapWaiting(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.MemoryStorage, Subjects: []string{"foo", "bar"}}},
{"FileStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.FileStorage, Subjects: []string{"foo", "bar"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
maxWaiting := 8
wcfg := workerModeConfig("WRAP")
wcfg.MaxWaiting = maxWaiting
o, err := mset.AddConsumer(wcfg)
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
getSubj := o.RequestNextMsgSubject()
expectWaiting := func(expected int) {
t.Helper()
checkFor(t, time.Second, 25*time.Millisecond, func() error {
if oi := o.Info(); oi.NumWaiting != expected {
return fmt.Errorf("Expected %d waiting, got %d", expected, oi.NumWaiting)
}
return nil
})
}
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync("req.*")
defer sub.Unsubscribe()
nc.Flush()
// Fill up waiting.
for i := 0; i < maxWaiting; i++ {
nc.PublishRequest(getSubj, fmt.Sprintf("req.%d", i), nil)
}
expectWaiting(maxWaiting)
// Now use 1/2 of the waiting.
for i := 0; i < maxWaiting/2; i++ {
sendStreamMsg(t, nc, "foo", "Hello World!")
}
expectWaiting(maxWaiting / 2)
// Now add in two (2) more pull requests.
for i := maxWaiting; i < maxWaiting+2; i++ {
nc.PublishRequest(getSubj, fmt.Sprintf("req.%d", i), nil)
}
expectWaiting(maxWaiting/2 + 2)
// Now use second 1/2 of the waiting and the 2 extra.
for i := 0; i < maxWaiting/2+2; i++ {
sendStreamMsg(t, nc, "bar", "Hello World!")
}
expectWaiting(0)
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxWaiting+2 {
return fmt.Errorf("Expected sub to have %d pending, got %d", maxWaiting+2, nmsgs)
}
return nil
})
})
}
}
func TestJetStreamWorkQueueRequest(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.MemoryStorage, Subjects: []string{"foo", "bar"}}},
{"FileStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.FileStorage, Subjects: []string{"foo", "bar"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
o, err := mset.AddConsumer(workerModeConfig("WRAP"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 25
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "bar", "Hello World!")
}
reply := "_.consumer._"
sub, _ := nc.SubscribeSync(reply)
defer sub.Unsubscribe()
getSubj := o.RequestNextMsgSubject()
checkSubPending := func(numExpected int) {
t.Helper()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
// Create a formal request object.
req := &server.JSApiConsumerGetNextRequest{Batch: toSend}
jreq, _ := json.Marshal(req)
nc.PublishRequest(getSubj, reply, jreq)
checkSubPending(toSend)
// Now check that we can ask for NoWait
req.Batch = 1
req.NoWait = true
jreq, _ = json.Marshal(req)
resp, err := nc.Request(getSubj, jreq, 50*time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if status := resp.Header.Get("Status"); !strings.HasPrefix(status, "404") {
t.Fatalf("Expected status code of 404")
}
// Load up more messages.
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo", "Hello World!")
}
// Now we will ask for a batch larger than what is queued up.
req.Batch = toSend + 10
req.NoWait = true
jreq, _ = json.Marshal(req)
nc.PublishRequest(getSubj, reply, jreq)
// We should now have 2 * toSend + the 404 message.
checkSubPending(2*toSend + 1)
for i := 0; i < 2*toSend+1; i++ {
sub.NextMsg(time.Millisecond)
}
checkSubPending(0)
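// Clear the stream so the next request has nothing to deliver and can expire.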
mset.Purge()
// Now do expiration
req.Batch = 1
req.NoWait = false
req.Expires = time.Now().Add(10 * time.Millisecond)
jreq, _ = json.Marshal(req)
nc.PublishRequest(getSubj, reply, jreq)
// Let it expire
time.Sleep(20 * time.Millisecond)
// Send a few more messages. These should not be delivered to the sub.
sendStreamMsg(t, nc, "foo", "Hello World!")
sendStreamMsg(t, nc, "bar", "Hello World!")
// We will have an alert here.
checkSubPending(1)
m, _ := sub.NextMsg(0)
// Make sure this is an alert that tells us our request is now stale.
if m.Header.Get("Status") != "408" {
t.Fatalf("Expected a 408 status code, got %q", m.Header.Get("Status"))
}
})
}
}
func TestJetStreamSubjectFiltering(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MSET", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "MSET", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 50
subjA := "foo.A"
subjB := "foo.B"
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, subjA, "Hello World!")
sendStreamMsg(t, nc, subjB, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend*2) {
t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs)
}
delivery := nats.NewInbox()
sub, _ := nc.SubscribeSync(delivery)
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery, FilterSubject: subjB})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
// Now let's check the messages
for i := 1; i <= toSend; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// JetStream will have the subject match the stream subject, not delivery subject.
// We want these to only be subjB.
if m.Subject != subjB {
t.Fatalf("Expected original subject of %q, but got %q", subjB, m.Subject)
}
// Now check that reply subject exists and has a sequence as the last token.
if seq := o.SeqFromReply(m.Reply); seq != uint64(i) {
t.Fatalf("Expected sequence of %d , got %d", i, seq)
}
// Ack the message here.
m.Respond(nil)
}
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 {
t.Fatalf("Expected sub to have no pending")
}
})
}
}
func TestJetStreamWorkQueueSubjectFiltering(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 50
subjA := "foo.A"
subjB := "foo.B"
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, subjA, "Hello World!")
sendStreamMsg(t, nc, subjB, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend*2) {
t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs)
}
oname := "WQ"
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: oname, FilterSubject: subjA, AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
if o.NextSeq() != 1 {
t.Fatalf("Expected to be starting at sequence 1")
}
getNext := func(seqno int) {
t.Helper()
nextMsg, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if nextMsg.Subject != subjA {
t.Fatalf("Expected subject of %q, got %q", subjA, nextMsg.Subject)
}
if seq := o.SeqFromReply(nextMsg.Reply); seq != uint64(seqno) {
t.Fatalf("Expected sequence of %d , got %d", seqno, seq)
}
nextMsg.Respond(nil)
}
// Make sure we can get the messages already there.
for i := 1; i <= toSend; i++ {
getNext(i)
}
})
}
}
func TestJetStreamWildcardSubjectFiltering(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "ORDERS", Storage: server.MemoryStorage, Subjects: []string{"orders.*.*"}}},
{"FileStore", &server.StreamConfig{Name: "ORDERS", Storage: server.FileStorage, Subjects: []string{"orders.*.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 100
for i := 1; i <= toSend; i++ {
subj := fmt.Sprintf("orders.%d.%s", i, "NEW")
sendStreamMsg(t, nc, subj, "new order")
}
// Randomly move 25 to shipped.
toShip := 25
shipped := make(map[int]bool)
for i := 0; i < toShip; {
orderId := rand.Intn(toSend-1) + 1
if shipped[orderId] {
continue
}
subj := fmt.Sprintf("orders.%d.%s", orderId, "SHIPPED")
sendStreamMsg(t, nc, subj, "shipped order")
shipped[orderId] = true
i++
}
state := mset.State()
if state.Msgs != uint64(toSend+toShip) {
t.Fatalf("Expected %d messages, got %d", toSend+toShip, state.Msgs)
}
delivery := nats.NewInbox()
sub, _ := nc.SubscribeSync(delivery)
defer sub.Unsubscribe()
nc.Flush()
// Get all shipped.
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery, FilterSubject: "orders.*.SHIPPED"})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
checkFor(t, time.Second, 25*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toShip {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toShip)
}
return nil
})
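// Drain the delivered SHIPPED messages before creating the next consumer.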
for nmsgs, _, _ := sub.Pending(); nmsgs > 0; nmsgs, _, _ = sub.Pending() {
sub.NextMsg(time.Second)
}
if nmsgs, _, _ := sub.Pending(); nmsgs != 0 {
t.Fatalf("Expected no pending, got %d", nmsgs)
}
// Get all new
o, err = mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery, FilterSubject: "orders.*.NEW"})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
checkFor(t, time.Second, 25*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
for nmsgs, _, _ := sub.Pending(); nmsgs > 0; nmsgs, _, _ = sub.Pending() {
sub.NextMsg(time.Second)
}
if nmsgs, _, _ := sub.Pending(); nmsgs != 0 {
t.Fatalf("Expected no pending, got %d", nmsgs)
}
// Now grab a single orderId that has shipped, so we should have two messages.
var orderId int
for orderId = range shipped {
break
}
subj := fmt.Sprintf("orders.%d.*", orderId)
o, err = mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery, FilterSubject: subj})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
checkFor(t, time.Second, 25*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 2 {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, 2)
}
return nil
})
})
}
}
func TestJetStreamWorkQueueAckAndNext(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.MemoryStorage, Subjects: []string{"foo", "bar"}}},
{"FileStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.FileStorage, Subjects: []string{"foo", "bar"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Create basic work queue mode consumer.
oname := "WQ"
o, err := mset.AddConsumer(workerModeConfig(oname))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
if o.NextSeq() != 1 {
t.Fatalf("Expected to be starting at sequence 1")
}
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 100
sendSubj := "bar"
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, sendSubj, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
// Kick things off.
// For normal work queue semantics, you send requests to the subject with stream and consumer name.
// We will do this to start it off then use ack+next to get other messages.
nc.PublishRequest(o.RequestNextMsgSubject(), sub.Subject, nil)
for i := 0; i < toSend; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error waiting for messages: %v", err)
}
if !bytes.Equal(m.Data, []byte("Hello World!")) {
t.Fatalf("Got an invalid message from the stream: %q", m.Data)
}
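// Responding with AckNext acks this message and requests the next one be sent to our reply subject.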
nc.PublishRequest(m.Reply, sub.Subject, server.AckNext)
}
})
}
}
func TestJetStreamWorkQueueRequestBatch(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.MemoryStorage, Subjects: []string{"foo", "bar"}}},
{"FileStore", &server.StreamConfig{Name: "MY_MSG_SET", Storage: server.FileStorage, Subjects: []string{"foo", "bar"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Create basic work queue mode consumer.
oname := "WQ"
o, err := mset.AddConsumer(workerModeConfig(oname))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
if o.NextSeq() != 1 {
t.Fatalf("Expected to be starting at sequence 1")
}
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 100
sendSubj := "bar"
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, sendSubj, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
// For normal work queue semantics, you send requests to the subject with stream and consumer name.
// We will do this to start it off then use ack+next to get other messages.
// Kick things off with batch size of 50.
batchSize := 50
nc.PublishRequest(o.RequestNextMsgSubject(), sub.Subject, []byte(strconv.Itoa(batchSize)))
// We should receive batchSize with no acks or additional requests.
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != batchSize {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, batchSize)
}
return nil
})
// Now queue up the request without messages and add them after.
sub, _ = nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
mset.Purge()
nc.PublishRequest(o.RequestNextMsgSubject(), sub.Subject, []byte(strconv.Itoa(batchSize)))
nc.Flush() // Make sure it's registered.
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, sendSubj, "Hello World!")
}
// We should receive batchSize with no acks or additional requests.
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != batchSize {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, batchSize)
}
return nil
})
})
}
}
func TestJetStreamWorkQueueRetentionStream(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore", mconfig: &server.StreamConfig{
Name: "MWQ",
Storage: server.MemoryStorage,
Subjects: []string{"MY_WORK_QUEUE.*"},
Retention: server.WorkQueuePolicy},
},
{name: "FileStore", mconfig: &server.StreamConfig{
Name: "MWQ",
Storage: server.FileStorage,
Subjects: []string{"MY_WORK_QUEUE.*"},
Retention: server.WorkQueuePolicy},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// This type of stream has restrictions which we will test here.
// DeliverAll is the only start mode allowed.
if _, err := mset.AddConsumer(&server.ConsumerConfig{DeliverPolicy: server.DeliverLast}); err == nil {
t.Fatalf("Expected an error with anything but DeliverAll")
}
// We will create a non-partitioned consumer. This should succeed.
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "PBO", AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
// Now if we create another it should fail; only one non-partitioned consumer is allowed.
if _, err := mset.AddConsumer(&server.ConsumerConfig{}); err == nil {
t.Fatalf("Expected an error on attempt for second consumer for a workqueue")
}
o.Delete()
if numo := mset.NumConsumers(); numo != 0 {
t.Fatalf("Expected to have zero consumers, got %d", numo)
}
// Now add in a consumer that has a partition.
pindex := 1
pConfig := func(pname string) *server.ConsumerConfig {
dname := fmt.Sprintf("PPBO-%d", pindex)
pindex++
return &server.ConsumerConfig{Durable: dname, FilterSubject: pname, AckPolicy: server.AckExplicit}
}
o, err = mset.AddConsumer(pConfig("MY_WORK_QUEUE.A"))
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
// Now creating another with separate partition should work.
o2, err := mset.AddConsumer(pConfig("MY_WORK_QUEUE.B"))
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o2.Delete()
// Anything that would overlap should fail though.
if _, err := mset.AddConsumer(pConfig(">")); err == nil {
t.Fatalf("Expected an error on attempt for partitioned consumer for a workqueue")
}
if _, err := mset.AddConsumer(pConfig("MY_WORK_QUEUE.A")); err == nil {
t.Fatalf("Expected an error on attempt for partitioned consumer for a workqueue")
}
if _, err := mset.AddConsumer(pConfig("MY_WORK_QUEUE.A")); err == nil {
t.Fatalf("Expected an error on attempt for partitioned consumer for a workqueue")
}
o3, err := mset.AddConsumer(pConfig("MY_WORK_QUEUE.C"))
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
o.Delete()
o2.Delete()
o3.Delete()
// Push-based consumers will be allowed now, including ephemerals.
// They can not overlap, meaning the same rules as above apply.
o4, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "DURABLE",
DeliverSubject: "SOME.SUBJ",
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Unexpected Error: %v", err)
}
defer o4.Delete()
// Now try to create an ephemeral
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
// This should fail at first due to conflict above.
ephCfg := &server.ConsumerConfig{DeliverSubject: sub.Subject, AckPolicy: server.AckExplicit}
if _, err := mset.AddConsumer(ephCfg); err == nil {
t.Fatalf("Expected an error ")
}
// Delete of o4 should clear.
o4.Delete()
o5, err := mset.AddConsumer(ephCfg)
if err != nil {
t.Fatalf("Unexpected Error: %v", err)
}
defer o5.Delete()
})
}
}
func TestJetStreamAckAllRedelivery(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_S22", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "MY_S22", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 100
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{
DeliverSubject: sub.Subject,
AckWait: 50 * time.Millisecond,
AckPolicy: server.AckAll,
})
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o.Delete()
// Wait for messages.
// We will do 5 redeliveries.
for i := 1; i <= 5; i++ {
checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend*i {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend*i)
}
return nil
})
}
// Stop redeliveries.
o.Delete()
// Now make sure that they are all redelivered in order for each redelivered batch.
for l := 1; l <= 5; l++ {
for i := 1; i <= toSend; i++ {
m, _ := sub.NextMsg(time.Second)
if seq := o.StreamSeqFromReply(m.Reply); seq != uint64(i) {
t.Fatalf("Expected stream sequence of %d, got %d", i, seq)
}
}
}
})
}
}
func TestJetStreamAckReplyStreamPending(t *testing.T) {
msc := server.StreamConfig{
Name: "MY_WQ",
Subjects: []string{"foo.*"},
Storage: server.MemoryStorage,
MaxAge: 250 * time.Millisecond,
Retention: server.WorkQueuePolicy,
}
fsc := msc
fsc.Storage = server.FileStorage
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &msc},
{"FileStore", &fsc},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 100
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.1", "Hello World!")
}
nc.Flush()
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "PBO", AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
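// expectPending pulls the next message and verifies the pending count both from the consumer info and from the ack reply.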
expectPending := func(ep int) {
t.Helper()
// Now check consumer info.
checkFor(t, time.Second, 10*time.Millisecond, func() error {
if info, pep := o.Info(), ep+1; int(info.NumPending) != pep {
return fmt.Errorf("Expected consumer info pending of %d, got %d", pep, info.NumPending)
}
return nil
})
m, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, _, _, _, pending := o.ReplyInfo(m.Reply)
if pending != uint64(ep) {
t.Fatalf("Expected ack reply pending of %d, got %d - reply: %q", ep, pending, m.Reply)
}
}
expectPending(toSend - 1)
// Send some more while we are connected.
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.1", "Hello World!")
}
nc.Flush()
expectPending(toSend*2 - 2)
// Purge and send a new one.
mset.Purge()
nc.Flush()
sendStreamMsg(t, nc, "foo.1", "Hello World!")
expectPending(0)
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.22", "Hello World!")
}
expectPending(toSend - 1) // 201
// Test that delete will not register for consumed messages.
mset.RemoveMsg(mset.State().FirstSeq)
expectPending(toSend - 2) // 202
// Now remove one that has not been delivered.
mset.RemoveMsg(250)
expectPending(toSend - 4) // 203
// Test Expiration.
mset.Purge()
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.1", "Hello World!")
}
nc.Flush()
// Wait for expiration to kick in.
checkFor(t, time.Second, 10*time.Millisecond, func() error {
if state := mset.State(); state.Msgs != 0 {
return fmt.Errorf("Stream still has messages")
}
return nil
})
sendStreamMsg(t, nc, "foo.33", "Hello World!")
expectPending(0)
// Now do filtered consumers.
o.Delete()
o, err = mset.AddConsumer(&server.ConsumerConfig{Durable: "PBO-FILTERED", AckPolicy: server.AckExplicit, FilterSubject: "foo.22"})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.33", "Hello World!")
}
nc.Flush()
if info := o.Info(); info.NumPending != 0 {
t.Fatalf("Expected no pending, got %d", info.NumPending)
}
// Now send one message that will match us.
sendStreamMsg(t, nc, "foo.22", "Hello World!")
expectPending(0)
sendStreamMsg(t, nc, "foo.22", "Hello World!") // 504
sendStreamMsg(t, nc, "foo.22", "Hello World!") // 505
sendStreamMsg(t, nc, "foo.22", "Hello World!") // 506
sendStreamMsg(t, nc, "foo.22", "Hello World!") // 507
expectPending(3)
mset.RemoveMsg(506)
expectPending(1)
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.22", "Hello World!")
}
nc.Flush()
expectPending(100)
mset.Purge()
sendStreamMsg(t, nc, "foo.22", "Hello World!")
expectPending(0)
})
}
}
func TestJetStreamAckReplyStreamPendingWithAcks(t *testing.T) {
msc := server.StreamConfig{
Name: "MY_STREAM",
Subjects: []string{"foo", "bar", "baz"},
Storage: server.MemoryStorage,
}
fsc := msc
fsc.Storage = server.FileStorage
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &msc},
{"FileStore", &fsc},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 500
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo", "Hello Foo!")
sendStreamMsg(t, nc, "bar", "Hello Bar!")
sendStreamMsg(t, nc, "baz", "Hello Baz!")
}
state := mset.State()
if state.Msgs != uint64(toSend*3) {
t.Fatalf("Expected %d messages, got %d", toSend*3, state.Msgs)
}
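// Create a push-based consumer filtered on "foo"; only that third of the stream should count toward its pending.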
dsubj := "_d_"
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "D-1",
AckPolicy: server.AckExplicit,
FilterSubject: "foo",
DeliverSubject: dsubj,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
if info := o.Info(); int(info.NumPending) != toSend {
t.Fatalf("Expected consumer info pending of %d, got %d", toSend, info.NumPending)
}
sub, _ := nc.SubscribeSync(dsubj)
defer sub.Unsubscribe()
checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
// Should be zero.
if info := o.Info(); int(info.NumPending) != 0 {
t.Fatalf("Expected consumer info pending of %d, got %d", 0, info.NumPending)
} else if info.NumAckPending != toSend {
t.Fatalf("Expected %d to be pending acks, got %d", toSend, info.NumAckPending)
}
})
}
}
func TestJetStreamWorkQueueAckWaitRedelivery(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.MemoryStorage, Retention: server.WorkQueuePolicy}},
{"FileStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.FileStorage, Retention: server.WorkQueuePolicy}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 100
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
ackWait := 100 * time.Millisecond
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "PBO", AckPolicy: server.AckExplicit, AckWait: ackWait})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
reqNextMsgSubj := o.RequestNextMsgSubject()
// Consume all the messages. But do not ack.
for i := 0; i < toSend; i++ {
nc.PublishRequest(reqNextMsgSubj, sub.Subject, nil)
if _, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error waiting for messages: %v", err)
}
}
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 {
t.Fatalf("Did not consume all messages, still have %d", nmsgs)
}
// All messages should still be there.
state = mset.State()
if int(state.Msgs) != toSend {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
// Now consume and ack.
for i := 1; i <= toSend; i++ {
nc.PublishRequest(reqNextMsgSubj, sub.Subject, nil)
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error waiting for message[%d]: %v", i, err)
}
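// ReplyInfo decodes delivery metadata from the ack reply subject, including stream sequence, delivery sequence, delivery count and pending count.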
sseq, dseq, dcount, _, _ := o.ReplyInfo(m.Reply)
if sseq != uint64(i) {
t.Fatalf("Expected set sequence of %d , got %d", i, sseq)
}
// Delivery sequences should always increase.
if dseq != uint64(toSend+i) {
t.Fatalf("Expected delivery sequence of %d , got %d", toSend+i, dseq)
}
if dcount == 1 {
t.Fatalf("Expected these to be marked as redelivered")
}
// Ack the message here.
m.Respond(nil)
}
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 {
t.Fatalf("Did not consume all messages, still have %d", nmsgs)
}
// Flush acks
nc.Flush()
// Now check the mset as well, since we have a WorkQueue retention policy this should be empty.
if state := mset.State(); state.Msgs != 0 {
t.Fatalf("Expected no messages, got %d", state.Msgs)
}
})
}
}
func TestJetStreamWorkQueueNakRedelivery(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.MemoryStorage, Retention: server.WorkQueuePolicy}},
{"FileStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.FileStorage, Retention: server.WorkQueuePolicy}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 10
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "PBO", AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
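// getMsg pulls the next message and verifies the expected stream and delivery sequences from the ack reply.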
getMsg := func(sseq, dseq int) *nats.Msg {
t.Helper()
m, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rsseq, rdseq, _, _, _ := o.ReplyInfo(m.Reply)
if rdseq != uint64(dseq) {
t.Fatalf("Expected delivered sequence of %d , got %d", dseq, rdseq)
}
if rsseq != uint64(sseq) {
t.Fatalf("Expected store sequence of %d , got %d", sseq, rsseq)
}
return m
}
for i := 1; i <= 5; i++ {
m := getMsg(i, i)
// Ack the message here.
m.Respond(nil)
}
// Grab #6
m := getMsg(6, 6)
// NAK this one.
m.Respond(server.AckNak)
// When we request again should be store sequence 6 again.
getMsg(6, 7)
// Then we should get 7, 8, etc.
getMsg(7, 8)
getMsg(8, 9)
})
}
}
func TestJetStreamWorkQueueWorkingIndicator(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.MemoryStorage, Retention: server.WorkQueuePolicy}},
{"FileStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.FileStorage, Retention: server.WorkQueuePolicy}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 2
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
ackWait := 100 * time.Millisecond
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "PBO", AckPolicy: server.AckExplicit, AckWait: ackWait})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
getMsg := func(sseq, dseq int) *nats.Msg {
t.Helper()
m, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rsseq, rdseq, _, _, _ := o.ReplyInfo(m.Reply)
if rdseq != uint64(dseq) {
t.Fatalf("Expected delivered sequence of %d , got %d", dseq, rdseq)
}
if rsseq != uint64(sseq) {
t.Fatalf("Expected store sequence of %d , got %d", sseq, rsseq)
}
return m
}
getMsg(1, 1)
// Now wait past ackWait
time.Sleep(ackWait * 2)
// We should get 1 back.
m := getMsg(1, 2)
// Now let's take longer than ackWait to process but signal we are working on the message.
timeout := time.Now().Add(3 * ackWait)
for time.Now().Before(timeout) {
m.Respond(server.AckProgress)
nc.Flush()
time.Sleep(ackWait / 5)
}
// We should get 2 here, not 1 since we have indicated we are working on it.
m2 := getMsg(2, 3)
time.Sleep(ackWait / 2)
m2.Respond(server.AckProgress)
// Now should get 1 back then 2.
m = getMsg(1, 4)
m.Respond(nil)
getMsg(2, 5)
})
}
}
func TestJetStreamWorkQueueTerminateDelivery(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.MemoryStorage, Retention: server.WorkQueuePolicy}},
{"FileStore", &server.StreamConfig{Name: "MY_WQ", Storage: server.FileStorage, Retention: server.WorkQueuePolicy}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 22
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
ackWait := 25 * time.Millisecond
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "PBO", AckPolicy: server.AckExplicit, AckWait: ackWait})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
getMsg := func(sseq, dseq int) *nats.Msg {
t.Helper()
m, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rsseq, rdseq, _, _, _ := o.ReplyInfo(m.Reply)
if rdseq != uint64(dseq) {
t.Fatalf("Expected delivered sequence of %d , got %d", dseq, rdseq)
}
if rsseq != uint64(sseq) {
t.Fatalf("Expected store sequence of %d , got %d", sseq, rsseq)
}
return m
}
// Make sure we get the correct advisory
sub, _ := nc.SubscribeSync(server.JSAdvisoryConsumerMsgTerminatedPre + ".>")
defer sub.Unsubscribe()
getMsg(1, 1)
// Now wait past ackWait
time.Sleep(ackWait * 2)
// We should get 1 back.
m := getMsg(1, 2)
// Now terminate
m.Respond(server.AckTerm)
time.Sleep(ackWait * 2)
// We should get 2 here, not 1 since we have indicated we wanted to terminate.
getMsg(2, 3)
// Check advisory was delivered.
am, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var adv server.JSConsumerDeliveryTerminatedAdvisory
json.Unmarshal(am.Data, &adv)
if adv.Stream != "MY_WQ" {
t.Fatalf("Expected stream of %s, got %s", "MY_WQ", adv.Stream)
}
if adv.Consumer != "PBO" {
t.Fatalf("Expected consumer of %s, got %s", "PBO", adv.Consumer)
}
if adv.StreamSeq != 1 {
t.Fatalf("Expected stream sequence of %d, got %d", 1, adv.StreamSeq)
}
if adv.ConsumerSeq != 2 {
t.Fatalf("Expected consumer sequence of %d, got %d", 2, adv.ConsumerSeq)
}
if adv.Deliveries != 2 {
t.Fatalf("Expected delivery count of %d, got %d", 2, adv.Deliveries)
}
})
}
}
func TestJetStreamConsumerAckAck(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "ACK-ACK"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "worker", AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
rqn := o.RequestNextMsgSubject()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 4 messages, one for each ack protocol we test below.
for i := 0; i < 4; i++ {
sendStreamMsg(t, nc, mname, "Hello World!")
}
testAck := func(ackType []byte) {
m, err := nc.Request(rqn, nil, 10*time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Send a request for the ack and make sure the server "ack's" the ack.
if _, err := nc.Request(m.Reply, ackType, 10*time.Millisecond); err != nil {
t.Fatalf("Unexpected error on ack/ack: %v", err)
}
}
testAck(server.AckAck)
testAck(server.AckNak)
testAck(server.AckProgress)
testAck(server.AckTerm)
}
func TestJetStreamAckNext(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "ACKNXT"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "worker", AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
for i := 0; i < 12; i++ {
sendStreamMsg(t, nc, mname, fmt.Sprintf("msg %d", i))
}
q := make(chan *nats.Msg, 10)
sub, err := nc.ChanSubscribe(nats.NewInbox(), q)
if err != nil {
t.Fatalf("SubscribeSync failed: %s", err)
}
nc.PublishRequest(o.RequestNextMsgSubject(), sub.Subject, []byte("1"))
// A normal AckNext with no batch argument should imply a batch of 1.
msg := <-q
err = msg.RespondMsg(&nats.Msg{Reply: sub.Subject, Subject: msg.Reply, Data: server.AckNext})
if err != nil {
t.Fatalf("RespondMsg failed: %s", err)
}
// Read 1 message and check that the previous message was acked.
msg = <-q
if len(q) != 0 {
t.Fatalf("Expected empty q got %d", len(q))
}
if o.Info().AckFloor.Stream != 1 {
t.Fatalf("First message was not acknowledged")
}
if !bytes.Equal(msg.Data, []byte("msg 1")) {
t.Fatalf("wrong message received, expected: msg 1 got %q", msg.Data)
}
// Now ack and request 5 more using a naked number appended to AckNext.
err = msg.RespondMsg(&nats.Msg{Reply: sub.Subject, Subject: msg.Reply, Data: append(server.AckNext, []byte(" 5")...)})
if err != nil {
t.Fatalf("RespondMsg failed: %s", err)
}
getMsgs := func(start, count int) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
for i := start; i < count+1; i++ {
select {
case msg := <-q:
expect := fmt.Sprintf("msg %d", i+1)
if !bytes.Equal(msg.Data, []byte(expect)) {
t.Fatalf("wrong message received, expected: %s got %#v", expect, msg)
}
case <-ctx.Done():
t.Fatalf("did not receive all messages")
}
}
}
getMsgs(1, 5)
// Now ack and request 5 more using the full JSON request.
err = msg.RespondMsg(&nats.Msg{Reply: sub.Subject, Subject: msg.Reply, Data: append(server.AckNext, []byte(`{"batch": 5}`)...)})
if err != nil {
t.Fatalf("RespondMsg failed: %s", err)
}
getMsgs(6, 10)
if o.Info().AckFloor.Stream != 2 {
t.Fatalf("second message was not acknowledged")
}
}
func TestJetStreamPublishDeDupe(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "DeDupe"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.FileStorage, MaxAge: time.Hour, Subjects: []string{"foo.*"}})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Check Duplicates setting.
duplicates := mset.Config().Duplicates
if duplicates != server.StreamDefaultDuplicatesWindow {
t.Fatalf("Expected a default of %v, got %v", server.StreamDefaultDuplicatesWindow, duplicates)
}
cfg := mset.Config()
// Make sure can't be negative.
cfg.Duplicates = -25 * time.Millisecond
if err := mset.Update(&cfg); err == nil {
t.Fatalf("Expected an error but got none")
}
// Make sure it can't be longer than MaxAge if that is set.
cfg.Duplicates = 2 * time.Hour
if err := mset.Update(&cfg); err == nil {
t.Fatalf("Expected an error but got none")
}
nc := clientConnectToServer(t, s)
defer nc.Close()
sendMsg := func(seq uint64, id, msg string) *server.PubAck {
t.Helper()
m := nats.NewMsg(fmt.Sprintf("foo.%d", seq))
m.Header.Add(server.JSMsgId, id)
m.Data = []byte(msg)
resp, _ := nc.RequestMsg(m, 100*time.Millisecond)
if resp == nil {
t.Fatalf("No response for %q, possible timeout?", msg)
}
pa := getPubAckResponse(resp.Data)
if pa == nil || pa.Error != nil {
t.Fatalf("Expected a JetStreamPubAck, got %q", resp.Data)
}
if pa.Sequence != seq {
t.Fatalf("Did not get correct sequence in PubAck, expected %d, got %d", seq, pa.Sequence)
}
return pa.PubAck
}
expect := func(n uint64) {
t.Helper()
state := mset.State()
if state.Msgs != n {
t.Fatalf("Expected %d messages, got %d", n, state.Msgs)
}
}
sendMsg(1, "AA", "Hello DeDupe!")
sendMsg(2, "BB", "Hello DeDupe!")
sendMsg(3, "CC", "Hello DeDupe!")
sendMsg(4, "ZZ", "Hello DeDupe!")
expect(4)
sendMsg(1, "AA", "Hello DeDupe!")
sendMsg(2, "BB", "Hello DeDupe!")
sendMsg(4, "ZZ", "Hello DeDupe!")
expect(4)
cfg = mset.Config()
cfg.Duplicates = 25 * time.Millisecond
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
nmids := func(expected int) {
t.Helper()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if nids := mset.NumMsgIds(); nids != expected {
return fmt.Errorf("Expected %d message ids, got %d", expected, nids)
}
return nil
})
}
nmids(4)
time.Sleep(cfg.Duplicates * 2)
sendMsg(5, "AAA", "Hello DeDupe!")
sendMsg(6, "BBB", "Hello DeDupe!")
sendMsg(7, "CCC", "Hello DeDupe!")
sendMsg(8, "DDD", "Hello DeDupe!")
sendMsg(9, "ZZZ", "Hello DeDupe!")
nmids(5)
// Eventually will drop to zero.
nmids(0)
// Now test server restart
cfg.Duplicates = 30 * time.Minute
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
mset.Purge()
// Send 5 new messages.
sendMsg(10, "AAAA", "Hello DeDupe!")
sendMsg(11, "BBBB", "Hello DeDupe!")
sendMsg(12, "CCCC", "Hello DeDupe!")
sendMsg(13, "DDDD", "Hello DeDupe!")
sendMsg(14, "EEEE", "Hello DeDupe!")
// Stop current server.
sd := s.JetStreamConfig().StoreDir
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(-1, sd)
defer s.Shutdown()
nc = clientConnectToServer(t, s)
defer nc.Close()
mset, _ = s.GlobalAccount().LookupStream(mname)
if nms := mset.State().Msgs; nms != 5 {
t.Fatalf("Expected 5 restored messages, got %d", nms)
}
nmids(5)
// Send same and make sure duplicate detection still works.
// Send 5 duplicate messages.
sendMsg(10, "AAAA", "Hello DeDupe!")
sendMsg(11, "BBBB", "Hello DeDupe!")
sendMsg(12, "CCCC", "Hello DeDupe!")
sendMsg(13, "DDDD", "Hello DeDupe!")
sendMsg(14, "EEEE", "Hello DeDupe!")
if nms := mset.State().Msgs; nms != 5 {
t.Fatalf("Expected 5 restored messages, got %d", nms)
}
nmids(5)
// Check we set duplicate properly.
pa := sendMsg(10, "AAAA", "Hello DeDupe!")
if !pa.Duplicate {
t.Fatalf("Expected duplicate to be set")
}
// Purge should wipe the msgIds as well.
mset.Purge()
nmids(0)
}
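// getPubAckResponse unmarshals a publish ack API response.
// It returns nil if the payload cannot be parsed as JSON.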
func getPubAckResponse(msg []byte) *server.JSPubAckResponse {
var par server.JSPubAckResponse
if err := json.Unmarshal(msg, &par); err != nil {
return nil
}
return &par
}
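// TestJetStreamPublishExpect checks the publish expectation headers: expected stream,
// expected last sequence and expected last message ID, including that they are still
// honored after a server restart.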
func TestJetStreamPublishExpect(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "EXPECT"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.FileStorage, MaxAge: time.Hour, Subjects: []string{"foo.*"}})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Test that we get no error when expected stream is correct.
m := nats.NewMsg("foo.bar")
m.Data = []byte("HELLO")
m.Header.Set(server.JSExpectedStream, mname)
resp, err := nc.RequestMsg(m, 100*time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error != nil {
t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data)
}
// Now test that we get an error back when expecting a different stream.
m.Header.Set(server.JSExpectedStream, "ORDERS")
resp, err = nc.RequestMsg(m, 100*time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error == nil {
t.Fatalf("Expected an error, got %q", resp.Data)
}
// Now test that we get an error back when expecting a different sequence number.
m.Header.Set(server.JSExpectedStream, mname)
m.Header.Set(server.JSExpectedLastSeq, "10")
resp, err = nc.RequestMsg(m, 100*time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error == nil {
t.Fatalf("Expected an error, got %q", resp.Data)
}
// Now send a message with a message ID and make sure we can match that.
m = nats.NewMsg("foo.bar")
m.Data = []byte("HELLO")
m.Header.Set(server.JSMsgId, "AAA")
if _, err = nc.RequestMsg(m, 100*time.Millisecond); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now try again with new message ID but require last one to be 'BBB'
m.Header.Set(server.JSMsgId, "ZZZ")
m.Header.Set(server.JSExpectedLastMsgId, "BBB")
resp, err = nc.RequestMsg(m, 100*time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error == nil {
t.Fatalf("Expected an error, got %q", resp.Data)
}
// Restart the server and make sure we remember/rebuild last seq and last msgId.
// Stop current server.
sd := s.JetStreamConfig().StoreDir
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(-1, sd)
defer s.Shutdown()
nc = clientConnectToServer(t, s)
defer nc.Close()
// Our last sequence was 2 and last msgId was "AAA"
m = nats.NewMsg("foo.baz")
m.Data = []byte("HELLO AGAIN")
m.Header.Set(server.JSExpectedLastSeq, "2")
m.Header.Set(server.JSExpectedLastMsgId, "AAA")
m.Header.Set(server.JSMsgId, "BBB")
resp, err = nc.RequestMsg(m, 100*time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error != nil {
t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data)
}
}
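// TestJetStreamPullConsumerRemoveInterest makes sure that waiting pull requests whose
// interest has gone away (a closed connection, or old-style requests that timed out)
// do not consume deliveries, and that delivery sequence and count remain correct.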
func TestJetStreamPullConsumerRemoveInterest(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "MYS-PULL"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
wcfg := &server.ConsumerConfig{Durable: "worker", AckPolicy: server.AckExplicit}
o, err := mset.AddConsumer(wcfg)
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
rqn := o.RequestNextMsgSubject()
defer o.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Ask for a message even though one is not there. This will queue us up for waiting.
if _, err := nc.Request(rqn, nil, 10*time.Millisecond); err == nil {
t.Fatalf("Expected an error, got none")
}
// This is using the new style request mechanism, so drop the connection itself to get rid of interest.
nc.Close()
// Wait for client cleanup
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if n := s.NumClients(); err != nil || n != 0 {
return fmt.Errorf("Still have %d clients", n)
}
return nil
})
nc = clientConnectToServer(t, s)
defer nc.Close()
// Send a message
sendStreamMsg(t, nc, mname, "Hello World!")
msg, err := nc.Request(rqn, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, dseq, dc, _, _ := o.ReplyInfo(msg.Reply)
if dseq != 1 {
t.Fatalf("Expected consumer sequence of 1, got %d", dseq)
}
if dc != 1 {
t.Fatalf("Expected delivery count of 1, got %d", dc)
}
// Now do old school request style and more than one waiting.
nc = clientConnectWithOldRequest(t, s)
defer nc.Close()
// Now queue up 10 waiting via failed requests.
for i := 0; i < 10; i++ {
if _, err := nc.Request(rqn, nil, 1*time.Millisecond); err == nil {
t.Fatalf("Expected an error, got none")
}
}
// Send a second message
sendStreamMsg(t, nc, mname, "Hello World!")
msg, err = nc.Request(rqn, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, dseq, dc, _, _ = o.ReplyInfo(msg.Reply)
if dseq != 2 {
t.Fatalf("Expected consumer sequence of 2, got %d", dseq)
}
if dc != 1 {
t.Fatalf("Expected delivery count of 1, got %d", dc)
}
}
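// TestJetStreamConsumerRateLimit checks that RateLimit is rejected for pull consumers
// and that a push consumer with a rate limit does not deliver significantly faster than
// the configured bits-per-second rate.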
func TestJetStreamConsumerRateLimit(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "RATELIMIT"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.FileStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
msgSize := 128 * 1024
msg := make([]byte, msgSize)
rand.Read(msg)
// 10MB
totalSize := 10 * 1024 * 1024
toSend := totalSize / msgSize
for i := 0; i < toSend; i++ {
nc.Publish(mname, msg)
}
nc.Flush()
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
// 100Mbit
rateLimit := uint64(100 * 1024 * 1024)
// Make sure if you set a rate with a pull based consumer it errors.
_, err = mset.AddConsumer(&server.ConsumerConfig{Durable: "to", AckPolicy: server.AckExplicit, RateLimit: rateLimit})
if err == nil {
t.Fatalf("Expected an error, got none")
}
// Now create one and measure the rate delivered.
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "rate",
DeliverSubject: "to",
RateLimit: rateLimit,
AckPolicy: server.AckNone})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
var received int
done := make(chan bool)
start := time.Now()
nc.Subscribe("to", func(m *nats.Msg) {
received++
if received >= toSend {
done <- true
}
})
nc.Flush()
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatalf("Did not receive all the messages in time")
}
tt := time.Since(start)
rate := float64(8*toSend*msgSize) / tt.Seconds()
if rate > float64(rateLimit)*1.25 {
t.Fatalf("Exceeded desired rate of %d mbps, got %0.f mbps", rateLimit/(1024*1024), rate/(1024*1024))
}
}
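// TestJetStreamEphemeralConsumerRecoveryAfterServerRestart verifies that an ephemeral
// consumer survives server restarts while its delivery subscription is active, but is
// cleaned up once interest goes away past the inactive delete threshold.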
func TestJetStreamEphemeralConsumerRecoveryAfterServerRestart(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "MYS"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.FileStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{
DeliverSubject: sub.Subject,
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Error creating consumer: %v", err)
}
defer o.Delete()
// Snapshot our name.
oname := o.Name()
// Send 100 messages
for i := 0; i < 100; i++ {
sendStreamMsg(t, nc, mname, "Hello World!")
}
if state := mset.State(); state.Msgs != 100 {
t.Fatalf("Expected %d messages, got %d", 100, state.Msgs)
}
// Read and ack the first 7 messages (i = 0..6).
for i := 0; i <= 6; i++ {
if m, err := sub.NextMsg(time.Second); err == nil {
m.Respond(nil)
} else {
t.Fatalf("Unexpected error: %v", err)
}
}
// Capture port since it was dynamic.
u, _ := url.Parse(s.ClientURL())
port, _ := strconv.Atoi(u.Port())
restartServer := func() {
t.Helper()
// Stop current server.
sd := s.JetStreamConfig().StoreDir
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(port, sd)
}
// Do twice
for i := 0; i < 2; i++ {
// Restart.
restartServer()
defer s.Shutdown()
mset, err = s.GlobalAccount().LookupStream(mname)
if err != nil {
t.Fatalf("Expected to find a stream for %q", mname)
}
o = mset.LookupConsumer(oname)
if o == nil {
t.Fatalf("Error looking up consumer %q", oname)
}
// Make sure config does not have durable.
if cfg := o.Config(); cfg.Durable != "" {
t.Fatalf("Expected no durable to be set")
}
// Wait for it to become active
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if !o.Active() {
return fmt.Errorf("Consumer not active")
}
return nil
})
}
// Now close the connection. Make sure this acts like an ephemeral and goes away.
o.SetInActiveDeleteThreshold(10 * time.Millisecond)
nc.Close()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if o := mset.LookupConsumer(oname); o != nil {
return fmt.Errorf("Consumer still active")
}
return nil
})
}
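// TestJetStreamConsumerMaxDeliveryAndServerRestart checks that MaxDeliver redelivery
// state is honored across server restarts and that stream and consumer creation times
// are restored.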
func TestJetStreamConsumerMaxDeliveryAndServerRestart(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "MYS"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: mname, Storage: server.FileStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
streamCreated := mset.Created()
dsubj := "D.TO"
max := 3
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "TO",
DeliverSubject: dsubj,
AckPolicy: server.AckExplicit,
AckWait: 100 * time.Millisecond,
MaxDeliver: max,
})
defer o.Delete()
consumerCreated := o.Created()
// For calculation of consumer created times below.
time.Sleep(5 * time.Millisecond)
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync(dsubj)
nc.Flush()
defer sub.Unsubscribe()
// Send one message.
sendStreamMsg(t, nc, mname, "order-1")
checkSubPending := func(numExpected int) {
t.Helper()
checkFor(t, time.Second, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
checkNumMsgs := func(numExpected uint64) {
t.Helper()
mset, err = s.GlobalAccount().LookupStream(mname)
if err != nil {
t.Fatalf("Expected to find a stream for %q", mname)
}
state := mset.State()
if state.Msgs != numExpected {
t.Fatalf("Expected %d msgs, got %d", numExpected, state.Msgs)
}
}
// Wait until we know we have max queued up.
checkSubPending(max)
// Once here, the first message has hit its max delivery limit.
// Send second
sendStreamMsg(t, nc, mname, "order-2")
// Just wait for first delivery + one redelivery.
checkSubPending(max + 2)
// Capture port since it was dynamic.
u, _ := url.Parse(s.ClientURL())
port, _ := strconv.Atoi(u.Port())
restartServer := func() {
t.Helper()
sd := s.JetStreamConfig().StoreDir
// Stop current server.
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(port, sd)
}
waitForClientReconnect := func() {
checkFor(t, 2500*time.Millisecond, 5*time.Millisecond, func() error {
if !nc.IsConnected() {
return fmt.Errorf("Not connected")
}
return nil
})
}
// Restart.
restartServer()
defer s.Shutdown()
checkNumMsgs(2)
// Wait for client to be reconnected.
waitForClientReconnect()
// Once we are here send third order.
sendStreamMsg(t, nc, mname, "order-3")
checkNumMsgs(3)
// Restart.
restartServer()
defer s.Shutdown()
checkNumMsgs(3)
// Wait for client to be reconnected.
waitForClientReconnect()
// Now we should have max times three on our sub.
checkSubPending(max * 3)
// Now do some checks on created timestamps.
mset, err = s.GlobalAccount().LookupStream(mname)
if err != nil {
t.Fatalf("Expected to find a stream for %q", mname)
}
if mset.Created() != streamCreated {
t.Fatalf("Stream creation time not restored, wanted %v, got %v", streamCreated, mset.Created())
}
o = mset.LookupConsumer("TO")
if o == nil {
t.Fatalf("Error looking up consumer: %v", err)
}
// Consumer created times can have a very small skew.
delta := o.Created().Sub(consumerCreated)
if delta > 5*time.Millisecond {
t.Fatalf("Consumer creation time not restored, wanted %v, got %v", consumerCreated, o.Created())
}
}
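// TestJetStreamDeleteConsumerAndServerRestart makes sure a deleted consumer does not
// come back after a server restart.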
func TestJetStreamDeleteConsumerAndServerRestart(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
sendSubj := "MYQ"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: sendSubj, Storage: server.FileStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Create basic work queue mode consumer.
oname := "WQ"
o, err := mset.AddConsumer(workerModeConfig(oname))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
// Now delete and then we will restart the server.
o.Delete()
if numo := mset.NumConsumers(); numo != 0 {
t.Fatalf("Expected to have zero consumers, got %d", numo)
}
// Capture port since it was dynamic.
u, _ := url.Parse(s.ClientURL())
port, _ := strconv.Atoi(u.Port())
sd := s.JetStreamConfig().StoreDir
// Stop current server.
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(port, sd)
defer s.Shutdown()
mset, err = s.GlobalAccount().LookupStream(sendSubj)
if err != nil {
t.Fatalf("Expected to find a stream for %q", sendSubj)
}
if numo := mset.NumConsumers(); numo != 0 {
t.Fatalf("Expected to have zero consumers, got %d", numo)
}
}
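// TestJetStreamRedeliveryAfterServerRestart verifies that unacknowledged messages are
// redelivered to a durable push consumer after the server restarts.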
func TestJetStreamRedeliveryAfterServerRestart(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
sendSubj := "MYQ"
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: sendSubj, Storage: server.FileStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 25
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, sendSubj, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "TO",
DeliverSubject: sub.Subject,
AckPolicy: server.AckExplicit,
AckWait: 100 * time.Millisecond,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
// Capture port since it was dynamic.
u, _ := url.Parse(s.ClientURL())
port, _ := strconv.Atoi(u.Port())
sd := s.JetStreamConfig().StoreDir
// Stop current server.
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(port, sd)
defer s.Shutdown()
// Don't wait for reconnect from old client.
nc = clientConnectToServer(t, s)
defer nc.Close()
sub, _ = nc.SubscribeSync(sub.Subject)
defer sub.Unsubscribe()
checkFor(t, time.Second, 50*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
}
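// TestJetStreamSnapshots takes a snapshot of a stream with consumers, restores it into
// the same account and into a fresh server, and checks that usage, state, config and
// consumer positions all match.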
func TestJetStreamSnapshots(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "MY-STREAM"
subjects := []string{"foo", "bar", "baz"}
cfg := server.StreamConfig{
Name: mname,
Storage: server.FileStorage,
Subjects: subjects,
MaxMsgs: 1000,
}
acc := s.GlobalAccount()
mset, err := acc.AddStream(&cfg)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
// Make sure we send at least a floor of 22 messages.
toSend := rand.Intn(200) + 22
for i := 1; i <= toSend; i++ {
msg := fmt.Sprintf("Hello World %d", i)
subj := subjects[rand.Intn(len(subjects))]
sendStreamMsg(t, nc, subj, msg)
}
// Create up to 10 consumers.
numConsumers := rand.Intn(10) + 1
var obs []obsi
for i := 1; i <= numConsumers; i++ {
cname := fmt.Sprintf("WQ-%d", i)
o, err := mset.AddConsumer(workerModeConfig(cname))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now grab some messages.
toReceive := rand.Intn(toSend/2) + 1
for r := 0; r < toReceive; r++ {
resp, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if resp != nil {
resp.Respond(nil)
}
}
obs = append(obs, obsi{o.Config(), toReceive})
}
nc.Flush()
// Snapshot state of the stream and consumers.
info := info{mset.Config(), mset.State(), obs}
sr, err := mset.Snapshot(5*time.Second, false, true)
if err != nil {
t.Fatalf("Error getting snapshot: %v", err)
}
zr := sr.Reader
snapshot, err := ioutil.ReadAll(zr)
if err != nil {
t.Fatalf("Error reading snapshot")
}
// Try to restore from snapshot with current stream present, should error.
r := bytes.NewReader(snapshot)
if _, err := acc.RestoreStream(mname, r); err == nil {
t.Fatalf("Expected an error trying to restore existing stream")
} else if !strings.Contains(err.Error(), "name already in use") {
t.Fatalf("Incorrect error received: %v", err)
}
// Now delete so we can restore.
pusage := acc.JetStreamUsage()
mset.Delete()
r.Reset(snapshot)
// Now send in wrong name
if _, err := acc.RestoreStream("foo", r); err == nil {
t.Fatalf("Expected an error trying to restore stream with wrong name")
}
r.Reset(snapshot)
mset, err = acc.RestoreStream(mname, r)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now compare to make sure they are equal.
if nusage := acc.JetStreamUsage(); nusage != pusage {
t.Fatalf("Usage does not match after restore: %+v vs %+v", nusage, pusage)
}
if state := mset.State(); !reflect.DeepEqual(state, info.state) {
t.Fatalf("State does not match: %+v vs %+v", state, info.state)
}
if cfg := mset.Config(); !reflect.DeepEqual(cfg, info.cfg) {
t.Fatalf("Configs do not match: %+v vs %+v", cfg, info.cfg)
}
// Consumers.
if mset.NumConsumers() != len(info.obs) {
t.Fatalf("Number of consumers do not match: %d vs %d", mset.NumConsumers(), len(info.obs))
}
for _, oi := range info.obs {
if o := mset.LookupConsumer(oi.cfg.Durable); o != nil {
if uint64(oi.ack+1) != o.NextSeq() {
t.Fatalf("[%v] Consumer next seq is not correct: %d vs %d", o.Name(), oi.ack+1, o.NextSeq())
}
} else {
t.Fatalf("Expected to get an consumer")
}
}
// Now try restoring to a different server.
s2 := RunBasicJetStreamServer()
defer s2.Shutdown()
if config := s2.JetStreamConfig(); config != nil && config.StoreDir != "" {
defer os.RemoveAll(config.StoreDir)
}
acc = s2.GlobalAccount()
r.Reset(snapshot)
mset, err = acc.RestoreStream(mname, r)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
o := mset.LookupConsumer("WQ-1")
if o == nil {
t.Fatalf("Could not lookup consumer")
}
nc2 := clientConnectToServer(t, s2)
defer nc2.Close()
// Make sure we can read messages.
if _, err := nc2.Request(o.RequestNextMsgSubject(), nil, 5*time.Second); err != nil {
t.Fatalf("Unexpected error getting next message: %v", err)
}
}
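// TestJetStreamSnapshotsAPI drives the snapshot and restore JSON API end to end,
// including error responses, chunked delivery with flow-control acks, and access
// via a leafnode-connected server.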
func TestJetStreamSnapshotsAPI(t *testing.T) {
lopts := DefaultTestOptions
lopts.ServerName = "LS"
lopts.Port = -1
lopts.LeafNode.Host = lopts.Host
lopts.LeafNode.Port = -1
ls := RunServer(&lopts)
defer ls.Shutdown()
opts := DefaultTestOptions
opts.ServerName = "S"
opts.Port = -1
tdir, _ := ioutil.TempDir(os.TempDir(), "jstests-storedir-")
opts.JetStream = true
opts.StoreDir = tdir
rurl, _ := url.Parse(fmt.Sprintf("nats-leaf://%s:%d", lopts.LeafNode.Host, lopts.LeafNode.Port))
opts.LeafNode.Remotes = []*server.RemoteLeafOpts{{URLs: []*url.URL{rurl}}}
s := RunServer(&opts)
defer s.Shutdown()
checkLeafNodeConnected(t, s)
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mname := "MY-STREAM"
subjects := []string{"foo", "bar", "baz"}
cfg := server.StreamConfig{
Name: mname,
Storage: server.FileStorage,
Subjects: subjects,
MaxMsgs: 1000,
}
acc := s.GlobalAccount()
mset, err := acc.AddStreamWithStore(&cfg, &server.FileStoreConfig{BlockSize: 128})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := rand.Intn(100) + 1
for i := 1; i <= toSend; i++ {
msg := fmt.Sprintf("Hello World %d", i)
subj := subjects[rand.Intn(len(subjects))]
sendStreamMsg(t, nc, subj, msg)
}
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now grab some messages.
toReceive := rand.Intn(toSend) + 1
for r := 0; r < toReceive; r++ {
resp, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if resp != nil {
resp.Respond(nil)
}
}
// Make sure we get proper errors for non-existent requests, streams, etc.
rmsg, err := nc.Request(fmt.Sprintf(server.JSApiStreamSnapshotT, "foo"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
var resp server.JSApiStreamSnapshotResponse
json.Unmarshal(rmsg.Data, &resp)
if resp.Error == nil || resp.Error.Code != 400 || resp.Error.Description != "bad request" {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
sreq := &server.JSApiStreamSnapshotRequest{}
req, _ := json.Marshal(sreq)
rmsg, err = nc.Request(fmt.Sprintf(server.JSApiStreamSnapshotT, "foo"), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
json.Unmarshal(rmsg.Data, &resp)
if resp.Error == nil || resp.Error.Code != 404 || resp.Error.Description != "stream not found" {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
rmsg, err = nc.Request(fmt.Sprintf(server.JSApiStreamSnapshotT, mname), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
json.Unmarshal(rmsg.Data, &resp)
if resp.Error == nil || resp.Error.Code != 400 || resp.Error.Description != "deliver subject not valid" {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
// Set delivery subject, do not subscribe yet. Want this to be an ok pattern.
sreq.DeliverSubject = nats.NewInbox()
// Just for test, usually left alone.
sreq.ChunkSize = 1024
req, _ = json.Marshal(sreq)
rmsg, err = nc.Request(fmt.Sprintf(server.JSApiStreamSnapshotT, mname), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
resp.Error = nil
json.Unmarshal(rmsg.Data, &resp)
if resp.Error != nil {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
// Check that we have the config and the state.
if resp.Config == nil {
t.Fatalf("Expected a stream config in the response, got %+v\n", resp)
}
if resp.State == nil {
t.Fatalf("Expected a stream state in the response, got %+v\n", resp)
}
// Grab state for comparison.
state := *resp.State
config := *resp.Config
// Setup to process snapshot chunks.
var snapshot []byte
done := make(chan bool)
sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) {
// EOF
if len(m.Data) == 0 {
done <- true
return
}
// Could be writing to a file here too.
snapshot = append(snapshot, m.Data...)
// Flow ack
m.Respond(nil)
})
defer sub.Unsubscribe()
// Wait to receive the snapshot.
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatalf("Did not receive our snapshot in time")
}
// Now make sure this snapshot is legit.
var rresp server.JSApiStreamRestoreResponse
rreq := &server.JSApiStreamRestoreRequest{
Config: config,
State: state,
}
req, _ = json.Marshal(rreq)
// Make sure we get an error since stream still exists.
rmsg, err = nc.Request(fmt.Sprintf(server.JSApiStreamRestoreT, mname), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error == nil || rresp.Error.Code != 500 || !strings.Contains(rresp.Error.Description, "already in use") {
t.Fatalf("Did not get correct error response: %+v", rresp.Error)
}
// Delete this stream.
mset.Delete()
// Sending no request message will error now.
rmsg, err = nc.Request(fmt.Sprintf(server.JSApiStreamRestoreT, mname), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
// Make sure to clear.
rresp.Error = nil
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error == nil || rresp.Error.Code != 400 || rresp.Error.Description != "bad request" {
t.Fatalf("Did not get correct error response: %+v", rresp.Error)
}
// This should work.
rmsg, err = nc.Request(fmt.Sprintf(server.JSApiStreamRestoreT, mname), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
// Make sure to clear.
rresp.Error = nil
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error != nil {
t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
}
// Can be any size message.
var chunk [512]byte
for r := bytes.NewReader(snapshot); ; {
n, err := r.Read(chunk[:])
if err != nil {
break
}
nc.Request(rresp.DeliverSubject, chunk[:n], time.Second)
}
nc.Request(rresp.DeliverSubject, nil, time.Second)
mset, err = acc.LookupStream(mname)
if err != nil {
t.Fatalf("Expected to find a stream for %q", mname)
}
if !reflect.DeepEqual(mset.State(), state) {
t.Fatalf("Did not match states, %+v vs %+v", mset.State(), state)
}
// Now ask that the stream be checked first.
sreq.ChunkSize = 0
sreq.CheckMsgs = true
snapshot = snapshot[:0]
req, _ = json.Marshal(sreq)
if _, err = nc.Request(fmt.Sprintf(server.JSApiStreamSnapshotT, mname), req, 5*time.Second); err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
// Wait to receive the snapshot.
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatalf("Did not receive our snapshot in time")
}
// Now connect through the leafnode server and make sure we can get things to work this way as well.
nc2 := clientConnectToServer(t, ls)
defer nc2.Close()
snapshot = snapshot[:0]
req, _ = json.Marshal(sreq)
rmsg, err = nc2.Request(fmt.Sprintf(server.JSApiStreamSnapshotT, mname), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
resp.Error = nil
json.Unmarshal(rmsg.Data, &resp)
if resp.Error != nil {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
// Wait to receive the snapshot.
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatalf("Did not receive our snapshot in time")
}
// Now do a restore through the new client connection.
// Delete this stream first.
mset, err = acc.LookupStream(mname)
if err != nil {
t.Fatalf("Expected to find a stream for %q", mname)
}
state = mset.State()
mset.Delete()
rmsg, err = nc2.Request(fmt.Sprintf(server.JSApiStreamRestoreT, mname), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
// Make sure to clear.
rresp.Error = nil
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error != nil {
t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
}
// Make sure that when we send something without a reply subject the restore subscription is shut off.
r := bytes.NewReader(snapshot)
n, _ := r.Read(chunk[:])
nc2.Publish(rresp.DeliverSubject, chunk[:n])
nc2.Flush()
n, _ = r.Read(chunk[:])
if _, err := nc2.Request(rresp.DeliverSubject, chunk[:n], 100*time.Millisecond); err == nil {
t.Fatalf("Expected restore subscription to be closed")
}
rmsg, err = nc2.Request(fmt.Sprintf(server.JSApiStreamRestoreT, mname), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
// Make sure to clear.
rresp.Error = nil
json.Unmarshal(rmsg.Data, &rresp)
if rresp.Error != nil {
t.Fatalf("Got an unexpected error response: %+v", rresp.Error)
}
for r := bytes.NewReader(snapshot); ; {
n, err := r.Read(chunk[:])
if err != nil {
break
}
// Make sure other side responds to reply subjects for ack flow. Optional.
if _, err := nc2.Request(rresp.DeliverSubject, chunk[:n], time.Second); err != nil {
t.Fatalf("Restore not honoring reply subjects for ack flow")
}
}
// For EOF this will send back stream info or an error.
si, err := nc2.Request(rresp.DeliverSubject, nil, time.Second)
if err != nil {
t.Fatalf("Got an error restoring stream: %v", err)
}
var scResp server.JSApiStreamCreateResponse
if err := json.Unmarshal(si.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.Error != nil {
t.Fatalf("Got an unexpected error from EOF omn restore: %+v", scResp.Error)
}
if !reflect.DeepEqual(scResp.StreamInfo.State, state) {
t.Fatalf("Did not match states, %+v vs %+v", scResp.StreamInfo.State, state)
}
}
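// TestJetStreamSnapshotsAPIPerf is a manual performance check for the snapshot API;
// it is skipped by default and reports the throughput of receiving a snapshot.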
func TestJetStreamSnapshotsAPIPerf(t *testing.T) {
// Comment out to run, holding place for now.
t.SkipNow()
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
cfg := server.StreamConfig{
Name: "snap-perf",
Storage: server.FileStorage,
}
acc := s.GlobalAccount()
if _, err := acc.AddStream(&cfg); err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
msg := make([]byte, 128*1024)
// If we don't give gzip some random data, it will spend too much time compressing everything down to nothing.
rand.Read(msg)
for i := 0; i < 10000; i++ {
nc.Publish("snap-perf", msg)
}
nc.Flush()
sreq := &server.JSApiStreamSnapshotRequest{DeliverSubject: nats.NewInbox()}
req, _ := json.Marshal(sreq)
rmsg, err := nc.Request(fmt.Sprintf(server.JSApiStreamSnapshotT, "snap-perf"), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error on snapshot request: %v", err)
}
var resp server.JSApiStreamSnapshotResponse
json.Unmarshal(rmsg.Data, &resp)
if resp.Error != nil {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
done := make(chan bool)
total := 0
sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) {
// EOF
if len(m.Data) == 0 {
m.Sub.Unsubscribe()
done <- true
return
}
// We don't do anything with the snapshot, just take
// note of the size.
total += len(m.Data)
// Flow ack
m.Respond(nil)
})
defer sub.Unsubscribe()
start := time.Now()
// Wait to receive the snapshot.
select {
case <-done:
case <-time.After(30 * time.Second):
t.Fatalf("Did not receive our snapshot in time")
}
td := time.Since(start)
fmt.Printf("Received %d bytes in %v\n", total, td)
fmt.Printf("Rate %.0f MB/s\n", float64(total)/td.Seconds()/(1024*1024))
}
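// TestJetStreamActiveDelivery checks that a push consumer created without active
// interest starts delivering as soon as a subscription on its delivery subject shows up.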
func TestJetStreamActiveDelivery(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "ADS", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "ADS", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil && config.StoreDir != "" {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Now load up some messages.
toSend := 100
sendSubj := "foo.22"
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, sendSubj, "Hello World!")
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs)
}
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "to", DeliverSubject: "d"})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
// We have no active interest above, so the consumer will be considered inactive. Let's subscribe and make sure
// we get the messages instantly. This will test that we hook interest activation correctly.
sub, _ := nc.SubscribeSync("d")
defer sub.Unsubscribe()
nc.Flush()
checkFor(t, 100*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
})
}
}
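// TestJetStreamEphemeralConsumers verifies the ephemeral consumer lifecycle: active while
// the delivery subscription exists, marked inactive when it goes away, and removed after
// the inactive delete threshold.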
func TestJetStreamEphemeralConsumers(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "EP", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "EP", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !o.Active() {
t.Fatalf("Expected the consumer to be considered active")
}
if numo := mset.NumConsumers(); numo != 1 {
t.Fatalf("Expected number of consumers to be 1, got %d", numo)
}
// Set our delete threshold to something low for testing purposes.
o.SetInActiveDeleteThreshold(100 * time.Millisecond)
// Make sure works now.
nc.Request("foo.22", nil, 100*time.Millisecond)
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 1 {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, 1)
}
return nil
})
// Now close the subscription, this should trip active state on the ephemeral consumer.
sub.Unsubscribe()
checkFor(t, time.Second, 10*time.Millisecond, func() error {
if o.Active() {
return fmt.Errorf("Expected the ephemeral consumer to be considered inactive")
}
return nil
})
// The reason for this still being 1 is that we give some time in case of a reconnect scenario.
// We detect right away on the interest change but we wait for interest to be re-established.
// This is in case server goes away but app is fine, we do not want to recycle those consumers.
if numo := mset.NumConsumers(); numo != 1 {
t.Fatalf("Expected number of consumers to be 1, got %d", numo)
}
// We should delete this one after the delete threshold.
checkFor(t, time.Second, 100*time.Millisecond, func() error {
if numo := mset.NumConsumers(); numo != 0 {
return fmt.Errorf("Expected number of consumers to be 0, got %d", numo)
}
return nil
})
})
}
}
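// TestJetStreamConsumerReconnect simulates a subscriber dropping and re-establishing its
// delivery subscription and checks that delivery resumes at the correct sequence.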
func TestJetStreamConsumerReconnect(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "ET", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "ET", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
// Capture the subscription.
delivery := sub.Subject
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery, AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !o.Active() {
t.Fatalf("Expected the consumer to be considered active")
}
if numo := mset.NumConsumers(); numo != 1 {
t.Fatalf("Expected number of consumers to be 1, got %d", numo)
}
// We will simulate a reconnect by unsubscribing and then re-subscribing on the
// same delivery subject. Once we have cluster tests we will do more testing on
// reconnect scenarios.
getMsg := func(seqno int) *nats.Msg {
t.Helper()
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error for %d: %v", seqno, err)
}
if seq := o.SeqFromReply(m.Reply); seq != uint64(seqno) {
t.Fatalf("Expected sequence of %d , got %d", seqno, seq)
}
m.Respond(nil)
return m
}
sendMsg := func() {
t.Helper()
if err := nc.Publish("foo.22", []byte("OK!")); err != nil {
return
}
}
checkForInActive := func() {
checkFor(t, 250*time.Millisecond, 50*time.Millisecond, func() error {
if o.Active() {
return fmt.Errorf("Consumer is still active")
}
return nil
})
}
// Send and Pull first message.
sendMsg() // 1
getMsg(1)
// Cancel first one.
sub.Unsubscribe()
// Re-establish new sub on same subject.
sub, _ = nc.SubscribeSync(delivery)
nc.Flush()
// We should be getting 2 here.
sendMsg() // 2
getMsg(2)
sub.Unsubscribe()
checkForInActive()
// send 3-10
for i := 0; i <= 7; i++ {
sendMsg()
}
// Make sure they are all queued up with no interest.
nc.Flush()
// Re-establish the subscription again.
sub, _ = nc.SubscribeSync(delivery)
nc.Flush()
// We should be getting 3-10 here.
for i := 3; i <= 10; i++ {
getMsg(i)
}
})
}
}
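// TestJetStreamDurableConsumerReconnect verifies that a durable consumer can be re-created
// with a new delivery subject once it has gone inactive, and that the remaining unacked
// messages are then delivered to the new subscription.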
func TestJetStreamDurableConsumerReconnect(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DT", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "DT", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
dname := "d22"
subj1 := nats.NewInbox()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: dname,
DeliverSubject: subj1,
AckPolicy: server.AckExplicit,
AckWait: 50 * time.Millisecond})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendMsg := func() {
t.Helper()
if err := nc.Publish("foo.22", []byte("OK!")); err != nil {
return
}
}
// Send 10 msgs
toSend := 10
for i := 0; i < toSend; i++ {
sendMsg()
}
sub, _ := nc.SubscribeSync(subj1)
defer sub.Unsubscribe()
checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
getMsg := func(seqno int) *nats.Msg {
t.Helper()
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if seq := o.StreamSeqFromReply(m.Reply); seq != uint64(seqno) {
t.Fatalf("Expected sequence of %d , got %d", seqno, seq)
}
m.Respond(nil)
return m
}
// Ack first half
for i := 1; i <= toSend/2; i++ {
m := getMsg(i)
m.Respond(nil)
}
// Now unsubscribe and wait to become inactive
sub.Unsubscribe()
checkFor(t, 250*time.Millisecond, 50*time.Millisecond, func() error {
if o.Active() {
return fmt.Errorf("Consumer is still active")
}
return nil
})
// Now we should be able to replace the delivery subject.
subj2 := nats.NewInbox()
sub, _ = nc.SubscribeSync(subj2)
defer sub.Unsubscribe()
nc.Flush()
o, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: dname,
DeliverSubject: subj2,
AckPolicy: server.AckExplicit,
AckWait: 50 * time.Millisecond})
if err != nil {
t.Fatalf("Unexpected error trying to add a new durable consumer: %v", err)
}
// We should get the remaining messages here.
for i := toSend/2 + 1; i <= toSend; i++ {
m := getMsg(i)
m.Respond(nil)
}
})
}
}
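// TestJetStreamDurableConsumerReconnectWithOnlyPending covers a durable consumer that
// reconnects with a new delivery subject while it has both a pending (unacked) message
// and a message that was published while it was offline.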
func TestJetStreamDurableConsumerReconnectWithOnlyPending(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DT", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "DT", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
dname := "d22"
subj1 := nats.NewInbox()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: dname,
DeliverSubject: subj1,
AckPolicy: server.AckExplicit,
AckWait: 25 * time.Millisecond})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sendMsg := func(payload string) {
t.Helper()
if err := nc.Publish("foo.22", []byte(payload)); err != nil {
return
}
}
sendMsg("1")
sub, _ := nc.SubscribeSync(subj1)
defer sub.Unsubscribe()
checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 1 {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, 1)
}
return nil
})
// Now unsubscribe and wait to become inactive
sub.Unsubscribe()
checkFor(t, 250*time.Millisecond, 50*time.Millisecond, func() error {
if o.Active() {
return fmt.Errorf("Consumer is still active")
}
return nil
})
// Send the second message while the delivery subscriber is not running.
sendMsg("2")
// Now we should be able to replace the delivery subject.
subj2 := nats.NewInbox()
o, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: dname,
DeliverSubject: subj2,
AckPolicy: server.AckExplicit,
AckWait: 25 * time.Millisecond})
if err != nil {
t.Fatalf("Unexpected error trying to add a new durable consumer: %v", err)
}
sub, _ = nc.SubscribeSync(subj2)
defer sub.Unsubscribe()
nc.Flush()
// We should get msg "1" and "2" delivered. They will be reversed.
for i := 0; i < 2; i++ {
msg, err := sub.NextMsg(500 * time.Millisecond)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sseq, _, dc, _, _ := o.ReplyInfo(msg.Reply)
if sseq == 1 && dc == 1 {
t.Fatalf("Expected a redelivery count greater then 1 for sseq 1, got %d", dc)
}
if sseq != 1 && sseq != 2 {
t.Fatalf("Expected stream sequence of 1 or 2 but got %d", sseq)
}
}
})
}
}
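// TestJetStreamDurableFilteredSubjectConsumerReconnect exercises a durable consumer with a
// filtered subject and DeliverLast policy, checking consumer vs stream sequences and the
// redelivery of unacked messages as more filtered messages are added.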
func TestJetStreamDurableFilteredSubjectConsumerReconnect(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DT", Storage: server.MemoryStorage, Subjects: []string{"foo.*"}}},
{"FileStore", &server.StreamConfig{Name: "DT", Storage: server.FileStorage, Subjects: []string{"foo.*"}}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
sendMsgs := func(toSend int) {
for i := 0; i < toSend; i++ {
var subj string
if i%2 == 0 {
subj = "foo.AA"
} else {
subj = "foo.ZZ"
}
if err := nc.Publish(subj, []byte("OK!")); err != nil {
return
}
}
nc.Flush()
}
// Send 50 msgs
toSend := 50
sendMsgs(toSend)
dname := "d33"
dsubj := nats.NewInbox()
// Now create a consumer for foo.AA, only requesting the last one.
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: dname,
DeliverSubject: dsubj,
FilterSubject: "foo.AA",
DeliverPolicy: server.DeliverLast,
AckPolicy: server.AckExplicit,
AckWait: 100 * time.Millisecond,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sub, _ := nc.SubscribeSync(dsubj)
defer sub.Unsubscribe()
// Used to calculate difference between store seq and delivery seq.
storeBaseOff := 47
getMsg := func(seq int) *nats.Msg {
t.Helper()
sseq := 2*seq + storeBaseOff
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rsseq, roseq, dcount, _, _ := o.ReplyInfo(m.Reply)
if roseq != uint64(seq) {
t.Fatalf("Expected consumer sequence of %d , got %d", seq, roseq)
}
if rsseq != uint64(sseq) {
t.Fatalf("Expected stream sequence of %d , got %d", sseq, rsseq)
}
if dcount != 1 {
t.Fatalf("Expected message to not be marked as redelivered")
}
return m
}
getRedeliveredMsg := func(seq int) *nats.Msg {
t.Helper()
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
_, roseq, dcount, _, _ := o.ReplyInfo(m.Reply)
if roseq != uint64(seq) {
t.Fatalf("Expected consumer sequence of %d , got %d", seq, roseq)
}
if dcount < 2 {
t.Fatalf("Expected message to be marked as redelivered")
}
// Ack this message.
m.Respond(nil)
return m
}
// All consumers start at 1 and always have increasing sequence numbers.
m := getMsg(1)
m.Respond(nil)
// Now send 50 more, so 100 total, 26 (last + 50/2) for this consumer.
sendMsgs(toSend)
state := mset.State()
if state.Msgs != uint64(toSend*2) {
t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs)
}
// For tracking next expected.
nextSeq := 2
noAcks := 0
for i := 0; i < toSend/2; i++ {
m := getMsg(nextSeq)
if i%2 == 0 {
m.Respond(nil) // Ack evens.
} else {
noAcks++
}
nextSeq++
}
// We should now get those redelivered.
for i := 0; i < noAcks; i++ {
getRedeliveredMsg(nextSeq)
nextSeq++
}
// Now send 50 more.
sendMsgs(toSend)
storeBaseOff -= noAcks * 2
for i := 0; i < toSend/2; i++ {
m := getMsg(nextSeq)
m.Respond(nil)
nextSeq++
}
})
}
}
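// TestJetStreamConsumerInactiveNoDeadlock makes sure that dropping interest while many
// messages are still queued for delivery does not lock up the internal send queue.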
func TestJetStreamConsumerInactiveNoDeadlock(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send lots of msgs and have them queued up.
for i := 0; i < 10000; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
if state := mset.State(); state.Msgs != 10000 {
t.Fatalf("Expected %d messages, got %d", 10000, state.Msgs)
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
sub.SetPendingLimits(-1, -1)
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
for i := 0; i < 10; i++ {
if _, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
// Force us to become inactive but we want to make sure we do not lock up
// the internal sendq.
sub.Unsubscribe()
nc.Flush()
})
}
}
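// TestJetStreamMetadata validates the metadata encoded in ack reply subjects (stream and
// consumer sequence, delivery count, timestamp) against the stored message, and the error
// returned when fetching a message that no longer exists.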
func TestJetStreamMetadata(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Retention: server.WorkQueuePolicy, Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Retention: server.WorkQueuePolicy, Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
for i := 0; i < 10; i++ {
nc.Publish("DC", []byte("OK!"))
nc.Flush()
time.Sleep(time.Millisecond)
}
if state := mset.State(); state.Msgs != 10 {
t.Fatalf("Expected %d messages, got %d", 10, state.Msgs)
}
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
for i := uint64(1); i <= 10; i++ {
m, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sseq, dseq, dcount, ts, _ := o.ReplyInfo(m.Reply)
mreq := &server.JSApiMsgGetRequest{Seq: sseq}
req, err := json.Marshal(mreq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Load the original message from the stream to verify ReplyInfo ts against stored message
smsgj, err := nc.Request(fmt.Sprintf(server.JSApiMsgGetT, c.mconfig.Name), req, time.Second)
if err != nil {
t.Fatalf("Could not retrieve stream message: %v", err)
}
var resp server.JSApiMsgGetResponse
err = json.Unmarshal(smsgj.Data, &resp)
if err != nil {
t.Fatalf("Could not parse stream message: %v", err)
}
if resp.Message == nil || resp.Error != nil {
t.Fatalf("Did not receive correct response")
}
smsg := resp.Message
if ts != smsg.Time.UnixNano() {
t.Fatalf("Wrong timestamp in ReplyInfo for msg %d, expected %v got %v", i, ts, smsg.Time.UnixNano())
}
if sseq != i {
t.Fatalf("Expected set sequence of %d, got %d", i, sseq)
}
if dseq != i {
t.Fatalf("Expected delivery sequence of %d, got %d", i, dseq)
}
if dcount != 1 {
t.Fatalf("Expected delivery count to be 1, got %d", dcount)
}
m.Respond(server.AckAck)
}
// Now make sure we get right response when message is missing.
mreq := &server.JSApiMsgGetRequest{Seq: 1}
req, err := json.Marshal(mreq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Load the original message from the stream to verify ReplyInfo ts against stored message
rmsg, err := nc.Request(fmt.Sprintf(server.JSApiMsgGetT, c.mconfig.Name), req, time.Second)
if err != nil {
t.Fatalf("Could not retrieve stream message: %v", err)
}
var resp server.JSApiMsgGetResponse
err = json.Unmarshal(rmsg.Data, &resp)
if err != nil {
t.Fatalf("Could not parse stream message: %v", err)
}
if resp.Error == nil || resp.Error.Code != 500 || resp.Error.Description != "no message found" {
t.Fatalf("Did not get correct error response: %+v", resp.Error)
}
})
}
}
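// TestJetStreamRedeliverCount checks that repeatedly naking the same message keeps
// redelivering it with an increasing delivery count.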
func TestJetStreamRedeliverCount(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 10 msgs
for i := 0; i < 10; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
if state := mset.State(); state.Msgs != 10 {
t.Fatalf("Expected %d messages, got %d", 10, state.Msgs)
}
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
for i := uint64(1); i <= 10; i++ {
m, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sseq, dseq, dcount, _, _ := o.ReplyInfo(m.Reply)
// Make sure we keep getting stream sequence #1
if sseq != 1 {
t.Fatalf("Expected set sequence of 1, got %d", sseq)
}
if dseq != i {
t.Fatalf("Expected delivery sequence of %d, got %d", i, dseq)
}
// Now make sure dcount is same as dseq (or i).
if dcount != i {
t.Fatalf("Expected delivery count to be %d, got %d", i, dcount)
}
// Make sure it keeps getting sent back.
m.Respond(server.AckNak)
}
})
}
}
// We want to make sure that for pull based consumers, if we ack
// late with no interest, the redelivery attempt is removed and we do
// not get the message back.
func TestJetStreamRedeliverAndLateAck(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
// Forced cleanup of all persisted state.
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "LA", Storage: server.MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
o, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "DDD", AckPolicy: server.AckExplicit, AckWait: 100 * time.Millisecond})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Queue up message
sendStreamMsg(t, nc, "LA", "Hello World!")
nextSubj := o.RequestNextMsgSubject()
msg, err := nc.Request(nextSubj, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Wait for past ackwait time
time.Sleep(150 * time.Millisecond)
// Now ack!
msg.Respond(nil)
// We should not get this back.
if _, err := nc.Request(nextSubj, nil, 10*time.Millisecond); err == nil {
t.Fatalf("Message should not have been sent back")
}
}
// https://github.com/nats-io/nats-server/issues/1502
func TestJetStreamPendingNextTimer(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
// Forced cleanup of all persisted state.
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "NT", Storage: server.MemoryStorage, Subjects: []string{"ORDERS.*"}})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "DDD",
AckPolicy: server.AckExplicit,
FilterSubject: "ORDERS.test",
AckWait: 100 * time.Millisecond,
})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
sendAndReceive := func() {
nc := clientConnectToServer(t, s)
defer nc.Close()
// Queue up message
sendStreamMsg(t, nc, "ORDERS.test", "Hello World! #1")
sendStreamMsg(t, nc, "ORDERS.test", "Hello World! #2")
nextSubj := o.RequestNextMsgSubject()
for i := 0; i < 2; i++ {
if _, err := nc.Request(nextSubj, nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
nc.Close()
time.Sleep(200 * time.Millisecond)
}
sendAndReceive()
sendAndReceive()
sendAndReceive()
}
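// TestJetStreamCanNotNakAckd verifies that NAK'ing a message that has already been
// acked, or one that was never delivered, does not trigger a redelivery.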
func TestJetStreamCanNotNakAckd(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 10 msgs
for i := 0; i < 10; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
if state := mset.State(); state.Msgs != 10 {
t.Fatalf("Expected %d messages, got %d", 10, state.Msgs)
}
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
for i := uint64(1); i <= 10; i++ {
m, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Ack evens.
if i%2 == 0 {
m.Respond(nil)
}
}
nc.Flush()
// Fake these for now.
ackReplyT := "$JS.A.DC.WQ.1.%d.%d"
checkBadNak := func(seq int) {
t.Helper()
if err := nc.Publish(fmt.Sprintf(ackReplyT, seq, seq), server.AckNak); err != nil {
t.Fatalf("Error sending nak: %v", err)
}
nc.Flush()
if _, err := nc.Request(o.RequestNextMsgSubject(), nil, 10*time.Millisecond); err != nats.ErrTimeout {
t.Fatalf("Did not expect new delivery on nak of %d", seq)
}
}
// If the nak took action it will deliver another message, incrementing the next delivery seq.
// We ack evens above, so these should fail
for i := 2; i <= 10; i += 2 {
checkBadNak(i)
}
// Now check we can not nak something we do not have.
checkBadNak(22)
})
}
}
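// TestJetStreamStreamPurge verifies that purging a stream removes all messages and
// resets the first/last timestamps for subsequently stored messages.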
func TestJetStreamStreamPurge(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 100 msgs
for i := 0; i < 100; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
if state := mset.State(); state.Msgs != 100 {
t.Fatalf("Expected %d messages, got %d", 100, state.Msgs)
}
mset.Purge()
state := mset.State()
if state.Msgs != 0 {
t.Fatalf("Expected %d messages, got %d", 0, state.Msgs)
}
// Make sure the first timestamp is reset.
if !state.FirstTime.IsZero() {
t.Fatalf("Expected the state's first time to be zero after purge")
}
time.Sleep(10 * time.Millisecond)
now := time.Now()
nc.Publish("DC", []byte("OK!"))
nc.Flush()
state = mset.State()
if state.Msgs != 1 {
t.Fatalf("Expected %d message, got %d", 1, state.Msgs)
}
if state.FirstTime.Before(now) {
t.Fatalf("First time is incorrect after adding messages back in")
}
if state.FirstTime != state.LastTime {
t.Fatalf("Expected first and last times to be the same for only message")
}
})
}
}
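// TestJetStreamStreamPurgeWithConsumer verifies that purging a stream clears a
// consumer's pending messages and advances its delivered and ack floor sequences,
// while new messages can still be delivered afterwards.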
func TestJetStreamStreamPurgeWithConsumer(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 100 msgs
for i := 0; i < 100; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
if state := mset.State(); state.Msgs != 100 {
t.Fatalf("Expected %d messages, got %d", 100, state.Msgs)
}
// Now create a consumer and make sure it functions properly.
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
nextSubj := o.RequestNextMsgSubject()
for i := 0; i < 50; i++ {
msg, err := nc.Request(nextSubj, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Ack.
msg.Respond(nil)
}
// Now grab next 25 without ack.
for i := 0; i < 25; i++ {
if _, err := nc.Request(nextSubj, nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
state := o.Info()
if state.AckFloor.Consumer != 50 {
t.Fatalf("Expected ack floor of 50, got %d", state.AckFloor.Consumer)
}
if state.NumAckPending != 25 {
t.Fatalf("Expected len(pending) to be 25, got %d", state.NumAckPending)
}
// Now do purge.
mset.Purge()
if state := mset.State(); state.Msgs != 0 {
t.Fatalf("Expected %d messages, got %d", 0, state.Msgs)
}
// Now re-acquire state and check that we did the right thing.
// Pending should be cleared, and stream sequences should have been set
// to the total messages before purge + 1.
state = o.Info()
if state.NumAckPending != 0 {
t.Fatalf("Expected no pending, got %d", state.NumAckPending)
}
if state.Delivered.Stream != 100 {
t.Fatalf("Expected to have setseq now at next seq of 100, got %d", state.Delivered.Stream)
}
// Check AckFloors which should have also been adjusted.
if state.AckFloor.Stream != 100 {
t.Fatalf("Expected ackfloor for setseq to be 100, got %d", state.AckFloor.Stream)
}
if state.AckFloor.Consumer != 75 {
t.Fatalf("Expected ackfloor for obsseq to be 75, got %d", state.AckFloor.Consumer)
}
// Also make sure we can get new messages correctly.
nc.Request("DC", []byte("OK-22"), time.Second)
if msg, err := nc.Request(nextSubj, nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else if string(msg.Data) != "OK-22" {
t.Fatalf("Received wrong message, wanted 'OK-22', got %q", msg.Data)
}
})
}
}
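// TestJetStreamStreamPurgeWithConsumerAndRedelivery is like the test above but makes
// sure purge also clears messages that were in a redelivered (un-acked) state.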
func TestJetStreamStreamPurgeWithConsumerAndRedelivery(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 100 msgs
for i := 0; i < 100; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
if state := mset.State(); state.Msgs != 100 {
t.Fatalf("Expected %d messages, got %d", 100, state.Msgs)
}
// Now create a consumer and make sure it functions properly.
// This will test redelivery state and purge of the stream.
wcfg := &server.ConsumerConfig{
Durable: "WQ",
AckPolicy: server.AckExplicit,
AckWait: 20 * time.Millisecond,
}
o, err := mset.AddConsumer(wcfg)
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
nextSubj := o.RequestNextMsgSubject()
for i := 0; i < 50; i++ {
// Do not ack these.
if _, err := nc.Request(nextSubj, nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
// Now wait to make sure we are in a redelivered state.
time.Sleep(wcfg.AckWait * 2)
// Now do purge.
mset.Purge()
if state := mset.State(); state.Msgs != 0 {
t.Fatalf("Expected %d messages, got %d", 0, state.Msgs)
}
// Now get the state and check that we did the right thing.
// Pending should be cleared, and stream sequences should have been set
// to the total messages before purge + 1.
state := o.Info()
if state.NumAckPending != 0 {
t.Fatalf("Expected no pending, got %d", state.NumAckPending)
}
if state.Delivered.Stream != 100 {
t.Fatalf("Expected to have setseq now at next seq of 100, got %d", state.Delivered.Stream)
}
// Check AckFloors which should have also been adjusted.
if state.AckFloor.Stream != 100 {
t.Fatalf("Expected ackfloor for setseq to be 100, got %d", state.AckFloor.Stream)
}
if state.AckFloor.Consumer != 50 {
t.Fatalf("Expected ackfloor for obsseq to be 75, got %d", state.AckFloor.Consumer)
}
// Also make sure we can get new messages correctly.
nc.Request("DC", []byte("OK-22"), time.Second)
if msg, err := nc.Request(nextSubj, nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else if string(msg.Data) != "OK-22" {
t.Fatalf("Received wrong message, wanted 'OK-22', got %q", msg.Data)
}
})
}
}
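// TestJetStreamInterestRetentionStream verifies interest-based retention: messages are
// only retained while there is consumer interest, and are removed once every consumer
// has acked them according to its ack policy (explicit, all, or none).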
func TestJetStreamInterestRetentionStream(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage, Retention: server.InterestPolicy}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage, Retention: server.InterestPolicy}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 100 msgs
totalMsgs := 100
for i := 0; i < totalMsgs; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
checkNumMsgs := func(numExpected int) {
t.Helper()
if state := mset.State(); state.Msgs != uint64(numExpected) {
t.Fatalf("Expected %d messages, got %d", numExpected, state.Msgs)
}
}
// Since we had no interest this should be 0.
checkNumMsgs(0)
syncSub := func() *nats.Subscription {
sub, _ := nc.SubscribeSync(nats.NewInbox())
nc.Flush()
return sub
}
// Now create three consumers.
// 1. AckExplicit
// 2. AckAll
// 3. AckNone
sub1 := syncSub()
mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub1.Subject, AckPolicy: server.AckExplicit})
sub2 := syncSub()
mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub2.Subject, AckPolicy: server.AckAll})
sub3 := syncSub()
mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub3.Subject, AckPolicy: server.AckNone})
for i := 0; i < totalMsgs; i++ {
nc.Publish("DC", []byte("OK!"))
}
nc.Flush()
checkNumMsgs(totalMsgs)
// Wait for all messages to be pending for each sub.
for i, sub := range []*nats.Subscription{sub1, sub2, sub3} {
checkFor(t, 500*time.Millisecond, 25*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); nmsgs != totalMsgs {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d for sub %d", nmsgs, totalMsgs, i+1)
}
return nil
})
}
getAndAck := func(sub *nats.Subscription) {
t.Helper()
if m, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else {
m.Respond(nil)
}
nc.Flush()
}
// Ack evens for the explicit ack sub.
var odds []*nats.Msg
for i := 1; i <= totalMsgs; i++ {
if m, err := sub1.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else if i%2 == 0 {
m.Respond(nil) // Ack evens.
} else {
odds = append(odds, m)
}
}
nc.Flush()
checkNumMsgs(totalMsgs)
// Now ack first for AckAll sub2
getAndAck(sub2)
// We should be at the same number since we acked 1, explicit acked 2
checkNumMsgs(totalMsgs)
// Now ack second for AckAll sub2
getAndAck(sub2)
// We should now have 1 removed.
checkNumMsgs(totalMsgs - 1)
// Now ack third for AckAll sub2
getAndAck(sub2)
// We should still only have 1 removed.
checkNumMsgs(totalMsgs - 1)
// Now ack odds from explicit.
for _, m := range odds {
m.Respond(nil) // Ack
}
nc.Flush()
// we should have 1, 2, 3 acks now.
checkNumMsgs(totalMsgs - 3)
nm, _, _ := sub2.Pending()
// Now ack last ackAll message. This should clear all of them.
for i := 1; i <= nm; i++ {
if m, err := sub2.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else if i == nm {
m.Respond(nil)
}
}
nc.Flush()
// Should be zero now.
checkNumMsgs(0)
})
}
}
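// TestJetStreamInterestRetentionWithWildcardsAndFilteredConsumers verifies that with
// interest retention and a filtered consumer, only messages matching the filter subject
// are retained; messages on other subjects with no interest are dropped.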
func TestJetStreamInterestRetentionWithWildcardsAndFilteredConsumers(t *testing.T) {
msc := server.StreamConfig{
Name: "DCWC",
Subjects: []string{"foo.*"},
Storage: server.MemoryStorage,
Retention: server.InterestPolicy,
}
fsc := msc
fsc.Storage = server.FileStorage
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &msc},
{"FileStore", &fsc},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 10 msgs
for i := 0; i < 10; i++ {
sendStreamMsg(t, nc, "foo.bar", "Hello World!")
}
if state := mset.State(); state.Msgs != 0 {
t.Fatalf("Expected %d messages, got %d", 0, state.Msgs)
}
cfg := &server.ConsumerConfig{Durable: "ddd", FilterSubject: "foo.bar", AckPolicy: server.AckExplicit}
o, err := mset.AddConsumer(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
sendStreamMsg(t, nc, "foo.bar", "Hello World!")
if state := mset.State(); state.Msgs != 1 {
t.Fatalf("Expected %d message, got %d", 1, state.Msgs)
} else if state.FirstSeq != 11 {
t.Fatalf("Expected %d for first seq, got %d", 11, state.FirstSeq)
}
// Now send to foo.baz, which has no interest, so we should not hold onto this message.
sendStreamMsg(t, nc, "foo.baz", "Hello World!")
if state := mset.State(); state.Msgs != 1 {
t.Fatalf("Expected %d message, got %d", 1, state.Msgs)
}
})
}
}
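// TestJetStreamInterestRetentionStreamWithDurableRestart verifies that a durable
// consumer on an interest retention stream can be stopped and restarted, that messages
// accumulate while it is offline, and that deleting consumers releases their interest.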
func TestJetStreamInterestRetentionStreamWithDurableRestart(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "IK", Storage: server.MemoryStorage, Retention: server.InterestPolicy}},
{"FileStore", &server.StreamConfig{Name: "IK", Storage: server.FileStorage, Retention: server.InterestPolicy}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
checkNumMsgs := func(numExpected int) {
t.Helper()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if state := mset.State(); state.Msgs != uint64(numExpected) {
return fmt.Errorf("Expected %d messages, got %d", numExpected, state.Msgs)
}
return nil
})
}
nc := clientConnectToServer(t, s)
defer nc.Close()
sub, _ := nc.SubscribeSync(nats.NewInbox())
nc.Flush()
cfg := &server.ConsumerConfig{Durable: "ivan", DeliverPolicy: server.DeliverNew, DeliverSubject: sub.Subject, AckPolicy: server.AckNone}
o, _ := mset.AddConsumer(cfg)
sendStreamMsg(t, nc, "IK", "M1")
sendStreamMsg(t, nc, "IK", "M2")
checkSubPending := func(numExpected int) {
t.Helper()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
checkSubPending(2)
checkNumMsgs(0)
// Now stop the subscription.
sub.Unsubscribe()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if o.Active() {
return fmt.Errorf("Still active consumer")
}
return nil
})
sendStreamMsg(t, nc, "IK", "M3")
sendStreamMsg(t, nc, "IK", "M4")
checkNumMsgs(2)
// Now restart the durable.
sub, _ = nc.SubscribeSync(nats.NewInbox())
nc.Flush()
cfg.DeliverSubject = sub.Subject
if o, err = mset.AddConsumer(cfg); err != nil {
t.Fatalf("Error re-establishing the durable consumer: %v", err)
}
checkSubPending(2)
for _, expected := range []string{"M3", "M4"} {
if m, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else if string(m.Data) != expected {
t.Fatalf("Expected %q, got %q", expected, m.Data)
}
}
// Should all be gone now.
checkNumMsgs(0)
// Now restart again and make sure we do not get any messages.
sub.Unsubscribe()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if o.Active() {
return fmt.Errorf("Still active consumer")
}
return nil
})
o.Delete()
sub, _ = nc.SubscribeSync(nats.NewInbox())
nc.Flush()
cfg.DeliverSubject = sub.Subject
cfg.AckPolicy = server.AckExplicit // Set ack
if o, err = mset.AddConsumer(cfg); err != nil {
t.Fatalf("Error re-establishing the durable consumer: %v", err)
}
time.Sleep(100 * time.Millisecond)
checkSubPending(0)
checkNumMsgs(0)
// Now queue up some messages.
for i := 1; i <= 10; i++ {
sendStreamMsg(t, nc, "IK", fmt.Sprintf("M%d", i))
}
checkNumMsgs(10)
checkSubPending(10)
// Create second consumer
sub2, _ := nc.SubscribeSync(nats.NewInbox())
nc.Flush()
cfg.DeliverSubject = sub2.Subject
cfg.Durable = "derek"
o2, err := mset.AddConsumer(cfg)
if err != nil {
t.Fatalf("Error creating second durable consumer: %v", err)
}
// Now queue up some messages.
for i := 11; i <= 20; i++ {
sendStreamMsg(t, nc, "IK", fmt.Sprintf("M%d", i))
}
checkNumMsgs(20)
checkSubPending(20)
// Now make sure deleting the consumers will remove messages from
// the stream since we are interest retention based.
o.Delete()
checkNumMsgs(10)
o2.Delete()
checkNumMsgs(0)
})
}
}
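// TestJetStreamConsumerReplayRate verifies ReplayOriginal: messages are replayed with
// roughly the same inter-message gaps as when they were originally published, for both
// push and pull based consumers.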
func TestJetStreamConsumerReplayRate(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 10 msgs
totalMsgs := 10
var gaps []time.Duration
lst := time.Now()
for i := 0; i < totalMsgs; i++ {
gaps = append(gaps, time.Since(lst))
lst = time.Now()
nc.Publish("DC", []byte("OK!"))
// Calculate a gap between messages.
gap := 10*time.Millisecond + time.Duration(rand.Intn(20))*time.Millisecond
time.Sleep(gap)
}
if state := mset.State(); state.Msgs != uint64(totalMsgs) {
t.Fatalf("Expected %d messages, got %d", totalMsgs, state.Msgs)
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
// Firehose/instant which is default.
last := time.Now()
for i := 0; i < totalMsgs; i++ {
if _, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
now := time.Now()
// Delivery from AddConsumer starts in a goroutine, so be
// more tolerant for the first message.
limit := 5 * time.Millisecond
if i == 0 {
limit = 10 * time.Millisecond
}
if now.Sub(last) > limit {
t.Fatalf("Expected firehose/instant delivery, got message gap of %v", now.Sub(last))
}
last = now
}
// Now do replay rate to match original.
o, err = mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject, ReplayPolicy: server.ReplayOriginal})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
// Messages should be received at the original rate for the push-based consumer.
for i := 0; i < totalMsgs; i++ {
start := time.Now()
if _, err := sub.NextMsg(time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
gap := time.Since(start)
// 15ms is high, but on macOS time.Sleep(delay) can sleep longer than delay.
// Also on Travis, if things get bogged down, this could be delayed further.
gl, gh := gaps[i]-10*time.Millisecond, gaps[i]+15*time.Millisecond
if gap < gl || gap > gh {
t.Fatalf("Gap is off for %d, expected %v got %v", i, gaps[i], gap)
}
}
// Now create pull based.
oc := workerModeConfig("PM")
oc.ReplayPolicy = server.ReplayOriginal
o, err = mset.AddConsumer(oc)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
for i := 0; i < totalMsgs; i++ {
start := time.Now()
if _, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
gap := time.Since(start)
// 10ms is high, but on macOS time.Sleep(delay) can sleep longer than delay.
gl, gh := gaps[i]-5*time.Millisecond, gaps[i]+10*time.Millisecond
if gap < gl || gap > gh {
t.Fatalf("Gap is incorrect for %d, expected %v got %v", i, gaps[i], gap)
}
}
})
}
}
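// TestJetStreamConsumerReplayRateNoAck verifies that ReplayOriginal also works for a
// durable push consumer with AckNone.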
func TestJetStreamConsumerReplayRateNoAck(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 10 msgs
totalMsgs := 10
for i := 0; i < totalMsgs; i++ {
nc.Request("DC", []byte("Hello World"), time.Second)
time.Sleep(time.Duration(rand.Intn(5)) * time.Millisecond)
}
if state := mset.State(); state.Msgs != uint64(totalMsgs) {
t.Fatalf("Expected %d messages, got %d", totalMsgs, state.Msgs)
}
subj := "d.dc"
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "derek",
DeliverSubject: subj,
AckPolicy: server.AckNone,
ReplayPolicy: server.ReplayOriginal,
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer o.Delete()
// Sleep a random amount of time.
time.Sleep(time.Duration(rand.Intn(20)) * time.Millisecond)
sub, _ := nc.SubscribeSync(subj)
nc.Flush()
checkFor(t, time.Second, 25*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != totalMsgs {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, totalMsgs)
}
return nil
})
})
}
}
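// TestJetStreamConsumerReplayQuit verifies that deleting a consumer mid-replay stops
// the replay goroutine and does not leak goroutines.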
func TestJetStreamConsumerReplayQuit(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{Name: "DC", Storage: server.MemoryStorage}},
{"FileStore", &server.StreamConfig{Name: "DC", Storage: server.FileStorage}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Send 2 msgs
nc.Request("DC", []byte("OK!"), time.Second)
time.Sleep(100 * time.Millisecond)
nc.Request("DC", []byte("OK!"), time.Second)
if state := mset.State(); state.Msgs != 2 {
t.Fatalf("Expected %d messages, got %d", 2, state.Msgs)
}
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
// Now do replay rate to match original.
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject, ReplayPolicy: server.ReplayOriginal})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Allow the delivery loop and replay goroutine to spin up.
time.Sleep(50 * time.Millisecond)
base := runtime.NumGoroutine()
o.Delete()
checkFor(t, 100*time.Millisecond, 10*time.Millisecond, func() error {
if runtime.NumGoroutine() >= base {
return fmt.Errorf("Consumer go routines still running")
}
return nil
})
})
}
}
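// TestJetStreamSystemLimits exercises server and account level JetStream limits:
// reserved memory/store accounting, max streams, and max consumers at both the
// account and the stream level.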
func TestJetStreamSystemLimits(t *testing.T) {
s := RunRandClientPortServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
if _, _, err := s.JetStreamReservedResources(); err == nil {
t.Fatalf("Expected error requesting jetstream reserved resources when not enabled")
}
// Create some accounts.
facc, _ := s.LookupOrRegisterAccount("FOO")
bacc, _ := s.LookupOrRegisterAccount("BAR")
zacc, _ := s.LookupOrRegisterAccount("BAZ")
jsconfig := &server.JetStreamConfig{MaxMemory: 1024, MaxStore: 8192}
if err := s.EnableJetStream(jsconfig); err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if rm, rd, err := s.JetStreamReservedResources(); err != nil {
t.Fatalf("Unexpected error requesting jetstream reserved resources: %v", err)
} else if rm != 0 || rd != 0 {
t.Fatalf("Expected reserved memory and store to be 0, got %d and %d", rm, rd)
}
limits := func(mem int64, store int64) *server.JetStreamAccountLimits {
return &server.JetStreamAccountLimits{
MaxMemory: mem,
MaxStore: store,
MaxStreams: -1,
MaxConsumers: -1,
}
}
if err := facc.EnableJetStream(limits(24, 192)); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Use up rest of our resources in memory
if err := bacc.EnableJetStream(limits(1000, 0)); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now ask for more memory. Should error.
if err := zacc.EnableJetStream(limits(1000, 0)); err == nil {
t.Fatalf("Expected an error when exhausting memory resource limits")
}
// Disk too.
if err := zacc.EnableJetStream(limits(0, 10000)); err == nil {
t.Fatalf("Expected an error when exhausting memory resource limits")
}
facc.DisableJetStream()
bacc.DisableJetStream()
zacc.DisableJetStream()
// Make sure we unreserved resources.
if rm, rd, err := s.JetStreamReservedResources(); err != nil {
t.Fatalf("Unexpected error requesting jetstream reserved resources: %v", err)
} else if rm != 0 || rd != 0 {
t.Fatalf("Expected reserved memory and store to be 0, got %v and %v", server.FriendlyBytes(rm), server.FriendlyBytes(rd))
}
if err := facc.EnableJetStream(limits(24, 192)); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Test Adjust
l := limits(jsconfig.MaxMemory, jsconfig.MaxStore)
l.MaxStreams = 10
l.MaxConsumers = 10
if err := facc.UpdateJetStreamLimits(l); err != nil {
t.Fatalf("Unexpected error updating jetstream account limits: %v", err)
}
var msets []*server.Stream
// Now test max streams and max consumers. Note max consumers is per stream.
for i := 0; i < 10; i++ {
mname := fmt.Sprintf("foo.%d", i)
mset, err := facc.AddStream(&server.StreamConfig{Name: strconv.Itoa(i), Storage: server.MemoryStorage, Subjects: []string{mname}})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
msets = append(msets, mset)
}
// This one should fail since it is over the limit for max number of streams.
if _, err := facc.AddStream(&server.StreamConfig{Name: "22", Storage: server.MemoryStorage, Subjects: []string{"foo.22"}}); err == nil {
t.Fatalf("Expected error adding stream over limit")
}
// Remove them all
for _, mset := range msets {
mset.Delete()
}
// Now try to add one with bytes limit that would exceed the account limit.
if _, err := facc.AddStream(&server.StreamConfig{Name: "22", Storage: server.MemoryStorage, MaxBytes: jsconfig.MaxStore * 2}); err == nil {
t.Fatalf("Expected error adding stream over limit")
}
// Replicas can't be > 1
if _, err := facc.AddStream(&server.StreamConfig{Name: "22", Storage: server.MemoryStorage, Replicas: 10}); err == nil {
t.Fatalf("Expected error adding stream over limit")
}
// Test consumers limit against account limit when the stream does not set a limit
mset, err := facc.AddStream(&server.StreamConfig{Name: "22", Storage: server.MemoryStorage, Subjects: []string{"foo.22"}})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
for i := 0; i < 10; i++ {
oname := fmt.Sprintf("O:%d", i)
_, err := mset.AddConsumer(&server.ConsumerConfig{Durable: oname, AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
// This one should fail.
if _, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "O:22", AckPolicy: server.AckExplicit}); err == nil {
t.Fatalf("Expected error adding consumer over the limit")
}
// Test consumer limit against stream limit
mset.Delete()
mset, err = facc.AddStream(&server.StreamConfig{Name: "22", Storage: server.MemoryStorage, Subjects: []string{"foo.22"}, MaxConsumers: 5})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
for i := 0; i < 5; i++ {
oname := fmt.Sprintf("O:%d", i)
_, err := mset.AddConsumer(&server.ConsumerConfig{Durable: oname, AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
// This one should fail.
if _, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "O:22", AckPolicy: server.AckExplicit}); err == nil {
t.Fatalf("Expected error adding consumer over the limit")
}
// Test the account having smaller limits than the stream
mset.Delete()
mset, err = facc.AddStream(&server.StreamConfig{Name: "22", Storage: server.MemoryStorage, Subjects: []string{"foo.22"}, MaxConsumers: 10})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
l.MaxConsumers = 5
if err := facc.UpdateJetStreamLimits(l); err != nil {
t.Fatalf("Unexpected error updating jetstream account limits: %v", err)
}
for i := 0; i < 5; i++ {
oname := fmt.Sprintf("O:%d", i)
_, err := mset.AddConsumer(&server.ConsumerConfig{Durable: oname, AckPolicy: server.AckExplicit})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
// This one should fail.
if _, err := mset.AddConsumer(&server.ConsumerConfig{Durable: "O:22", AckPolicy: server.AckExplicit}); err == nil {
t.Fatalf("Expected error adding consumer over the limit")
}
}
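// TestJetStreamStreamStorageTrackingAndLimits verifies that memory usage is tracked
// per account across streams, is released on delete/consume, and that account memory
// limits are enforced (and can be lifted by setting the limit to -1).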
func TestJetStreamStreamStorageTrackingAndLimits(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
gacc := s.GlobalAccount()
al := &server.JetStreamAccountLimits{
MaxMemory: 8192,
MaxStore: -1,
MaxStreams: -1,
MaxConsumers: -1,
}
if err := gacc.UpdateJetStreamLimits(al); err != nil {
t.Fatalf("Unexpected error updating jetstream account limits: %v", err)
}
mset, err := gacc.AddStream(&server.StreamConfig{Name: "LIMITS", Storage: server.MemoryStorage, Retention: server.WorkQueuePolicy})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 100
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "LIMITS", "Hello World!")
}
state := mset.State()
usage := gacc.JetStreamUsage()
// Make sure these are working correctly.
if state.Bytes != usage.Memory {
t.Fatalf("Expected to have stream bytes match memory usage, %d vs %d", state.Bytes, usage.Memory)
}
if usage.Streams != 1 {
t.Fatalf("Expected to have 1 stream, got %d", usage.Streams)
}
// Do second stream.
mset2, err := gacc.AddStream(&server.StreamConfig{Name: "NUM22", Storage: server.MemoryStorage, Retention: server.WorkQueuePolicy})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset2.Delete()
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "NUM22", "Hello World!")
}
stats2 := mset2.State()
usage = gacc.JetStreamUsage()
if usage.Memory != (state.Bytes + stats2.Bytes) {
t.Fatalf("Expected to track both streams, account is %v, stream1 is %v, stream2 is %v", usage.Memory, state.Bytes, stats2.Bytes)
}
// Make sure delete works.
mset2.Delete()
stats2 = mset2.State()
usage = gacc.JetStreamUsage()
if usage.Memory != (state.Bytes + stats2.Bytes) {
t.Fatalf("Expected to track both streams, account is %v, stream1 is %v, stream2 is %v", usage.Memory, state.Bytes, stats2.Bytes)
}
// Now drain the first one by consuming the messages.
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
for i := 0; i < toSend; i++ {
msg, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
msg.Respond(nil)
}
nc.Flush()
state = mset.State()
usage = gacc.JetStreamUsage()
if usage.Memory != 0 {
t.Fatalf("Expected usage memeory to be 0, got %d", usage.Memory)
}
// Now send twice the number of messages. Should receive an error at some point, and we will check usage against limits.
var errSeen string
for i := 0; i < toSend*2; i++ {
resp, _ := nc.Request("LIMITS", []byte("The quick brown fox jumped over the..."), 50*time.Millisecond)
if string(resp.Data) != server.OK {
errSeen = string(resp.Data)
break
}
}
if errSeen == "" {
t.Fatalf("Expected to see an error when exceeding the account limits")
}
state = mset.State()
usage = gacc.JetStreamUsage()
if usage.Memory > uint64(al.MaxMemory) {
t.Fatalf("Expected memory to not exceed limit of %d, got %d", al.MaxMemory, usage.Memory)
}
// Make sure that unlimited accounts work.
al.MaxMemory = -1
if err := gacc.UpdateJetStreamLimits(al); err != nil {
t.Fatalf("Unexpected error updating jetstream account limits: %v", err)
}
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "LIMITS", "Hello World!")
}
}
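// TestJetStreamStreamFileTrackingAndLimits is the file storage variant of the test
// above, tracking store usage per account and enforcing the account store limit.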
func TestJetStreamStreamFileTrackingAndLimits(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
gacc := s.GlobalAccount()
al := &server.JetStreamAccountLimits{
MaxMemory: 8192,
MaxStore: 9600,
MaxStreams: -1,
MaxConsumers: -1,
}
if err := gacc.UpdateJetStreamLimits(al); err != nil {
t.Fatalf("Unexpected error updating jetstream account limits: %v", err)
}
mconfig := &server.StreamConfig{Name: "LIMITS", Storage: server.FileStorage, Retention: server.WorkQueuePolicy}
mset, err := gacc.AddStream(mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 100
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "LIMITS", "Hello World!")
}
state := mset.State()
usage := gacc.JetStreamUsage()
// Make sure these are working correctly.
if usage.Store != state.Bytes {
t.Fatalf("Expected to have stream bytes match the store usage, %d vs %d", usage.Store, state.Bytes)
}
if usage.Streams != 1 {
t.Fatalf("Expected to have 1 stream, got %d", usage.Streams)
}
// Do second stream.
mconfig2 := &server.StreamConfig{Name: "NUM22", Storage: server.FileStorage, Retention: server.WorkQueuePolicy}
mset2, err := gacc.AddStream(mconfig2)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset2.Delete()
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "NUM22", "Hello World!")
}
stats2 := mset2.State()
usage = gacc.JetStreamUsage()
if usage.Store != (state.Bytes + stats2.Bytes) {
t.Fatalf("Expected to track both streams, usage is %v, stream1 is %v, stream2 is %v", usage.Store, state.Bytes, stats2.Bytes)
}
// Make sure delete works.
mset2.Delete()
stats2 = mset2.State()
usage = gacc.JetStreamUsage()
if usage.Store != (state.Bytes + stats2.Bytes) {
t.Fatalf("Expected to track both streams, account is %v, stream1 is %v, stream2 is %v", usage.Store, state.Bytes, stats2.Bytes)
}
// Now drain the first one by consuming the messages.
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
for i := 0; i < toSend; i++ {
msg, err := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
msg.Respond(nil)
}
nc.Flush()
state = mset.State()
usage = gacc.JetStreamUsage()
if usage.Memory != 0 {
t.Fatalf("Expected usage memeory to be 0, got %d", usage.Memory)
}
// Now send twice the number of messages. Should receive an error at some point, and we will check usage against limits.
var errSeen string
for i := 0; i < toSend*2; i++ {
resp, _ := nc.Request("LIMITS", []byte("The quick brown fox jumped over the..."), 50*time.Millisecond)
if string(resp.Data) != server.OK {
errSeen = string(resp.Data)
break
}
}
if errSeen == "" {
t.Fatalf("Expected to see an error when exceeding the account limits")
}
state = mset.State()
usage = gacc.JetStreamUsage()
if usage.Store > uint64(al.MaxStore) {
t.Fatalf("Expected store usage to not exceed limit of %d, got %d", al.MaxStore, usage.Store)
}
}
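// obsi and info capture consumer and stream state prior to a server restart so the
// recovery test below can compare it against the state after restart.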
type obsi struct {
cfg server.ConsumerConfig
ack int
}
type info struct {
cfg server.StreamConfig
state server.StreamState
obs []obsi
}
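// TestJetStreamSimpleFileRecovery verifies that file-backed streams and consumers are
// recovered after a server restart with matching configs, state, and consumer sequences.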
func TestJetStreamSimpleFileRecovery(t *testing.T) {
base := runtime.NumGoroutine()
s := RunRandClientPortServer()
defer s.Shutdown()
jsconfig := &server.JetStreamConfig{MaxMemory: 128 * 1024 * 1024, MaxStore: 32 * 1024 * 1024 * 1024}
if err := s.EnableJetStream(jsconfig); err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
ostate := make(map[string]info)
nid := nuid.New()
randomSubject := func() string {
nid.RandomizePrefix()
return fmt.Sprintf("SUBJ.%s", nid.Next())
}
nc := clientConnectToServer(t, s)
defer nc.Close()
numStreams := 10
for i := 1; i <= numStreams; i++ {
msetName := fmt.Sprintf("MMS-%d", i)
subjects := []string{randomSubject(), randomSubject(), randomSubject()}
msetConfig := server.StreamConfig{
Name: msetName,
Storage: server.FileStorage,
Subjects: subjects,
MaxMsgs: 100,
}
mset, err := acc.AddStream(&msetConfig)
if err != nil {
t.Fatalf("Unexpected error adding stream %q: %v", msetName, err)
}
defer mset.Delete()
toSend := rand.Intn(100) + 1
for n := 1; n <= toSend; n++ {
msg := fmt.Sprintf("Hello %d", n*i)
subj := subjects[rand.Intn(len(subjects))]
sendStreamMsg(t, nc, subj, msg)
}
// Create up to 5 consumers.
numObs := rand.Intn(5) + 1
var obs []obsi
for n := 1; n <= numObs; n++ {
oname := fmt.Sprintf("WQ-%d", n)
o, err := mset.AddConsumer(workerModeConfig(oname))
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now grab some messages.
toReceive := rand.Intn(toSend) + 1
for r := 0; r < toReceive; r++ {
resp, _ := nc.Request(o.RequestNextMsgSubject(), nil, time.Second)
if resp != nil {
resp.Respond(nil)
}
}
obs = append(obs, obsi{o.Config(), toReceive})
}
ostate[msetName] = info{mset.Config(), mset.State(), obs}
}
pusage := acc.JetStreamUsage()
// Shutdown the server. Restart and make sure things come back.
s.Shutdown()
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
delta := (runtime.NumGoroutine() - base)
if delta > 3 {
return fmt.Errorf("%d Go routines still exist post Shutdown()", delta)
}
return nil
})
s = RunRandClientPortServer()
defer s.Shutdown()
if err := s.EnableJetStream(jsconfig); err != nil {
t.Fatalf("Expected no error, got %v", err)
}
acc = s.GlobalAccount()
nusage := acc.JetStreamUsage()
if nusage != pusage {
t.Fatalf("Usage does not match after restore: %+v vs %+v", nusage, pusage)
}
for mname, info := range ostate {
mset, err := acc.LookupStream(mname)
if err != nil {
t.Fatalf("Expected to find a stream for %q", mname)
}
if state := mset.State(); !reflect.DeepEqual(state, info.state) {
t.Fatalf("State does not match: %+v vs %+v", state, info.state)
}
if cfg := mset.Config(); !reflect.DeepEqual(cfg, info.cfg) {
t.Fatalf("Configs do not match: %+v vs %+v", cfg, info.cfg)
}
// Consumers.
if mset.NumConsumers() != len(info.obs) {
t.Fatalf("Number of consumers do not match: %d vs %d", mset.NumConsumers(), len(info.obs))
}
for _, oi := range info.obs {
if o := mset.LookupConsumer(oi.cfg.Durable); o != nil {
if uint64(oi.ack+1) != o.NextSeq() {
t.Fatalf("Consumer next seq is not correct: %d vs %d", oi.ack+1, o.NextSeq())
}
} else {
t.Fatalf("Expected to get an consumer")
}
}
}
}
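// TestJetStreamInfoAPIWithHeaders verifies that API requests carrying NATS headers and
// a payload are still processed correctly.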
func TestJetStreamInfoAPIWithHeaders(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
// Forced cleanup of all persisted state.
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
// Client for API requests.
nc := clientConnectToServer(t, s)
defer nc.Close()
m := nats.NewMsg(server.JSApiAccountInfo)
m.Header.Add("Accept-Encoding", "json")
m.Header.Add("Authorization", "s3cr3t")
m.Data = []byte("HELLO-JS!")
resp, err := nc.RequestMsg(m, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info server.JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Error != nil {
t.Fatalf("Received an error: %+v", info.Error)
}
}
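// TestJetStreamRequestAPI walks the JetStream JSON API: account info, stream
// create/update/info/list, consumer create/info/delete, message delete, purge, stream
// delete, and stream templates, checking both success and error responses.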
func TestJetStreamRequestAPI(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
// Forced cleanup of all persisted state.
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
// Client for API requests.
nc := clientConnectToServer(t, s)
defer nc.Close()
// This will get the current information about usage and limits for this account.
resp, err := nc.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info server.JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Now create a stream.
msetCfg := server.StreamConfig{
Name: "MSET22",
Storage: server.FileStorage,
Subjects: []string{"foo", "bar", "baz"},
MaxMsgs: 100,
}
req, err := json.Marshal(msetCfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, _ = nc.Request(fmt.Sprintf(server.JSApiStreamCreateT, msetCfg.Name), req, time.Second)
var scResp server.JSApiStreamCreateResponse
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo == nil || scResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
if time.Since(scResp.Created) > time.Second {
t.Fatalf("Created time seems wrong: %v\n", scResp.Created)
}
checkBadRequest := func(e *server.ApiError, description string) {
t.Helper()
if e == nil || e.Code != 400 || e.Description != description {
t.Fatalf("Did not get proper error: %+v", e)
}
}
checkServerError := func(e *server.ApiError, description string) {
t.Helper()
if e == nil || e.Code != 500 || e.Description != description {
t.Fatalf("Did not get proper server error: %+v\n", e)
}
}
checkNotFound := func(e *server.ApiError, description string) {
t.Helper()
if e == nil || e.Code != 404 || e.Description != description {
t.Fatalf("Did not get proper server error: %+v\n", e)
}
}
// Check that the name in config has to match the name in the subject
resp, _ = nc.Request(fmt.Sprintf(server.JSApiStreamCreateT, "BOB"), req, time.Second)
scResp.Error, scResp.StreamInfo = nil, nil
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkBadRequest(scResp.Error, "stream name in subject does not match request")
// Check that update works.
msetCfg.Subjects = []string{"foo", "bar", "baz"}
msetCfg.MaxBytes = 2222222
req, err = json.Marshal(msetCfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, _ = nc.Request(fmt.Sprintf(server.JSApiStreamUpdateT, msetCfg.Name), req, time.Second)
scResp.Error, scResp.StreamInfo = nil, nil
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo == nil || scResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", scResp.Error)
}
// Check that updating a non existing stream fails
cfg := server.StreamConfig{
Name: "UNKNOWN_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo"},
}
req, err = json.Marshal(cfg)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, _ = nc.Request(fmt.Sprintf(server.JSApiStreamUpdateT, cfg.Name), req, time.Second)
scResp.Error, scResp.StreamInfo = nil, nil
if err := json.Unmarshal(resp.Data, &scResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if scResp.StreamInfo != nil || scResp.Error == nil || scResp.Error.Code != 404 {
t.Fatalf("Unexpected error: %+v", scResp.Error)
}
// Now lookup info again and see that we can see the new stream.
resp, err = nc.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err = json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Streams != 1 {
t.Fatalf("Expected to see 1 Stream, got %d", info.Streams)
}
// Make sure list names works.
resp, err = nc.Request(server.JSApiStreams, nil, time.Second)
var namesResponse server.JSApiStreamNamesResponse
if err = json.Unmarshal(resp.Data, &namesResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(namesResponse.Streams) != 1 {
t.Fatalf("Expected only 1 stream but got %d", len(namesResponse.Streams))
}
if namesResponse.Total != 1 {
t.Fatalf("Expected total to be 1 but got %d", namesResponse.Total)
}
if namesResponse.Offset != 0 {
t.Fatalf("Expected offset to be 0 but got %d", namesResponse.Offset)
}
if namesResponse.Limit != server.JSApiNamesLimit {
t.Fatalf("Expected limit to be %d but got %d", server.JSApiNamesLimit, namesResponse.Limit)
}
if namesResponse.Streams[0] != msetCfg.Name {
t.Fatalf("Expected to get %q, but got %q", msetCfg.Name, namesResponse.Streams[0])
}
// Now do detailed version.
resp, err = nc.Request(server.JSApiStreamList, nil, time.Second)
var listResponse server.JSApiStreamListResponse
if err = json.Unmarshal(resp.Data, &listResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(listResponse.Streams) != 1 {
t.Fatalf("Expected only 1 stream but got %d", len(listResponse.Streams))
}
if listResponse.Total != 1 {
t.Fatalf("Expected total to be 1 but got %d", listResponse.Total)
}
if listResponse.Offset != 0 {
t.Fatalf("Expected offset to be 0 but got %d", listResponse.Offset)
}
if listResponse.Limit != server.JSApiListLimit {
t.Fatalf("Expected limit to be %d but got %d", server.JSApiListLimit, listResponse.Limit)
}
if listResponse.Streams[0].Config.Name != msetCfg.Name {
t.Fatalf("Expected to get %q, but got %q", msetCfg.Name, listResponse.Streams[0].Config.Name)
}
// Now send some messages, then we can poll for info on this stream.
toSend := 10
for i := 0; i < toSend; i++ {
nc.Request("foo", []byte("WELCOME JETSTREAM"), time.Second)
}
resp, err = nc.Request(fmt.Sprintf(server.JSApiStreamInfoT, msetCfg.Name), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var msi server.StreamInfo
if err = json.Unmarshal(resp.Data, &msi); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if msi.State.Msgs != uint64(toSend) {
t.Fatalf("Expected to get %d msgs, got %d", toSend, msi.State.Msgs)
}
if time.Since(msi.Created) > time.Second {
t.Fatalf("Created time seems wrong: %v\n", msi.Created)
}
// Looking up one that is not there should yield an error.
resp, err = nc.Request(fmt.Sprintf(server.JSApiStreamInfoT, "BOB"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var bResp server.JSApiStreamInfoResponse
if err = json.Unmarshal(resp.Data, &bResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkNotFound(bResp.Error, "stream not found")
// Now create a consumer.
delivery := nats.NewInbox()
obsReq := server.CreateConsumerRequest{
Stream: msetCfg.Name,
Config: server.ConsumerConfig{DeliverSubject: delivery},
}
req, err = json.Marshal(obsReq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err = nc.Request(fmt.Sprintf(server.JSApiConsumerCreateT, msetCfg.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var ccResp server.JSApiConsumerCreateResponse
if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkServerError(ccResp.Error, "consumer requires interest for delivery subject when ephemeral")
// Now create subscription and make sure we get proper response.
sub, _ := nc.SubscribeSync(delivery)
nc.Flush()
resp, err = nc.Request(fmt.Sprintf(server.JSApiConsumerCreateT, msetCfg.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ccResp.Error, ccResp.ConsumerInfo = nil, nil
if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ccResp.ConsumerInfo == nil || ccResp.Error != nil {
t.Fatalf("Got a bad response %+v", ccResp)
}
if time.Since(ccResp.Created) > time.Second {
t.Fatalf("Created time seems wrong: %v\n", ccResp.Created)
}
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != toSend {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend)
}
return nil
})
// Check that we get an error if the stream name in the subject does not match the config.
resp, err = nc.Request(fmt.Sprintf(server.JSApiConsumerCreateT, "BOB"), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ccResp.Error, ccResp.ConsumerInfo = nil, nil
if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// The stream name in the subject does not match the request, so this should have failed.
checkBadRequest(ccResp.Error, "stream name in subject does not match request")
// Get the list of all of the consumers for our stream.
resp, err = nc.Request(fmt.Sprintf(server.JSApiConsumersT, msetCfg.Name), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var clResponse server.JSApiConsumerNamesResponse
if err = json.Unmarshal(resp.Data, &clResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(clResponse.Consumers) != 1 {
t.Fatalf("Expected only 1 consumer but got %d", len(clResponse.Consumers))
}
// Now let's get info about our consumer.
cName := clResponse.Consumers[0]
resp, err = nc.Request(fmt.Sprintf(server.JSApiConsumerInfoT, msetCfg.Name, cName), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var oinfo server.ConsumerInfo
if err = json.Unmarshal(resp.Data, &oinfo); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Do some sanity checking.
// Must match consumer.go
const randConsumerNameLen = 8
if len(oinfo.Name) != randConsumerNameLen {
t.Fatalf("Expected ephemeral name, got %q", oinfo.Name)
}
if len(oinfo.Config.Durable) != 0 {
t.Fatalf("Expected no durable name, but got %q", oinfo.Config.Durable)
}
if oinfo.Config.DeliverSubject != delivery {
t.Fatalf("Expected to have delivery subject of %q, got %q", delivery, oinfo.Config.DeliverSubject)
}
if oinfo.Delivered.Consumer != 10 {
t.Fatalf("Expected consumer delivered sequence of 10, got %d", oinfo.Delivered.Consumer)
}
if oinfo.AckFloor.Consumer != 10 {
t.Fatalf("Expected ack floor to be 10, got %d", oinfo.AckFloor.Consumer)
}
// Now delete the consumer.
resp, _ = nc.Request(fmt.Sprintf(server.JSApiConsumerDeleteT, msetCfg.Name, cName), nil, time.Second)
var cdResp server.JSApiConsumerDeleteResponse
if err = json.Unmarshal(resp.Data, &cdResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !cdResp.Success || cdResp.Error != nil {
t.Fatalf("Got a bad response %+v", ccResp)
}
// Make sure we can't create a durable using the ephemeral API endpoint.
obsReq = server.CreateConsumerRequest{
Stream: msetCfg.Name,
Config: server.ConsumerConfig{Durable: "myd", DeliverSubject: delivery},
}
req, err = json.Marshal(obsReq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err = nc.Request(fmt.Sprintf(server.JSApiConsumerCreateT, msetCfg.Name), req, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ccResp.Error, ccResp.ConsumerInfo = nil, nil
if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkBadRequest(ccResp.Error, "consumer expected to be ephemeral but a durable name was set in request")
// Now make sure we can create a durable on the subject with the proper name.
resp, err = nc.Request(fmt.Sprintf(server.JSApiDurableCreateT, msetCfg.Name, obsReq.Config.Durable), req, time.Second)
ccResp.Error, ccResp.ConsumerInfo = nil, nil
if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if ccResp.ConsumerInfo == nil || ccResp.Error != nil {
t.Fatalf("Did not receive correct response")
}
// Make sure an empty durable name in the config does not work with the durable endpoint.
obsReq2 := server.CreateConsumerRequest{
Stream: msetCfg.Name,
Config: server.ConsumerConfig{DeliverSubject: delivery},
}
req2, err := json.Marshal(obsReq2)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, err = nc.Request(fmt.Sprintf(server.JSApiDurableCreateT, msetCfg.Name, obsReq.Config.Durable), req2, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ccResp.Error, ccResp.ConsumerInfo = nil, nil
if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkBadRequest(ccResp.Error, "consumer expected to be durable but a durable name was not set")
// Now delete a msg.
dreq := server.JSApiMsgDeleteRequest{Seq: 2}
dreqj, err := json.Marshal(dreq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
resp, _ = nc.Request(fmt.Sprintf(server.JSApiMsgDeleteT, msetCfg.Name), dreqj, time.Second)
var delMsgResp server.JSApiMsgDeleteResponse
if err = json.Unmarshal(resp.Data, &delMsgResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !delMsgResp.Success || delMsgResp.Error != nil {
t.Fatalf("Got a bad response %+v", delMsgResp.Error)
}
// Now purge the stream.
resp, _ = nc.Request(fmt.Sprintf(server.JSApiStreamPurgeT, msetCfg.Name), nil, time.Second)
var pResp server.JSApiStreamPurgeResponse
if err = json.Unmarshal(resp.Data, &pResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !pResp.Success || pResp.Error != nil {
t.Fatalf("Got a bad response %+v", pResp)
}
if pResp.Purged != 9 {
t.Fatalf("Expected 9 purged, got %d", pResp.Purged)
}
// Now delete the stream.
resp, _ = nc.Request(fmt.Sprintf(server.JSApiStreamDeleteT, msetCfg.Name), nil, time.Second)
var dResp server.JSApiStreamDeleteResponse
if err = json.Unmarshal(resp.Data, &dResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !dResp.Success || dResp.Error != nil {
t.Fatalf("Got a bad response %+v", dResp.Error)
}
// Now grab stats again.
// This will get the current information about usage and limits for this account.
resp, err = nc.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Streams != 0 {
t.Fatalf("Expected no remaining streams, got %d", info.Streams)
}
// Now do templates.
mcfg := &server.StreamConfig{
Subjects: []string{"kv.*"},
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
MaxMsgs: 4,
Storage: server.MemoryStorage,
Replicas: 1,
}
template := &server.StreamTemplateConfig{
Name: "kv",
Config: mcfg,
MaxStreams: 4,
}
req, err = json.Marshal(template)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Check that the name in config has to match the name in the subject
resp, _ = nc.Request(fmt.Sprintf(server.JSApiTemplateCreateT, "BOB"), req, time.Second)
var stResp server.JSApiStreamTemplateCreateResponse
if err = json.Unmarshal(resp.Data, &stResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkBadRequest(stResp.Error, "template name in subject does not match request")
resp, _ = nc.Request(fmt.Sprintf(server.JSApiTemplateCreateT, template.Name), req, time.Second)
stResp.Error, stResp.StreamTemplateInfo = nil, nil
if err = json.Unmarshal(resp.Data, &stResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if stResp.StreamTemplateInfo == nil || stResp.Error != nil {
t.Fatalf("Did not receive correct response")
}
// Create a second one.
template.Name = "ss"
template.Config.Subjects = []string{"foo", "bar"}
req, err = json.Marshal(template)
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
resp, _ = nc.Request(fmt.Sprintf(server.JSApiTemplateCreateT, template.Name), req, time.Second)
stResp.Error, stResp.StreamTemplateInfo = nil, nil
if err = json.Unmarshal(resp.Data, &stResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if stResp.StreamTemplateInfo == nil || stResp.Error != nil {
t.Fatalf("Did not receive correct response")
}
// Now grab the list of templates
var tListResp server.JSApiStreamTemplateNamesResponse
resp, err = nc.Request(server.JSApiTemplates, nil, time.Second)
if err = json.Unmarshal(resp.Data, &tListResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(tListResp.Templates) != 2 {
t.Fatalf("Expected 2 templates but got %d", len(tListResp.Templates))
}
sort.Strings(tListResp.Templates)
if tListResp.Templates[0] != "kv" {
t.Fatalf("Expected to get %q, but got %q", "kv", tListResp.Templates[0])
}
if tListResp.Templates[1] != "ss" {
t.Fatalf("Expected to get %q, but got %q", "ss", tListResp.Templates[1])
}
// Now delete one.
// Test bad name.
resp, _ = nc.Request(fmt.Sprintf(server.JSApiTemplateDeleteT, "bob"), nil, time.Second)
var tDeleteResp server.JSApiStreamTemplateDeleteResponse
if err = json.Unmarshal(resp.Data, &tDeleteResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
checkServerError(tDeleteResp.Error, "template not found")
resp, _ = nc.Request(fmt.Sprintf(server.JSApiTemplateDeleteT, "ss"), nil, time.Second)
tDeleteResp.Error = nil
if err = json.Unmarshal(resp.Data, &tDeleteResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !tDeleteResp.Success || tDeleteResp.Error != nil {
t.Fatalf("Did not receive correct response: %+v", tDeleteResp.Error)
}
resp, err = nc.Request(server.JSApiTemplates, nil, time.Second)
tListResp.Error, tListResp.Templates = nil, nil
if err = json.Unmarshal(resp.Data, &tListResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(tListResp.Templates) != 1 {
t.Fatalf("Expected 1 template but got %d", len(tListResp.Templates))
}
if tListResp.Templates[0] != "kv" {
t.Fatalf("Expected to get %q, but got %q", "kv", tListResp.Templates[0])
}
// First create a stream from the template
sendStreamMsg(t, nc, "kv.22", "derek")
// Finally, request info for the template.
resp, err = nc.Request(fmt.Sprintf(server.JSApiTemplateInfoT, "kv"), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var ti server.StreamTemplateInfo
if err = json.Unmarshal(resp.Data, &ti); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(ti.Streams) != 1 {
t.Fatalf("Expected 1 stream, got %d", len(ti.Streams))
}
if ti.Streams[0] != server.CanonicalName("kv.22") {
t.Fatalf("Expected stream with name %q, but got %q", server.CanonicalName("kv.22"), ti.Streams[0])
}
// Test that we can send nil or an empty legal json for requests that take no args.
// We know this stream does not exist, this is just checking request processing.
checkEmptyReqArg := func(arg string) {
t.Helper()
var req []byte
if len(arg) > 0 {
req = []byte(arg)
}
resp, err = nc.Request(fmt.Sprintf(server.JSApiStreamDeleteT, "foo_bar_baz"), req, time.Second)
var dResp server.JSApiStreamDeleteResponse
if err = json.Unmarshal(resp.Data, &dResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if dResp.Error == nil || dResp.Error.Code != 404 {
t.Fatalf("Got a bad response, expected a 404 response %+v", dResp.Error)
}
}
checkEmptyReqArg("")
checkEmptyReqArg("{}")
checkEmptyReqArg(" {} ")
checkEmptyReqArg(" { } ")
}
func TestJetStreamFilteredStreamNames(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
// Forced cleanup of all persisted state.
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
// Client for API requests.
nc := clientConnectToServer(t, s)
defer nc.Close()
// Create some streams.
var snid int
createStream := func(subjects []string) {
t.Helper()
snid++
name := fmt.Sprintf("S-%d", snid)
sc := &server.StreamConfig{Name: name, Subjects: subjects}
if _, err := s.GlobalAccount().AddStream(sc); err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
}
createStream([]string{"foo"}) // S1
createStream([]string{"bar"}) // S2
createStream([]string{"baz"}) // S3
createStream([]string{"foo.*", "bar.*"}) // S4
createStream([]string{"foo-1.22", "bar-1.33"}) // S5
expectStreams := func(filter string, streams []string) {
t.Helper()
req, _ := json.Marshal(&server.JSApiStreamNamesRequest{Subject: filter})
r, _ := nc.Request(server.JSApiStreams, req, time.Second)
var resp server.JSApiStreamNamesResponse
if err := json.Unmarshal(r.Data, &resp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(resp.Streams) != len(streams) {
t.Fatalf("Expected %d results, got %d", len(streams), len(resp.Streams))
}
}
expectStreams("foo", []string{"S1"})
expectStreams("bar", []string{"S2"})
expectStreams("baz", []string{"S3"})
expectStreams("*", []string{"S1", "S2", "S3"})
expectStreams(">", []string{"S1", "S2", "S3", "S4", "S5"})
expectStreams("*.*", []string{"S4", "S5"})
expectStreams("*.22", []string{"S4", "S5"})
}
func TestJetStreamUpdateStream(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.MemoryStorage,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.FileStorage,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil && config.StoreDir != "" {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// Test basic updates. We allow changing the subjects, limits, and no_ack, along with replicas (TBD w/ cluster)
cfg := *c.mconfig
// Can't change name.
cfg.Name = "bar"
if err := mset.Update(&cfg); err == nil || !strings.Contains(err.Error(), "name must match") {
t.Fatalf("Expected error trying to update name")
}
// Can't change max consumers for now.
cfg = *c.mconfig
cfg.MaxConsumers = 10
if err := mset.Update(&cfg); err == nil || !strings.Contains(err.Error(), "can not change") {
t.Fatalf("Expected error trying to change MaxConsumers")
}
// Can't change storage types.
cfg = *c.mconfig
if cfg.Storage == server.FileStorage {
cfg.Storage = server.MemoryStorage
} else {
cfg.Storage = server.FileStorage
}
if err := mset.Update(&cfg); err == nil || !strings.Contains(err.Error(), "can not change") {
t.Fatalf("Expected error trying to change Storage")
}
// Can't change replicas > 1 for now.
cfg = *c.mconfig
cfg.Replicas = 10
if err := mset.Update(&cfg); err == nil || !strings.Contains(err.Error(), "maximum replicas") {
t.Fatalf("Expected error trying to change Replicas")
}
// Can't have a template set for now.
cfg = *c.mconfig
cfg.Template = "baz"
if err := mset.Update(&cfg); err == nil || !strings.Contains(err.Error(), "template") {
t.Fatalf("Expected error trying to change Template owner")
}
// Can't change limits policy.
cfg = *c.mconfig
cfg.Retention = server.WorkQueuePolicy
if err := mset.Update(&cfg); err == nil || !strings.Contains(err.Error(), "can not change") {
t.Fatalf("Expected error trying to change Retention")
}
// Now test changing limits.
nc := clientConnectToServer(t, s)
defer nc.Close()
pending := uint64(100)
for i := uint64(0); i < pending; i++ {
sendStreamMsg(t, nc, "foo", "0123456789")
}
pendingBytes := mset.State().Bytes
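// Helper: verify the stream reports the expected message and byte counts.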
checkPending := func(msgs, bts uint64) {
t.Helper()
state := mset.State()
if state.Msgs != msgs {
t.Fatalf("Expected %d messages, got %d", msgs, state.Msgs)
}
if state.Bytes != bts {
t.Fatalf("Expected %d bytes, got %d", bts, state.Bytes)
}
}
checkPending(pending, pendingBytes)
// Update msgs to higher.
cfg = *c.mconfig
cfg.MaxMsgs = int64(pending * 2)
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
if mset.Config().MaxMsgs != cfg.MaxMsgs {
t.Fatalf("Expected the change to take effect, %d vs %d", mset.Config().MaxMsgs, cfg.MaxMsgs)
}
checkPending(pending, pendingBytes)
// Update msgs to lower.
cfg = *c.mconfig
cfg.MaxMsgs = int64(pending / 2)
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
if mset.Config().MaxMsgs != cfg.MaxMsgs {
t.Fatalf("Expected the change to take effect, %d vs %d", mset.Config().MaxMsgs, cfg.MaxMsgs)
}
checkPending(pending/2, pendingBytes/2)
// Now do bytes.
cfg = *c.mconfig
cfg.MaxBytes = int64(pendingBytes / 4)
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
if mset.Config().MaxBytes != cfg.MaxBytes {
t.Fatalf("Expected the change to take effect, %d vs %d", mset.Config().MaxBytes, cfg.MaxBytes)
}
checkPending(pending/4, pendingBytes/4)
// Now do age.
cfg = *c.mconfig
cfg.MaxAge = time.Millisecond
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
// Just wait a bit for expiration.
time.Sleep(25 * time.Millisecond)
if mset.Config().MaxAge != cfg.MaxAge {
t.Fatalf("Expected the change to take effect, %d vs %d", mset.Config().MaxAge, cfg.MaxAge)
}
checkPending(0, 0)
// Now put back to original.
cfg = *c.mconfig
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
for i := uint64(0); i < pending; i++ {
sendStreamMsg(t, nc, "foo", "0123456789")
}
// Subject changes.
// Add in a subject first.
cfg = *c.mconfig
cfg.Subjects = []string{"foo", "bar"}
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
// Make sure we can still send to foo.
sendStreamMsg(t, nc, "foo", "0123456789")
// And we can now send to bar.
sendStreamMsg(t, nc, "bar", "0123456789")
// Now delete both and change to baz only.
cfg.Subjects = []string{"baz"}
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
// Make sure we do not get response acks for "foo" or "bar".
if resp, err := nc.Request("foo", nil, 25*time.Millisecond); err == nil || resp != nil {
t.Fatalf("Expected no response from jetstream for deleted subject: %q", "foo")
}
if resp, err := nc.Request("bar", nil, 25*time.Millisecond); err == nil || resp != nil {
t.Fatalf("Expected no response from jetstream for deleted subject: %q", "bar")
}
// Make sure we can send to "baz"
sendStreamMsg(t, nc, "baz", "0123456789")
if nmsgs := mset.State().Msgs; nmsgs != pending+3 {
t.Fatalf("Expected %d msgs, got %d", pending+3, nmsgs)
}
// FileStore restarts for config save.
cfg = *c.mconfig
if cfg.Storage == server.FileStorage {
cfg.Subjects = []string{"foo", "bar"}
cfg.MaxMsgs = 2222
cfg.MaxBytes = 3333333
cfg.MaxAge = 22 * time.Hour
if err := mset.Update(&cfg); err != nil {
t.Fatalf("Unexpected error %v", err)
}
// Pull the config back since certain defaults, etc. are set during processing.
cfg = mset.Config()
// Restart the server.
// Capture port since it was dynamic.
u, _ := url.Parse(s.ClientURL())
port, _ := strconv.Atoi(u.Port())
// Stop current server.
sd := s.JetStreamConfig().StoreDir
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(port, sd)
defer s.Shutdown()
mset, err = s.GlobalAccount().LookupStream(cfg.Name)
if err != nil {
t.Fatalf("Expected to find a stream for %q", cfg.Name)
}
restoredCfg := mset.Config()
if !reflect.DeepEqual(cfg, restoredCfg) {
t.Fatalf("restored configuration does not match: \n%+v\n vs \n%+v", restoredCfg, cfg)
}
}
})
}
}
func TestJetStreamDeleteMsg(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.MemoryStorage,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.FileStorage,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil && config.StoreDir != "" {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
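// Helper: publish ten messages to the stream and flush.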
pubTen := func() {
t.Helper()
for i := 0; i < 10; i++ {
nc.Publish("foo", []byte("Hello World!"))
}
nc.Flush()
}
pubTen()
state := mset.State()
if state.Msgs != 10 {
t.Fatalf("Expected 10 messages, got %d", state.Msgs)
}
bytesPerMsg := state.Bytes / 10
if bytesPerMsg == 0 {
t.Fatalf("Expected non-zero bytes for msg size")
}
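// Helper: delete the given sequence and verify that the message count, byte count and first sequence are updated as expected.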
deleteAndCheck := func(seq, expectedFirstSeq uint64) {
t.Helper()
beforeState := mset.State()
if removed, _ := mset.DeleteMsg(seq); !removed {
t.Fatalf("Expected the delete of sequence %d to succeed", seq)
}
expectedState := beforeState
expectedState.Msgs--
expectedState.Bytes -= bytesPerMsg
expectedState.FirstSeq = expectedFirstSeq
sm, err := mset.GetMsg(expectedFirstSeq)
if err != nil {
t.Fatalf("Error fetching message for seq: %d - %v", expectedFirstSeq, err)
}
expectedState.FirstTime = sm.Time
expectedState.Deleted = nil
afterState := mset.State()
afterState.Deleted = nil
// Ignore first time in this test.
if !reflect.DeepEqual(afterState, expectedState) {
t.Fatalf("Stats not what we expected. Expected %+v, got %+v\n", expectedState, afterState)
}
}
// Delete one from the middle
deleteAndCheck(5, 1)
// Now make sure sequences are updated properly.
// Delete first msg.
deleteAndCheck(1, 2)
// Now last
deleteAndCheck(10, 2)
// Now gaps.
deleteAndCheck(3, 2)
deleteAndCheck(2, 4)
mset.Purge()
// Publish ten more messages.
pubTen()
deleteAndCheck(11, 12)
deleteAndCheck(15, 12)
deleteAndCheck(16, 12)
deleteAndCheck(20, 12)
// Only file storage beyond here.
if c.mconfig.Storage == server.MemoryStorage {
return
}
// Capture port since it was dynamic.
u, _ := url.Parse(s.ClientURL())
port, _ := strconv.Atoi(u.Port())
sd := s.JetStreamConfig().StoreDir
// Shutdown the server.
s.Shutdown()
s = RunJetStreamServerOnPort(port, sd)
defer s.Shutdown()
mset, err = s.GlobalAccount().LookupStream("foo")
if err != nil {
t.Fatalf("Expected to get the stream back")
}
expected := server.StreamState{Msgs: 6, Bytes: 6 * bytesPerMsg, FirstSeq: 12, LastSeq: 20}
state = mset.State()
state.FirstTime, state.LastTime, state.Deleted = time.Time{}, time.Time{}, nil
if !reflect.DeepEqual(expected, state) {
t.Fatalf("State not what we expected. Expected %+v, got %+v\n", expected, state)
}
// Now create a consumer and make sure we get the right sequence.
nc = clientConnectToServer(t, s)
defer nc.Close()
delivery := nats.NewInbox()
sub, _ := nc.SubscribeSync(delivery)
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: delivery, FilterSubject: "foo"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
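// These are the stream sequences left after the deletes above; the consumer should deliver them in order.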
expectedStoreSeq := []uint64{12, 13, 14, 17, 18, 19}
for i := 0; i < 6; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if o.StreamSeqFromReply(m.Reply) != expectedStoreSeq[i] {
t.Fatalf("Expected store seq of %d, got %d", expectedStoreSeq[i], o.StreamSeqFromReply(m.Reply))
}
}
})
}
}
// https://github.com/nats-io/jetstream/issues/396
func TestJetStreamLimitLockBug(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxMsgs: 10,
Storage: server.MemoryStorage,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxMsgs: 10,
Storage: server.FileStorage,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil && config.StoreDir != "" {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
for i := 0; i < 100; i++ {
sendStreamMsg(t, nc, "foo", "ok")
}
state := mset.State()
if state.Msgs != 10 {
t.Fatalf("Expected 10 messages, got %d", state.Msgs)
}
})
}
}
func TestJetStreamNextMsgNoInterest(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.MemoryStorage,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.FileStorage,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
cfg := &server.StreamConfig{Name: "foo", Storage: server.FileStorage}
mset, err := s.GlobalAccount().AddStream(cfg)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectWithOldRequest(t, s)
defer nc.Close()
// Now create a consumer and make sure it functions properly.
o, err := mset.AddConsumer(workerModeConfig("WQ"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
nextSubj := o.RequestNextMsgSubject()
// Queue up a worker but use a short timeout.
if _, err := nc.Request(nextSubj, nil, time.Millisecond); err != nats.ErrTimeout {
t.Fatalf("Expected a timeout error and no response with acks suppressed")
}
// Now send a message. The worker from above will still be known, but we want to make
// sure the system detects that it has timed out, so we do a request for the next msg right behind it.
nc.Publish("foo", []byte("OK"))
if msg, err := nc.Request(nextSubj, nil, 5*time.Millisecond); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else {
msg.Respond(nil) // Ack
}
// Now queue up 10 workers.
for i := 0; i < 10; i++ {
if _, err := nc.Request(nextSubj, nil, time.Microsecond); err != nats.ErrTimeout {
t.Fatalf("Expected a timeout error and no response with acks suppressed")
}
}
// Now publish ten messages.
for i := 0; i < 10; i++ {
nc.Publish("foo", []byte("OK"))
}
nc.Flush()
for i := 0; i < 10; i++ {
if msg, err := nc.Request(nextSubj, nil, 10*time.Millisecond); err != nil {
t.Fatalf("Unexpected error for %d: %v", i, err)
} else {
msg.Respond(nil) // Ack
}
}
nc.Flush()
ostate := o.Info()
if ostate.AckFloor.Stream != 11 || ostate.NumAckPending > 0 {
t.Fatalf("Inconsistent ack state: %+v", ostate)
}
})
}
}
func TestJetStreamMsgHeaders(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{name: "MemoryStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.MemoryStorage,
Replicas: 1,
}},
{name: "FileStore",
mconfig: &server.StreamConfig{
Name: "foo",
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
Storage: server.FileStorage,
Replicas: 1,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
m := nats.NewMsg("foo")
m.Header.Add("Accept-Encoding", "json")
m.Header.Add("Authorization", "s3cr3t")
m.Data = []byte("Hello JetStream Headers - #1!")
nc.PublishMsg(m)
nc.Flush()
state := mset.State()
if state.Msgs != 1 {
t.Fatalf("Expected 1 message, got %d", state.Msgs)
}
if state.Bytes == 0 {
t.Fatalf("Expected non-zero bytes")
}
// Now access raw from stream.
sm, err := mset.GetMsg(1)
if err != nil {
t.Fatalf("Unexpected error getting stored message: %v", err)
}
// Calculate the []byte version of the headers.
var b bytes.Buffer
b.WriteString("NATS/1.0\r\n")
m.Header.Write(&b)
b.WriteString("\r\n")
hdr := b.Bytes()
if !bytes.Equal(sm.Header, hdr) {
t.Fatalf("Message headers do not match, %q vs %q", hdr, sm.Header)
}
if !bytes.Equal(sm.Data, m.Data) {
t.Fatalf("Message data do not match, %q vs %q", m.Data, sm.Data)
}
// Now do consumer based.
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
cm, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error getting message: %v", err)
}
// Check the message.
// Check out original headers.
if cm.Header.Get("Accept-Encoding") != "json" ||
cm.Header.Get("Authorization") != "s3cr3t" {
t.Fatalf("Original headers not present")
}
if !bytes.Equal(m.Data, cm.Data) {
t.Fatalf("Message payloads are not the same: %q vs %q", cm.Data, m.Data)
}
})
}
}
func TestJetStreamTemplateBasics(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
mcfg := &server.StreamConfig{
Subjects: []string{"kv.*"},
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
MaxMsgs: 4,
Storage: server.MemoryStorage,
Replicas: 1,
}
template := &server.StreamTemplateConfig{
Name: "kv",
Config: mcfg,
MaxStreams: 4,
}
if _, err := acc.AddStreamTemplate(template); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if templates := acc.Templates(); len(templates) != 1 {
t.Fatalf("Expected to get array of 1 template, got %d", len(templates))
}
if err := acc.DeleteStreamTemplate("foo"); err == nil {
t.Fatalf("Expected an error for non-existent template")
}
if err := acc.DeleteStreamTemplate(template.Name); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if templates := acc.Templates(); len(templates) != 0 {
t.Fatalf("Expected to get array of no templates, got %d", len(templates))
}
// Add it back in and test basics
if _, err := acc.AddStreamTemplate(template); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Connect a client and send a message which should trigger the stream creation.
nc := clientConnectToServer(t, s)
defer nc.Close()
sendStreamMsg(t, nc, "kv.22", "derek")
sendStreamMsg(t, nc, "kv.33", "cat")
sendStreamMsg(t, nc, "kv.44", "sam")
sendStreamMsg(t, nc, "kv.55", "meg")
if nms := acc.NumStreams(); nms != 4 {
t.Fatalf("Expected 4 auto-created streams, got %d", nms)
}
// This one should fail due to max.
if resp, err := nc.Request("kv.99", nil, 100*time.Millisecond); err == nil {
t.Fatalf("Expected this to fail, but got %q", resp.Data)
}
// Now delete template and make sure the underlying streams go away too.
if err := acc.DeleteStreamTemplate(template.Name); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if nms := acc.NumStreams(); nms != 0 {
t.Fatalf("Expected no auto-created streams to remain, got %d", nms)
}
}
func TestJetStreamTemplateFileStoreRecovery(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
mcfg := &server.StreamConfig{
Subjects: []string{"kv.*"},
Retention: server.LimitsPolicy,
MaxAge: time.Hour,
MaxMsgs: 50,
Storage: server.FileStorage,
Replicas: 1,
}
template := &server.StreamTemplateConfig{
Name: "kv",
Config: mcfg,
MaxStreams: 100,
}
if _, err := acc.AddStreamTemplate(template); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure we can not add in a stream on our own with a template owner.
badCfg := *mcfg
badCfg.Name = "bad"
badCfg.Template = "kv"
if _, err := acc.AddStream(&badCfg); err == nil {
t.Fatalf("Expected error adding stream with direct template owner")
}
// Connect a client and send a message which should trigger the stream creation.
nc := clientConnectToServer(t, s)
defer nc.Close()
for i := 1; i <= 100; i++ {
subj := fmt.Sprintf("kv.%d", i)
for x := 0; x < 50; x++ {
sendStreamMsg(t, nc, subj, "Hello")
}
}
nc.Flush()
if nms := acc.NumStreams(); nms != 100 {
t.Fatalf("Expected 100 auto-created streams, got %d", nms)
}
// Capture port since it was dynamic.
u, _ := url.Parse(s.ClientURL())
port, _ := strconv.Atoi(u.Port())
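// Helper: restart the JetStream server on the same port, reusing the existing store directory.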
restartServer := func() {
t.Helper()
sd := s.JetStreamConfig().StoreDir
// Stop current server.
s.Shutdown()
// Restart.
s = RunJetStreamServerOnPort(port, sd)
}
// Restart.
restartServer()
defer s.Shutdown()
acc = s.GlobalAccount()
if nms := acc.NumStreams(); nms != 100 {
t.Fatalf("Expected 100 auto-created streams, got %d", nms)
}
tmpl, err := acc.LookupStreamTemplate(template.Name)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure t.Delete() survives restart.
tmpl.Delete()
// Restart.
restartServer()
defer s.Shutdown()
acc = s.GlobalAccount()
if nms := acc.NumStreams(); nms != 0 {
t.Fatalf("Expected no auto-created streams, got %d", nms)
}
if _, err := acc.LookupStreamTemplate(template.Name); err == nil {
t.Fatalf("Expected to not find the template after restart")
}
}
// This tests our ability to conditionally rewrite subjects for the last mile
// when working with JetStream. Consumers receive messages that have their subjects
// rewritten to match the original subject. NATS routing is all subject-based except
// for the last mile to the client.
func TestJetStreamSingleInstanceRemoteAccess(t *testing.T) {
ca := createClusterWithName(t, "A", 1)
defer shutdownCluster(ca)
cb := createClusterWithName(t, "B", 1, ca)
defer shutdownCluster(cb)
// Connect our leafnode server to cluster B.
opts := cb.opts[rand.Intn(len(cb.opts))]
s, _ := runSolicitLeafServer(opts)
defer s.Shutdown()
checkLeafNodeConnected(t, s)
if err := s.EnableJetStream(nil); err != nil {
t.Fatalf("Expected no error, got %v", err)
}
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "foo", Storage: server.MemoryStorage})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 10
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo", "Hello World!")
}
// Now create a push-based consumer, connected to the non-JetStream server via a random server on cluster A.
sl := ca.servers[rand.Intn(len(ca.servers))]
nc2 := clientConnectToServer(t, sl)
defer nc2.Close()
sub, _ := nc2.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
// Need to wait for interest to propagate across GW.
nc2.Flush()
time.Sleep(25 * time.Millisecond)
o, err := mset.AddConsumer(&server.ConsumerConfig{DeliverSubject: sub.Subject})
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
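// Helper: wait for the subscription to report the expected number of pending messages.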
checkSubPending := func(numExpected int) {
t.Helper()
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
checkSubPending(toSend)
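// Helper: verify the delivered message kept its original subject and that the reply carries the expected sequence.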
checkMsg := func(m *nats.Msg, err error, i int) {
t.Helper()
if err != nil {
t.Fatalf("Got an error checking message: %v", err)
}
if m.Subject != "foo" {
t.Fatalf("Expected original subject of %q, but got %q", "foo", m.Subject)
}
// Now check that reply subject exists and has a sequence as the last token.
if seq := o.SeqFromReply(m.Reply); seq != uint64(i) {
t.Fatalf("Expected sequence of %d , got %d", i, seq)
}
}
// Now check the subject to make sure it's the original one.
for i := 1; i <= toSend; i++ {
m, err := sub.NextMsg(time.Second)
checkMsg(m, err, i)
}
// Now do a pull based consumer.
o, err = mset.AddConsumer(workerModeConfig("p"))
if err != nil {
t.Fatalf("Expected no error with registered interest, got %v", err)
}
defer o.Delete()
nextMsg := o.RequestNextMsgSubject()
for i := 1; i <= toSend; i++ {
m, err := nc.Request(nextMsg, nil, time.Second)
checkMsg(m, err, i)
}
}
func clientConnectToServerWithUP(t *testing.T, opts *server.Options, user, pass string) *nats.Conn {
curl := fmt.Sprintf("nats://%s:%s@%s:%d", user, pass, opts.Host, opts.Port)
nc, err := nats.Connect(curl, nats.Name("JS-UP-TEST"), nats.ReconnectWait(5*time.Millisecond), nats.MaxReconnects(-1))
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
return nc
}
func TestJetStreamCanNotEnableOnSystemAccount(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
sa := s.SystemAccount()
if err := sa.EnableJetStream(nil); err == nil {
t.Fatalf("Expected an error trying to enable on the system account")
}
}
func TestJetStreamMultipleAccountsBasics(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
jetstream: {max_mem_store: 64GB, max_file_store: 10TB}
accounts: {
A: {
jetstream: enabled
users: [ {user: ua, password: pwd} ]
},
B: {
jetstream: {max_mem: 1GB, max_store: 1TB, max_streams: 10, max_consumers: 1k}
users: [ {user: ub, password: pwd} ]
},
C: {
users: [ {user: uc, password: pwd} ]
},
}
`))
defer os.Remove(conf)
s, opts := RunServerWithConfig(conf)
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
if !s.JetStreamEnabled() {
t.Fatalf("Expected JetStream to be enabled")
}
nca := clientConnectToServerWithUP(t, opts, "ua", "pwd")
defer nca.Close()
ncb := clientConnectToServerWithUP(t, opts, "ub", "pwd")
defer ncb.Close()
resp, err := ncb.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info server.JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
limits := info.Limits
if limits.MaxStreams != 10 {
t.Fatalf("Expected 10 for MaxStreams, got %d", limits.MaxStreams)
}
if limits.MaxConsumers != 1000 {
t.Fatalf("Expected MaxConsumers of %d, got %d", 1000, limits.MaxConsumers)
}
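// 1GB in bytes, used to check the configured limits.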
gb := int64(1024 * 1024 * 1024)
if limits.MaxMemory != gb {
t.Fatalf("Expected MaxMemory to be 1GB, got %d", limits.MaxMemory)
}
if limits.MaxStore != 1024*gb {
t.Fatalf("Expected MaxStore to be 1TB, got %d", limits.MaxStore)
}
ncc := clientConnectToServerWithUP(t, opts, "uc", "pwd")
defer ncc.Close()
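// Helper: assert the account info request comes back with an error, i.e. JetStream is not enabled for that account.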
expectNotEnabled := func(resp *nats.Msg, err error) {
t.Helper()
if err != nil {
t.Fatalf("Unexpected error requesting enabled status: %v", err)
}
if resp == nil {
t.Fatalf("No response, possible timeout?")
}
var iResp server.JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &iResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if iResp.Error == nil {
t.Fatalf("Expected an error on not enabled account")
}
}
// Check C is not enabled. We expect a negative response, not a timeout.
expectNotEnabled(ncc.Request(server.JSApiAccountInfo, nil, 250*time.Millisecond))
// Now do a simple config reload and check that we do the right thing. Test enabling, disabling, and changes to limits.
newConf := []byte(`
listen: 127.0.0.1:-1
jetstream: {max_mem_store: 64GB, max_file_store: 10TB}
accounts: {
A: {
jetstream: disabled
users: [ {user: ua, password: pwd} ]
},
B: {
jetstream: {max_mem: 32GB, max_store: 512GB, max_streams: 100, max_consumers: 4k}
users: [ {user: ub, password: pwd} ]
},
C: {
jetstream: {max_mem: 1GB, max_store: 1TB, max_streams: 10, max_consumers: 1k}
users: [ {user: uc, password: pwd} ]
},
}
`)
if err := ioutil.WriteFile(conf, newConf, 0600); err != nil {
t.Fatalf("Error rewriting server's config file: %v", err)
}
if err := s.Reload(); err != nil {
t.Fatalf("Error on server reload: %v", err)
}
expectNotEnabled(nca.Request(server.JSApiAccountInfo, nil, 250*time.Millisecond))
resp, _ = ncb.Request(server.JSApiAccountInfo, nil, 250*time.Millisecond)
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Error != nil {
t.Fatalf("Expected JetStream to be enabled, got %+v", info.Error)
}
resp, _ = ncc.Request(server.JSApiAccountInfo, nil, 250*time.Millisecond)
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Error != nil {
t.Fatalf("Expected JetStream to be enabled, got %+v", info.Error)
}
// Now check that limits have been updated.
// Account B
resp, err = ncb.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
limits = info.Limits
if limits.MaxStreams != 100 {
t.Fatalf("Expected 100 for MaxStreams, got %d", limits.MaxStreams)
}
if limits.MaxConsumers != 4000 {
t.Fatalf("Expected MaxConsumers of %d, got %d", 4000, limits.MaxConsumers)
}
if limits.MaxMemory != 32*gb {
t.Fatalf("Expected MaxMemory to be 32GB, got %d", limits.MaxMemory)
}
if limits.MaxStore != 512*gb {
t.Fatalf("Expected MaxStore to be 512GB, got %d", limits.MaxStore)
}
// Account C
resp, err = ncc.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
limits = info.Limits
if limits.MaxStreams != 10 {
t.Fatalf("Expected 10 for MaxStreams, got %d", limits.MaxStreams)
}
if limits.MaxConsumers != 1000 {
t.Fatalf("Expected MaxConsumers of %d, got %d", 1000, limits.MaxConsumers)
}
if limits.MaxMemory != gb {
t.Fatalf("Expected MaxMemory to be 1GB, got %d", limits.MaxMemory)
}
if limits.MaxStore != 1024*gb {
t.Fatalf("Expected MaxStore to be 1TB, got %d", limits.MaxStore)
}
}
func TestJetStreamServerResourcesConfig(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
jetstream: {max_mem_store: 2GB, max_file_store: 1TB}
`))
defer os.Remove(conf)
s, _ := RunServerWithConfig(conf)
defer s.Shutdown()
if !s.JetStreamEnabled() {
t.Fatalf("Expected JetStream to be enabled")
}
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
gb := int64(1024 * 1024 * 1024)
jsc := s.JetStreamConfig()
if jsc.MaxMemory != 2*gb {
t.Fatalf("Expected MaxMemory to be %d, got %d", 2*gb, jsc.MaxMemory)
}
if jsc.MaxStore != 1024*gb {
t.Fatalf("Expected MaxStore to be %d, got %d", 1024*gb, jsc.MaxStore)
}
}
////////////////////////////////////////
// Benchmark placeholders
// TODO(dlc) - move
////////////////////////////////////////
func TestJetStreamPubPerf(t *testing.T) {
// Comment out the SkipNow() to run; placeholder benchmark for now.
t.SkipNow()
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
msetConfig := server.StreamConfig{
Name: "sr22",
Storage: server.FileStorage,
Subjects: []string{"foo"},
}
if _, err := acc.AddStream(&msetConfig); err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 5000000
numProducers := 1
payload := []byte("Hello World")
startCh := make(chan bool)
var wg sync.WaitGroup
for n := 0; n < numProducers; n++ {
wg.Add(1)
go func() {
defer wg.Done()
<-startCh
for i := 0; i < int(toSend)/numProducers; i++ {
nc.Publish("foo", payload)
}
nc.Flush()
}()
}
// Give the goroutines a chance to start and block on startCh.
time.Sleep(10 * time.Millisecond)
start := time.Now()
close(startCh)
wg.Wait()
tt := time.Since(start)
fmt.Printf("time is %v\n", tt)
fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds())
}
func TestJetStreamPubWithAsyncResponsePerf(t *testing.T) {
// Comment out the SkipNow() to run; placeholder benchmark for now.
t.SkipNow()
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
msetConfig := server.StreamConfig{
Name: "sr33",
Storage: server.MemoryStorage,
Subjects: []string{"foo"},
}
if _, err := acc.AddStream(&msetConfig); err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
toSend := 1000000
payload := []byte("Hello World")
start := time.Now()
for i := 0; i < toSend; i++ {
nc.PublishRequest("foo", "bar", payload)
}
nc.Flush()
tt := time.Since(start)
fmt.Printf("time is %v\n", tt)
fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds())
}
func TestJetStreamConsumerPerf(t *testing.T) {
// Comment out the SkipNow() to run; placeholder benchmark for now.
t.SkipNow()
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
msetConfig := server.StreamConfig{
Name: "sr22",
Storage: server.MemoryStorage,
Subjects: []string{"foo"},
}
mset, err := acc.AddStream(&msetConfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
payload := []byte("Hello World")
toStore := 2000000
for i := 0; i < toStore; i++ {
nc.Publish("foo", payload)
}
nc.Flush()
_, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: "d",
DeliverSubject: "d",
AckPolicy: server.AckNone,
})
if err != nil {
t.Fatalf("Error creating consumer: %v", err)
}
var received int
done := make(chan bool)
nc.Subscribe("d", func(m *nats.Msg) {
received++
if received >= toStore {
done <- true
}
})
start := time.Now()
nc.Flush()
<-done
tt := time.Since(start)
fmt.Printf("time is %v\n", tt)
fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds())
}
func TestJetStreamConsumerAckFileStorePerf(t *testing.T) {
// Comment out the SkipNow() to run; placeholder benchmark for now.
t.SkipNow()
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
msetConfig := server.StreamConfig{
Name: "sr22",
Storage: server.FileStorage,
Subjects: []string{"foo"},
}
mset, err := acc.AddStream(&msetConfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
payload := []byte("Hello World")
toStore := uint64(200000)
for i := uint64(0); i < toStore; i++ {
nc.Publish("foo", payload)
}
nc.Flush()
if msgs := mset.State().Msgs; msgs != uint64(toStore) {
t.Fatalf("Expected %d messages, got %d", toStore, msgs)
}
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d",
DeliverSubject: "d",
AckPolicy: server.AckExplicit,
AckWait: 10 * time.Minute,
})
if err != nil {
t.Fatalf("Error creating consumer: %v", err)
}
defer o.Stop()
var received uint64
done := make(chan bool)
sub, _ := nc.Subscribe("d", func(m *nats.Msg) {
m.Respond(nil) // Ack
received++
if received >= toStore {
done <- true
}
})
sub.SetPendingLimits(-1, -1)
start := time.Now()
nc.Flush()
<-done
tt := time.Since(start)
fmt.Printf("time is %v\n", tt)
fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds())
}
func TestJetStreamPubSubPerf(t *testing.T) {
// Comment out the SkipNow() to run; placeholder benchmark for now.
t.SkipNow()
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc := s.GlobalAccount()
msetConfig := server.StreamConfig{
Name: "MSET22",
Storage: server.FileStorage,
Subjects: []string{"foo"},
}
mset, err := acc.AddStream(&msetConfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
nc := clientConnectToServer(t, s)
defer nc.Close()
var toSend = 1000000
var received int
done := make(chan bool)
delivery := "d"
nc.Subscribe(delivery, func(m *nats.Msg) {
received++
if received >= toSend {
done <- true
}
})
nc.Flush()
_, err = mset.AddConsumer(&server.ConsumerConfig{
DeliverSubject: delivery,
AckPolicy: server.AckNone,
})
if err != nil {
t.Fatalf("Error creating consumer: %v", err)
}
payload := []byte("Hello World")
start := time.Now()
for i := 0; i < toSend; i++ {
nc.Publish("foo", payload)
}
<-done
tt := time.Since(start)
fmt.Printf("time is %v\n", tt)
fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds())
}
func TestJetStreamAckExplicitMsgRemoval(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.MemoryStorage,
Subjects: []string{"foo.*"},
Retention: server.InterestPolicy,
}},
{"FileStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo.*"},
Retention: server.InterestPolicy,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc1 := clientConnectToServer(t, s)
defer nc1.Close()
nc2 := clientConnectToServer(t, s)
defer nc2.Close()
// Create two durable consumers on the same subject
sub1, _ := nc1.SubscribeSync(nats.NewInbox())
defer sub1.Unsubscribe()
nc1.Flush()
o1, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur1",
DeliverSubject: sub1.Subject,
FilterSubject: "foo.bar",
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o1.Delete()
sub2, _ := nc2.SubscribeSync(nats.NewInbox())
defer sub2.Unsubscribe()
nc2.Flush()
o2, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur2",
DeliverSubject: sub2.Subject,
FilterSubject: "foo.bar",
AckPolicy: server.AckExplicit,
AckWait: 100 * time.Millisecond,
})
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o2.Delete()
// Send 2 messages
toSend := 2
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc1, "foo.bar", fmt.Sprintf("msg%v", i+1))
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %v messages, got %d", toSend, state.Msgs)
}
// Receive the messages and ack them.
subs := []*nats.Subscription{sub1, sub2}
for _, sub := range subs {
for i := 0; i < toSend; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error acking message: %v", err)
}
m.Respond(nil)
}
}
// Make sure the acks are processed before we check state and send new messages.
checkFor(t, time.Second, 25*time.Millisecond, func() error {
if state = mset.State(); state.Msgs != 0 {
return fmt.Errorf("Stream still has messages")
}
return nil
})
// Now close the 2nd subscription...
sub2.Unsubscribe()
nc2.Flush()
// Send 2 more new messages
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc1, "foo.bar", fmt.Sprintf("msg%v", 2+i+1))
}
state = mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %v messages, got %d", toSend, state.Msgs)
}
// The first subscription should get them and will ack them.
for i := 0; i < toSend; i++ {
m, err := sub1.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error getting message to ack: %v", err)
}
m.Respond(nil)
}
// For acks from m.Respond above
nc1.Flush()
// Now recreate the subscription for the 2nd JS consumer
sub2, _ = nc2.SubscribeSync(nats.NewInbox())
defer sub2.Unsubscribe()
o2, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur2",
DeliverSubject: sub2.Subject,
FilterSubject: "foo.bar",
AckPolicy: server.AckExplicit,
AckWait: 100 * time.Millisecond,
})
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o2.Delete()
// Those messages should be redelivered to the 2nd consumer
for i := 1; i <= toSend; i++ {
m, err := sub2.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error receiving message %d: %v", i, err)
}
m.Respond(nil)
sseq := o2.StreamSeqFromReply(m.Reply)
// Depending on timing from above we could receive stream sequences out of order but
// we know we want 3 & 4.
if sseq != 3 && sseq != 4 {
t.Fatalf("Expected stream sequence of 3 or 4 but got %d", sseq)
}
}
})
}
}
// This test is in support of clients that want to match on subject: they
// can always set the filter subject, and if the stream only has one subject
// and it matches, the filter is cleared automatically. This eliminates the need
// to know whether a subject is a subset of a stream when looking it up.
func TestJetStreamConsumerFilterSubject(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
sc := &server.StreamConfig{Name: "MY_STREAM", Subjects: []string{"foo"}}
mset, err := s.GlobalAccount().AddStream(sc)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
cfg := &server.ConsumerConfig{
Durable: "d",
DeliverSubject: "A",
AckPolicy: server.AckExplicit,
FilterSubject: "foo",
}
o, err := mset.AddConsumer(cfg)
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o.Delete()
if o.Info().Config.FilterSubject != "" {
t.Fatalf("Expected the filter to be cleared")
}
// Now use the original cfg with updated delivery subject and make sure that works ok.
cfg = &server.ConsumerConfig{
Durable: "d",
DeliverSubject: "B",
AckPolicy: server.AckExplicit,
FilterSubject: "foo",
}
o, err = mset.AddConsumer(cfg)
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o.Delete()
}
func TestJetStreamStoredMsgsDontDisappearAfterCacheExpiration(t *testing.T) {
sc := &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo.>"},
Retention: server.InterestPolicy,
}
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStreamWithStore(sc, &server.FileStoreConfig{BlockSize: 128, CacheExpire: 15 * time.Millisecond})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc1 := clientConnectWithOldRequest(t, s)
defer nc1.Close()
// Create a durable consumer.
sub, _ := nc1.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc1.Flush()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur",
DeliverSubject: sub.Subject,
FilterSubject: "foo.bar",
DeliverPolicy: server.DeliverNew,
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o.Delete()
nc2 := clientConnectWithOldRequest(t, s)
defer nc2.Close()
sendStreamMsg(t, nc2, "foo.bar", "msg1")
msg, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Did not get message: %v", err)
}
if string(msg.Data) != "msg1" {
t.Fatalf("Unexpected message: %q", msg.Data)
}
nc1.Close()
// Get the message from the stream
getMsgSeq := func(seq uint64) {
t.Helper()
mreq := &server.JSApiMsgGetRequest{Seq: seq}
req, err := json.Marshal(mreq)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
smsgj, err := nc2.Request(fmt.Sprintf(server.JSApiMsgGetT, sc.Name), req, time.Second)
if err != nil {
t.Fatalf("Could not retrieve stream message: %v", err)
}
if strings.Contains(string(smsgj.Data), "code") {
t.Fatalf("Error: %q", smsgj.Data)
}
}
getMsgSeq(1)
time.Sleep(time.Second)
sendStreamMsg(t, nc2, "foo.bar", "msg2")
sendStreamMsg(t, nc2, "foo.bar", "msg3")
getMsgSeq(1)
getMsgSeq(2)
getMsgSeq(3)
}
func TestJetStreamConsumerUpdateRedelivery(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.MemoryStorage,
Subjects: []string{"foo.>"},
Retention: server.InterestPolicy,
}},
{"FileStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo.>"},
Retention: server.InterestPolicy,
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Create a durable consumer.
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur22",
DeliverSubject: sub.Subject,
FilterSubject: "foo.bar",
AckPolicy: server.AckExplicit,
AckWait: 100 * time.Millisecond,
MaxDeliver: 3,
})
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o.Delete()
// Send 20 messages
toSend := 20
for i := 1; i <= toSend; i++ {
sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("msg-%v", i))
}
state := mset.State()
if state.Msgs != uint64(toSend) {
t.Fatalf("Expected %v messages, got %d", toSend, state.Msgs)
}
// Receive the messages and ack only every 4th
for i := 0; i < toSend; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error getting message: %v", err)
}
seq, _, _, _, _ := o.ReplyInfo(m.Reply)
// 4, 8, 12, 16, 20
if seq%4 == 0 {
m.Respond(nil)
}
}
// Now close the sub and open a new one and update the consumer.
sub.Unsubscribe()
// Wait for it to become inactive
checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error {
if o.Active() {
return fmt.Errorf("Consumer still active")
}
return nil
})
// Send 20 more messages.
for i := toSend; i < toSend*2; i++ {
sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("msg-%v", i))
}
// Create new subscription.
sub, _ = nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
nc.Flush()
o, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur22",
DeliverSubject: sub.Subject,
FilterSubject: "foo.bar",
AckPolicy: server.AckExplicit,
AckWait: 100 * time.Millisecond,
MaxDeliver: 3,
})
if err != nil {
t.Fatalf("Unexpected error adding consumer: %v", err)
}
defer o.Delete()
expect := toSend + toSend - 5 // mod 4 acks
checkFor(t, time.Second, 5*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != expect {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, expect)
}
return nil
})
for i, eseq := 0, uint64(1); i < expect; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error getting message: %v", err)
}
// Skip the ones we ack'd from above. We should not get them back here.
if eseq <= uint64(toSend) && eseq%4 == 0 {
eseq++
}
seq, _, dc, _, _ := o.ReplyInfo(m.Reply)
if seq != eseq {
t.Fatalf("Expected stream sequence of %d, got %d", eseq, seq)
}
if seq <= uint64(toSend) && dc != 2 {
t.Fatalf("Expected delivery count of 2 for sequence of %d, got %d", seq, dc)
}
if seq > uint64(toSend) && dc != 1 {
t.Fatalf("Expected delivery count of 1 for sequence of %d, got %d", seq, dc)
}
if seq > uint64(toSend) {
m.Respond(nil) // Ack
}
eseq++
}
// We should get the second half back since we did not ack those from above.
expect = toSend - 5
checkFor(t, time.Second, 5*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != expect {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, expect)
}
return nil
})
for i, eseq := 0, uint64(1); i < expect; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error getting message: %v", err)
}
// Skip the ones we ack'd from above. We should not get them back here.
if eseq <= uint64(toSend) && eseq%4 == 0 {
eseq++
}
seq, _, dc, _, _ := o.ReplyInfo(m.Reply)
if seq != eseq {
t.Fatalf("Expected stream sequence of %d, got %d", eseq, seq)
}
if dc != 3 {
t.Fatalf("Expected delivery count of 3 for sequence of %d, got %d", seq, dc)
}
eseq++
}
})
}
}
func TestJetStreamConsumerMaxAckPending(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.MemoryStorage,
Subjects: []string{"foo.*"},
}},
{"FileStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo.*"},
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Do error scenarios.
_, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: "d22",
DeliverSubject: nats.NewInbox(),
AckPolicy: server.AckNone,
MaxAckPending: 1,
})
if err == nil {
t.Fatalf("Expected error, MaxAckPending only applicable to ack != AckNone")
}
// Queue up 100 messages.
toSend := 100
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("MSG: %d", i+1))
}
// Limit to 33
maxAckPending := 33
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d22",
DeliverSubject: nats.NewInbox(),
AckPolicy: server.AckExplicit,
MaxAckPending: maxAckPending,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
sub, _ := nc.SubscribeSync(o.Info().Config.DeliverSubject)
defer sub.Unsubscribe()
checkSubPending := func(numExpected int) {
t.Helper()
checkFor(t, time.Second, 20*time.Millisecond, func() error {
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
checkSubPending(maxAckPending)
// We hit the limit, double check we stayed there.
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != maxAckPending {
t.Fatalf("Too many messages received: %d vs %d", nmsgs, maxAckPending)
}
// Now ack them all.
for i := 0; i < maxAckPending; i++ {
m, err := sub.NextMsg(time.Second)
if err != nil {
t.Fatalf("Error receiving message %d: %v", i, err)
}
m.Respond(nil)
}
checkSubPending(maxAckPending)
o.Stop()
mset.Purge()
// Now test a consumer that is live while we publish messages to the stream.
o, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: "d33",
DeliverSubject: nats.NewInbox(),
AckPolicy: server.AckExplicit,
MaxAckPending: maxAckPending,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
sub, _ = nc.SubscribeSync(o.Info().Config.DeliverSubject)
defer sub.Unsubscribe()
nc.Flush()
checkSubPending(0)
// Now stream more than maxAckPending.
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.baz", fmt.Sprintf("MSG: %d", i+1))
}
checkSubPending(maxAckPending)
// We hit the limit, double check we stayed there.
if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != maxAckPending {
t.Fatalf("Too many messages received: %d vs %d", nmsgs, maxAckPending)
}
})
}
}
func TestJetStreamPullConsumerMaxAckPending(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.MemoryStorage,
Subjects: []string{"foo.*"},
}},
{"FileStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo.*"},
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Queue up 100 messages.
toSend := 100
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("MSG: %d", i+1))
}
// Limit to 33
maxAckPending := 33
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d22",
AckPolicy: server.AckExplicit,
MaxAckPending: maxAckPending,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
getSubj := o.RequestNextMsgSubject()
var toAck []*nats.Msg
for i := 0; i < maxAckPending; i++ {
if m, err := nc.Request(getSubj, nil, time.Second); err != nil {
t.Fatalf("Unexpected error: %v", err)
} else {
toAck = append(toAck, m)
}
}
// This should fail, but we do not want to queue up our request.
req := &server.JSApiConsumerGetNextRequest{Batch: 1, NoWait: true}
jreq, _ := json.Marshal(req)
m, err := nc.Request(getSubj, jreq, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if m.Header.Get("Status") != "409" {
t.Fatalf("Expected a 409 status code, got %q", m.Header.Get("Status"))
}
// Now ack them all.
for _, m := range toAck {
m.Respond(nil)
}
// Now do batch above the max.
sub, _ := nc.SubscribeSync(nats.NewInbox())
defer sub.Unsubscribe()
checkSubPending := func(numExpected int) {
t.Helper()
checkFor(t, time.Second, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != numExpected {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
}
return nil
})
}
req = &server.JSApiConsumerGetNextRequest{Batch: toSend}
jreq, _ = json.Marshal(req)
nc.PublishRequest(getSubj, sub.Subject, jreq)
checkSubPending(maxAckPending)
// We hit the limit, double check we stayed there.
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxAckPending {
t.Fatalf("Too many messages received: %d vs %d", nmsgs, maxAckPending)
}
})
}
}
func TestJetStreamPullConsumerMaxAckPendingRedeliveries(t *testing.T) {
cases := []struct {
name string
mconfig *server.StreamConfig
}{
{"MemoryStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.MemoryStorage,
Subjects: []string{"foo.*"},
}},
{"FileStore", &server.StreamConfig{
Name: "MY_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo.*"},
}},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
s := RunBasicJetStreamServer()
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(c.mconfig)
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
// Queue up 10 messages.
toSend := 10
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("MSG: %d", i+1))
}
// Limit to 1
maxAckPending := 1
ackWait := 20 * time.Millisecond
expSeq := uint64(4)
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d22",
DeliverPolicy: server.DeliverByStartSequence,
OptStartSeq: expSeq,
AckPolicy: server.AckExplicit,
AckWait: ackWait,
MaxAckPending: maxAckPending,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
getSubj := o.RequestNextMsgSubject()
delivery := uint64(1)
getNext := func() {
t.Helper()
m, err := nc.Request(getSubj, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
sseq, dseq, dcount, _, pending := o.ReplyInfo(m.Reply)
if sseq != expSeq {
t.Fatalf("Expected stream sequence of %d, got %d", expSeq, sseq)
}
if dseq != delivery {
t.Fatalf("Expected consumer sequence of %d, got %d", delivery, dseq)
}
if dcount != delivery {
t.Fatalf("Expected delivery count of %d, got %d", delivery, dcount)
}
if pending != uint64(toSend)-expSeq {
t.Fatalf("Expected pending to be %d, got %d", uint64(toSend)-expSeq, pending)
}
delivery++
}
getNext()
getNext()
getNext()
getNext()
getNext()
})
}
}
func TestJetStreamDeliveryAfterServerRestart(t *testing.T) {
opts := DefaultTestOptions
opts.Port = -1
opts.JetStream = true
s := RunServer(&opts)
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{
Name: "MY_STREAM",
Storage: server.FileStorage,
Subjects: []string{"foo.>"},
Retention: server.InterestPolicy,
})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
nc := clientConnectToServer(t, s)
defer nc.Close()
inbox := nats.NewInbox()
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur",
DeliverSubject: inbox,
DeliverPolicy: server.DeliverNew,
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
sub, err := nc.SubscribeSync(inbox)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
nc.Flush()
// Send 1 message
sendStreamMsg(t, nc, "foo.bar", "msg1")
// Make sure we receive it and ack it.
msg, err := sub.NextMsg(250 * time.Millisecond)
if err != nil {
t.Fatalf("Did not get message: %v", err)
}
// Ack it!
msg.Respond(nil)
nc.Flush()
// Shutdown client and server
nc.Close()
dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, server.JetStreamStoreDir)
s.Shutdown()
opts.Port = -1
opts.StoreDir = dir
s = RunServer(&opts)
defer s.Shutdown()
// Lookup stream.
mset, err = s.GlobalAccount().LookupStream("MY_STREAM")
if err != nil {
t.Fatalf("Error looking up stream: %v", err)
}
// Update consumer's deliver subject with new inbox
inbox = nats.NewInbox()
o, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: "dur",
DeliverSubject: inbox,
DeliverPolicy: server.DeliverNew,
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
nc = clientConnectToServer(t, s)
defer nc.Close()
// Send 2nd message
sendStreamMsg(t, nc, "foo.bar", "msg2")
// Start sub on new inbox
sub, err = nc.SubscribeSync(inbox)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
nc.Flush()
// Should receive message 2.
if _, err := sub.NextMsg(500 * time.Millisecond); err != nil {
t.Fatalf("Did not get message: %v", err)
}
}
// This is for the basics of importing the ability to send to a stream and consume
// from a consumer that is pull based or push based on a well known delivery subject.
func TestJetStreamAccountImportBasics(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
no_auth_user: rip
jetstream: {max_mem_store: 64GB, max_file_store: 10TB}
accounts: {
JS: {
jetstream: enabled
users: [ {user: dlc, password: foo} ]
exports [
# This is for sending into a stream from other accounts.
{ service: "ORDERS.*" }
# This is for accessing a pull based consumer.
{ service: "$JS.API.CONSUMER.MSG.NEXT.*.*" }
# This is streaming to a delivery subject for a push based consumer.
{ stream: "deliver.ORDERS" }
# This is to ack received messages. This is a service to ack acks..
{ service: "$JS.ACK.ORDERS.*.>" }
]
},
IU: {
users: [ {user: rip, password: bar} ]
imports [
{ service: { subject: "ORDERS.*", account: JS }, to: "my.orders.$1" }
{ service: { subject: "$JS.API.CONSUMER.MSG.NEXT.ORDERS.d", account: JS }, to: "nxt.msg" }
{ stream: { subject: "deliver.ORDERS", account: JS }, to: "d" }
{ service: { subject: "$JS.ACK.ORDERS.*.>", account: JS } }
]
},
}
`))
defer os.Remove(conf)
s, _ := RunServerWithConfig(conf)
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc, err := s.LookupAccount("JS")
if err != nil {
t.Fatalf("Unexpected error looking up account: %v", err)
}
mset, err := acc.AddStream(&server.StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// This should be the rip user, the one that imports some JS.
nc := clientConnectToServer(t, s)
defer nc.Close()
// Simple publish to a stream.
pubAck := sendStreamMsg(t, nc, "my.orders.foo", "ORDERS-1")
if pubAck.Stream != "ORDERS" || pubAck.Sequence != 1 {
t.Fatalf("Bad pubAck received: %+v", pubAck)
}
if msgs := mset.State().Msgs; msgs != 1 {
t.Fatalf("Expected 1 message, got %d", msgs)
}
total := 2
for i := 2; i <= total; i++ {
sendStreamMsg(t, nc, "my.orders.bar", fmt.Sprintf("ORDERS-%d", i))
}
if msgs := mset.State().Msgs; msgs != uint64(total) {
t.Fatalf("Expected %d messages, got %d", total, msgs)
}
// Now test access to a pull based consumer, e.g. workqueue.
o, err := mset.AddConsumer(&server.ConsumerConfig{
Durable: "d",
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
// We mapped the next message request, "$JS.API.CONSUMER.MSG.NEXT.ORDERS.d" -> "nxt.msg"
m, err := nc.Request("nxt.msg", nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if string(m.Data) != "ORDERS-1" {
t.Fatalf("Expected to receive %q, got %q", "ORDERS-1", m.Data)
}
// Now test access to a push based consumer
o, err = mset.AddConsumer(&server.ConsumerConfig{
Durable: "p",
DeliverSubject: "deliver.ORDERS",
AckPolicy: server.AckExplicit,
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
defer o.Delete()
// We remapped from above, deliver.ORDERS -> d
sub, _ := nc.SubscribeSync("d")
defer sub.Unsubscribe()
checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error {
if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != total {
return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, total)
}
return nil
})
m, _ = sub.NextMsg(time.Second)
// Make sure we remapped subject correctly across the account boundary.
if m.Subject != "ORDERS.foo" {
t.Fatalf("Expected subject of %q, got %q", "ORDERS.foo", m.Subject)
}
// Now make sure we can ack messages correctly.
m.Respond(server.AckAck)
nc.Flush()
if info := o.Info(); info.AckFloor.Consumer != 1 {
t.Fatalf("Did not receive the ack properly")
}
// Grab second one now.
m, _ = sub.NextMsg(time.Second)
// Make sure we remapped subject correctly across the account boundary.
if m.Subject != "ORDERS.bar" {
t.Fatalf("Expected subject of %q, got %q", "ORDERS.bar", m.Subject)
}
// Now make sure we can ack messages and get back an ack as well.
resp, _ := nc.Request(m.Reply, nil, 100*time.Millisecond)
if resp == nil {
t.Fatalf("No response, possible timeout?")
}
if info := o.Info(); info.AckFloor.Consumer != 2 {
t.Fatalf("Did not receive the ack properly")
}
}
// This is for importing all of JetStream into another account for admin purposes.
func TestJetStreamAccountImportAll(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
no_auth_user: rip
jetstream: {max_mem_store: 64GB, max_file_store: 10TB}
accounts: {
JS: {
jetstream: enabled
users: [ {user: dlc, password: foo} ]
exports [ { service: "$JS.API.>" } ]
},
IU: {
users: [ {user: rip, password: bar} ]
imports [ { service: { subject: "$JS.API.>", account: JS }, to: "jsapi.>"} ]
},
}
`))
defer os.Remove(conf)
s, _ := RunServerWithConfig(conf)
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
acc, err := s.LookupAccount("JS")
if err != nil {
t.Fatalf("Unexpected error looking up account: %v", err)
}
mset, err := acc.AddStream(&server.StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
// This should be the rip user, the one that imports all of JS.
nc := clientConnectToServer(t, s)
defer nc.Close()
mapSubj := func(subject string) string {
return strings.Replace(subject, "$JS.API.", "jsapi.", 1)
}
// This will get the current information about usage and limits for this account.
resp, err := nc.Request(mapSubj(server.JSApiAccountInfo), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info server.JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if info.Error != nil {
t.Fatalf("Unexpected error: %+v", info.Error)
}
// Lookup streams.
resp, err = nc.Request(mapSubj(server.JSApiStreams), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var namesResponse server.JSApiStreamNamesResponse
if err = json.Unmarshal(resp.Data, &namesResponse); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if namesResponse.Error != nil {
t.Fatalf("Unexpected error: %+v", namesResponse.Error)
}
}
// https://github.com/nats-io/nats-server/issues/1736
func TestJetStreamServerReload(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
jetstream: {max_mem_store: 64GB, max_file_store: 10TB }
accounts: {
A: { users: [ {user: ua, password: pwd} ] },
B: {
jetstream: {max_mem: 1GB, max_store: 1TB, max_streams: 10, max_consumers: 1k}
users: [ {user: ub, password: pwd} ]
},
SYS: { users: [ {user: uc, password: pwd} ] },
}
no_auth_user: ub
system_account: SYS
`))
defer os.Remove(conf)
s, _ := RunServerWithConfig(conf)
defer s.Shutdown()
if config := s.JetStreamConfig(); config != nil {
defer os.RemoveAll(config.StoreDir)
}
if !s.JetStreamEnabled() {
t.Fatalf("Expected JetStream to be enabled")
}
// Client for API requests.
nc := clientConnectToServer(t, s)
defer nc.Close()
checkJSAccount := func() {
t.Helper()
resp, err := nc.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info server.JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
checkJSAccount()
acc, err := s.LookupAccount("B")
if err != nil {
t.Fatalf("Unexpected error looking up account: %v", err)
}
mset, err := acc.AddStream(&server.StreamConfig{Name: "22"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
toSend := 10
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "22", fmt.Sprintf("MSG: %d", i+1))
}
if msgs := mset.State().Msgs; msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, msgs)
}
if err := s.Reload(); err != nil {
t.Fatalf("Error on server reload: %v", err)
}
// Wait to get reconnected.
checkFor(t, 5*time.Second, 10*time.Millisecond, func() error {
if !nc.IsConnected() {
return fmt.Errorf("Not connected")
}
return nil
})
checkJSAccount()
sendStreamMsg(t, nc, "22", "MSG: 22")
}
func TestJetStreamConfigReloadWithGlobalAccount(t *testing.T) {
template := `
authorization {
users [
{user: anonymous}
{user: user1, password: %s}
]
}
no_auth_user: anonymous
jetstream: enabled
`
conf := createConfFile(t, []byte(fmt.Sprintf(template, "pwd")))
defer os.Remove(conf)
s, _ := RunServerWithConfig(conf)
defer s.Shutdown()
// Client for API requests.
nc := clientConnectToServer(t, s)
defer nc.Close()
checkJSAccount := func() {
t.Helper()
resp, err := nc.Request(server.JSApiAccountInfo, nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var info server.JSApiAccountInfoResponse
if err := json.Unmarshal(resp.Data, &info); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
checkJSAccount()
mset, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "foo"})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset.Delete()
toSend := 10
for i := 0; i < toSend; i++ {
sendStreamMsg(t, nc, "foo", fmt.Sprintf("MSG: %d", i+1))
}
if msgs := mset.State().Msgs; msgs != uint64(toSend) {
t.Fatalf("Expected %d messages, got %d", toSend, msgs)
}
if err := ioutil.WriteFile(conf, []byte(fmt.Sprintf(template, "pwd2")), 0666); err != nil {
t.Fatalf("Error writing config: %v", err)
}
if err := s.Reload(); err != nil {
t.Fatalf("Error during config reload: %v", err)
}
// Try to add a new stream to the global account
mset2, err := s.GlobalAccount().AddStream(&server.StreamConfig{Name: "bar"})
if err != nil {
t.Fatalf("Unexpected error adding stream: %v", err)
}
defer mset2.Delete()
// Wait to get reconnected.
checkFor(t, 5*time.Second, 10*time.Millisecond, func() error {
if !nc.IsConnected() {
return fmt.Errorf("Not connected")
}
return nil
})
checkJSAccount()
}
 | 1 | 12,357 | Not sure which interest you are looking to propagate here, but keep in mind that we have helpers (maybe not in the /test package?) to check/wait for interest on a literal subject on a server for a given account. | nats-io-nats-server | go
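For context on that suggestion, here is a rough sketch of such a wait-for-interest helper, built from the checkFor polling pattern already used in these tests. LookupAccount appears elsewhere in this file, but the SubscriptionInterest method (its name and signature) is an assumption here, not a confirmed helper of the /test or server package.

func waitForInterest(t *testing.T, s *server.Server, accName, subject string) {
	t.Helper()
	acc, err := s.LookupAccount(accName)
	if err != nil {
		t.Fatalf("Unexpected error looking up account: %v", err)
	}
	checkFor(t, time.Second, 10*time.Millisecond, func() error {
		// SubscriptionInterest is assumed to report whether any subscription
		// currently matches the literal subject for this account.
		if !acc.SubscriptionInterest(subject) {
			return fmt.Errorf("No interest yet on %q for account %q", subject, accName)
		}
		return nil
	})
}

A test could call waitForInterest(t, s, "JS", "deliver.ORDERS") before asserting deliveries, instead of relying on fixed sleep or timeout windows.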
@@ -140,6 +140,7 @@ func execProcess(context *cli.Context) (int, error) {
detach: detach,
pidFile: context.String("pid-file"),
action: CT_ACT_RUN,
+ init: false,
}
return r.run(p)
} | 1 | // +build linux
package main
import (
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/urfave/cli"
)
var execCommand = cli.Command{
Name: "exec",
Usage: "execute new process inside the container",
ArgsUsage: `<container-id> <command> [command options] || -p process.json <container-id>
Where "<container-id>" is the name for the instance of the container and
"<command>" is the command to be executed in the container.
"<command>" can't be empty unless a "-p" flag provided.
EXAMPLE:
For example, if the container is configured to run the linux ps command the
following will output a list of processes running in the container:
# runc exec <container-id> ps`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "console-socket",
Usage: "path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal",
},
cli.StringFlag{
Name: "cwd",
Usage: "current working directory in the container",
},
cli.StringSliceFlag{
Name: "env, e",
Usage: "set environment variables",
},
cli.BoolFlag{
Name: "tty, t",
Usage: "allocate a pseudo-TTY",
},
cli.StringFlag{
Name: "user, u",
Usage: "UID (format: <uid>[:<gid>])",
},
cli.Int64SliceFlag{
Name: "additional-gids, g",
Usage: "additional gids",
},
cli.StringFlag{
Name: "process, p",
Usage: "path to the process.json",
},
cli.BoolFlag{
Name: "detach,d",
Usage: "detach from the container's process",
},
cli.StringFlag{
Name: "pid-file",
Value: "",
Usage: "specify the file to write the process id to",
},
cli.StringFlag{
Name: "process-label",
Usage: "set the asm process label for the process commonly used with selinux",
},
cli.StringFlag{
Name: "apparmor",
Usage: "set the apparmor profile for the process",
},
cli.BoolFlag{
Name: "no-new-privs",
Usage: "set the no new privileges value for the process",
},
cli.StringSliceFlag{
Name: "cap, c",
Value: &cli.StringSlice{},
Usage: "add a capability to the bounding set for the process",
},
cli.BoolFlag{
Name: "no-subreaper",
Usage: "disable the use of the subreaper used to reap reparented processes",
Hidden: true,
},
},
Action: func(context *cli.Context) error {
if err := checkArgs(context, 1, minArgs); err != nil {
return err
}
if err := revisePidFile(context); err != nil {
return err
}
status, err := execProcess(context)
if err == nil {
os.Exit(status)
}
return fmt.Errorf("exec failed: %v", err)
},
SkipArgReorder: true,
}
func execProcess(context *cli.Context) (int, error) {
container, err := getContainer(context)
if err != nil {
return -1, err
}
status, err := container.Status()
if err != nil {
return -1, err
}
if status == libcontainer.Stopped {
return -1, fmt.Errorf("cannot exec a container that has stopped")
}
path := context.String("process")
if path == "" && len(context.Args()) == 1 {
return -1, fmt.Errorf("process args cannot be empty")
}
detach := context.Bool("detach")
state, err := container.State()
if err != nil {
return -1, err
}
bundle := utils.SearchLabels(state.Config.Labels, "bundle")
p, err := getProcess(context, bundle)
if err != nil {
return -1, err
}
r := &runner{
enableSubreaper: false,
shouldDestroy: false,
container: container,
consoleSocket: context.String("console-socket"),
detach: detach,
pidFile: context.String("pid-file"),
action: CT_ACT_RUN,
}
return r.run(p)
}
func getProcess(context *cli.Context, bundle string) (*specs.Process, error) {
if path := context.String("process"); path != "" {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var p specs.Process
if err := json.NewDecoder(f).Decode(&p); err != nil {
return nil, err
}
return &p, validateProcessSpec(&p)
}
// process via cli flags
if err := os.Chdir(bundle); err != nil {
return nil, err
}
spec, err := loadSpec(specConfig)
if err != nil {
return nil, err
}
p := spec.Process
p.Args = context.Args()[1:]
// override the cwd, if passed
if context.String("cwd") != "" {
p.Cwd = context.String("cwd")
}
if ap := context.String("apparmor"); ap != "" {
p.ApparmorProfile = ap
}
if l := context.String("process-label"); l != "" {
p.SelinuxLabel = l
}
if caps := context.StringSlice("cap"); len(caps) > 0 {
for _, c := range caps {
p.Capabilities.Bounding = append(p.Capabilities.Bounding, c)
p.Capabilities.Inheritable = append(p.Capabilities.Inheritable, c)
p.Capabilities.Effective = append(p.Capabilities.Effective, c)
p.Capabilities.Permitted = append(p.Capabilities.Permitted, c)
p.Capabilities.Ambient = append(p.Capabilities.Ambient, c)
}
}
// append the passed env variables
p.Env = append(p.Env, context.StringSlice("env")...)
// set the tty
if context.IsSet("tty") {
p.Terminal = context.Bool("tty")
}
if context.IsSet("no-new-privs") {
p.NoNewPrivileges = context.Bool("no-new-privs")
}
// override the user, if passed
if context.String("user") != "" {
u := strings.SplitN(context.String("user"), ":", 2)
if len(u) > 1 {
gid, err := strconv.Atoi(u[1])
if err != nil {
return nil, fmt.Errorf("parsing %s as int for gid failed: %v", u[1], err)
}
p.User.GID = uint32(gid)
}
uid, err := strconv.Atoi(u[0])
if err != nil {
return nil, fmt.Errorf("parsing %s as int for uid failed: %v", u[0], err)
}
p.User.UID = uint32(uid)
}
for _, gid := range context.Int64Slice("additional-gids") {
if gid < 0 {
return nil, fmt.Errorf("additional-gids must be a positive number %d", gid)
}
p.User.AdditionalGids = append(p.User.AdditionalGids, uint32(gid))
}
return p, nil
}
| 1 | 16,471 | nit: should not need this. | opencontainers-runc | go |
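The nit above refers to Go's zero-value rules: a bool field omitted from a struct literal is already false, so spelling out init: false adds nothing. A minimal standalone sketch (the runner type here is a stripped-down stand-in, not runc's real struct):

package main

import "fmt"

// Stand-in for the runner struct; only the fields relevant to the nit.
type runner struct {
	detach bool
	init   bool
}

func main() {
	// init is omitted from the literal, so it takes the bool zero value.
	r := &runner{detach: true}
	fmt.Println(r.init) // prints: false
}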
@@ -89,6 +89,8 @@ public class Constants {
// The flow exec id for a flow trigger instance unable to trigger a flow yet
public static final int FAILED_EXEC_ID = -2;
+ // Name of the file which keeps project directory size
+ public static final String PROJECT_DIR_SIZE_FILE_NAME = "___azkaban_project_dir_size_in_bytes___";
public static class ConfigurationKeys {
| 1 | /*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban;
import java.time.Duration;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*
* <p>Use '.' to separate name spaces and '_" to separate words in the same namespace. e.g.
* azkaban.job.some_key</p>
*/
public class Constants {
// Azkaban Flow Versions
public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0;
public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0;
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Flow 2.0 node type
public static final String NODE_TYPE = "type";
public static final String FLOW_NODE_TYPE = "flow";
// Flow 2.0 flow and job path delimiter
public static final String PATH_DELIMITER = ":";
// Job properties override suffix
public static final String JOB_OVERRIDE_SUFFIX = ".jor";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_EXECUTOR_PORT_FILE = "executor.portfile";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
// Default flow trigger max wait time
public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10);
public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1);
// The flow exec id for a flow trigger instance which hasn't started a flow yet
public static final int UNASSIGNED_EXEC_ID = -1;
// The flow exec id for a flow trigger instance unable to trigger a flow yet
public static final int FAILED_EXEC_ID = -2;
public static class ConfigurationKeys {
// Configures Azkaban Flow Version in project YAML file
public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version";
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
/*
* Hadoop/Spark user job link.
* Example:
* a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id}
* b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id}
* c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs
* */
public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url";
public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url";
public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
    // when these parameters are set, they are used to generate email links.
    // if they are not set, jetty.hostname and jetty.port (or jetty.ssl.port if SSL is configured) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// The property is used for the web server to get the host name of the executor when running in SOLO mode.
public static final String EXECUTOR_HOST = "executor.host";
// The property is used for the web server to get the port of the executor when running in SOLO mode.
public static final String EXECUTOR_PORT = "executor.port";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
     * a) azkaban.storage.artifact.max.retention=0
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable quartz scheduler and flow trigger if true.
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential";
// dir to keep dependency plugins
public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir";
public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors";
public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow";
public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS =
"azkaban.activeexecutor.refresh.milisecinterval";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW =
"azkaban.activeexecutor.refresh.flowinterval";
public static final String EXECUTORINFO_REFRESH_MAX_THREADS =
"azkaban.executorinfo.refresh.maxThreads";
public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors";
public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters";
public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX =
"azkaban.executorselector.comparator.";
public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled";
public static final String SESSION_TIME_TO_LIVE = "session.time.to.live";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
/*
* this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available.
* EXTRA_HCAT_CLUSTERS has the following format:
* other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
* Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster.
* The uris(hcat servers) in a "cluster" ensures HA is provided.
**/
public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";
/*
* the settings to be defined by user indicating if there are hcat locations other than the
* default one the system should pre-fetch hcat token from. Note: Multiple thrift uris are
* supported, use comma to separate the values, values are case insensitive.
**/
// Use EXTRA_HCAT_CLUSTERS instead
@Deprecated
public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";
    // If true, AZ will fetch the jobs' certificate from the remote Certificate Authority.
public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl";
// Job properties that indicate maximum memory size
public static final String JOB_MAX_XMS = "job.max.Xms";
public static final String MAX_XMS_DEFAULT = "1G";
public static final String JOB_MAX_XMX = "job.max.Xmx";
public static final String MAX_XMX_DEFAULT = "2G";
// The hadoop user the job should run under. If not specified, it will default to submit user.
public static final String USER_TO_PROXY = "user.to.proxy";
/**
* Format string for Log4j's EnhancedPatternLayout
*/
public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
public static class FlowTriggerProps {
// Flow trigger props
public static final String SCHEDULE_TYPE = "type";
public static final String CRON_SCHEDULE_TYPE = "cron";
public static final String SCHEDULE_VALUE = "value";
public static final String DEP_NAME = "name";
// Flow trigger dependency run time props
public static final String START_TIME = "startTime";
public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId";
}
}
| 1 | 16,524 | Since this constant is an implementation detail rather than a user-facing API, is it better to define it in a place where it is used? | azkaban-azkaban | java |
@@ -1007,6 +1007,8 @@ class ListParameter(Parameter):
:param str x: the value to parse.
:return: the parsed value.
"""
+ if isinstance(x, list):
+ x = json.dumps(x)
return list(json.loads(x, object_pairs_hook=FrozenOrderedDict))
def serialize(self, x): | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import abc
import datetime
import warnings
from enum import IntEnum
import json
from json import JSONEncoder
import operator
from ast import literal_eval
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from luigi import date_interval
from luigi import task_register
from luigi import six
from luigi import configuration
from luigi.cmdline_parser import CmdlineParser
from .freezing import recursively_freeze, FrozenOrderedDict
_no_value = object()
class ParameterVisibility(IntEnum):
"""
Possible values for the parameter visibility option. Public is the default.
See :doc:`/parameters` for more info.
"""
PUBLIC = 0
HIDDEN = 1
PRIVATE = 2
@classmethod
def has_value(cls, value):
return any(value == item.value for item in cls)
def serialize(self):
return self.value
class ParameterException(Exception):
"""
Base exception.
"""
pass
class MissingParameterException(ParameterException):
"""
Exception signifying that there was a missing Parameter.
"""
pass
class UnknownParameterException(ParameterException):
"""
Exception signifying that an unknown Parameter was supplied.
"""
pass
class DuplicateParameterException(ParameterException):
"""
Exception signifying that a Parameter was specified multiple times.
"""
pass
class Parameter(object):
"""
Parameter whose value is a ``str``, and a base class for other parameter types.
Parameters are objects set on the Task class level to make it possible to parameterize tasks.
For instance:
.. code:: python
class MyTask(luigi.Task):
foo = luigi.Parameter()
class RequiringTask(luigi.Task):
def requires(self):
return MyTask(foo="hello")
def run(self):
print(self.requires().foo) # prints "hello"
This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.
When a task is instantiated, it will first use any argument as the value of the parameter, eg.
if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the
value will be resolved in this order of falling priority:
* Any value provided on the command line:
- To the root task (eg. ``--param xyz``)
- Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``).
* With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion`
* Any default value set using the ``default`` flag.
Parameter objects may be reused, but you must then set the ``positional=False`` flag.
"""
_counter = 0 # non-atomically increasing counter used for ordering parameters.
def __init__(self, default=_no_value, is_global=False, significant=True, description=None,
config_path=None, positional=True, always_in_help=False, batch_method=None,
visibility=ParameterVisibility.PUBLIC):
"""
:param default: the default value for this parameter. This should match the type of the
Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
``IntParameter``. By default, no default is stored and
the value must be specified at runtime.
:param bool significant: specify ``False`` if the parameter should not be treated as part of
the unique identifier for a Task. An insignificant Parameter might
also be used to specify a password or other sensitive information
that should not be made public via the scheduler. Default:
``True``.
:param str description: A human-readable string describing the purpose of this Parameter.
For command-line invocations, this will be used as the `help` string
shown to users. Default: ``None``.
:param dict config_path: a dictionary with entries ``section`` and ``name``
specifying a config file entry from which to read the
default value for this parameter. DEPRECATED.
Default: ``None``.
:param bool positional: If true, you can set the argument as a
positional argument. It's true by default but we recommend
``positional=False`` for abstract base classes and similar cases.
:param bool always_in_help: For the --help option in the command line
parsing. Set true to always show in --help.
:param function(iterable[A])->A batch_method: Method to combine an iterable of parsed
parameter values into a single value. Used
when receiving batched parameter lists from
the scheduler. See :ref:`batch_method`
:param visibility: A Parameter whose value is a :py:class:`~luigi.parameter.ParameterVisibility`.
Default value is ParameterVisibility.PUBLIC
"""
self._default = default
self._batch_method = batch_method
if is_global:
warnings.warn("is_global support is removed. Assuming positional=False",
DeprecationWarning,
stacklevel=2)
positional = False
self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks
self.positional = positional
self.visibility = visibility if ParameterVisibility.has_value(visibility) else ParameterVisibility.PUBLIC
self.description = description
self.always_in_help = always_in_help
if config_path is not None and ('section' not in config_path or 'name' not in config_path):
raise ParameterException('config_path must be a hash containing entries for section and name')
self._config_path = config_path
self._counter = Parameter._counter # We need to keep track of this to get the order right (see Task class)
Parameter._counter += 1
def _get_value_from_config(self, section, name):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()
try:
value = conf.get(section, name)
except (NoSectionError, NoOptionError, KeyError):
return _no_value
return self.parse(value)
def _get_value(self, task_name, param_name):
for value, warn in self._value_iterator(task_name, param_name):
if value != _no_value:
if warn:
warnings.warn(warn, DeprecationWarning)
return value
return _no_value
def _value_iterator(self, task_name, param_name):
"""
Yield the parameter values, with optional deprecation warning as second tuple value.
The parameter value will be whatever non-_no_value that is yielded first.
"""
cp_parser = CmdlineParser.get_instance()
if cp_parser:
dest = self._parser_global_dest(param_name, task_name)
found = getattr(cp_parser.known_args, dest, None)
yield (self._parse_or_no_value(found), None)
yield (self._get_value_from_config(task_name, param_name), None)
if self._config_path:
yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']),
'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
self._config_path['section'], self._config_path['name'], task_name, param_name))
yield (self._default, None)
def has_task_value(self, task_name, param_name):
return self._get_value(task_name, param_name) != _no_value
def task_value(self, task_name, param_name):
value = self._get_value(task_name, param_name)
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return self.normalize(value)
def _is_batchable(self):
return self._batch_method is not None
def parse(self, x):
"""
Parse an individual value from the input.
The default implementation is the identity function, but subclasses should override
this method for specialized parsing.
:param str x: the value to parse.
:return: the parsed value.
"""
return x # default impl
def _parse_list(self, xs):
"""
Parse a list of values from the scheduler.
Only possible if this is_batchable() is True. This will combine the list into a single
parameter value using batch method. This should never need to be overridden.
:param xs: list of values to parse and combine
:return: the combined parsed values
"""
if not self._is_batchable():
raise NotImplementedError('No batch method found')
elif not xs:
raise ValueError('Empty parameter list passed to parse_list')
else:
return self._batch_method(map(self.parse, xs))
def serialize(self, x):
"""
Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
return str(x)
def _warn_on_wrong_param_type(self, param_name, param_value):
if self.__class__ != Parameter:
return
if not isinstance(param_value, six.string_types):
warnings.warn('Parameter "{}" with value "{}" is not of type string.'.format(param_name, param_value))
def normalize(self, x):
"""
Given a parsed parameter value, normalizes it.
The value can either be the result of parse(), the default value or
arguments passed into the task's constructor by instantiation.
This is very implementation defined, but can be used to validate/clamp
valid values. For example, if you wanted to only accept even integers,
and "correct" odd values to the nearest integer, you can implement
normalize as ``x // 2 * 2``.
"""
return x # default impl
def next_in_enumeration(self, _value):
"""
If your Parameter type has an enumerable ordering of values. You can
choose to override this method. This method is used by the
:py:mod:`luigi.execution_summary` module for pretty printing
purposes. Enabling it to pretty print tasks like ``MyTask(num=1),
MyTask(num=2), MyTask(num=3)`` to ``MyTask(num=1..3)``.
:param value: The value
:return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering.
"""
return None
def _parse_or_no_value(self, x):
if not x:
return _no_value
else:
return self.parse(x)
@staticmethod
def _parser_global_dest(param_name, task_name):
return task_name + '_' + param_name
@classmethod
def _parser_kwargs(cls, param_name, task_name=None):
return {
"action": "store",
"dest": cls._parser_global_dest(param_name, task_name) if task_name else param_name,
}
class OptionalParameter(Parameter):
""" A Parameter that treats empty string as None """
def serialize(self, x):
if x is None:
return ''
else:
return str(x)
def parse(self, x):
return x or None
def _warn_on_wrong_param_type(self, param_name, param_value):
if self.__class__ != OptionalParameter:
return
if not isinstance(param_value, six.string_types) and param_value is not None:
warnings.warn('OptionalParameter "{}" with value "{}" is not of type string or None.'.format(
param_name, param_value))
_UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0)
class _DateParameterBase(Parameter):
"""
Base class Parameter for date (not datetime).
"""
def __init__(self, interval=1, start=None, **kwargs):
super(_DateParameterBase, self).__init__(**kwargs)
self.interval = interval
self.start = start if start is not None else _UNIX_EPOCH.date()
@abc.abstractproperty
def date_format(self):
"""
Override me with a :py:meth:`~datetime.date.strftime` string.
"""
pass
def parse(self, s):
"""
Parses a date string formatted like ``YYYY-MM-DD``.
"""
return datetime.datetime.strptime(s, self.date_format).date()
def serialize(self, dt):
"""
Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`.
"""
if dt is None:
return str(dt)
return dt.strftime(self.date_format)
class DateParameter(_DateParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.date`.
A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies
July 10, 2013.
DateParameters are 90% of the time used to be interpolated into file system paths or the like.
Here is a gentle reminder of how to interpolate date parameters into strings:
.. code:: python
class MyTask(luigi.Task):
date = luigi.DateParameter()
def run(self):
templated_path = "/my/path/to/my/dataset/{date:%Y/%m/%d}/"
instantiated_path = templated_path.format(date=self.date)
# print(instantiated_path) --> /my/path/to/my/dataset/2016/06/09/
# ... use instantiated_path ...
To set this parameter to default to the current day. You can write code like this:
.. code:: python
import datetime
class MyTask(luigi.Task):
date = luigi.DateParameter(default=datetime.date.today())
"""
date_format = '%Y-%m-%d'
def next_in_enumeration(self, value):
return value + datetime.timedelta(days=self.interval)
def normalize(self, value):
if value is None:
return None
if isinstance(value, datetime.datetime):
value = value.date()
delta = (value - self.start).days % self.interval
return value - datetime.timedelta(days=delta)
class MonthParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the month
(day of :py:class:`~datetime.date` is "rounded" to first of the month).
A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies
July of 2013. Task objects constructed from code accept :py:class:`~datetime.date` (ignoring the day value) or
:py:class:`~luigi.date_interval.Month`.
"""
date_format = '%Y-%m'
def _add_months(self, date, months):
"""
Add ``months`` months to ``date``.
Unfortunately we can't use timedeltas to add months because timedelta counts in days
and there's no foolproof way to add N months in days without counting the number of
days per month.
"""
year = date.year + (date.month + months - 1) // 12
month = (date.month + months - 1) % 12 + 1
return datetime.date(year=year, month=month, day=1)
def next_in_enumeration(self, value):
return self._add_months(value, self.interval)
def normalize(self, value):
if value is None:
return None
if isinstance(value, date_interval.Month):
value = value.date_a
months_since_start = (value.year - self.start.year) * 12 + (value.month - self.start.month)
months_since_start -= months_since_start % self.interval
return self._add_months(self.start, months_since_start)
class YearParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the year
(day and month of :py:class:`~datetime.date` is "rounded" to first day of the year).
A YearParameter is a Date string formatted ``YYYY``. Task objects constructed from code accept
:py:class:`~datetime.date` (ignoring the month and day values) or :py:class:`~luigi.date_interval.Year`.
"""
date_format = '%Y'
def next_in_enumeration(self, value):
return value.replace(year=value.year + self.interval)
def normalize(self, value):
if value is None:
return None
if isinstance(value, date_interval.Year):
value = value.date_a
delta = (value.year - self.start.year) % self.interval
return datetime.date(year=value.year - delta, month=1, day=1)
class _DatetimeParameterBase(Parameter):
"""
Base class Parameter for datetime
"""
def __init__(self, interval=1, start=None, **kwargs):
super(_DatetimeParameterBase, self).__init__(**kwargs)
self.interval = interval
self.start = start if start is not None else _UNIX_EPOCH
@abc.abstractproperty
def date_format(self):
"""
Override me with a :py:meth:`~datetime.date.strftime` string.
"""
pass
@abc.abstractproperty
def _timedelta(self):
"""
How to move one interval of this type forward (i.e. not counting self.interval).
"""
pass
def parse(self, s):
"""
Parses a string to a :py:class:`~datetime.datetime`.
"""
return datetime.datetime.strptime(s, self.date_format)
def serialize(self, dt):
"""
Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`.
"""
if dt is None:
return str(dt)
return dt.strftime(self.date_format)
@staticmethod
def _convert_to_dt(dt):
if not isinstance(dt, datetime.datetime):
dt = datetime.datetime.combine(dt, datetime.time.min)
return dt
def normalize(self, dt):
"""
Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at
:py:attr:`~_DatetimeParameterBase.start`.
"""
if dt is None:
return None
dt = self._convert_to_dt(dt)
dt = dt.replace(microsecond=0) # remove microseconds, to avoid float rounding issues.
delta = (dt - self.start).total_seconds()
granularity = (self._timedelta * self.interval).total_seconds()
return dt - datetime.timedelta(seconds=delta % granularity)
def next_in_enumeration(self, value):
return value + self._timedelta * self.interval
class DateHourParameter(_DatetimeParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.
A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
19:00.
"""
date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T'
_timedelta = datetime.timedelta(hours=1)
class DateMinuteParameter(_DatetimeParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.
A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at
19:07.
The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute.
"""
date_format = '%Y-%m-%dT%H%M'
_timedelta = datetime.timedelta(minutes=1)
deprecated_date_format = '%Y-%m-%dT%HH%M'
def parse(self, s):
try:
value = datetime.datetime.strptime(s, self.deprecated_date_format)
warnings.warn(
'Using "H" between hours and minutes is deprecated, omit it instead.',
DeprecationWarning,
stacklevel=2
)
return value
except ValueError:
return super(DateMinuteParameter, self).parse(s)
class DateSecondParameter(_DatetimeParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the second.
A DateSecondParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the second. For example, ``2013-07-10T190738`` specifies July 10, 2013 at
19:07:38.
The interval parameter can be used to clamp this parameter to every N seconds, instead of every second.
"""
date_format = '%Y-%m-%dT%H%M%S'
_timedelta = datetime.timedelta(seconds=1)
class IntParameter(Parameter):
"""
Parameter whose value is an ``int``.
"""
def parse(self, s):
"""
Parses an ``int`` from the string using ``int()``.
"""
return int(s)
def next_in_enumeration(self, value):
return value + 1
class FloatParameter(Parameter):
"""
Parameter whose value is a ``float``.
"""
def parse(self, s):
"""
Parses a ``float`` from the string using ``float()``.
"""
return float(s)
class BoolParameter(Parameter):
"""
A Parameter whose value is a ``bool``. This parameter has an implicit default value of
``False``. For the command line interface this means that the value is ``False`` unless you
add ``"--the-bool-parameter"`` to your command without giving a parameter value. This is
considered *implicit* parsing (the default). However, in some situations one might want to give
the explicit bool value (``"--the-bool-parameter true|false"``), e.g. when you configure the
default value to be ``True``. This is called *explicit* parsing. When omitting the parameter
value, it is still considered ``True`` but to avoid ambiguities during argument parsing, make
sure to always place bool parameters behind the task family on the command line when using
explicit parsing.
You can toggle between the two parsing modes on a per-parameter base via
.. code-block:: python
class MyTask(luigi.Task):
implicit_bool = luigi.BoolParameter(parsing=luigi.BoolParameter.IMPLICIT_PARSING)
explicit_bool = luigi.BoolParameter(parsing=luigi.BoolParameter.EXPLICIT_PARSING)
or globally by
.. code-block:: python
luigi.BoolParameter.parsing = luigi.BoolParameter.EXPLICIT_PARSING
for all bool parameters instantiated after this line.
"""
IMPLICIT_PARSING = "implicit"
EXPLICIT_PARSING = "explicit"
parsing = IMPLICIT_PARSING
def __init__(self, *args, **kwargs):
self.parsing = kwargs.pop("parsing", self.__class__.parsing)
super(BoolParameter, self).__init__(*args, **kwargs)
if self._default == _no_value:
self._default = False
def parse(self, val):
"""
Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
"""
s = str(val).lower()
if s == "true":
return True
elif s == "false":
return False
else:
raise ValueError("cannot interpret '{}' as boolean".format(val))
def normalize(self, value):
try:
return self.parse(value)
except ValueError:
return None
def _parser_kwargs(self, *args, **kwargs):
parser_kwargs = super(BoolParameter, self)._parser_kwargs(*args, **kwargs)
if self.parsing == self.IMPLICIT_PARSING:
parser_kwargs["action"] = "store_true"
elif self.parsing == self.EXPLICIT_PARSING:
parser_kwargs["nargs"] = "?"
parser_kwargs["const"] = True
else:
raise ValueError("unknown parsing value '{}'".format(self.parsing))
return parser_kwargs
class DateIntervalParameter(Parameter):
"""
A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.
Date Intervals are specified using the ISO 8601 date notation for dates
(eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks
(eg. "2015-W35"). In addition, it also supports arbitrary date intervals
provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04").
"""
def parse(self, s):
"""
Parses a :py:class:`~luigi.date_interval.DateInterval` from the input.
See :py:mod:`luigi.date_interval`
for details on the parsing of DateIntervals.
"""
# TODO: can we use xml.utils.iso8601 or something similar?
from luigi import date_interval as d
for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]:
i = cls.parse(s)
if i:
return i
raise ValueError('Invalid date interval - could not be parsed')
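# Illustrative mapping (assuming the parse order above): each input form is
# handled by the first date_interval class whose parse() accepts it, e.g.
#   parse("2015")                   -> a luigi.date_interval.Year
#   parse("2015-05")                -> a luigi.date_interval.Month
#   parse("2015-W35")               -> a luigi.date_interval.Week
#   parse("2015-11-04")             -> a luigi.date_interval.Date
#   parse("2015-11-04-2015-12-04")  -> a luigi.date_interval.Custom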
class TimeDeltaParameter(Parameter):
"""
Class that maps to timedelta using strings in any of the following forms:
* ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]]|s[econd[s]]}`` (e.g. "1 week 2 days" or "1 h")
Note: multiple arguments must be supplied in longest to shortest unit order
* ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)
* ISO 8601 duration ``PnW``
See https://en.wikipedia.org/wiki/ISO_8601#Durations
"""
def _apply_regex(self, regex, input):
import re
re_match = re.match(regex, input)
if re_match and any(re_match.groups()):
kwargs = {}
has_val = False
for k, v in six.iteritems(re_match.groupdict(default="0")):
val = int(v)
if val > -1:
has_val = True
kwargs[k] = val
if has_val:
return datetime.timedelta(**kwargs)
def _parseIso8601(self, input):
def field(key):
return r"(?P<%s>\d+)%s" % (key, key[0].upper())
def optional_field(key):
return "(%s)?" % field(key)
# A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"),
"".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
return self._apply_regex(regex, input)
def _parseSimple(self, input):
keys = ["weeks", "days", "hours", "minutes", "seconds"]
# Give the digits a regex group name from the keys, then look for text with the first letter of the key,
# optionally followed by the rest of the word, with final char (the "s") optional
regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
return self._apply_regex(regex, input)
def parse(self, input):
"""
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result is not None:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input)
def serialize(self, x):
"""
Converts datetime.timedelta to a string
:param x: the value to serialize.
"""
weeks = x.days // 7
days = x.days % 7
hours = x.seconds // 3600
minutes = (x.seconds % 3600) // 60
seconds = (x.seconds % 3600) % 60
result = "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds)
return result
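# Illustrative example of the serialized form produced above:
#   serialize(datetime.timedelta(days=9, hours=3)) -> "1 w 2 d 3 h 0 m 0 s"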
def _warn_on_wrong_param_type(self, param_name, param_value):
if self.__class__ != TimeDeltaParameter:
return
if not isinstance(param_value, datetime.timedelta):
warnings.warn('Parameter "{}" with value "{}" is not of type timedelta.'.format(param_name, param_value))
class TaskParameter(Parameter):
"""
A parameter that takes another luigi task class.
When used programmatically, the parameter should be specified
directly with the :py:class:`luigi.task.Task` (sub) class. Like
``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line,
you specify the :py:meth:`luigi.task.Task.get_task_family`. Like
.. code-block:: console
$ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask
Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module.
When the :py:class:`luigi.task.Task` class is instantiated to an object,
the value will always be a task class (and not a string).
"""
def parse(self, input):
"""
Parse a task_family using the :class:`~luigi.task_register.Register`
"""
return task_register.Register.get_task_cls(input)
def serialize(self, cls):
"""
Converts the :py:class:`luigi.task.Task` (sub) class to its family name.
"""
return cls.get_task_family()
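# Illustrative round trip (hypothetical task name; assumes the class is
# registered with the task register):
#   parse("my_namespace.MyTask")    -> the my_namespace.MyTask class object
#   serialize(my_namespace.MyTask)  -> "my_namespace.MyTask"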
class EnumParameter(Parameter):
"""
A parameter whose value is an :class:`~enum.Enum`.
In the task definition, use
.. code-block:: python
class Model(enum.Enum):
Honda = 1
Volvo = 2
class MyTask(luigi.Task):
my_param = luigi.EnumParameter(enum=Model)
At the command line, use,
.. code-block:: console
$ luigi --module my_tasks MyTask --my-param Honda
"""
def __init__(self, *args, **kwargs):
if 'enum' not in kwargs:
raise ParameterException('An enum class must be specified.')
self._enum = kwargs.pop('enum')
super(EnumParameter, self).__init__(*args, **kwargs)
def parse(self, s):
try:
return self._enum[s]
except KeyError:
raise ValueError('Invalid enum value - could not be parsed')
def serialize(self, e):
return e.name
class _DictParamEncoder(JSONEncoder):
"""
JSON encoder for :py:class:`~DictParameter`, which makes :py:class:`~FrozenOrderedDict` JSON serializable.
"""
def default(self, obj):
if isinstance(obj, FrozenOrderedDict):
return obj.get_wrapped()
return json.JSONEncoder.default(self, obj)
class DictParameter(Parameter):
"""
Parameter whose value is a ``dict``.
In the task definition, use
.. code-block:: python
class MyTask(luigi.Task):
tags = luigi.DictParameter()
def run(self):
logging.info("Find server with role: %s", self.tags['role'])
server = aws.ec2.find_my_resource(self.tags)
At the command line, use
.. code-block:: console
$ luigi --module my_tasks MyTask --tags <JSON string>
Simple example with two tags:
.. code-block:: console
$ luigi --module my_tasks MyTask --tags '{"role": "web", "env": "staging"}'
It can be used to define dynamic parameters, when you do not know the exact list of your parameters (e.g. list of
tags, that are dynamically constructed outside Luigi), or you have a complex parameter containing logically related
values (like a database connection config).
"""
def normalize(self, value):
"""
Ensure that dictionary parameter is converted to a FrozenOrderedDict so it can be hashed.
"""
return recursively_freeze(value)
def parse(self, source):
"""
Parses an immutable and ordered ``dict`` from a JSON string using standard JSON library.
We need to use an immutable dictionary, to create a hashable parameter and also preserve the internal structure
of parsing. The traversal order of a standard ``dict`` is undefined, which can result in various string
representations of this parameter, and therefore a different task id for the task containing this parameter.
This is because the task id contains the hash of the parameters' JSON representation.
:param source: String to be parsed
"""
# TOML-based config converts params to python types itself.
if not isinstance(source, six.string_types):
return source
return json.loads(source, object_pairs_hook=FrozenOrderedDict)
def serialize(self, x):
return json.dumps(x, cls=_DictParamEncoder)
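# Illustrative round trip (values taken from the docstring example above):
#   parse('{"role": "web", "env": "staging"}') returns an immutable
#   FrozenOrderedDict that preserves the key order of the JSON string, and
#   serialize() of that value reproduces the same JSON text, keeping the
#   task id stable across runs.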
class ListParameter(Parameter):
"""
Parameter whose value is a ``list``.
In the task definition, use
.. code-block:: python
class MyTask(luigi.Task):
grades = luigi.ListParameter()
def run(self):
sum = 0
for element in self.grades:
sum += element
avg = sum / len(self.grades)
At the command line, use
.. code-block:: console
$ luigi --module my_tasks MyTask --grades <JSON string>
Simple example with two grades:
.. code-block:: console
$ luigi --module my_tasks MyTask --grades '[100,70]'
"""
def normalize(self, x):
"""
Ensure that struct is recursively converted to a tuple so it can be hashed.
:param str x: the value to parse.
:return: the normalized (hashable/immutable) value.
"""
return recursively_freeze(x)
def parse(self, x):
"""
Parse an individual value from the input.
:param str x: the value to parse.
:return: the parsed value.
"""
return list(json.loads(x, object_pairs_hook=FrozenOrderedDict))
def serialize(self, x):
"""
Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
return json.dumps(x, cls=_DictParamEncoder)
class TupleParameter(ListParameter):
"""
Parameter whose value is a ``tuple`` or ``tuple`` of tuples.
In the task definition, use
.. code-block:: python
class MyTask(luigi.Task):
book_locations = luigi.TupleParameter()
def run(self):
for location in self.book_locations:
print("Go to page %d, line %d" % (location[0], location[1]))
At the command line, use
.. code-block:: console
$ luigi --module my_tasks MyTask --book_locations <JSON string>
Simple example with three book locations:
.. code-block:: console
$ luigi --module my_tasks MyTask --book_locations '((12,3),(4,15),(52,1))'
"""
def parse(self, x):
"""
Parse an individual value from the input.
:param str x: the value to parse.
:return: the parsed value.
"""
# Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case.
# A tuple string may come from a config file or from cli execution.
# t = ((1, 2), (3, 4))
# t_str = '((1,2),(3,4))'
# t_json_str = json.dumps(t)
# t_json_str == '[[1, 2], [3, 4]]'
# json.loads(t_json_str) == t
# json.loads(t_str) == ValueError: No JSON object could be decoded
# Therefore, if json.loads(x) raises a ValueError, try ast.literal_eval(x).
# ast.literal_eval(t_str) == t
try:
# loop required to parse tuple of tuples
return tuple(tuple(x) for x in json.loads(x, object_pairs_hook=FrozenOrderedDict))
except (ValueError, TypeError):
return tuple(literal_eval(x)) # if this causes an error, let that error be raised.
class NumericalParameter(Parameter):
"""
Parameter whose value is a number of the specified type, e.g. ``int`` or
``float`` and in the range specified.
In the task definition, use
.. code-block:: python
class MyTask(luigi.Task):
my_param_1 = luigi.NumericalParameter(
var_type=int, min_value=-3, max_value=7) # -3 <= my_param_1 < 7
my_param_2 = luigi.NumericalParameter(
var_type=int, min_value=-3, max_value=7, left_op=operator.lt, right_op=operator.le) # -3 < my_param_2 <= 7
At the command line, use
.. code-block:: console
$ luigi --module my_tasks MyTask --my-param-1 -3 --my-param-2 -2
"""
def __init__(self, left_op=operator.le, right_op=operator.lt, *args, **kwargs):
"""
:param function var_type: The type of the input variable, e.g. int or float.
:param min_value: The minimum value permissible in the accepted values
range. May be inclusive or exclusive based on left_op parameter.
This should be the same type as var_type.
:param max_value: The maximum value permissible in the accepted values
range. May be inclusive or exclusive based on right_op parameter.
This should be the same type as var_type.
:param function left_op: The comparison operator for the left-most comparison in
the expression ``min_value left_op value right_op value``.
This operator should generally be either
``operator.lt`` or ``operator.le``.
Default: ``operator.le``.
:param function right_op: The comparison operator for the right-most comparison in
the expression ``min_value left_op value right_op value``.
This operator should generally be either
``operator.lt`` or ``operator.le``.
Default: ``operator.lt``.
"""
if "var_type" not in kwargs:
raise ParameterException("var_type must be specified")
self._var_type = kwargs.pop("var_type")
if "min_value" not in kwargs:
raise ParameterException("min_value must be specified")
self._min_value = kwargs.pop("min_value")
if "max_value" not in kwargs:
raise ParameterException("max_value must be specified")
self._max_value = kwargs.pop("max_value")
self._left_op = left_op
self._right_op = right_op
self._permitted_range = (
"{var_type} in {left_endpoint}{min_value}, {max_value}{right_endpoint}".format(
var_type=self._var_type.__name__,
min_value=self._min_value, max_value=self._max_value,
left_endpoint="[" if left_op == operator.le else "(",
right_endpoint=")" if right_op == operator.lt else "]"))
super(NumericalParameter, self).__init__(*args, **kwargs)
if self.description:
self.description += " "
else:
self.description = ""
self.description += "permitted values: " + self._permitted_range
def parse(self, s):
value = self._var_type(s)
if (self._left_op(self._min_value, value) and self._right_op(value, self._max_value)):
return value
else:
raise ValueError(
"{s} is not in the set of {permitted_range}".format(
s=s, permitted_range=self._permitted_range))
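# Illustrative example (values from the docstring above): with var_type=int,
# min_value=-3, max_value=7 and the default operators (le, lt),
#   parse("-3") -> -3          # lower bound inclusive by default
#   parse("7")  -> ValueError  # upper bound exclusive by default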
class ChoiceParameter(Parameter):
"""
A parameter whose value is one of a fixed set of choices. It is constructed from two arguments:
1. an instance of :class:`~collections.Iterable` containing the permitted choices and
2. the class of the variables to convert to.
In the task definition, use
.. code-block:: python
class MyTask(luigi.Task):
my_param = luigi.ChoiceParameter(choices=[0.1, 0.2, 0.3], var_type=float)
At the command line, use
.. code-block:: console
$ luigi --module my_tasks MyTask --my-param 0.1
Consider using :class:`~luigi.EnumParameter` for a typed, structured
alternative. This class can perform the same role when all choices are the
same type and transparency of parameter value on the command line is
desired.
"""
def __init__(self, var_type=str, *args, **kwargs):
"""
:param function var_type: The type of the input variable, e.g. str, int,
float, etc.
Default: str
:param choices: An iterable, all of whose elements are of `var_type` to
restrict parameter choices to.
"""
if "choices" not in kwargs:
raise ParameterException("A choices iterable must be specified")
self._choices = set(kwargs.pop("choices"))
self._var_type = var_type
assert all(type(choice) is self._var_type for choice in self._choices), "Invalid type in choices"
super(ChoiceParameter, self).__init__(*args, **kwargs)
if self.description:
self.description += " "
else:
self.description = ""
self.description += (
"Choices: {" + ", ".join(str(choice) for choice in self._choices) + "}")
def parse(self, s):
var = self._var_type(s)
return self.normalize(var)
def normalize(self, var):
if var in self._choices:
return var
else:
raise ValueError("{var} is not a valid choice from {choices}".format(
var=var, choices=self._choices))
| 1 | 19,817 | It would be nice if we have some docs explaining this. Core luigi should avoid having hard to understand code. | spotify-luigi | py |
@@ -43,6 +43,11 @@ public abstract class StemmerTestBase extends LuceneTestCase {
static void init(boolean ignoreCase, String affix, String... dictionaries)
throws IOException, ParseException {
+ stemmer = new Stemmer(loadDictionary(ignoreCase, affix, dictionaries));
+ }
+
+ static Dictionary loadDictionary(boolean ignoreCase, String affix, String... dictionaries)
+ throws IOException, ParseException {
if (dictionaries.length == 0) {
throw new IllegalArgumentException("there must be at least one dictionary");
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.hunspell;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.text.ParseException;
import java.util.Arrays;
import java.util.List;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.AfterClass;
/** base class for hunspell stemmer tests */
public abstract class StemmerTestBase extends LuceneTestCase {
private static Stemmer stemmer;
@AfterClass
public static void afterClass() {
stemmer = null;
}
static void init(String affix, String dictionary) throws IOException, ParseException {
init(false, affix, dictionary);
}
static void init(boolean ignoreCase, String affix, String... dictionaries)
throws IOException, ParseException {
if (dictionaries.length == 0) {
throw new IllegalArgumentException("there must be at least one dictionary");
}
InputStream affixStream = StemmerTestBase.class.getResourceAsStream(affix);
if (affixStream == null) {
throw new FileNotFoundException("file not found: " + affix);
}
InputStream dictStreams[] = new InputStream[dictionaries.length];
for (int i = 0; i < dictionaries.length; i++) {
dictStreams[i] = StemmerTestBase.class.getResourceAsStream(dictionaries[i]);
if (dictStreams[i] == null) {
throw new FileNotFoundException("file not found: " + dictStreams[i]);
}
}
try {
Dictionary dictionary =
new Dictionary(
new ByteBuffersDirectory(),
"dictionary",
affixStream,
Arrays.asList(dictStreams),
ignoreCase);
stemmer = new Stemmer(dictionary);
} finally {
IOUtils.closeWhileHandlingException(affixStream);
IOUtils.closeWhileHandlingException(dictStreams);
}
}
static void assertStemsTo(String s, String... expected) {
assertNotNull(stemmer);
Arrays.sort(expected);
List<CharsRef> stems = stemmer.stem(s);
String actual[] = new String[stems.size()];
for (int i = 0; i < actual.length; i++) {
actual[i] = stems.get(i).toString();
}
Arrays.sort(actual);
assertArrayEquals(
"expected=" + Arrays.toString(expected) + ",actual=" + Arrays.toString(actual),
expected,
actual);
}
}
| 1 | 40,333 | extract a method to call from a test | apache-lucene-solr | java |
@@ -184,6 +184,8 @@ public class CoreContainer {
private volatile ExecutorService coreContainerWorkExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(
new DefaultSolrThreadFactory("coreContainerWorkExecutor"));
+ final private ExecutorService collectorExecutor;
+
private final OrderedExecutor replayUpdatesExecutor;
protected volatile LogWatcher logging = null; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.spec.InvalidKeySpecException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.auth.AuthSchemeProvider;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.config.Lookup;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.solr.api.AnnotatedApi;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.AuthSchemeRegistryProvider;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.CredentialsProviderProvider;
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.OverseerTaskQueue;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.autoscaling.AutoScalingHandler;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Replica.State;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
import org.apache.solr.filestore.PackageStoreAPI;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.handler.SnapShooter;
import org.apache.solr.handler.admin.AutoscalingHistoryHandler;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.admin.ConfigSetsHandler;
import org.apache.solr.handler.admin.CoreAdminHandler;
import org.apache.solr.handler.admin.HealthCheckHandler;
import org.apache.solr.handler.admin.InfoHandler;
import org.apache.solr.handler.admin.MetricsCollectorHandler;
import org.apache.solr.handler.admin.MetricsHandler;
import org.apache.solr.handler.admin.MetricsHistoryHandler;
import org.apache.solr.handler.admin.SecurityConfHandler;
import org.apache.solr.handler.admin.SecurityConfHandlerLocal;
import org.apache.solr.handler.admin.SecurityConfHandlerZk;
import org.apache.solr.handler.admin.ZookeeperInfoHandler;
import org.apache.solr.handler.admin.ZookeeperStatusHandler;
import org.apache.solr.handler.component.ShardHandlerFactory;
import org.apache.solr.logging.LogWatcher;
import org.apache.solr.logging.MDCLoggingContext;
import org.apache.solr.metrics.SolrCoreMetricManager;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.metrics.SolrMetricProducer;
import org.apache.solr.metrics.SolrMetricsContext;
import org.apache.solr.pkg.PackageLoader;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.search.SolrFieldCacheBean;
import org.apache.solr.security.AuditLoggerPlugin;
import org.apache.solr.security.AuthenticationPlugin;
import org.apache.solr.security.AuthorizationPlugin;
import org.apache.solr.security.HttpClientBuilderPlugin;
import org.apache.solr.security.PKIAuthenticationPlugin;
import org.apache.solr.security.PublicKeyHandler;
import org.apache.solr.security.SecurityPluginHolder;
import org.apache.solr.update.SolrCoreState;
import org.apache.solr.update.UpdateShardHandler;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.OrderedExecutor;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.stats.MetricUtils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.Objects.requireNonNull;
import static org.apache.solr.common.params.CommonParams.AUTHC_PATH;
import static org.apache.solr.common.params.CommonParams.AUTHZ_PATH;
import static org.apache.solr.common.params.CommonParams.AUTOSCALING_HISTORY_PATH;
import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.CONFIGSETS_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.CORES_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.INFO_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.METRICS_HISTORY_PATH;
import static org.apache.solr.common.params.CommonParams.METRICS_PATH;
import static org.apache.solr.common.params.CommonParams.ZK_PATH;
import static org.apache.solr.common.params.CommonParams.ZK_STATUS_PATH;
import static org.apache.solr.core.CorePropertiesLocator.PROPERTIES_FILENAME;
import static org.apache.solr.security.AuthenticationPlugin.AUTHENTICATION_PLUGIN_PROP;
/**
* @since solr 1.3
*/
public class CoreContainer {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
final SolrCores solrCores = new SolrCores(this);
public static class CoreLoadFailure {
public final CoreDescriptor cd;
public final Exception exception;
public CoreLoadFailure(CoreDescriptor cd, Exception loadFailure) {
this.cd = new CoreDescriptor(cd.getName(), cd);
this.exception = loadFailure;
}
}
protected final Map<String, CoreLoadFailure> coreInitFailures = new ConcurrentHashMap<>();
protected volatile CoreAdminHandler coreAdminHandler = null;
protected volatile CollectionsHandler collectionsHandler = null;
protected volatile HealthCheckHandler healthCheckHandler = null;
private volatile InfoHandler infoHandler;
protected volatile ConfigSetsHandler configSetsHandler = null;
private volatile PKIAuthenticationPlugin pkiAuthenticationPlugin;
protected volatile Properties containerProperties;
private volatile ConfigSetService coreConfigService;
protected final ZkContainer zkSys = new ZkContainer();
protected volatile ShardHandlerFactory shardHandlerFactory;
private volatile UpdateShardHandler updateShardHandler;
private volatile ExecutorService coreContainerWorkExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(
new DefaultSolrThreadFactory("coreContainerWorkExecutor"));
private final OrderedExecutor replayUpdatesExecutor;
protected volatile LogWatcher logging = null;
private volatile CloserThread backgroundCloser = null;
protected final NodeConfig cfg;
protected final SolrResourceLoader loader;
protected final String solrHome;
protected final CoresLocator coresLocator;
private volatile String hostName;
private final BlobRepository blobRepository = new BlobRepository(this);
private volatile PluginBag<SolrRequestHandler> containerHandlers = new PluginBag<>(SolrRequestHandler.class, null);
private volatile boolean asyncSolrCoreLoad;
protected volatile SecurityConfHandler securityConfHandler;
private volatile SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin;
private volatile SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin;
private volatile SecurityPluginHolder<AuditLoggerPlugin> auditloggerPlugin;
private volatile BackupRepositoryFactory backupRepoFactory;
protected volatile SolrMetricManager metricManager;
protected volatile String metricTag = SolrMetricProducer.getUniqueMetricTag(this, null);
protected volatile SolrMetricsContext solrMetricsContext;
protected MetricsHandler metricsHandler;
protected volatile MetricsHistoryHandler metricsHistoryHandler;
protected volatile MetricsCollectorHandler metricsCollectorHandler;
protected volatile AutoscalingHistoryHandler autoscalingHistoryHandler;
private PackageStoreAPI packageStoreAPI;
private PackageLoader packageLoader;
// Bits for the state variable.
public final static long LOAD_COMPLETE = 0x1L;
public final static long CORE_DISCOVERY_COMPLETE = 0x2L;
public final static long INITIAL_CORE_LOAD_COMPLETE = 0x4L;
private volatile long status = 0L;
protected volatile AutoScalingHandler autoScalingHandler;
private ExecutorService coreContainerAsyncTaskExecutor = ExecutorUtil.newMDCAwareCachedThreadPool("Core Container Async Task");
private enum CoreInitFailedAction {fromleader, none}
/**
* This method instantiates a new instance of {@linkplain BackupRepository}.
*
* @param repositoryName The name of the backup repository (Optional).
* If not specified, a default implementation is used.
* @return a new instance of {@linkplain BackupRepository}.
*/
public BackupRepository newBackupRepository(Optional<String> repositoryName) {
BackupRepository repository;
if (repositoryName.isPresent()) {
repository = backupRepoFactory.newInstance(getResourceLoader(), repositoryName.get());
} else {
repository = backupRepoFactory.newInstance(getResourceLoader());
}
return repository;
}
public ExecutorService getCoreZkRegisterExecutorService() {
return zkSys.getCoreZkRegisterExecutorService();
}
public SolrRequestHandler getRequestHandler(String path) {
return RequestHandlerBase.getRequestHandler(path, containerHandlers);
}
public PluginBag<SolrRequestHandler> getRequestHandlers() {
return this.containerHandlers;
}
{
log.debug("New CoreContainer " + System.identityHashCode(this));
}
/**
* Create a new CoreContainer using system properties to detect the solr home
* directory. The container's cores are not loaded.
*
* @see #load()
*/
public CoreContainer() {
this(new SolrResourceLoader(SolrResourceLoader.locateSolrHome()));
}
/**
* Create a new CoreContainer using the given SolrResourceLoader. The container's
* cores are not loaded.
*
* @param loader the SolrResourceLoader
* @see #load()
*/
public CoreContainer(SolrResourceLoader loader) {
this(SolrXmlConfig.fromSolrHome(loader, loader.getInstancePath()));
}
/**
* Create a new CoreContainer using the given solr home directory. The container's
* cores are not loaded.
*
* @param solrHome a String containing the path to the solr home directory
* @see #load()
*/
public CoreContainer(String solrHome) {
this(new SolrResourceLoader(Paths.get(solrHome)));
}
/**
* Create a new CoreContainer using the given SolrResourceLoader,
* configuration and CoresLocator. The container's cores are
* not loaded.
*
* @param config a NodeConfig representation of this container's configuration
* @see #load()
*/
public CoreContainer(NodeConfig config) {
this(config, new Properties());
}
public CoreContainer(NodeConfig config, Properties properties) {
this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()));
}
public CoreContainer(NodeConfig config, Properties properties, boolean asyncSolrCoreLoad) {
this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()), asyncSolrCoreLoad);
}
public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator) {
this(config, properties, locator, false);
}
public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator, boolean asyncSolrCoreLoad) {
this.cfg = requireNonNull(config);
this.loader = config.getSolrResourceLoader();
this.solrHome = loader.getInstancePath().toString();
try {
containerHandlers.put(PublicKeyHandler.PATH, new PublicKeyHandler(cfg.getCloudConfig()));
} catch (IOException | InvalidKeySpecException e) {
throw new RuntimeException("Bad PublicKeyHandler configuration.", e);
}
if (null != this.cfg.getBooleanQueryMaxClauseCount()) {
IndexSearcher.setMaxClauseCount(this.cfg.getBooleanQueryMaxClauseCount());
}
this.coresLocator = locator;
this.containerProperties = new Properties(properties);
this.asyncSolrCoreLoad = asyncSolrCoreLoad;
this.replayUpdatesExecutor = new OrderedExecutor(
cfg.getReplayUpdatesThreads(),
ExecutorUtil.newMDCAwareCachedThreadPool(
cfg.getReplayUpdatesThreads(),
new DefaultSolrThreadFactory("replayUpdatesExecutor")));
}
private synchronized void initializeAuthorizationPlugin(Map<String, Object> authorizationConf) {
authorizationConf = Utils.getDeepCopy(authorizationConf, 4);
int newVersion = readVersion(authorizationConf);
//Initialize the Authorization module
SecurityPluginHolder<AuthorizationPlugin> old = authorizationPlugin;
SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin = null;
if (authorizationConf != null) {
String klas = (String) authorizationConf.get("class");
if (klas == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "class is required for authorization plugin");
}
if (old != null && old.getZnodeVersion() == newVersion && newVersion > 0) {
log.debug("Authorization config not modified");
return;
}
log.info("Initializing authorization plugin: " + klas);
authorizationPlugin = new SecurityPluginHolder<>(newVersion,
getResourceLoader().newInstance(klas, AuthorizationPlugin.class));
// Read and pass the authorization context to the plugin
authorizationPlugin.plugin.init(authorizationConf);
} else {
log.debug("Security conf doesn't exist. Skipping setup for authorization module.");
}
this.authorizationPlugin = authorizationPlugin;
if (old != null) {
try {
old.plugin.close();
} catch (Exception e) {
log.error("Exception while attempting to close old authorization plugin", e);
}
}
}
private void initializeAuditloggerPlugin(Map<String, Object> auditConf) {
auditConf = Utils.getDeepCopy(auditConf, 4);
int newVersion = readVersion(auditConf);
//Initialize the Auditlog module
SecurityPluginHolder<AuditLoggerPlugin> old = auditloggerPlugin;
SecurityPluginHolder<AuditLoggerPlugin> newAuditloggerPlugin = null;
if (auditConf != null) {
String klas = (String) auditConf.get("class");
if (klas == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "class is required for auditlogger plugin");
}
if (old != null && old.getZnodeVersion() == newVersion && newVersion > 0) {
log.debug("Auditlogger config not modified");
return;
}
log.info("Initializing auditlogger plugin: " + klas);
newAuditloggerPlugin = new SecurityPluginHolder<>(newVersion,
getResourceLoader().newInstance(klas, AuditLoggerPlugin.class));
newAuditloggerPlugin.plugin.init(auditConf);
newAuditloggerPlugin.plugin.initializeMetrics(solrMetricsContext, "/auditlogging");
} else {
log.debug("Security conf doesn't exist. Skipping setup for audit logging module.");
}
this.auditloggerPlugin = newAuditloggerPlugin;
if (old != null) {
try {
old.plugin.close();
} catch (Exception e) {
log.error("Exception while attempting to close old auditlogger plugin", e);
}
}
}
private synchronized void initializeAuthenticationPlugin(Map<String, Object> authenticationConfig) {
authenticationConfig = Utils.getDeepCopy(authenticationConfig, 4);
int newVersion = readVersion(authenticationConfig);
String pluginClassName = null;
if (authenticationConfig != null) {
if (authenticationConfig.containsKey("class")) {
pluginClassName = String.valueOf(authenticationConfig.get("class"));
} else {
throw new SolrException(ErrorCode.SERVER_ERROR, "No 'class' specified for authentication in ZK.");
}
}
if (pluginClassName != null) {
log.debug("Authentication plugin class obtained from security.json: " + pluginClassName);
} else if (System.getProperty(AUTHENTICATION_PLUGIN_PROP) != null) {
pluginClassName = System.getProperty(AUTHENTICATION_PLUGIN_PROP);
log.debug("Authentication plugin class obtained from system property '" +
AUTHENTICATION_PLUGIN_PROP + "': " + pluginClassName);
} else {
log.debug("No authentication plugin used.");
}
SecurityPluginHolder<AuthenticationPlugin> old = authenticationPlugin;
SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin = null;
if (old != null && old.getZnodeVersion() == newVersion && newVersion > 0) {
log.debug("Authentication config not modified");
return;
}
// Initialize the plugin
if (pluginClassName != null) {
log.info("Initializing authentication plugin: " + pluginClassName);
authenticationPlugin = new SecurityPluginHolder<>(newVersion,
getResourceLoader().newInstance(pluginClassName,
AuthenticationPlugin.class,
null,
new Class[]{CoreContainer.class},
new Object[]{this}));
}
if (authenticationPlugin != null) {
authenticationPlugin.plugin.init(authenticationConfig);
setupHttpClientForAuthPlugin(authenticationPlugin.plugin);
authenticationPlugin.plugin.initializeMetrics(solrMetricsContext, "/authentication");
}
this.authenticationPlugin = authenticationPlugin;
try {
if (old != null) old.plugin.close();
} catch (Exception e) {
log.error("Exception while attempting to close old authentication plugin", e);
}
}
private void setupHttpClientForAuthPlugin(Object authcPlugin) {
if (authcPlugin instanceof HttpClientBuilderPlugin) {
// Setup HttpClient for internode communication
HttpClientBuilderPlugin builderPlugin = ((HttpClientBuilderPlugin) authcPlugin);
SolrHttpClientBuilder builder = builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
shardHandlerFactory.setSecurityBuilder(builderPlugin);
updateShardHandler.setSecurityBuilder(builderPlugin);
// The default http client of the core container's shardHandlerFactory has already been created and
// configured using the default httpclient configurer. We need to reconfigure it using the plugin's
// http client configurer to set it up for internode communication.
log.debug("Reconfiguring HttpClient settings.");
SolrHttpClientContextBuilder httpClientBuilder = new SolrHttpClientContextBuilder();
if (builder.getCredentialsProviderProvider() != null) {
httpClientBuilder.setDefaultCredentialsProvider(new CredentialsProviderProvider() {
@Override
public CredentialsProvider getCredentialsProvider() {
return builder.getCredentialsProviderProvider().getCredentialsProvider();
}
});
}
if (builder.getAuthSchemeRegistryProvider() != null) {
httpClientBuilder.setAuthSchemeRegistryProvider(new AuthSchemeRegistryProvider() {
@Override
public Lookup<AuthSchemeProvider> getAuthSchemeRegistry() {
return builder.getAuthSchemeRegistryProvider().getAuthSchemeRegistry();
}
});
}
HttpClientUtil.setHttpClientRequestContextBuilder(httpClientBuilder);
}
// Always register PKI auth interceptor, which will then delegate the decision of who should secure
// each request to the configured authentication plugin.
if (pkiAuthenticationPlugin != null && !pkiAuthenticationPlugin.isInterceptorRegistered()) {
pkiAuthenticationPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
shardHandlerFactory.setSecurityBuilder(pkiAuthenticationPlugin);
updateShardHandler.setSecurityBuilder(pkiAuthenticationPlugin);
}
}
private static int readVersion(Map<String, Object> conf) {
if (conf == null) return -1;
Map meta = (Map) conf.get("");
if (meta == null) return -1;
Number v = (Number) meta.get("v");
return v == null ? -1 : v.intValue();
}
/**
* This method allows subclasses to construct a CoreContainer
* without any default init behavior.
*
* @param testConstructor pass (Object)null.
* @lucene.experimental
*/
protected CoreContainer(Object testConstructor) {
solrHome = null;
loader = null;
coresLocator = null;
cfg = null;
containerProperties = null;
replayUpdatesExecutor = null;
}
public static CoreContainer createAndLoad(Path solrHome) {
return createAndLoad(solrHome, solrHome.resolve(SolrXmlConfig.SOLR_XML_FILE));
}
/**
* Create a new CoreContainer and load its cores
*
* @param solrHome the solr home directory
* @param configFile the file containing this container's configuration
* @return a loaded CoreContainer
*/
public static CoreContainer createAndLoad(Path solrHome, Path configFile) {
SolrResourceLoader loader = new SolrResourceLoader(solrHome);
CoreContainer cc = new CoreContainer(SolrXmlConfig.fromFile(loader, configFile));
try {
cc.load();
} catch (Exception e) {
cc.shutdown();
throw e;
}
return cc;
}
public Properties getContainerProperties() {
return containerProperties;
}
public PKIAuthenticationPlugin getPkiAuthenticationPlugin() {
return pkiAuthenticationPlugin;
}
public SolrMetricManager getMetricManager() {
return metricManager;
}
public MetricsHandler getMetricsHandler() {
return metricsHandler;
}
public MetricsHistoryHandler getMetricsHistoryHandler() {
return metricsHistoryHandler;
}
public OrderedExecutor getReplayUpdatesExecutor() {
return replayUpdatesExecutor;
}
public PackageLoader getPackageLoader() {
return packageLoader;
}
public PackageStoreAPI getPackageStoreAPI() {
return packageStoreAPI;
}
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
* Load the cores defined for this CoreContainer
*/
public void load() {
log.debug("Loading cores into CoreContainer [instanceDir={}]", loader.getInstancePath());
// Always add $SOLR_HOME/lib to the shared resource loader
Set<String> libDirs = new LinkedHashSet<>();
libDirs.add("lib");
if (!StringUtils.isBlank(cfg.getSharedLibDirectory())) {
List<String> sharedLibs = Arrays.asList(cfg.getSharedLibDirectory().split("\\s*,\\s*"));
libDirs.addAll(sharedLibs);
}
boolean modified = false;
// add the sharedLib to the shared resource loader before initializing cfg based plugins
for (String libDir : libDirs) {
Path libPath = loader.getInstancePath().resolve(libDir);
try {
loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
modified = true;
} catch (IOException e) {
if (!libDir.equals("lib")) { // Don't complain if default "lib" dir does not exist
log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
}
}
}
if (modified) {
loader.reloadLuceneSPI();
}
packageStoreAPI = new PackageStoreAPI(this);
containerHandlers.getApiBag().register(new AnnotatedApi(packageStoreAPI.readAPI), Collections.EMPTY_MAP);
containerHandlers.getApiBag().register(new AnnotatedApi(packageStoreAPI.writeAPI), Collections.EMPTY_MAP);
metricManager = new SolrMetricManager(loader, cfg.getMetricsConfig());
String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
solrMetricsContext = new SolrMetricsContext(metricManager, registryName, metricTag);
coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(
coreContainerWorkExecutor, null,
metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
if (shardHandlerFactory instanceof SolrMetricProducer) {
SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory;
metricProducer.initializeMetrics(solrMetricsContext, "httpShardHandler");
}
updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());
updateShardHandler.initializeMetrics(solrMetricsContext, "updateShardHandler");
solrCores.load(loader);
logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
hostName = cfg.getNodeName();
zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
if (isZooKeeperAware()) {
pkiAuthenticationPlugin = new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName(),
(PublicKeyHandler) containerHandlers.get(PublicKeyHandler.PATH));
// use deprecated API for back-compat, remove in 9.0
pkiAuthenticationPlugin.initializeMetrics(solrMetricsContext, "/authentication/pki");
TracerConfigurator.loadTracer(loader, cfg.getTracerConfiguratorPluginInfo(), getZkController().getZkStateReader());
packageLoader = new PackageLoader(this);
containerHandlers.getApiBag().register(new AnnotatedApi(packageLoader.getPackageAPI().editAPI), Collections.EMPTY_MAP);
containerHandlers.getApiBag().register(new AnnotatedApi(packageLoader.getPackageAPI().readAPI), Collections.EMPTY_MAP);
}
MDCLoggingContext.setNode(this);
securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
reloadSecurityProperties();
warnUsersOfInsecureSettings();
this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());
createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
infoHandler = createHandler(INFO_HANDLER_PATH, cfg.getInfoHandlerClass(), InfoHandler.class);
coreAdminHandler = createHandler(CORES_HANDLER_PATH, cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
// metricsHistoryHandler uses metricsHandler, so create it first
metricsHandler = new MetricsHandler(this);
containerHandlers.put(METRICS_PATH, metricsHandler);
metricsHandler.initializeMetrics(solrMetricsContext, METRICS_PATH);
createMetricsHistoryHandler();
autoscalingHistoryHandler = createHandler(AUTOSCALING_HISTORY_PATH, AutoscalingHistoryHandler.class.getName(), AutoscalingHistoryHandler.class);
metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
// may want to add some configuration here in the future
metricsCollectorHandler.init(null);
containerHandlers.put(AUTHZ_PATH, securityConfHandler);
securityConfHandler.initializeMetrics(solrMetricsContext, AUTHZ_PATH);
containerHandlers.put(AUTHC_PATH, securityConfHandler);
PluginInfo[] metricReporters = cfg.getMetricsConfig().getMetricReporters();
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.node);
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jvm);
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jetty);
coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);
containerProperties.putAll(cfg.getSolrProperties());
// initialize gauges for reporting the number of cores and disk total/free
solrMetricsContext.gauge(() -> solrCores.getCores().size(),
true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
solrMetricsContext.gauge(() -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(),
true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores");
solrMetricsContext.gauge(() -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(),
true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
Path dataHome = cfg.getSolrDataHome() != null ? cfg.getSolrDataHome() : cfg.getCoreRootDirectory();
solrMetricsContext.gauge(() -> dataHome.toFile().getTotalSpace(),
true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
solrMetricsContext.gauge(() -> dataHome.toFile().getUsableSpace(),
true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
solrMetricsContext.gauge(() -> dataHome.toAbsolutePath().toString(),
true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs");
solrMetricsContext.gauge(() -> {
try {
return org.apache.lucene.util.IOUtils.spins(dataHome.toAbsolutePath());
} catch (IOException e) {
// default to spinning
return true;
}
},
true, "spins", SolrInfoBean.Category.CONTAINER.toString(), "fs");
solrMetricsContext.gauge(() -> cfg.getCoreRootDirectory().toFile().getTotalSpace(),
true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
solrMetricsContext.gauge(() -> cfg.getCoreRootDirectory().toFile().getUsableSpace(),
true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
solrMetricsContext.gauge(() -> cfg.getCoreRootDirectory().toAbsolutePath().toString(),
true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
solrMetricsContext.gauge(() -> {
try {
return org.apache.lucene.util.IOUtils.spins(cfg.getCoreRootDirectory().toAbsolutePath());
} catch (IOException e) {
// default to spinning
return true;
}
},
true, "spins", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
// add version information
solrMetricsContext.gauge(() -> this.getClass().getPackage().getSpecificationVersion(),
true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version");
solrMetricsContext.gauge(() -> this.getClass().getPackage().getImplementationVersion(),
true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version");
SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean();
fieldCacheBean.initializeMetrics(solrMetricsContext, null);
if (isZooKeeperAware()) {
metricManager.loadClusterReporters(metricReporters, this);
}
// setup executor to load cores in parallel
ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
ExecutorUtil.newMDCAwareFixedThreadPool(
cfg.getCoreLoadThreadCount(isZooKeeperAware()),
new DefaultSolrThreadFactory("coreLoadExecutor")), null,
metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
final List<Future<SolrCore>> futures = new ArrayList<>();
try {
List<CoreDescriptor> cds = coresLocator.discover(this);
if (isZooKeeperAware()) {
// sort the cores if in SolrCloud; in standalone mode the order does not matter
CoreSorter coreComparator = new CoreSorter().init(this);
cds = new ArrayList<>(cds);//make a copy
Collections.sort(cds, coreComparator::compare);
}
checkForDuplicateCoreNames(cds);
status |= CORE_DISCOVERY_COMPLETE;
for (final CoreDescriptor cd : cds) {
if (cd.isTransient() || !cd.isLoadOnStartup()) {
solrCores.addCoreDescriptor(cd);
} else if (asyncSolrCoreLoad) {
solrCores.markCoreAsLoading(cd);
}
if (cd.isLoadOnStartup()) {
futures.add(coreLoadExecutor.submit(() -> {
SolrCore core;
try {
if (zkSys.getZkController() != null) {
zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
}
solrCores.waitAddPendingCoreOps(cd.getName());
core = createFromDescriptor(cd, false, false);
} finally {
solrCores.removeFromPendingOps(cd.getName());
if (asyncSolrCoreLoad) {
solrCores.markCoreAsNotLoading(cd);
}
}
try {
zkSys.registerInZk(core, true, false);
} catch (RuntimeException e) {
SolrException.log(log, "Error registering SolrCore", e);
}
return core;
}));
}
}
// Start the background thread
backgroundCloser = new CloserThread(this, solrCores, cfg);
backgroundCloser.start();
} finally {
if (asyncSolrCoreLoad && futures != null) {
coreContainerWorkExecutor.submit(() -> {
try {
for (Future<SolrCore> future : futures) {
try {
future.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (ExecutionException e) {
log.error("Error waiting for SolrCore to be loaded on startup", e.getCause());
}
}
} finally {
ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
}
});
} else {
ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
}
}
if (isZooKeeperAware()) {
zkSys.getZkController().checkOverseerDesignate();
// initialize this handler here when SolrCloudManager is ready
autoScalingHandler = new AutoScalingHandler(getZkController().getSolrCloudManager(), loader);
containerHandlers.put(AutoScalingHandler.HANDLER_PATH, autoScalingHandler);
autoScalingHandler.initializeMetrics(solrMetricsContext, AutoScalingHandler.HANDLER_PATH);
}
// This is a bit redundant, but these are two distinct concepts even though they're accomplished at the same time.
status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
}
// MetricsHistoryHandler supports both cloud and standalone configs
private void createMetricsHistoryHandler() {
PluginInfo plugin = cfg.getMetricsConfig().getHistoryHandler();
Map<String, Object> initArgs;
if (plugin != null && plugin.initArgs != null) {
initArgs = plugin.initArgs.asMap(5);
initArgs.put(MetricsHistoryHandler.ENABLE_PROP, plugin.isEnabled());
} else {
initArgs = new HashMap<>();
}
String name;
SolrCloudManager cloudManager;
SolrClient client;
if (isZooKeeperAware()) {
name = getZkController().getNodeName();
cloudManager = getZkController().getSolrCloudManager();
client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
.withSocketTimeout(30000).withConnectionTimeout(15000)
.withHttpClient(updateShardHandler.getDefaultHttpClient()).build();
} else {
name = getNodeConfig().getNodeName();
if (name == null || name.isEmpty()) {
name = "localhost";
}
cloudManager = null;
client = new EmbeddedSolrServer(this, null) {
@Override
public void close() throws IOException {
// do nothing - we close the container ourselves
}
};
// enable local metrics unless specifically set otherwise
if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_NODES_PROP)) {
initArgs.put(MetricsHistoryHandler.ENABLE_NODES_PROP, true);
}
if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_REPLICAS_PROP)) {
initArgs.put(MetricsHistoryHandler.ENABLE_REPLICAS_PROP, true);
}
}
metricsHistoryHandler = new MetricsHistoryHandler(name, metricsHandler,
client, cloudManager, initArgs);
containerHandlers.put(METRICS_HISTORY_PATH, metricsHistoryHandler);
metricsHistoryHandler.initializeMetrics(solrMetricsContext, METRICS_HISTORY_PATH);
}
public void securityNodeChanged() {
log.info("Security node changed, reloading security.json");
reloadSecurityProperties();
}
/**
* Make sure securityConfHandler is initialized
*/
private void reloadSecurityProperties() {
SecurityConfHandler.SecurityConfig securityConfig = securityConfHandler.getSecurityConfig(false);
initializeAuthorizationPlugin((Map<String, Object>) securityConfig.getData().get("authorization"));
initializeAuthenticationPlugin((Map<String, Object>) securityConfig.getData().get("authentication"));
initializeAuditloggerPlugin((Map<String, Object>) securityConfig.getData().get("auditlogging"));
}
private void warnUsersOfInsecureSettings() {
if (authenticationPlugin == null || authorizationPlugin == null) {
log.warn("Not all security plugins configured! authentication={} authorization={}. Solr is only as secure as " +
"you make it. Consider configuring authentication/authorization before exposing Solr to users internal or " +
"external. See https://s.apache.org/solrsecurity for more info",
(authenticationPlugin != null) ? "enabled" : "disabled",
(authorizationPlugin != null) ? "enabled" : "disabled");
}
if (authenticationPlugin !=null && StringUtils.isNotEmpty(System.getProperty("solr.jetty.https.port"))) {
log.warn("Solr authentication is enabled, but SSL is off. Consider enabling SSL to protect user credentials and " +
"data with encryption.");
}
}
private static void checkForDuplicateCoreNames(List<CoreDescriptor> cds) {
Map<String, Path> addedCores = Maps.newHashMap();
for (CoreDescriptor cd : cds) {
final String name = cd.getName();
if (addedCores.containsKey(name))
throw new SolrException(ErrorCode.SERVER_ERROR,
String.format(Locale.ROOT, "Found multiple cores with the name [%s], with instancedirs [%s] and [%s]",
name, addedCores.get(name), cd.getInstanceDir()));
addedCores.put(name, cd.getInstanceDir());
}
}
private volatile boolean isShutDown = false;
public boolean isShutDown() {
return isShutDown;
}
public void shutdown() {
ZkController zkController = getZkController();
if (zkController != null) {
OverseerTaskQueue overseerCollectionQueue = zkController.getOverseerCollectionQueue();
overseerCollectionQueue.allowOverseerPendingTasksToComplete();
}
log.info("Shutting down CoreContainer instance=" + System.identityHashCode(this));
ExecutorUtil.shutdownAndAwaitTermination(coreContainerAsyncTaskExecutor);
ExecutorService customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("closeThreadPool"));
isShutDown = true;
try {
if (isZooKeeperAware()) {
cancelCoreRecoveries();
zkSys.zkController.preClose();
}
ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor);
// First wake up the closer thread; it'll terminate almost immediately since it checks isShutDown.
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // wake up anyone waiting
}
if (backgroundCloser != null) { // Doesn't seem right, but tests get in here without initializing the core.
try {
while (true) {
backgroundCloser.join(15000);
if (backgroundCloser.isAlive()) {
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // there is a race we have to protect against
}
} else {
break;
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
if (log.isDebugEnabled()) {
log.debug("backgroundCloser thread was interrupted before finishing");
}
}
}
// Now clear all the cores that are being operated upon.
solrCores.close();
// It's still possible that one of the pending dynamic load operations is waiting, so wake it up if so.
// Since all the pending operations queues have been drained, there should be nothing to do.
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // wake up the thread
}
customThreadPool.submit(() -> {
replayUpdatesExecutor.shutdownAndAwaitTermination();
});
if (metricsHistoryHandler != null) {
metricsHistoryHandler.close();
IOUtils.closeQuietly(metricsHistoryHandler.getSolrClient());
}
if (metricManager != null) {
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node));
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm));
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty));
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node), metricTag);
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm), metricTag);
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty), metricTag);
}
if (isZooKeeperAware()) {
cancelCoreRecoveries();
if (metricManager != null) {
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.cluster));
}
}
try {
if (coreAdminHandler != null) {
customThreadPool.submit(() -> {
coreAdminHandler.shutdown();
});
}
} catch (Exception e) {
log.warn("Error shutting down CoreAdminHandler. Continuing to close CoreContainer.", e);
}
} finally {
try {
if (shardHandlerFactory != null) {
customThreadPool.submit(() -> {
shardHandlerFactory.close();
});
}
} finally {
try {
if (updateShardHandler != null) {
customThreadPool.submit(() -> Collections.singleton(shardHandlerFactory).parallelStream().forEach(c -> {
updateShardHandler.close();
}));
}
} finally {
try {
// we want to close zk stuff last
zkSys.close();
} finally {
ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
}
}
}
}
// It should be safe to close the authorization plugin at this point.
try {
if (authorizationPlugin != null) {
authorizationPlugin.plugin.close();
}
} catch (IOException e) {
log.warn("Exception while closing authorization plugin.", e);
}
// It should be safe to close the authentication plugin at this point.
try {
if (authenticationPlugin != null) {
authenticationPlugin.plugin.close();
authenticationPlugin = null;
}
} catch (Exception e) {
log.warn("Exception while closing authentication plugin.", e);
}
// It should be safe to close the auditlogger plugin at this point.
try {
if (auditloggerPlugin != null) {
auditloggerPlugin.plugin.close();
auditloggerPlugin = null;
}
} catch (Exception e) {
log.warn("Exception while closing auditlogger plugin.", e);
}
    if (packageLoader != null) {
org.apache.lucene.util.IOUtils.closeWhileHandlingException(packageLoader);
}
org.apache.lucene.util.IOUtils.closeWhileHandlingException(loader); // best effort
}
public void cancelCoreRecoveries() {
List<SolrCore> cores = solrCores.getCores();
// we must cancel without holding the cores sync
// make sure we wait for any recoveries to stop
for (SolrCore core : cores) {
try {
core.getSolrCoreState().cancelRecovery();
} catch (Exception e) {
SolrException.log(log, "Error canceling recovery for core", e);
}
}
}
public CoresLocator getCoresLocator() {
return coresLocator;
}
protected SolrCore registerCore(CoreDescriptor cd, SolrCore core, boolean registerInZk, boolean skipRecovery) {
if (core == null) {
throw new RuntimeException("Can not register a null core.");
}
if (isShutDown) {
core.close();
throw new IllegalStateException("This CoreContainer has been closed");
}
SolrCore old = solrCores.putCore(cd, core);
/*
* set both the name of the descriptor and the name of the
     * core, since the descriptor's name is used for persisting.
*/
core.setName(cd.getName());
coreInitFailures.remove(cd.getName());
if (old == null || old == core) {
log.debug("registering core: " + cd.getName());
if (registerInZk) {
zkSys.registerInZk(core, false, skipRecovery);
}
return null;
} else {
log.debug("replacing core: " + cd.getName());
old.close();
if (registerInZk) {
zkSys.registerInZk(core, false, skipRecovery);
}
return old;
}
}
/**
* Creates a new core, publishing the core state to the cluster
*
* @param coreName the core name
* @param parameters the core parameters
* @return the newly created core
*/
public SolrCore create(String coreName, Map<String, String> parameters) {
return create(coreName, cfg.getCoreRootDirectory().resolve(coreName), parameters, false);
}
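  // Illustrative caller (hypothetical, not part of this class): creating a core through the overload above,
  // assuming a CoreContainer reference named coreContainer and the "configSet" core property.
  //   Map<String, String> props = new HashMap<>();
  //   props.put("configSet", "_default");
  //   SolrCore newCore = coreContainer.create("mycore", props);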
/**
* Creates a new core in a specified instance directory, publishing the core state to the cluster
*
* @param coreName the core name
* @param instancePath the instance directory
* @param parameters the core parameters
* @return the newly created core
*/
public SolrCore create(String coreName, Path instancePath, Map<String, String> parameters, boolean newCollection) {
CoreDescriptor cd = new CoreDescriptor(coreName, instancePath, parameters, getContainerProperties(), getZkController());
// TODO: There's a race here, isn't there?
// Since the core descriptor is removed when a core is unloaded, it should never be anywhere when a core is created.
if (getAllCoreNames().contains(coreName)) {
log.warn("Creating a core with existing name is not allowed");
// TODO: Shouldn't this be a BAD_REQUEST?
throw new SolrException(ErrorCode.SERVER_ERROR, "Core with name '" + coreName + "' already exists.");
}
boolean preExisitingZkEntry = false;
try {
if (getZkController() != null) {
if (!Overseer.isLegacy(getZkController().getZkStateReader())) {
if (cd.getCloudDescriptor().getCoreNodeName() == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "non legacy mode coreNodeName missing " + parameters.toString());
}
}
preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
}
// Much of the logic in core handling pre-supposes that the core.properties file already exists, so create it
// first and clean it up if there's an error.
coresLocator.create(this, cd);
SolrCore core = null;
try {
solrCores.waitAddPendingCoreOps(cd.getName());
core = createFromDescriptor(cd, true, newCollection);
coresLocator.persist(this, cd); // Write out the current core properties in case anything changed when the core was created
} finally {
solrCores.removeFromPendingOps(cd.getName());
}
return core;
} catch (Exception ex) {
      // First clean up any core descriptor; there should never be an existing core.properties file for any core that
// failed to be created on-the-fly.
coresLocator.delete(this, cd);
if (isZooKeeperAware() && !preExisitingZkEntry) {
try {
getZkController().unregister(coreName, cd);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
SolrException.log(log, null, e);
} catch (KeeperException e) {
SolrException.log(log, null, e);
} catch (Exception e) {
SolrException.log(log, null, e);
}
}
Throwable tc = ex;
Throwable c = null;
do {
tc = tc.getCause();
if (tc != null) {
c = tc;
}
} while (tc != null);
String rootMsg = "";
if (c != null) {
rootMsg = " Caused by: " + c.getMessage();
}
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Error CREATEing SolrCore '" + coreName + "': " + ex.getMessage() + rootMsg, ex);
}
}
/**
* Creates a new core based on a CoreDescriptor.
*
* @param dcore a core descriptor
* @param publishState publish core state to the cluster if true
* <p>
* WARNING: Any call to this method should be surrounded by a try/finally block
* that calls solrCores.waitAddPendingCoreOps(...) and solrCores.removeFromPendingOps(...)
*
* <pre>
* <code>
* try {
* solrCores.waitAddPendingCoreOps(dcore.getName());
* createFromDescriptor(...);
* } finally {
* solrCores.removeFromPendingOps(dcore.getName());
* }
* </code>
* </pre>
* <p>
* Trying to put the waitAddPending... in this method results in Bad Things Happening due to race conditions.
* getCore() depends on getting the core returned _if_ it's in the pending list due to some other thread opening it.
   * If the core is not in the pending list and not loaded, then getCore() calls this method. Any code that first
   * checked whether the core was loaded _or_ in pending ops and, based on that result, called createFromDescriptor
   * would introduce a race condition; see getCore() for the place where it would be a problem.
* @return the newly created core
*/
@SuppressWarnings("resource")
private SolrCore createFromDescriptor(CoreDescriptor dcore, boolean publishState, boolean newCollection) {
if (isShutDown) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has been shutdown.");
}
SolrCore core = null;
try {
MDCLoggingContext.setCoreDescriptor(this, dcore);
SolrIdentifierValidator.validateCoreName(dcore.getName());
if (zkSys.getZkController() != null) {
zkSys.getZkController().preRegister(dcore, publishState);
}
ConfigSet coreConfig = coreConfigService.loadConfigSet(dcore);
dcore.setConfigSetTrusted(coreConfig.isTrusted());
log.info("Creating SolrCore '{}' using configuration from {}, trusted={}", dcore.getName(), coreConfig.getName(), dcore.isConfigSetTrusted());
try {
core = new SolrCore(this, dcore, coreConfig);
} catch (SolrException e) {
core = processCoreCreateException(e, dcore, coreConfig);
}
// always kick off recovery if we are in non-Cloud mode
if (!isZooKeeperAware() && core.getUpdateHandler().getUpdateLog() != null) {
core.getUpdateHandler().getUpdateLog().recoverFromLog();
}
registerCore(dcore, core, publishState, newCollection);
return core;
} catch (Exception e) {
coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
if (e instanceof ZkController.NotInClusterStateException && !newCollection) {
        // this mostly happens when the core is deleted while this node is down
unload(dcore.getName(), true, true, true);
throw e;
}
solrCores.removeCoreDescriptor(dcore);
final SolrException solrException = new SolrException(ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e);
if (core != null && !core.isClosed())
IOUtils.closeQuietly(core);
throw solrException;
} catch (Throwable t) {
SolrException e = new SolrException(ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t);
coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
solrCores.removeCoreDescriptor(dcore);
if (core != null && !core.isClosed())
IOUtils.closeQuietly(core);
throw t;
} finally {
MDCLoggingContext.clear();
}
}
public boolean isSharedFs(CoreDescriptor cd) {
try (SolrCore core = this.getCore(cd.getName())) {
if (core != null) {
return core.getDirectoryFactory().isSharedStorage();
} else {
ConfigSet configSet = coreConfigService.loadConfigSet(cd);
return DirectoryFactory.loadDirectoryFactory(configSet.getSolrConfig(), this, null).isSharedStorage();
}
}
}
/**
   * Take action when we failed to create a SolrCore. If the error is due to a corrupt index, try to recover. The
   * recovery strategy can be specified via the system property "-DCoreInitFailedAction={fromleader, none}".
*
* @param original the problem seen when loading the core the first time.
* @param dcore core descriptor for the core to create
* @param coreConfig core config for the core to create
   * @return a new SolrCore built against a reset index directory, if recovery is possible
* @throws SolrException rethrows the original exception if we will not attempt to recover, throws a new SolrException with the
* original exception as a suppressed exception if there is a second problem creating the solr core.
* @see CoreInitFailedAction
*/
private SolrCore processCoreCreateException(SolrException original, CoreDescriptor dcore, ConfigSet coreConfig) {
// Traverse full chain since CIE may not be root exception
Throwable cause = original;
while ((cause = cause.getCause()) != null) {
if (cause instanceof CorruptIndexException) {
break;
}
}
// If no CorruptIndexException, nothing we can try here
if (cause == null) throw original;
CoreInitFailedAction action = CoreInitFailedAction.valueOf(System.getProperty(CoreInitFailedAction.class.getSimpleName(), "none"));
log.debug("CorruptIndexException while creating core, will attempt to repair via {}", action);
switch (action) {
case fromleader: // Recovery from leader on a CorruptedIndexException
if (isZooKeeperAware()) {
CloudDescriptor desc = dcore.getCloudDescriptor();
try {
Replica leader = getZkController().getClusterState()
.getCollection(desc.getCollectionName())
.getSlice(desc.getShardId())
.getLeader();
if (leader != null && leader.getState() == State.ACTIVE) {
log.info("Found active leader, will attempt to create fresh core and recover.");
resetIndexDirectory(dcore, coreConfig);
// the index of this core is emptied, its term should be set to 0
getZkController().getShardTerms(desc.getCollectionName(), desc.getShardId()).setTermToZero(desc.getCoreNodeName());
return new SolrCore(this, dcore, coreConfig);
}
} catch (SolrException se) {
se.addSuppressed(original);
throw se;
}
}
throw original;
case none:
throw original;
default:
log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
action, Arrays.asList(CoreInitFailedAction.values()));
throw original;
}
}
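  // For illustration: the action above is resolved via System.getProperty(CoreInitFailedAction.class.getSimpleName(), "none"),
  // i.e. the JVM property name is simply "CoreInitFailedAction". To opt in to recovery from the leader one
  // might start the node with -DCoreInitFailedAction=fromleader, or in a test set it programmatically:
  //   System.setProperty("CoreInitFailedAction", "fromleader");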
/**
   * Write a new index directory for a SolrCore, but do so without loading it.
*/
private void resetIndexDirectory(CoreDescriptor dcore, ConfigSet coreConfig) {
SolrConfig config = coreConfig.getSolrConfig();
String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, dcore.getName());
DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(config, this, registryName);
String dataDir = SolrCore.findDataDir(df, null, config, dcore);
String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
SolrCore.modifyIndexProps(df, dataDir, config, tmpIdxDirName);
// Free the directory object that we had to create for this
Directory dir = null;
try {
dir = df.get(dataDir, DirContext.META_DATA, config.indexConfig.lockType);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
} finally {
try {
df.release(dir);
df.doneWithDirectory(dir);
} catch (IOException e) {
SolrException.log(log, e);
}
}
}
/**
* @return a Collection of registered SolrCores
*/
public Collection<SolrCore> getCores() {
return solrCores.getCores();
}
/**
* Gets the cores that are currently loaded, i.e. cores that have
* 1: loadOnStartup=true and are either not-transient or, if transient, have been loaded and have not been aged out
* 2: loadOnStartup=false and have been loaded but are either non-transient or have not been aged out.
* <p>
* Put another way, this will not return any names of cores that are lazily loaded but have not been called for yet
* or are transient and either not loaded or have been swapped out.
*/
public Collection<String> getLoadedCoreNames() {
return solrCores.getLoadedCoreNames();
}
/**
* This method is currently experimental.
*
   * @return a Collection of the names that a specific core object is mapped to; there may be more than one.
*/
public Collection<String> getNamesForCore(SolrCore core) {
return solrCores.getNamesForCore(core);
}
/**
* get a list of all the cores that are currently known, whether currently loaded or not
*
* @return a list of all the available core names in either permanent or transient cores
*/
public Collection<String> getAllCoreNames() {
return solrCores.getAllCoreNames();
}
/**
* Returns an immutable Map of Exceptions that occurred when initializing
   * SolrCores (either at startup, or due to runtime requests to create cores)
* keyed off of the name (String) of the SolrCore that had the Exception
* during initialization.
* <p>
* While the Map returned by this method is immutable and will not change
* once returned to the client, the source data used to generate this Map
* can be changed as various SolrCore operations are performed:
* </p>
* <ul>
* <li>Failed attempts to create new SolrCores will add new Exceptions.</li>
* <li>Failed attempts to re-create a SolrCore using a name already contained in this Map will replace the Exception.</li>
* <li>Failed attempts to reload a SolrCore will cause an Exception to be added to this list -- even though the existing SolrCore with that name will continue to be available.</li>
   * <li>Successful attempts to re-create a SolrCore using a name already contained in this Map will remove the Exception.</li>
* <li>Registering an existing SolrCore with a name already contained in this Map (ie: ALIAS or SWAP) will remove the Exception.</li>
* </ul>
*/
public Map<String, CoreLoadFailure> getCoreInitFailures() {
return ImmutableMap.copyOf(coreInitFailures);
}
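  // Illustrative caller (hypothetical): surfacing failures from the snapshot returned above. CoreLoadFailure
  // exposes the failed descriptor and the exception as fields (see their use elsewhere in this file).
  //   for (Map.Entry<String, CoreContainer.CoreLoadFailure> entry : coreContainer.getCoreInitFailures().entrySet()) {
  //     log.error("Core [{}] failed to initialize", entry.getKey(), entry.getValue().exception);
  //   }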
// ---------------- Core name related methods ---------------
private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) {
if (oldDesc == null) {
return null;
}
CorePropertiesLocator cpl = new CorePropertiesLocator(null);
CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
// Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties
// in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world
// of core discovery without writing the core.properties file out first.
//
// TODO: remove core.properties from the conf directory in test files, it's in a bad place there anyway.
if (ret == null) {
oldDesc.loadExtraProperties(); // there may be changes to extra properties that we need to pick up.
return oldDesc;
}
// The CloudDescriptor bit here is created in a very convoluted way, requiring access to private methods
// in ZkController. When reloading, this behavior is identical to what used to happen where a copy of the old
// CoreDescriptor was just re-used.
if (ret.getCloudDescriptor() != null) {
ret.getCloudDescriptor().reload(oldDesc.getCloudDescriptor());
}
return ret;
}
/**
* Recreates a SolrCore.
* While the new core is loading, requests will continue to be dispatched to
* and processed by the old core
*
* @param name the name of the SolrCore to reload
*/
public void reload(String name) {
if (isShutDown) {
throw new AlreadyClosedException();
}
SolrCore newCore = null;
SolrCore core = solrCores.getCoreFromAnyList(name, false);
if (core != null) {
// The underlying core properties files may have changed, we don't really know. So we have a (perhaps) stale
// CoreDescriptor and we need to reload it from the disk files
CoreDescriptor cd = reloadCoreDescriptor(core.getCoreDescriptor());
solrCores.addCoreDescriptor(cd);
Closeable oldCore = null;
boolean success = false;
try {
solrCores.waitAddPendingCoreOps(cd.getName());
ConfigSet coreConfig = coreConfigService.loadConfigSet(cd);
log.info("Reloading SolrCore '{}' using configuration from {}", cd.getName(), coreConfig.getName());
newCore = core.reload(coreConfig);
DocCollection docCollection = null;
if (getZkController() != null) {
docCollection = getZkController().getClusterState().getCollection(cd.getCollectionName());
// turn off indexing now, before the new core is registered
if (docCollection.getBool(ZkStateReader.READ_ONLY, false)) {
newCore.readOnly = true;
}
}
registerCore(cd, newCore, false, false);
// force commit on old core if the new one is readOnly and prevent any new updates
if (newCore.readOnly) {
RefCounted<IndexWriter> iwRef = core.getSolrCoreState().getIndexWriter(null);
if (iwRef != null) {
IndexWriter iw = iwRef.get();
// switch old core to readOnly
core.readOnly = true;
try {
if (iw != null) {
iw.commit();
}
} finally {
iwRef.decref();
}
}
}
if (docCollection != null) {
Replica replica = docCollection.getReplica(cd.getCloudDescriptor().getCoreNodeName());
assert replica != null;
if (replica.getType() == Replica.Type.TLOG) { // TODO: needed here?
getZkController().stopReplicationFromLeader(core.getName());
if (!cd.getCloudDescriptor().isLeader()) {
getZkController().startReplicationFromLeader(newCore.getName(), true);
}
} else if (replica.getType() == Replica.Type.PULL) {
getZkController().stopReplicationFromLeader(core.getName());
getZkController().startReplicationFromLeader(newCore.getName(), false);
}
}
success = true;
} catch (SolrCoreState.CoreIsClosedException e) {
throw e;
} catch (Exception e) {
coreInitFailures.put(cd.getName(), new CoreLoadFailure(cd, (Exception) e));
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to reload core [" + cd.getName() + "]", e);
} finally {
if (!success && newCore != null && newCore.getOpenCount() > 0) {
IOUtils.closeQuietly(newCore);
}
solrCores.removeFromPendingOps(cd.getName());
}
} else {
CoreLoadFailure clf = coreInitFailures.get(name);
if (clf != null) {
try {
solrCores.waitAddPendingCoreOps(clf.cd.getName());
createFromDescriptor(clf.cd, true, false);
} finally {
solrCores.removeFromPendingOps(clf.cd.getName());
}
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name);
}
}
}
/**
* Swaps two SolrCore descriptors.
*/
public void swap(String n0, String n1) {
if (n0 == null || n1 == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can not swap unnamed cores.");
}
solrCores.swap(n0, n1);
coresLocator.swap(this, solrCores.getCoreDescriptor(n0), solrCores.getCoreDescriptor(n1));
log.info("swapped: " + n0 + " with " + n1);
}
/**
* Unload a core from this container, leaving all files on disk
*
* @param name the name of the core to unload
*/
public void unload(String name) {
unload(name, false, false, false);
}
/**
* Unload a core from this container, optionally removing the core's data and configuration
*
* @param name the name of the core to unload
* @param deleteIndexDir if true, delete the core's index on close
* @param deleteDataDir if true, delete the core's data directory on close
* @param deleteInstanceDir if true, delete the core's instance directory on close
*/
public void unload(String name, boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) {
CoreDescriptor cd = solrCores.getCoreDescriptor(name);
if (name != null) {
// check for core-init errors first
CoreLoadFailure loadFailure = coreInitFailures.remove(name);
if (loadFailure != null) {
// getting the index directory requires opening a DirectoryFactory with a SolrConfig, etc,
// which we may not be able to do because of the init error. So we just go with what we
// can glean from the CoreDescriptor - datadir and instancedir
SolrCore.deleteUnloadedCore(loadFailure.cd, deleteDataDir, deleteInstanceDir);
// If last time around we didn't successfully load, make sure that all traces of the coreDescriptor are gone.
if (cd != null) {
solrCores.removeCoreDescriptor(cd);
coresLocator.delete(this, cd);
}
return;
}
}
if (cd == null) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
}
boolean close = solrCores.isLoadedNotPendingClose(name);
SolrCore core = solrCores.remove(name);
solrCores.removeCoreDescriptor(cd);
coresLocator.delete(this, cd);
if (core == null) {
// transient core
SolrCore.deleteUnloadedCore(cd, deleteDataDir, deleteInstanceDir);
return;
}
// delete metrics specific to this core
metricManager.removeRegistry(core.getCoreMetricManager().getRegistryName());
if (zkSys.getZkController() != null) {
// cancel recovery in cloud mode
core.getSolrCoreState().cancelRecovery();
if (cd.getCloudDescriptor().getReplicaType() == Replica.Type.PULL
|| cd.getCloudDescriptor().getReplicaType() == Replica.Type.TLOG) {
// Stop replication if this is part of a pull/tlog replica before closing the core
zkSys.getZkController().stopReplicationFromLeader(name);
}
}
core.unloadOnClose(cd, deleteIndexDir, deleteDataDir, deleteInstanceDir);
if (close)
core.closeAndWait();
if (zkSys.getZkController() != null) {
try {
zkSys.getZkController().unregister(name, cd);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted while unregistering core [" + name + "] from cloud state");
} catch (KeeperException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
}
}
}
public void rename(String name, String toName) {
SolrIdentifierValidator.validateCoreName(toName);
try (SolrCore core = getCore(name)) {
if (core != null) {
String oldRegistryName = core.getCoreMetricManager().getRegistryName();
String newRegistryName = SolrCoreMetricManager.createRegistryName(core, toName);
metricManager.swapRegistries(oldRegistryName, newRegistryName);
// The old coreDescriptor is obsolete, so remove it. registerCore will put it back.
CoreDescriptor cd = core.getCoreDescriptor();
solrCores.removeCoreDescriptor(cd);
cd.setProperty("name", toName);
solrCores.addCoreDescriptor(cd);
core.setName(toName);
registerCore(cd, core, true, false);
SolrCore old = solrCores.remove(name);
coresLocator.rename(this, old.getCoreDescriptor(), core.getCoreDescriptor());
}
}
}
/**
* Get the CoreDescriptors for all cores managed by this container
*
* @return a List of CoreDescriptors
*/
public List<CoreDescriptor> getCoreDescriptors() {
return solrCores.getCoreDescriptors();
}
public CoreDescriptor getCoreDescriptor(String coreName) {
return solrCores.getCoreDescriptor(coreName);
}
public Path getCoreRootDirectory() {
return cfg.getCoreRootDirectory();
}
/**
* Gets a core by name and increase its refcount.
*
* @param name the core name
* @return the core if found, null if a SolrCore by this name does not exist
* @throws SolrCoreInitializationException if a SolrCore with this name failed to be initialized
* @see SolrCore#close()
*/
public SolrCore getCore(String name) {
// Do this in two phases since we don't want to lock access to the cores over a load.
SolrCore core = solrCores.getCoreFromAnyList(name, true);
// If a core is loaded, we're done just return it.
if (core != null) {
return core;
}
// If it's not yet loaded, we can check if it's had a core init failure and "do the right thing"
CoreDescriptor desc = solrCores.getCoreDescriptor(name);
// if there was an error initializing this core, throw a 500
// error with the details for clients attempting to access it.
CoreLoadFailure loadFailure = getCoreInitFailures().get(name);
if (null != loadFailure) {
throw new SolrCoreInitializationException(name, loadFailure.exception);
}
// This is a bit of awkwardness where SolrCloud and transient cores don't play nice together. For transient cores,
// we have to allow them to be created at any time there hasn't been a core load failure (use reload to cure that).
// But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to load the core if
    // the core is null and there was an error. If you change this, be sure to run both TestConfigSetsAPI and
// TestLazyCores
if (desc == null || zkSys.getZkController() != null) return null;
// This will put an entry in pending core ops if the core isn't loaded. Here's where moving the
// waitAddPendingCoreOps to createFromDescriptor would introduce a race condition.
core = solrCores.waitAddPendingCoreOps(name);
if (isShutDown) return null; // We're quitting, so stop. This needs to be after the wait above since we may come off
// the wait as a consequence of shutting down.
try {
if (core == null) {
if (zkSys.getZkController() != null) {
zkSys.getZkController().throwErrorIfReplicaReplaced(desc);
}
core = createFromDescriptor(desc, true, false); // This should throw an error if it fails.
}
core.open();
} finally {
solrCores.removeFromPendingOps(name);
}
return core;
}
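  // Illustrative usage (hypothetical caller): since this method bumps the core's refcount, the returned core
  // should be closed by the caller, typically with try-with-resources as done elsewhere in this file:
  //   try (SolrCore core = coreContainer.getCore("mycore")) {
  //     if (core != null) {
  //       // use the core; the reference is released when the block exits
  //     }
  //   }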
public BlobRepository getBlobRepository() {
return blobRepository;
}
/**
* If using asyncSolrCoreLoad=true, calling this after {@link #load()} will
* not return until all cores have finished loading.
*
* @param timeoutMs timeout, upon which method simply returns
*/
public void waitForLoadingCoresToFinish(long timeoutMs) {
solrCores.waitForLoadingCoresToFinish(timeoutMs);
}
public void waitForLoadingCore(String name, long timeoutMs) {
solrCores.waitForLoadingCoreToFinish(name, timeoutMs);
}
// ---------------- CoreContainer request handlers --------------
protected <T> T createHandler(String path, String handlerClass, Class<T> clazz) {
T handler = loader.newInstance(handlerClass, clazz, null, new Class[]{CoreContainer.class}, new Object[]{this});
if (handler instanceof SolrRequestHandler) {
containerHandlers.put(path, (SolrRequestHandler) handler);
}
if (handler instanceof SolrMetricProducer) {
((SolrMetricProducer) handler).initializeMetrics(solrMetricsContext, path);
}
return handler;
}
public CoreAdminHandler getMultiCoreHandler() {
return coreAdminHandler;
}
public CollectionsHandler getCollectionsHandler() {
return collectionsHandler;
}
public HealthCheckHandler getHealthCheckHandler() {
return healthCheckHandler;
}
public InfoHandler getInfoHandler() {
return infoHandler;
}
public ConfigSetsHandler getConfigSetsHandler() {
return configSetsHandler;
}
public String getHostName() {
return this.hostName;
}
/**
* Gets the alternate path for multicore handling:
* This is used in case there is a registered unnamed core (aka name is "") to
* declare an alternate way of accessing named cores.
* This can also be used in a pseudo single-core environment so admins can prepare
* a new version before swapping.
*/
public String getManagementPath() {
return cfg.getManagementPath();
}
public LogWatcher getLogging() {
return logging;
}
/**
* Determines whether the core is already loaded or not but does NOT load the core
*/
public boolean isLoaded(String name) {
return solrCores.isLoaded(name);
}
public boolean isLoadedNotPendingClose(String name) {
return solrCores.isLoadedNotPendingClose(name);
}
// Primarily for transient cores when a core is aged out.
public void queueCoreToClose(SolrCore coreToClose) {
solrCores.queueCoreToClose(coreToClose);
}
/**
* Gets a solr core descriptor for a core that is not loaded. Note that if the caller calls this on a
* loaded core, the unloaded descriptor will be returned.
*
   * @param cname - name of the unloaded core descriptor to load.
* @return a coreDescriptor. May return null
*/
public CoreDescriptor getUnloadedCoreDescriptor(String cname) {
return solrCores.getUnloadedCoreDescriptor(cname);
}
public String getSolrHome() {
return solrHome;
}
public boolean isZooKeeperAware() {
return zkSys.getZkController() != null;
}
public ZkController getZkController() {
return zkSys.getZkController();
}
public NodeConfig getConfig() {
return cfg;
}
/**
* The default ShardHandlerFactory used to communicate with other solr instances
*/
public ShardHandlerFactory getShardHandlerFactory() {
return shardHandlerFactory;
}
public UpdateShardHandler getUpdateShardHandler() {
return updateShardHandler;
}
public SolrResourceLoader getResourceLoader() {
return loader;
}
public boolean isCoreLoading(String name) {
return solrCores.isCoreLoading(name);
}
public AuthorizationPlugin getAuthorizationPlugin() {
return authorizationPlugin == null ? null : authorizationPlugin.plugin;
}
public AuthenticationPlugin getAuthenticationPlugin() {
return authenticationPlugin == null ? null : authenticationPlugin.plugin;
}
public AuditLoggerPlugin getAuditLoggerPlugin() {
return auditloggerPlugin == null ? null : auditloggerPlugin.plugin;
}
public NodeConfig getNodeConfig() {
return cfg;
}
public long getStatus() {
return status;
}
// Occasionally we need to access the transient cache handler in places other than coreContainer.
public TransientSolrCoreCache getTransientCache() {
return solrCores.getTransientCacheHandler();
}
/**
* @param cd CoreDescriptor, presumably a deficient one
* @param prop The property that needs to be repaired.
   * @return true if we were able to successfully persist the repaired coreDescriptor, false otherwise.
* <p>
   * See SOLR-11503. This can be removed when there's no chance we'll need to upgrade a
* Solr installation created with legacyCloud=true from 6.6.1 through 7.1
*/
public boolean repairCoreProperty(CoreDescriptor cd, String prop) {
// So far, coreNodeName is the only property that we need to repair, this may get more complex as other properties
// are added.
if (CoreDescriptor.CORE_NODE_NAME.equals(prop) == false) {
throw new SolrException(ErrorCode.SERVER_ERROR,
String.format(Locale.ROOT, "The only supported property for repair is currently [%s]",
CoreDescriptor.CORE_NODE_NAME));
}
// Try to read the coreNodeName from the cluster state.
String coreName = cd.getName();
DocCollection coll = getZkController().getZkStateReader().getClusterState().getCollection(cd.getCollectionName());
for (Replica rep : coll.getReplicas()) {
if (coreName.equals(rep.getCoreName())) {
log.warn("Core properties file for node {} found with no coreNodeName, attempting to repair with value {}. See SOLR-11503. " +
"This message should only appear if upgrading from collections created Solr 6.6.1 through 7.1.",
rep.getCoreName(), rep.getName());
cd.getCloudDescriptor().setCoreNodeName(rep.getName());
coresLocator.persist(this, cd);
return true;
}
}
log.error("Could not repair coreNodeName in core.properties file for core {}", coreName);
return false;
}
/**
* @param solrCore the core against which we check if there has been a tragic exception
* @return whether this Solr core has tragic exception
* @see org.apache.lucene.index.IndexWriter#getTragicException()
*/
public boolean checkTragicException(SolrCore solrCore) {
Throwable tragicException;
try {
tragicException = solrCore.getSolrCoreState().getTragicException();
} catch (IOException e) {
// failed to open an indexWriter
tragicException = e;
}
if (tragicException != null && isZooKeeperAware()) {
getZkController().giveupLeadership(solrCore.getCoreDescriptor(), tragicException);
}
return tragicException != null;
}
static {
ExecutorUtil.addThreadLocalProvider(SolrRequestInfo.getInheritableThreadLocalProvider());
}
/**
   * Run an arbitrary task in its own thread. This is an expert option and is
* a method you should use with great care. It would be bad to run something that never stopped
* or run something that took a very long time. Typically this is intended for actions that take
* a few seconds, and therefore would be bad to wait for within a request, or actions that need to happen
   * when a core has zero references, but would not pose a significant hindrance to server shut down times.
* It is not intended for long running tasks and if you are using a Runnable with a loop in it, you are
* almost certainly doing it wrong.
* <p><br>
   * WARNING: Solr will not be able to shut down gracefully until this task completes!
* <p><br>
* A significant upside of using this method vs creating your own ExecutorService is that your code
   * does not have to properly shut down executors, which typically is risky from a unit testing
* perspective since the test framework will complain if you don't carefully ensure the executor
* shuts down before the end of the test. Also the threads running this task are sure to have
* a proper MDC for logging.
* <p><br>
* Normally, one uses {@link SolrCore#runAsync(Runnable)} if possible, but in some cases
* you might need to execute a task asynchronously when you could be running on a node with no
* cores, and then use of this method is indicated.
*
* @param r the task to run
*/
public void runAsync(Runnable r) {
coreContainerAsyncTaskExecutor.submit(r);
}
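  // Illustrative usage (hypothetical caller), following the guidance in the javadoc above: keep the task
  // short, because graceful shutdown waits for it to complete.
  //   coreContainer.runAsync(() -> {
  //     // a few seconds of background housekeeping at most
  //   });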
}
class CloserThread extends Thread {
CoreContainer container;
SolrCores solrCores;
NodeConfig cfg;
CloserThread(CoreContainer container, SolrCores solrCores, NodeConfig cfg) {
this.container = container;
this.solrCores = solrCores;
this.cfg = cfg;
}
// It's important that this be the _only_ thread removing things from pendingDynamicCloses!
// This is single-threaded, but I tried a multi-threaded approach and didn't see any performance gains, so
// there's no good justification for the complexity. I suspect that the locking on things like DefaultSolrCoreState
  // essentially creates a single-threaded process anyway.
@Override
public void run() {
while (!container.isShutDown()) {
synchronized (solrCores.getModifyLock()) { // need this so we can wait and be awoken.
try {
solrCores.getModifyLock().wait();
} catch (InterruptedException e) {
// Well, if we've been told to stop, we will. Otherwise, continue on and check to see if there are
// any cores to close.
}
}
for (SolrCore removeMe = solrCores.getCoreToClose();
removeMe != null && !container.isShutDown();
removeMe = solrCores.getCoreToClose()) {
try {
removeMe.close();
} finally {
solrCores.removeFromPendingOps(removeMe.getName());
}
}
}
}
}
| 1 | 32,929 | nitpick: regular order is private than final. | apache-lucene-solr | java |
@@ -145,6 +145,11 @@ class DocstringParameterChecker(BaseChecker):
"useless-type-doc",
"Please remove the ignored parameter type documentation.",
),
+ "W9021": (
+ 'Missing any documentation in "%s"',
+ "missing-any-param-doc",
+ "Please add parameter and/or type documentation.",
+ ),
}
options = ( | 1 | # Copyright (c) 2014-2015 Bruno Daniel <bruno.daniel@blue-yonder.com>
# Copyright (c) 2015-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016-2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Glenn Matthews <glmatthe@cisco.com>
# Copyright (c) 2016 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2017 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2017 John Paraskevopoulos <io.paraskev@gmail.com>
# Copyright (c) 2018, 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2018 Jim Robertson <jrobertson98atx@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Adam Dangoor <adamdangoor@gmail.com>
# Copyright (c) 2019, 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020 Luigi <luigi.cristofolini@q-ctrl.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2021 SupImDos <62866982+SupImDos@users.noreply.github.com>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Logan Miller <14319179+komodo472@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Pylint plugin for checking in Sphinx, Google, or Numpy style docstrings
"""
import re
from typing import Optional
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.checkers import utils as checker_utils
from pylint.extensions import _check_docs_utils as utils
from pylint.extensions._check_docs_utils import Docstring
from pylint.interfaces import IAstroidChecker
from pylint.utils import get_global_option
class DocstringParameterChecker(BaseChecker):
"""Checker for Sphinx, Google, or Numpy style docstrings
* Check that all function, method and constructor parameters are mentioned
in the params and types part of the docstring. Constructor parameters
can be documented in either the class docstring or ``__init__`` docstring,
but not both.
* Check that there are no naming inconsistencies between the signature and
the documentation, i.e. also report documented parameters that are missing
in the signature. This is important to find cases where parameters are
renamed only in the code, not in the documentation.
* Check that all explicitly raised exceptions in a function are documented
in the function docstring. Caught exceptions are ignored.
Activate this checker by adding the line::
load-plugins=pylint.extensions.docparams
to the ``MASTER`` section of your ``.pylintrc``.
:param linter: linter object
:type linter: :class:`pylint.lint.PyLinter`
"""
__implements__ = IAstroidChecker
name = "parameter_documentation"
msgs = {
"W9005": (
'"%s" has constructor parameters documented in class and __init__',
"multiple-constructor-doc",
"Please remove parameter declarations in the class or constructor.",
),
"W9006": (
'"%s" not documented as being raised',
"missing-raises-doc",
"Please document exceptions for all raised exception types.",
),
"W9008": (
"Redundant returns documentation",
"redundant-returns-doc",
"Please remove the return/rtype documentation from this method.",
),
"W9010": (
"Redundant yields documentation",
"redundant-yields-doc",
"Please remove the yields documentation from this method.",
),
"W9011": (
"Missing return documentation",
"missing-return-doc",
"Please add documentation about what this method returns.",
{"old_names": [("W9007", "old-missing-returns-doc")]},
),
"W9012": (
"Missing return type documentation",
"missing-return-type-doc",
"Please document the type returned by this method.",
# we can't use the same old_name for two different warnings
# {'old_names': [('W9007', 'missing-returns-doc')]},
),
"W9013": (
"Missing yield documentation",
"missing-yield-doc",
"Please add documentation about what this generator yields.",
{"old_names": [("W9009", "old-missing-yields-doc")]},
),
"W9014": (
"Missing yield type documentation",
"missing-yield-type-doc",
"Please document the type yielded by this method.",
# we can't use the same old_name for two different warnings
# {'old_names': [('W9009', 'missing-yields-doc')]},
),
"W9015": (
'"%s" missing in parameter documentation',
"missing-param-doc",
"Please add parameter declarations for all parameters.",
{"old_names": [("W9003", "old-missing-param-doc")]},
),
"W9016": (
'"%s" missing in parameter type documentation',
"missing-type-doc",
"Please add parameter type declarations for all parameters.",
{"old_names": [("W9004", "old-missing-type-doc")]},
),
"W9017": (
'"%s" differing in parameter documentation',
"differing-param-doc",
"Please check parameter names in declarations.",
),
"W9018": (
'"%s" differing in parameter type documentation',
"differing-type-doc",
"Please check parameter names in type declarations.",
),
"W9019": (
'"%s" useless ignored parameter documentation',
"useless-param-doc",
"Please remove the ignored parameter documentation.",
),
"W9020": (
'"%s" useless ignored parameter type documentation',
"useless-type-doc",
"Please remove the ignored parameter type documentation.",
),
}
options = (
(
"accept-no-param-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing parameter "
"documentation in the docstring of a function that has "
"parameters.",
},
),
(
"accept-no-raise-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing raises "
"documentation in the docstring of a function that "
"raises an exception.",
},
),
(
"accept-no-return-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing return "
"documentation in the docstring of a function that "
"returns a statement.",
},
),
(
"accept-no-yields-doc",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Whether to accept totally missing yields "
"documentation in the docstring of a generator.",
},
),
(
"default-docstring-type",
{
"type": "choice",
"default": "default",
"choices": list(utils.DOCSTRING_TYPES),
"help": "If the docstring type cannot be guessed "
"the specified docstring type will be used.",
},
),
)
priority = -2
constructor_names = {"__init__", "__new__"}
not_needed_param_in_docstring = {"self", "cls"}
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""Called for function and method definitions (def).
:param node: Node for a function or method definition in the AST
:type node: :class:`astroid.scoped_nodes.Function`
"""
node_doc = utils.docstringify(node.doc, self.config.default_docstring_type)
# skip functions that match the 'no-docstring-rgx' config option
no_docstring_rgx = get_global_option(self, "no-docstring-rgx")
if no_docstring_rgx and re.match(no_docstring_rgx, node.name):
return
# skip functions smaller than 'docstring-min-length'
lines = checker_utils.get_node_last_lineno(node) - node.lineno
max_lines = get_global_option(self, "docstring-min-length")
if max_lines > -1 and lines < max_lines:
return
self.check_functiondef_params(node, node_doc)
self.check_functiondef_returns(node, node_doc)
self.check_functiondef_yields(node, node_doc)
visit_asyncfunctiondef = visit_functiondef
def check_functiondef_params(self, node, node_doc):
node_allow_no_param = None
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
class_doc = utils.docstringify(
class_node.doc, self.config.default_docstring_type
)
self.check_single_constructor_params(class_doc, node_doc, class_node)
# __init__ or class docstrings can have no parameters documented
# as long as the other documents them.
node_allow_no_param = (
class_doc.has_params()
or class_doc.params_documented_elsewhere()
or None
)
class_allow_no_param = (
node_doc.has_params()
or node_doc.params_documented_elsewhere()
or None
)
self.check_arguments_in_docstring(
class_doc, node.args, class_node, class_allow_no_param
)
self.check_arguments_in_docstring(
node_doc, node.args, node, node_allow_no_param
)
def check_functiondef_returns(self, node, node_doc):
if (not node_doc.supports_yields and node.is_generator()) or node.is_abstract():
return
return_nodes = node.nodes_of_class(astroid.Return)
if (node_doc.has_returns() or node_doc.has_rtype()) and not any(
utils.returns_something(ret_node) for ret_node in return_nodes
):
self.add_message("redundant-returns-doc", node=node)
def check_functiondef_yields(self, node, node_doc):
if not node_doc.supports_yields or node.is_abstract():
return
if (
node_doc.has_yields() or node_doc.has_yields_type()
) and not node.is_generator():
self.add_message("redundant-yields-doc", node=node)
def visit_raise(self, node: nodes.Raise) -> None:
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
expected_excs = utils.possible_exc_types(node)
if not expected_excs:
return
if not func_node.doc:
# If this is a property setter,
# the property should have the docstring instead.
property_ = utils.get_setters_property(func_node)
if property_:
func_node = property_
doc = utils.docstringify(func_node.doc, self.config.default_docstring_type)
if not doc.is_valid():
if doc.doc:
self._handle_no_raise_doc(expected_excs, func_node)
return
found_excs_full_names = doc.exceptions()
# Extract just the class name, e.g. "error" from "re.error"
found_excs_class_names = {exc.split(".")[-1] for exc in found_excs_full_names}
missing_excs = expected_excs - found_excs_class_names
self._add_raise_message(missing_excs, func_node)
def visit_return(self, node: nodes.Return) -> None:
if not utils.returns_something(node):
return
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
doc = utils.docstringify(func_node.doc, self.config.default_docstring_type)
if not doc.is_valid() and self.config.accept_no_return_doc:
return
is_property = checker_utils.decorated_with_property(func_node)
if not (doc.has_returns() or (doc.has_property_returns() and is_property)):
self.add_message("missing-return-doc", node=func_node)
if func_node.returns:
return
if not (doc.has_rtype() or (doc.has_property_type() and is_property)):
self.add_message("missing-return-type-doc", node=func_node)
def visit_yield(self, node: nodes.Yield) -> None:
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
doc = utils.docstringify(func_node.doc, self.config.default_docstring_type)
if not doc.is_valid() and self.config.accept_no_yields_doc:
return
if doc.supports_yields:
doc_has_yields = doc.has_yields()
doc_has_yields_type = doc.has_yields_type()
else:
doc_has_yields = doc.has_returns()
doc_has_yields_type = doc.has_rtype()
if not doc_has_yields:
self.add_message("missing-yield-doc", node=func_node)
if not (doc_has_yields_type or func_node.returns):
self.add_message("missing-yield-type-doc", node=func_node)
def visit_yieldfrom(self, node: nodes.YieldFrom) -> None:
self.visit_yield(node)
def _compare_missing_args(
self,
found_argument_names,
message_id,
not_needed_names,
expected_argument_names,
warning_node,
):
"""Compare the found argument names with the expected ones and
generate a message if there are arguments missing.
:param found_argument_names: argument names found in the docstring
:type found_argument_names: set
:param message_id: pylint message id
:type message_id: str
:param not_needed_names: names that may be omitted
:type not_needed_names: set
:param expected_argument_names: Expected argument names
:type expected_argument_names: set
:param warning_node: The node to be analyzed
:type warning_node: :class:`astroid.scoped_nodes.Node`
"""
missing_argument_names = (
expected_argument_names - found_argument_names
) - not_needed_names
if missing_argument_names:
self.add_message(
message_id,
args=(", ".join(sorted(missing_argument_names)),),
node=warning_node,
)
def _compare_different_args(
self,
found_argument_names,
message_id,
not_needed_names,
expected_argument_names,
warning_node,
):
"""Compare the found argument names with the expected ones and
generate a message if there are extra arguments found.
:param found_argument_names: argument names found in the docstring
:type found_argument_names: set
:param message_id: pylint message id
:type message_id: str
:param not_needed_names: names that may be omitted
:type not_needed_names: set
:param expected_argument_names: Expected argument names
:type expected_argument_names: set
:param warning_node: The node to be analyzed
:type warning_node: :class:`astroid.scoped_nodes.Node`
"""
differing_argument_names = (
(expected_argument_names ^ found_argument_names)
- not_needed_names
- expected_argument_names
)
if differing_argument_names:
self.add_message(
message_id,
args=(", ".join(sorted(differing_argument_names)),),
node=warning_node,
)
def _compare_ignored_args(
self,
found_argument_names,
message_id,
ignored_argument_names,
warning_node,
):
"""Compare the found argument names with the ignored ones and
generate a message if there are ignored arguments found.
:param found_argument_names: argument names found in the docstring
:type found_argument_names: set
:param message_id: pylint message id
:type message_id: str
:param ignored_argument_names: Expected argument names
:type ignored_argument_names: set
:param warning_node: The node to be analyzed
:type warning_node: :class:`astroid.scoped_nodes.Node`
"""
existing_ignored_argument_names = ignored_argument_names & found_argument_names
if existing_ignored_argument_names:
self.add_message(
message_id,
args=(", ".join(sorted(existing_ignored_argument_names)),),
node=warning_node,
)
def check_arguments_in_docstring(
self,
doc: Docstring,
arguments_node: astroid.Arguments,
warning_node: astroid.NodeNG,
accept_no_param_doc: Optional[bool] = None,
):
"""Check that all parameters in a function, method or class constructor
on the one hand and the parameters mentioned in the parameter
documentation (e.g. the Sphinx tags 'param' and 'type') on the other
hand are consistent with each other.
* Undocumented parameters except 'self' are noticed.
* Undocumented parameter types except for 'self' and the ``*<args>``
and ``**<kwargs>`` parameters are noticed.
* Parameters mentioned in the parameter documentation that don't or no
longer exist in the function parameter list are noticed.
* If the text "For the parameters, see" or "For the other parameters,
see" (ignoring additional whitespace) is mentioned in the docstring,
missing parameter documentation is tolerated.
* If there's no Sphinx style, Google style or NumPy style parameter
documentation at all, i.e. ``:param`` is never mentioned etc., the
checker assumes that the parameters are documented in another format
and the absence is tolerated.
:param doc: Docstring for the function, method or class.
:type doc: :class:`Docstring`
:param arguments_node: Arguments node for the function, method or
class constructor.
:type arguments_node: :class:`astroid.scoped_nodes.Arguments`
:param warning_node: The node to assign the warnings to
:type warning_node: :class:`astroid.scoped_nodes.Node`
:param accept_no_param_doc: Whether or not to allow no parameters
to be documented.
If None then this value is read from the configuration.
:type accept_no_param_doc: bool or None
"""
# Tolerate missing param or type declarations if there is a link to
# another method carrying the same name.
if not doc.doc:
return
if accept_no_param_doc is None:
accept_no_param_doc = self.config.accept_no_param_doc
tolerate_missing_params = doc.params_documented_elsewhere()
# Collect the function arguments.
expected_argument_names = {arg.name for arg in arguments_node.args}
expected_argument_names.update(arg.name for arg in arguments_node.kwonlyargs)
not_needed_type_in_docstring = self.not_needed_param_in_docstring.copy()
expected_but_ignored_argument_names = set()
ignored_argument_names = get_global_option(self, "ignored-argument-names")
if ignored_argument_names:
expected_but_ignored_argument_names = {
arg
for arg in expected_argument_names
if ignored_argument_names.match(arg)
}
if arguments_node.vararg is not None:
expected_argument_names.add(arguments_node.vararg)
not_needed_type_in_docstring.add(arguments_node.vararg)
if arguments_node.kwarg is not None:
expected_argument_names.add(arguments_node.kwarg)
not_needed_type_in_docstring.add(arguments_node.kwarg)
params_with_doc, params_with_type = doc.match_param_docs()
# Tolerate no parameter documentation at all.
if not params_with_doc and not params_with_type and accept_no_param_doc:
tolerate_missing_params = True
if not tolerate_missing_params:
self._compare_missing_args(
params_with_doc,
"missing-param-doc",
self.not_needed_param_in_docstring
| expected_but_ignored_argument_names,
expected_argument_names,
warning_node,
)
# This is before the update of param_with_type because this must check only
# the type documented in a docstring, not the one using pep484
# See #4117 and #4593
self._compare_ignored_args(
params_with_type,
"useless-type-doc",
expected_but_ignored_argument_names,
warning_node,
)
for index, arg_name in enumerate(arguments_node.args):
if arguments_node.annotations[index]:
params_with_type.add(arg_name.name)
for index, arg_name in enumerate(arguments_node.kwonlyargs):
if arguments_node.kwonlyargs_annotations[index]:
params_with_type.add(arg_name.name)
if not tolerate_missing_params:
self._compare_missing_args(
params_with_type,
"missing-type-doc",
not_needed_type_in_docstring | expected_but_ignored_argument_names,
expected_argument_names,
warning_node,
)
self._compare_different_args(
params_with_doc,
"differing-param-doc",
self.not_needed_param_in_docstring,
expected_argument_names,
warning_node,
)
self._compare_different_args(
params_with_type,
"differing-type-doc",
not_needed_type_in_docstring,
expected_argument_names,
warning_node,
)
self._compare_ignored_args(
params_with_doc,
"useless-param-doc",
expected_but_ignored_argument_names,
warning_node,
)
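    # For illustration, assuming Sphinx-style docstrings: for ``def add(first, second):`` a docstring such as
    #
    #     """Add two numbers.
    #
    #     :param first: the first addend
    #     :type first: int
    #     :param second: the second addend
    #     :type second: int
    #     """
    #
    # satisfies the checks above, while documenting only ``first`` would flag ``second`` with
    # missing-param-doc, and omitting the ``:type:`` lines (with no type annotations in the signature)
    # would flag missing-type-doc.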
def check_single_constructor_params(self, class_doc, init_doc, class_node):
if class_doc.has_params() and init_doc.has_params():
self.add_message(
"multiple-constructor-doc", args=(class_node.name,), node=class_node
)
def _handle_no_raise_doc(self, excs, node):
if self.config.accept_no_raise_doc:
return
self._add_raise_message(excs, node)
def _add_raise_message(self, missing_excs, node):
"""
Adds a message on :param:`node` for the missing exception type.
        :param missing_excs: A set of missing exception types.
:type missing_excs: set(str)
        :param node: The node to show the message on.
:type node: nodes.NodeNG
"""
if node.is_abstract():
try:
missing_excs.remove("NotImplementedError")
except KeyError:
pass
if not missing_excs:
return
self.add_message(
"missing-raises-doc", args=(", ".join(sorted(missing_excs)),), node=node
)
def register(linter):
"""Required method to auto register this checker.
:param linter: Main interface object for Pylint plugins
:type linter: Pylint object
"""
linter.register_checker(DocstringParameterChecker(linter))
| 1 | 16,317 | We could add an old names here, the ideal would be to not force to disable missing-any-param when the old one was already disabled. But they are not really equivalent so maybe you were right to not add it. | PyCQA-pylint | py |
@@ -8,11 +8,13 @@
public const string BenchF = "BenchF";
public const string BenchI = "BenchI";
public const string Inlining = "Inlining";
- public const string SIMD = "SIMD";
- public const string Span = "Span";
public const string V8 = "V8";
public const string Perflab = "Perflab";
public const string CoreFX = "CoreFX";
+
+ public const string LINQ = "LINQ";
+ public const string SIMD = "SIMD";
+ public const string Span = "Span";
}
} | 1 | namespace Benchmarks
{
public static class Categories
{
public const string CoreCLR = "CoreCLR";
public const string BenchmarksGame = "BenchmarksGame";
public const string Benchstones = "Benchstones";
public const string BenchF = "BenchF";
public const string BenchI = "BenchI";
public const string Inlining = "Inlining";
public const string SIMD = "SIMD";
public const string Span = "Span";
public const string V8 = "V8";
public const string Perflab = "Perflab";
public const string CoreFX = "CoreFX";
}
} | 1 | 7,273 | >public const string LINQ = "LINQ"; [](start = 8, length = 34) Are there duplicated benchmarks here? #Closed | dotnet-performance | .cs |
@@ -129,14 +129,16 @@ module Beaker
end
end
- def do_install hosts, version, path, pre_30, options = {}
+ def do_install hosts, options = {}
#convenience methods for installation
########################################################
- def installer_cmd(host, version, installer)
+ def installer_cmd(host, options)
if host['platform'] =~ /windows/
+ version = options[:pe_ver_win] || host['pe_ver_win']
"cd #{host['working_dir']} && msiexec.exe /qn /i puppet-enterprise-#{version}.msi"
else
- "cd #{host['working_dir']}/#{host['dist']} && ./#{installer}"
+ version = options[:pe_ver] || host['pe_ver']
+ "cd #{host['working_dir']}/#{host['dist']} && ./#{options[:installer]}"
end
end
def link_exists?(link) | 1 | require 'pathname'
module Beaker
module DSL
#
# This module contains methods to help cloning, extracting git info,
# ordering of Puppet packages, and installing ruby projects that
# contain an `install.rb` script.
module InstallUtils
# The default install path
SourcePath = "/opt/puppet-git-repos"
# A regex to know if the uri passed is pointing to a git repo
GitURI = %r{^(git|https?|file)://|^git@}
# Github's ssh signature for cloning via ssh
GitHubSig = 'github.com,207.97.227.239 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=='
# @param [String] uri A uri in the format of <git uri>#<revision>
# the `git://`, `http://`, `https://`, and ssh
# (if cloning as the remote git user) protocols
# are valid for <git uri>
#
# @example Usage
# project = extract_repo_info_from 'git@github.com:puppetlabs/SuperSecretSauce#what_is_justin_doing'
#
# puts project[:name]
# #=> 'SuperSecretSauce'
#
# puts project[:rev]
# #=> 'what_is_justin_doing'
#
# @return [Hash{Symbol=>String}] Returns a hash containing the project
# name, repository path, and revision
# (defaults to HEAD)
#
# @api dsl
def extract_repo_info_from uri
project = {}
repo, rev = uri.split('#', 2)
project[:name] = Pathname.new(repo).basename('.git').to_s
project[:path] = repo
project[:rev] = rev || 'HEAD'
return project
end
# Takes an array of package info hashes (like that returned from
# {#extract_repo_info_from}) and sorts the `puppet`, `facter`, `hiera`
# packages so that puppet's dependencies will be installed first.
#
# @!visibility private
def order_packages packages_array
puppet = packages_array.select {|e| e[:name] == 'puppet' }
puppet_depends_on = packages_array.select do |e|
e[:name] == 'hiera' or e[:name] == 'facter'
end
depends_on_puppet = (packages_array - puppet) - puppet_depends_on
[puppet_depends_on, puppet, depends_on_puppet].flatten
end
# @param [Host] host An object implementing {Beaker::Hosts}'s
# interface.
# @param [String] path The path on the remote [host] to the repository
# @param [Hash{Symbol=>String}] repository A hash representing repo
# info like that emitted by
# {#extract_repo_info_from}
#
# @example Getting multiple project versions
# versions = [puppet_repo, facter_repo, hiera_repo].inject({}) do |vers, repo_info|
# vers.merge(find_git_repo_versions(host, '/opt/git-puppet-repos', repo_info) )
# end
# @return [Hash] Executes git describe on [host] and returns a Hash
# with the key of [repository[:name]] and value of
# the output from git describe.
#
# @note This requires the helper methods:
# * {Beaker::DSL::Structure#step}
# * {Beaker::DSL::Helpers#on}
#
# @api dsl
def find_git_repo_versions host, path, repository
version = {}
step "Grab version for #{repository[:name]}" do
on host, "cd #{path}/#{repository[:name]} && " +
"git describe || true" do
version[repository[:name]] = stdout.chomp
end
end
version
end
#
# @see #find_git_repo_versions
def install_from_git host, path, repository
name = repository[:name]
repo = repository[:path]
rev = repository[:rev]
target = "#{path}/#{name}"
step "Clone #{repo} if needed" do
on host, "test -d #{path} || mkdir -p #{path}"
on host, "test -d #{target} || git clone #{repo} #{target}"
end
step "Update #{name} and check out revision #{rev}" do
commands = ["cd #{target}",
"remote rm origin",
"remote add origin #{repo}",
"fetch origin",
"clean -fdx",
"checkout -f #{rev}"]
on host, commands.join(" && git ")
end
step "Install #{name} on the system" do
# The solaris ruby IPS package has bindir set to /usr/ruby/1.8/bin.
# However, this is not the path to which we want to deliver our
# binaries. So if we are using solaris, we have to pass the bin and
# sbin directories to the install.rb
install_opts = ''
install_opts = '--bindir=/usr/bin --sbindir=/usr/sbin' if
host['platform'].include? 'solaris'
on host, "cd #{target} && " +
"if [ -f install.rb ]; then " +
"ruby ./install.rb #{install_opts}; " +
"else true; fi"
end
end
def do_install hosts, version, path, pre_30, options = {}
#convenience methods for installation
########################################################
def installer_cmd(host, version, installer)
if host['platform'] =~ /windows/
"cd #{host['working_dir']} && msiexec.exe /qn /i puppet-enterprise-#{version}.msi"
else
"cd #{host['working_dir']}/#{host['dist']} && ./#{installer}"
end
end
def link_exists?(link)
require "net/http"
require "open-uri"
url = URI.parse(link)
Net::HTTP.start(url.host, url.port) do |http|
return http.head(url.request_uri).code == "200"
end
end
def fetch_puppet(hosts, version, path)
local = File.directory?(path)
hosts.each do |host|
filename = ""
extension = ""
if host['platform'] =~ /windows/
filename = "puppet-enterprise-#{version}"
extension = ".msi"
else
filename = "#{host['dist']}"
extension = ""
if local
extension = File.exists?("#{path}/#{filename}.tar.gz") ? ".tar.gz" : ".tar"
else
extension = link_exists?("#{path}/#{filename}.tar.gz") ? ".tar.gz" : ".tar"
end
end
if local
if not File.exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
scp_to host, "#{path}/#{filename}#{extension}", "#{host['working_dir']}/#{filename}#{extension}"
else
if not link_exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
on host, "cd #{host['working_dir']}; curl #{path}/#{filename}#{extension} -o #{filename}#{extension}"
end
if extension =~ /gz/
on host, "cd #{host['working_dir']}; gunzip #{filename}#{extension}"
end
if extension =~ /tar/
on host, "cd #{host['working_dir']}; tar -xvf #{filename}.tar"
end
end
end
########################################################
#start installation steps here
options[:installer] = 'puppet-enterprise-installer' unless options[:installer]
options[:type] = :install unless options[:type]
hostcert='uname | grep -i sunos > /dev/null && hostname || hostname -s'
master_certname = on(master, hostcert).stdout.strip
answers = Beaker::Answers.answers(version, hosts, master_certname, options)
special_nodes = [master, database, dashboard].uniq
real_agents = agents - special_nodes
# Set PE distribution for all the hosts, create working dir
use_all_tar = ENV['PE_USE_ALL_TAR'] == 'true'
hosts.each do |host|
platform = use_all_tar ? 'all' : host['platform']
host['dist'] = "puppet-enterprise-#{version}-#{platform}"
host['working_dir'] = "/tmp/" + Time.new.strftime("%Y-%m-%d_%H.%M.%S") #unique working dirs make me happy
on host, "mkdir #{host['working_dir']}"
end
fetch_puppet(hosts, version, path)
hosts.each do |host|
# Database host was added in 3.0. Skip it if installing an older version
next if host == database and host != master and host != dashboard and pre_30
if host['platform'] =~ /windows/
on host, "#{installer_cmd(host, version, options[:installer])} PUPPET_MASTER_SERVER=#{master} PUPPET_AGENT_CERTNAME=#{host}"
else
create_remote_file host, "#{host['working_dir']}/answers", Beaker::Answers.answer_string(host, answers)
on host, "#{installer_cmd(host, version, options[:installer])} -a #{host['working_dir']}/answers"
end
end
# If we're installing a version less than 3.0, ignore the database host
install_hosts = hosts.dup
install_hosts.delete(database) if pre_30 and database != master and database != dashboard
# On each agent, we ensure the certificate is signed then shut down the agent
install_hosts.each do |host|
sign_certificate(host)
stop_agent(host)
end
# Wait for PuppetDB to be totally up and running
sleep_until_puppetdb_started(database) unless pre_30
# Run the agent once to ensure everything is in the dashboard
install_hosts.each do |host|
on host, puppet_agent('-t'), :acceptable_exit_codes => [0,2]
# Workaround for PE-1105 when deploying 3.0.0
# The installer did not respect our database host answers in 3.0.0,
# and would cause puppetdb to be bounced by the agent run. By sleeping
# again here, we ensure that if that bounce happens during an upgrade
# test we won't fail early in the install process.
if version == '3.0.0' and host == database
sleep_until_puppetdb_started(database)
end
end
install_hosts.each do |host|
wait_for_host_in_dashboard(host)
end
if pre_30
task = 'nodegroup:add_all_nodes group=default'
else
task = 'defaultgroup:ensure_default_group'
end
on dashboard, "/opt/puppet/bin/rake -sf /opt/puppet/share/puppet-dashboard/Rakefile #{task} RAILS_ENV=production"
        # Now that all hosts are in the dashboard, run puppet one more
# time to configure mcollective
on install_hosts, puppet_agent('-t'), :acceptable_exit_codes => [0,2]
end
#is version a < version b
#3.0.0-160-gac44cfb is greater than 3.0.0, and 2.8.2
def version_is_less a, b
a = a.split('-')[0].split('.')
b = b.split('-')[0].split('.')
(0...a.length).each do |i|
if i < b.length
if a[i] < b[i]
return true
elsif a[i] > b[i]
return false
end
else
return false
end
end
return false
end
def install_pe version, path
pre_30 = version_is_less(version, '3.0')
step "Install #{version} PE on #{path}"
do_install hosts, version, path, pre_30
end
def upgrade_pe version, path, from
pre_30 = version_is_less(version, '3.0')
if pre_30
do_install(hosts, version, path, pre_30, :type => :upgrade, :installer => 'puppet-enterprise-upgrader', :from => from)
else
do_install(hosts, version, path, pre_30, :type => :upgrade, :from => from)
end
end
end
end
end
| 1 | 4,579 | Probably worth taking the opportunity to add yardocs to this method now. Esp. curious about what the options hash accepts. | voxpupuli-beaker | rb |
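As a concrete starting point for the yardocs the reviewer asks for, the reworked `do_install` could document its options hash with YARD `@option` tags along these lines (the keys listed are inferred from the method body and the patch, so they may be incomplete):

      # Install PE on the given hosts.
      #
      # @param hosts [Array<Host>] the hosts to install PE on
      # @param options [Hash] install options
      # @option options [Symbol] :type (:install) either :install or :upgrade
      # @option options [String] :installer ('puppet-enterprise-installer') installer script to run
      # @option options [String] :pe_ver PE version to install on *nix hosts (defaults to host['pe_ver'])
      # @option options [String] :pe_ver_win PE version to install on Windows hosts (defaults to host['pe_ver_win'])
      # @option options [String] :from the PE version being upgraded from, when :type is :upgrade
      # @return [void]
      def do_install hosts, options = {}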
@@ -271,6 +271,12 @@ static void roots_cursor_press_button(struct roots_cursor *cursor,
break;
case WLR_BUTTON_PRESSED:
roots_seat_set_focus(seat, view);
+ if (surface && !view) {
+ struct wlr_layer_surface *layer = surface->role_data;
+ if (layer->current.keyboard_interactive) {
+ roots_seat_set_focus_layer(seat, layer);
+ }
+ }
break;
}
} | 1 | #define _XOPEN_SOURCE 700
#include <math.h>
#include <stdlib.h>
#include <wlr/types/wlr_xcursor_manager.h>
#include <wlr/util/edges.h>
#include <wlr/util/log.h>
#ifdef __linux__
#include <linux/input-event-codes.h>
#elif __FreeBSD__
#include <dev/evdev/input-event-codes.h>
#endif
#include "rootston/cursor.h"
#include "rootston/desktop.h"
#include "rootston/xcursor.h"
struct roots_cursor *roots_cursor_create(struct roots_seat *seat) {
struct roots_cursor *cursor = calloc(1, sizeof(struct roots_cursor));
if (!cursor) {
return NULL;
}
cursor->cursor = wlr_cursor_create();
if (!cursor->cursor) {
free(cursor);
return NULL;
}
cursor->default_xcursor = ROOTS_XCURSOR_DEFAULT;
return cursor;
}
void roots_cursor_destroy(struct roots_cursor *cursor) {
// TODO
}
static void seat_view_deco_motion(struct roots_seat_view *view, double deco_sx, double deco_sy) {
struct roots_cursor *cursor = view->seat->cursor;
double sx = deco_sx;
double sy = deco_sy;
if (view->has_button_grab) {
sx = view->grab_sx;
sy = view->grab_sy;
}
enum roots_deco_part parts = view_get_deco_part(view->view, sx, sy);
bool is_titlebar = (parts & ROOTS_DECO_PART_TITLEBAR);
uint32_t edges = 0;
if (parts & ROOTS_DECO_PART_LEFT_BORDER) {
edges |= WLR_EDGE_LEFT;
} else if (parts & ROOTS_DECO_PART_RIGHT_BORDER) {
edges |= WLR_EDGE_RIGHT;
} else if (parts & ROOTS_DECO_PART_BOTTOM_BORDER) {
edges |= WLR_EDGE_BOTTOM;
} else if (parts & ROOTS_DECO_PART_TOP_BORDER) {
edges |= WLR_EDGE_TOP;
}
if (view->has_button_grab) {
if (is_titlebar) {
roots_seat_begin_move(view->seat, view->view);
} else if (edges) {
roots_seat_begin_resize(view->seat, view->view, edges);
}
view->has_button_grab = false;
} else {
if (is_titlebar) {
wlr_xcursor_manager_set_cursor_image(cursor->xcursor_manager,
cursor->default_xcursor, cursor->cursor);
} else if (edges) {
const char *resize_name = wlr_xcursor_get_resize_name(edges);
wlr_xcursor_manager_set_cursor_image(cursor->xcursor_manager,
resize_name, cursor->cursor);
}
}
}
static void seat_view_deco_leave(struct roots_seat_view *view) {
struct roots_cursor *cursor = view->seat->cursor;
wlr_xcursor_manager_set_cursor_image(cursor->xcursor_manager,
cursor->default_xcursor, cursor->cursor);
view->has_button_grab = false;
}
static void seat_view_deco_button(struct roots_seat_view *view, double sx,
double sy, uint32_t button, uint32_t state) {
if (button == BTN_LEFT && state == WLR_BUTTON_PRESSED) {
view->has_button_grab = true;
view->grab_sx = sx;
view->grab_sy = sy;
} else {
view->has_button_grab = false;
}
enum roots_deco_part parts = view_get_deco_part(view->view, sx, sy);
if (state == WLR_BUTTON_RELEASED && (parts & ROOTS_DECO_PART_TITLEBAR)) {
struct roots_cursor *cursor = view->seat->cursor;
wlr_xcursor_manager_set_cursor_image(cursor->xcursor_manager,
cursor->default_xcursor, cursor->cursor);
}
}
static void roots_passthrough_cursor(struct roots_cursor *cursor,
uint32_t time) {
double sx, sy;
struct roots_view *view = NULL;
struct roots_seat *seat = cursor->seat;
struct roots_desktop *desktop = seat->input->server->desktop;
struct wlr_surface *surface = desktop_surface_at(desktop,
cursor->cursor->x, cursor->cursor->y, &sx, &sy, &view);
struct wl_client *client = NULL;
if (surface) {
client = wl_resource_get_client(surface->resource);
}
if (cursor->cursor_client != client) {
wlr_xcursor_manager_set_cursor_image(cursor->xcursor_manager,
cursor->default_xcursor, cursor->cursor);
cursor->cursor_client = client;
}
if (view) {
struct roots_seat_view *seat_view =
roots_seat_view_from_view(seat, view);
if (cursor->pointer_view && (surface ||
seat_view != cursor->pointer_view)) {
seat_view_deco_leave(cursor->pointer_view);
cursor->pointer_view = NULL;
}
if (!surface) {
cursor->pointer_view = seat_view;
seat_view_deco_motion(seat_view, sx, sy);
}
}
if (surface) {
wlr_seat_pointer_notify_enter(seat->seat, surface, sx, sy);
wlr_seat_pointer_notify_motion(seat->seat, time, sx, sy);
} else {
wlr_seat_pointer_clear_focus(seat->seat);
}
struct roots_drag_icon *drag_icon;
wl_list_for_each(drag_icon, &seat->drag_icons, link) {
roots_drag_icon_update_position(drag_icon);
}
}
static void roots_cursor_update_position(
struct roots_cursor *cursor, uint32_t time) {
struct roots_seat *seat = cursor->seat;
struct roots_view *view;
switch (cursor->mode) {
case ROOTS_CURSOR_PASSTHROUGH:
roots_passthrough_cursor(cursor, time);
break;
case ROOTS_CURSOR_MOVE:
view = roots_seat_get_focus(seat);
if (view != NULL) {
double dx = cursor->cursor->x - cursor->offs_x;
double dy = cursor->cursor->y - cursor->offs_y;
view_move(view, cursor->view_x + dx,
cursor->view_y + dy);
}
break;
case ROOTS_CURSOR_RESIZE:
view = roots_seat_get_focus(seat);
if (view != NULL) {
double dx = cursor->cursor->x - cursor->offs_x;
double dy = cursor->cursor->y - cursor->offs_y;
double x = view->x;
double y = view->y;
int width = cursor->view_width;
int height = cursor->view_height;
if (cursor->resize_edges & WLR_EDGE_TOP) {
y = cursor->view_y + dy;
height -= dy;
if (height < 1) {
y += height;
}
} else if (cursor->resize_edges & WLR_EDGE_BOTTOM) {
height += dy;
}
if (cursor->resize_edges & WLR_EDGE_LEFT) {
x = cursor->view_x + dx;
width -= dx;
if (width < 1) {
x += width;
}
} else if (cursor->resize_edges & WLR_EDGE_RIGHT) {
width += dx;
}
view_move_resize(view, x, y,
width < 1 ? 1 : width,
height < 1 ? 1 : height);
}
break;
case ROOTS_CURSOR_ROTATE:
view = roots_seat_get_focus(seat);
if (view != NULL) {
int ox = view->x + view->wlr_surface->current->width/2,
oy = view->y + view->wlr_surface->current->height/2;
int ux = cursor->offs_x - ox,
uy = cursor->offs_y - oy;
int vx = cursor->cursor->x - ox,
vy = cursor->cursor->y - oy;
float angle = atan2(ux*vy - uy*vx, vx*ux + vy*uy);
int steps = 12;
angle = round(angle/M_PI*steps) / (steps/M_PI);
view_rotate(view, cursor->view_rotation + angle);
}
break;
}
}
static void roots_cursor_press_button(struct roots_cursor *cursor,
struct wlr_input_device *device, uint32_t time, uint32_t button,
uint32_t state, double lx, double ly) {
struct roots_seat *seat = cursor->seat;
struct roots_desktop *desktop = seat->input->server->desktop;
bool is_touch = device->type == WLR_INPUT_DEVICE_TOUCH;
double sx, sy;
struct roots_view *view;
struct wlr_surface *surface = desktop_surface_at(desktop,
lx, ly, &sx, &sy, &view);
if (state == WLR_BUTTON_PRESSED && view &&
roots_seat_has_meta_pressed(seat)) {
roots_seat_set_focus(seat, view);
uint32_t edges;
switch (button) {
case BTN_LEFT:
roots_seat_begin_move(seat, view);
break;
case BTN_RIGHT:
edges = 0;
if (sx < view->wlr_surface->current->width/2) {
edges |= WLR_EDGE_LEFT;
} else {
edges |= WLR_EDGE_RIGHT;
}
if (sy < view->wlr_surface->current->height/2) {
edges |= WLR_EDGE_TOP;
} else {
edges |= WLR_EDGE_BOTTOM;
}
roots_seat_begin_resize(seat, view, edges);
break;
case BTN_MIDDLE:
roots_seat_begin_rotate(seat, view);
break;
}
} else {
if (view && !surface && cursor->pointer_view) {
seat_view_deco_button(cursor->pointer_view,
sx, sy, button, state);
}
if (state == WLR_BUTTON_RELEASED &&
cursor->mode != ROOTS_CURSOR_PASSTHROUGH) {
cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
switch (state) {
case WLR_BUTTON_RELEASED:
if (!is_touch) {
roots_cursor_update_position(cursor, time);
}
break;
case WLR_BUTTON_PRESSED:
roots_seat_set_focus(seat, view);
break;
}
}
if (!is_touch) {
wlr_seat_pointer_notify_button(seat->seat, time, button, state);
}
}
void roots_cursor_handle_motion(struct roots_cursor *cursor,
struct wlr_event_pointer_motion *event) {
wlr_cursor_move(cursor->cursor, event->device,
event->delta_x, event->delta_y);
roots_cursor_update_position(cursor, event->time_msec);
}
void roots_cursor_handle_motion_absolute(struct roots_cursor *cursor,
struct wlr_event_pointer_motion_absolute *event) {
wlr_cursor_warp_absolute(cursor->cursor,
event->device, event->x, event->y);
roots_cursor_update_position(cursor, event->time_msec);
}
void roots_cursor_handle_button(struct roots_cursor *cursor,
struct wlr_event_pointer_button *event) {
roots_cursor_press_button(cursor, event->device, event->time_msec,
event->button, event->state, cursor->cursor->x, cursor->cursor->y);
}
void roots_cursor_handle_axis(struct roots_cursor *cursor,
struct wlr_event_pointer_axis *event) {
wlr_seat_pointer_notify_axis(cursor->seat->seat, event->time_msec,
event->orientation, event->delta);
}
void roots_cursor_handle_touch_down(struct roots_cursor *cursor,
struct wlr_event_touch_down *event) {
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
double lx, ly;
bool result = wlr_cursor_absolute_to_layout_coords(cursor->cursor,
event->device, event->x, event->y, &lx, &ly);
if (!result) {
return;
}
double sx, sy;
struct wlr_surface *surface = desktop_surface_at(
desktop, lx, ly, &sx, &sy, NULL);
uint32_t serial = 0;
if (surface) {
serial = wlr_seat_touch_notify_down(cursor->seat->seat, surface,
event->time_msec, event->touch_id, sx, sy);
}
if (serial && wlr_seat_touch_num_points(cursor->seat->seat) == 1) {
cursor->seat->touch_id = event->touch_id;
cursor->seat->touch_x = lx;
cursor->seat->touch_y = ly;
roots_cursor_press_button(cursor, event->device, event->time_msec,
BTN_LEFT, 1, lx, ly);
}
}
void roots_cursor_handle_touch_up(struct roots_cursor *cursor,
struct wlr_event_touch_up *event) {
struct wlr_touch_point *point =
wlr_seat_touch_get_point(cursor->seat->seat, event->touch_id);
if (!point) {
return;
}
if (wlr_seat_touch_num_points(cursor->seat->seat) == 1) {
roots_cursor_press_button(cursor, event->device, event->time_msec,
BTN_LEFT, 0, cursor->seat->touch_x, cursor->seat->touch_y);
}
wlr_seat_touch_notify_up(cursor->seat->seat, event->time_msec,
event->touch_id);
}
void roots_cursor_handle_touch_motion(struct roots_cursor *cursor,
struct wlr_event_touch_motion *event) {
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
struct wlr_touch_point *point =
wlr_seat_touch_get_point(cursor->seat->seat, event->touch_id);
if (!point) {
return;
}
double lx, ly;
bool result = wlr_cursor_absolute_to_layout_coords(cursor->cursor,
event->device, event->x, event->y, &lx, &ly);
if (!result) {
return;
}
double sx, sy;
struct wlr_surface *surface = desktop_surface_at(
desktop, lx, ly, &sx, &sy, NULL);
if (surface) {
wlr_seat_touch_point_focus(cursor->seat->seat, surface,
event->time_msec, event->touch_id, sx, sy);
wlr_seat_touch_notify_motion(cursor->seat->seat, event->time_msec,
event->touch_id, sx, sy);
} else {
wlr_seat_touch_point_clear_focus(cursor->seat->seat, event->time_msec,
event->touch_id);
}
if (event->touch_id == cursor->seat->touch_id) {
cursor->seat->touch_x = lx;
cursor->seat->touch_y = ly;
}
}
void roots_cursor_handle_tool_axis(struct roots_cursor *cursor,
struct wlr_event_tablet_tool_axis *event) {
if ((event->updated_axes & WLR_TABLET_TOOL_AXIS_X) &&
(event->updated_axes & WLR_TABLET_TOOL_AXIS_Y)) {
wlr_cursor_warp_absolute(cursor->cursor, event->device,
event->x, event->y);
roots_cursor_update_position(cursor, event->time_msec);
} else if ((event->updated_axes & WLR_TABLET_TOOL_AXIS_X)) {
wlr_cursor_warp_absolute(cursor->cursor, event->device, event->x, -1);
roots_cursor_update_position(cursor, event->time_msec);
} else if ((event->updated_axes & WLR_TABLET_TOOL_AXIS_Y)) {
wlr_cursor_warp_absolute(cursor->cursor, event->device, -1, event->y);
roots_cursor_update_position(cursor, event->time_msec);
}
}
void roots_cursor_handle_tool_tip(struct roots_cursor *cursor,
struct wlr_event_tablet_tool_tip *event) {
roots_cursor_press_button(cursor, event->device,
event->time_msec, BTN_LEFT, event->state, cursor->cursor->x,
cursor->cursor->y);
}
void roots_cursor_handle_request_set_cursor(struct roots_cursor *cursor,
struct wlr_seat_pointer_request_set_cursor_event *event) {
struct wlr_surface *focused_surface =
event->seat_client->seat->pointer_state.focused_surface;
bool has_focused =
focused_surface != NULL && focused_surface->resource != NULL;
struct wl_client *focused_client = NULL;
if (has_focused) {
focused_client = wl_resource_get_client(focused_surface->resource);
}
if (event->seat_client->client != focused_client ||
cursor->mode != ROOTS_CURSOR_PASSTHROUGH) {
wlr_log(L_DEBUG, "Denying request to set cursor from unfocused client");
return;
}
wlr_cursor_set_surface(cursor->cursor, event->surface, event->hotspot_x,
event->hotspot_y);
cursor->cursor_client = event->seat_client->client;
}
| 1 | 10,643 | This is Very Meh. We want to get rid of `role_data`, and it's an internal field. | swaywm-wlroots | c |
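To illustrate the direction the reviewer is pointing at, the layer lookup in the patch could go through a role-checking accessor rather than reading the internal `role_data` field. The helpers named below (`wlr_surface_is_layer_surface`, `wlr_layer_surface_from_wlr_surface`) are assumed for the sake of the sketch and may not have existed in wlroots at the time of this review:

	case WLR_BUTTON_PRESSED:
		roots_seat_set_focus(seat, view);
		if (surface && !view && wlr_surface_is_layer_surface(surface)) {
			/* hypothetical accessor instead of poking surface->role_data */
			struct wlr_layer_surface *layer =
				wlr_layer_surface_from_wlr_surface(surface);
			if (layer->current.keyboard_interactive) {
				roots_seat_set_focus_layer(seat, layer);
			}
		}
		break;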
@@ -97,6 +97,11 @@ class Database {
return this.tryCall('selectOne', sql, params);
}
+ async loadExtension(path) {
+ const result = await this.driver()['loadExtension'](path);
+ return result;
+ }
+
async selectAll(sql, params = null) {
return this.tryCall('selectAll', sql, params);
} | 1 | const { Logger } = require('lib/logger.js');
const { time } = require('lib/time-utils.js');
const Mutex = require('async-mutex').Mutex;
class Database {
constructor(driver) {
this.debugMode_ = false;
this.driver_ = driver;
this.logger_ = new Logger();
this.logExcludedQueryTypes_ = [];
this.batchTransactionMutex_ = new Mutex();
}
setLogExcludedQueryTypes(v) {
this.logExcludedQueryTypes_ = v;
}
// Converts the SQLite error to a regular JS error
// so that it prints a stacktrace when passed to
// console.error()
sqliteErrorToJsError(error, sql = null, params = null) {
return this.driver().sqliteErrorToJsError(error, sql, params);
}
setLogger(l) {
this.logger_ = l;
}
logger() {
return this.logger_;
}
driver() {
return this.driver_;
}
async open(options) {
try {
await this.driver().open(options);
} catch (error) {
throw new Error(`Cannot open database: ${error.message}: ${JSON.stringify(options)}`);
}
this.logger().info('Database was open successfully');
}
escapeField(field) {
if (field == '*') return '*';
const p = field.split('.');
if (p.length == 1) return `\`${field}\``;
if (p.length == 2) return `${p[0]}.\`${p[1]}\``;
throw new Error(`Invalid field format: ${field}`);
}
escapeFields(fields) {
if (fields == '*') return '*';
const output = [];
for (let i = 0; i < fields.length; i++) {
output.push(this.escapeField(fields[i]));
}
return output;
}
async tryCall(callName, sql, params) {
if (typeof sql === 'object') {
params = sql.params;
sql = sql.sql;
}
let waitTime = 50;
let totalWaitTime = 0;
while (true) {
try {
this.logQuery(sql, params);
const result = await this.driver()[callName](sql, params);
return result; // No exception was thrown
} catch (error) {
if (error && (error.code == 'SQLITE_IOERR' || error.code == 'SQLITE_BUSY')) {
if (totalWaitTime >= 20000) throw this.sqliteErrorToJsError(error, sql, params);
// NOTE: don't put logger statements here because it might log to the database, which
// could result in an error being thrown again.
// this.logger().warn(sprintf('Error %s: will retry in %s milliseconds', error.code, waitTime));
// this.logger().warn('Error was: ' + error.toString());
await time.msleep(waitTime);
totalWaitTime += waitTime;
waitTime *= 1.5;
} else {
throw this.sqliteErrorToJsError(error, sql, params);
}
}
}
}
async selectOne(sql, params = null) {
return this.tryCall('selectOne', sql, params);
}
async selectAll(sql, params = null) {
return this.tryCall('selectAll', sql, params);
}
async selectAllFields(sql, params, field) {
const rows = await this.tryCall('selectAll', sql, params);
const output = [];
for (let i = 0; i < rows.length; i++) {
const v = rows[i][field];
if (!v) throw new Error(`No such field: ${field}. Query was: ${sql}`);
output.push(rows[i][field]);
}
return output;
}
async exec(sql, params = null) {
return this.tryCall('exec', sql, params);
}
async transactionExecBatch(queries) {
if (queries.length <= 0) return;
if (queries.length == 1) {
const q = this.wrapQuery(queries[0]);
await this.exec(q.sql, q.params);
return;
}
// There can be only one transaction running at a time so use a mutex
const release = await this.batchTransactionMutex_.acquire();
try {
await this.exec('BEGIN TRANSACTION');
for (let i = 0; i < queries.length; i++) {
const query = this.wrapQuery(queries[i]);
await this.exec(query.sql, query.params);
}
await this.exec('COMMIT');
} catch (error) {
await this.exec('ROLLBACK');
throw error;
} finally {
release();
}
}
static enumId(type, s) {
if (type == 'settings') {
if (s == 'int') return 1;
if (s == 'string') return 2;
}
if (type == 'fieldType') {
if (s) s = s.toUpperCase();
if (s == 'INTEGER') s = 'INT';
			if (!(`TYPE_${s}` in this)) throw new Error(`Unknown fieldType: ${s}`);
return this[`TYPE_${s}`];
}
if (type == 'syncTarget') {
if (s == 'memory') return 1;
if (s == 'filesystem') return 2;
if (s == 'onedrive') return 3;
}
throw new Error(`Unknown enum type or value: ${type}, ${s}`);
}
static enumName(type, id) {
if (type === 'fieldType') {
if (id === Database.TYPE_UNKNOWN) return 'unknown';
if (id === Database.TYPE_INT) return 'int';
if (id === Database.TYPE_TEXT) return 'text';
if (id === Database.TYPE_NUMERIC) return 'numeric';
throw new Error(`Invalid type id: ${id}`);
}
}
static formatValue(type, value) {
if (value === null || value === undefined) return null;
if (type == this.TYPE_INT) return Number(value);
if (type == this.TYPE_TEXT) return value;
if (type == this.TYPE_NUMERIC) return Number(value);
throw new Error(`Unknown type: ${type}`);
}
sqlStringToLines(sql) {
const output = [];
const lines = sql.split('\n');
let statement = '';
for (let i = 0; i < lines.length; i++) {
const line = lines[i];
if (line == '') continue;
if (line.substr(0, 2) == '--') continue;
statement += line.trim();
if (line[line.length - 1] == ',') statement += ' ';
if (line[line.length - 1] == ';') {
output.push(statement);
statement = '';
}
}
return output;
}
logQuery(sql, params = null) {
if (this.logExcludedQueryTypes_.length) {
const temp = sql.toLowerCase();
for (let i = 0; i < this.logExcludedQueryTypes_.length; i++) {
if (temp.indexOf(this.logExcludedQueryTypes_[i].toLowerCase()) === 0) return;
}
}
this.logger().debug(sql);
if (params !== null && params.length) this.logger().debug(JSON.stringify(params));
}
static insertQuery(tableName, data) {
if (!data || !Object.keys(data).length) throw new Error('Data is empty');
let keySql = '';
let valueSql = '';
const params = [];
for (const key in data) {
if (!data.hasOwnProperty(key)) continue;
if (key[key.length - 1] == '_') continue;
if (keySql != '') keySql += ', ';
if (valueSql != '') valueSql += ', ';
keySql += `\`${key}\``;
valueSql += '?';
params.push(data[key]);
}
return {
sql: `INSERT INTO \`${tableName}\` (${keySql}) VALUES (${valueSql})`,
params: params,
};
}
static updateQuery(tableName, data, where) {
if (!data || !Object.keys(data).length) throw new Error('Data is empty');
let sql = '';
const params = [];
for (const key in data) {
if (!data.hasOwnProperty(key)) continue;
if (key[key.length - 1] == '_') continue;
if (sql != '') sql += ', ';
sql += `\`${key}\`=?`;
params.push(data[key]);
}
if (typeof where != 'string') {
const s = [];
for (const n in where) {
if (!where.hasOwnProperty(n)) continue;
params.push(where[n]);
s.push(`\`${n}\`=?`);
}
where = s.join(' AND ');
}
return {
sql: `UPDATE \`${tableName}\` SET ${sql} WHERE ${where}`,
params: params,
};
}
alterColumnQueries(tableName, fields) {
const fieldsNoType = [];
for (const n in fields) {
if (!fields.hasOwnProperty(n)) continue;
fieldsNoType.push(n);
}
const fieldsWithType = [];
for (const n in fields) {
if (!fields.hasOwnProperty(n)) continue;
fieldsWithType.push(`${this.escapeField(n)} ${fields[n]}`);
}
let sql = `
CREATE TEMPORARY TABLE _BACKUP_TABLE_NAME_(_FIELDS_TYPE_);
INSERT INTO _BACKUP_TABLE_NAME_ SELECT _FIELDS_NO_TYPE_ FROM _TABLE_NAME_;
DROP TABLE _TABLE_NAME_;
CREATE TABLE _TABLE_NAME_(_FIELDS_TYPE_);
INSERT INTO _TABLE_NAME_ SELECT _FIELDS_NO_TYPE_ FROM _BACKUP_TABLE_NAME_;
DROP TABLE _BACKUP_TABLE_NAME_;
`;
sql = sql.replace(/_BACKUP_TABLE_NAME_/g, this.escapeField(`${tableName}_backup`));
sql = sql.replace(/_TABLE_NAME_/g, this.escapeField(tableName));
sql = sql.replace(/_FIELDS_NO_TYPE_/g, this.escapeFields(fieldsNoType).join(','));
sql = sql.replace(/_FIELDS_TYPE_/g, fieldsWithType.join(','));
return sql.trim().split('\n');
}
wrapQueries(queries) {
const output = [];
for (let i = 0; i < queries.length; i++) {
output.push(this.wrapQuery(queries[i]));
}
return output;
}
wrapQuery(sql, params = null) {
if (!sql) throw new Error(`Cannot wrap empty string: ${sql}`);
if (sql.constructor === Array) {
const output = {};
output.sql = sql[0];
output.params = sql.length >= 2 ? sql[1] : null;
return output;
} else if (typeof sql === 'string') {
return { sql: sql, params: params };
} else {
return sql; // Already wrapped
}
}
}
Database.TYPE_UNKNOWN = 0;
Database.TYPE_INT = 1;
Database.TYPE_TEXT = 2;
Database.TYPE_NUMERIC = 3;
module.exports = { Database };
| 1 | 15,185 | `await this.driver().loadExtension(path);` should work | laurent22-joplin | js |
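The simplification the reviewer suggests would make the new method a one-liner; a minimal sketch (assuming every driver implementation exposes `loadExtension`):

	async loadExtension(path) {
		// Same behaviour as the patched version, without the bracket lookup
		// and the intermediate result variable.
		return this.driver().loadExtension(path);
	}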
@@ -98,12 +98,12 @@ func TestAddDepRescan(t *testing.T) {
target1.AddDependency(buildLabel("//package1:target4"))
// Fake test: calling this now should have no effect because rescan is not true.
- state.QueueTarget(buildLabel("//package1:target1"), core.OriginalTarget, false, false)
+ state.QueueTarget(buildLabel("//package1:target1"), core.OriginalTarget, false)
assertPendingParses(t, state)
assertPendingBuilds(t, state) // Note that the earlier call to assertPendingBuilds cleared it.
// Now running this should activate it
- rescanDeps(state, map[*core.BuildTarget]struct{}{target1: {}})
+ // rescanDeps(state, map[*core.BuildTarget]struct{}{target1: {}})
time.Sleep(time.Millisecond * 100)
assertPendingBuilds(t, state, "//package1:target4") | 1 | // Tests for general parse functions.
package parse
import (
"github.com/stretchr/testify/assert"
"github.com/thought-machine/please/src/core"
"testing"
"time"
)
const tid = 1
// TODO(jpoole): Use brain to figure out what we're actually waiting for here instead of just sleeping 100ms
func TestAddDepSimple(t *testing.T) {
// Simple case with only one package parsed and one target added
state := makeState(true, false)
activateTarget(tid, state, nil, buildLabel("//package1:target1"), core.OriginalTarget, false)
time.Sleep(time.Millisecond * 100)
assertPendingParses(t, state, "//package2:target1", "//package2:target1")
assertPendingBuilds(t, state) // None until package2 parses
assert.Equal(t, 5, state.NumActive())
}
func TestAddDepMultiple(t *testing.T) {
// Similar to above but doing all targets in that package
state := makeState(true, false)
activateTarget(tid, state, nil, buildLabel("//package1:target1"), core.OriginalTarget, false)
activateTarget(tid, state, nil, buildLabel("//package1:target2"), core.OriginalTarget, false)
activateTarget(tid, state, nil, buildLabel("//package1:target3"), core.OriginalTarget, false)
time.Sleep(time.Millisecond * 100)
// We get an additional dep on target2, but not another on package2:target1 because target2
// is already activated since package1:target1 depends on it
assertPendingParses(t, state, "//package2:target1", "//package2:target1", "//package2:target2")
assertPendingBuilds(t, state) // None until package2 parses
assert.Equal(t, 7, state.NumActive())
}
func TestAddDepMultiplePackages(t *testing.T) {
// This time we already have package2 parsed
state := makeState(true, true)
activateTarget(tid, state, nil, buildLabel("//package1:target1"), core.OriginalTarget, false)
time.Sleep(time.Millisecond * 100)
assertPendingBuilds(t, state, "//package2:target2") // This is the only candidate target
assertPendingParses(t, state) // None, we have both packages already
assert.Equal(t, 6, state.NumActive())
}
func TestAddDepNoBuild(t *testing.T) {
// Tag state as not needing build. We shouldn't get any pending builds at this point.
state := makeState(true, true)
state.NeedBuild = false
activateTarget(tid, state, nil, buildLabel("//package1:target1"), core.OriginalTarget, false)
time.Sleep(time.Millisecond * 100)
assertPendingParses(t, state) // None, we have both packages already
assertPendingBuilds(t, state) // Nothing because we don't need to build.
}
func TestAddParseDep(t *testing.T) {
// Tag state as not needing build. Any target that needs to be built to complete parse
// should still get queued for build though. Recall that we indicate this with :all...
state := makeState(true, true)
state.NeedBuild = false
activateTarget(tid, state, nil, buildLabel("//package2:target2"), buildLabel("//package3:all"), false)
time.Sleep(time.Millisecond * 100)
assertPendingBuilds(t, state, "//package2:target2") // Queued because it's needed for parse
assertPendingParses(t, state) // None, we have both packages already
assert.Equal(t, 2, state.NumActive())
}
func TestAddDepRescan(t *testing.T) {
t.Skip("Not convinced this test is a good reflection of reality")
// Simulate a post-build function and rescan.
state := makeState(true, true)
activateTarget(tid, state, nil, buildLabel("//package1:target1"), core.OriginalTarget, false)
time.Sleep(time.Millisecond * 100)
assertPendingBuilds(t, state, "//package2:target2") // This is the only candidate target
assertPendingParses(t, state) // None, we have both packages already
assert.Equal(t, 6, state.NumActive())
// Add new target & dep to target1
target4 := makeTarget("//package1:target4")
state.Graph.Package("package1", "").AddTarget(target4)
state.Graph.AddTarget(target4)
target1 := state.Graph.TargetOrDie(buildLabel("//package1:target1"))
target1.AddDependency(buildLabel("//package1:target4"))
// Fake test: calling this now should have no effect because rescan is not true.
state.QueueTarget(buildLabel("//package1:target1"), core.OriginalTarget, false, false)
assertPendingParses(t, state)
assertPendingBuilds(t, state) // Note that the earlier call to assertPendingBuilds cleared it.
// Now running this should activate it
rescanDeps(state, map[*core.BuildTarget]struct{}{target1: {}})
time.Sleep(time.Millisecond * 100)
assertPendingBuilds(t, state, "//package1:target4")
assertPendingParses(t, state)
}
func TestAddParseDepDeferred(t *testing.T) {
t.Skip("Not convinced this test is a good reflection of reality")
// Similar to TestAddParseDep but where we scan the package once and come back later because
// something else asserts a dependency on it.
state := makeState(true, true)
state.NeedBuild = false
assert.Equal(t, 1, state.NumActive())
activateTarget(tid, state, nil, buildLabel("//package2:target2"), core.OriginalTarget, false)
time.Sleep(time.Millisecond * 100)
assertPendingParses(t, state)
assertPendingBuilds(t, state) // Not yet.
// Now the undefer kicks off...
activateTarget(tid, state, nil, buildLabel("//package2:target2"), buildLabel("//package1:all"), false)
time.Sleep(time.Millisecond * 100)
assertPendingBuilds(t, state, "//package2:target2") // This time!
assertPendingParses(t, state)
assert.Equal(t, 2, state.NumActive())
}
func makeTarget(label string, deps ...string) *core.BuildTarget {
target := core.NewBuildTarget(core.ParseBuildLabel(label, ""))
for _, dep := range deps {
target.AddDependency(core.ParseBuildLabel(dep, ""))
}
return target
}
// makeState creates a new build state with optionally one or two packages in it.
// Used in various tests above.
func makeState(withPackage1, withPackage2 bool) *core.BuildState {
state := core.NewDefaultBuildState()
if withPackage1 {
pkg := core.NewPackage("package1")
state.Graph.AddPackage(pkg)
pkg.AddTarget(makeTarget("//package1:target1", "//package1:target2", "//package2:target1"))
pkg.AddTarget(makeTarget("//package1:target2", "//package2:target1"))
pkg.AddTarget(makeTarget("//package1:target3", "//package2:target2"))
state.Graph.AddTarget(pkg.Target("target1"))
state.Graph.AddTarget(pkg.Target("target2"))
state.Graph.AddTarget(pkg.Target("target3"))
addDeps(state.Graph, pkg)
}
if withPackage2 {
pkg := core.NewPackage("package2")
state.Graph.AddPackage(pkg)
pkg.AddTarget(makeTarget("//package2:target1", "//package2:target2", "//package1:target3"))
pkg.AddTarget(makeTarget("//package2:target2"))
state.Graph.AddTarget(pkg.Target("target1"))
state.Graph.AddTarget(pkg.Target("target2"))
addDeps(state.Graph, pkg)
}
return state
}
func addDeps(graph *core.BuildGraph, pkg *core.Package) {
for _, target := range pkg.AllTargets() {
for _, dep := range target.DeclaredDependencies() {
target.AddDependency(dep)
}
}
}
func assertPendingParses(t *testing.T, state *core.BuildState, targets ...string) {
parses, _ := getAllPending(state)
assert.ElementsMatch(t, targets, parses)
}
func assertPendingBuilds(t *testing.T, state *core.BuildState, targets ...string) {
_, builds := getAllPending(state)
assert.ElementsMatch(t, targets, builds)
}
func getAllPending(state *core.BuildState) ([]string, []string) {
parses, builds, _, tests, _ := state.TaskQueues()
state.Stop()
var pendingParses, pendingBuilds []string
for parses != nil || builds != nil || tests != nil {
select {
case p, ok := <-parses:
if !ok {
parses = nil
break
}
pendingParses = append(pendingParses, p.Label.String())
case l, ok := <-builds:
if !ok {
builds = nil
break
}
pendingBuilds = append(pendingBuilds, l.String())
case _, ok := <-tests:
if !ok {
tests = nil
break
}
}
}
return pendingParses, pendingBuilds
}
func buildLabel(bl string) core.BuildLabel {
return core.ParseBuildLabel(bl, "")
}
| 1 | 10,134 | Is any of this needed anymore? | thought-machine-please | go |
@@ -26,6 +26,16 @@
#include "ostree-repo-private.h"
#include "otutil.h"
+/* See ostree-repo.c for a bit more info about these ABI checks */
+#if __SIZEOF_POINTER__ == 8 && __SIZEOF_LONG__ == 8 && __SIZEOF_INT__ == 4
+G_STATIC_ASSERT(sizeof(OstreeDiffDirsOptions) ==
+ sizeof(int) * 2 +
+ sizeof(gpointer) +
+ sizeof(int) * (7+6) +
+ sizeof(int) + /* hole */
+ sizeof(gpointer) * 7);
+#endif
+
static gboolean
get_file_checksum (OstreeDiffFlags flags,
GFile *f, | 1 | /*
* Copyright (C) 2011 Colin Walters <walters@verbum.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Author: Colin Walters <walters@verbum.org>
*/
#include "config.h"
#include "libglnx.h"
#include "ostree.h"
#include "ostree-repo-private.h"
#include "otutil.h"
static gboolean
get_file_checksum (OstreeDiffFlags flags,
GFile *f,
GFileInfo *f_info,
char **out_checksum,
GCancellable *cancellable,
GError **error)
{
g_autofree char *ret_checksum = NULL;
if (OSTREE_IS_REPO_FILE (f))
{
ret_checksum = g_strdup (ostree_repo_file_get_checksum ((OstreeRepoFile*)f));
}
else
{
g_autoptr(GVariant) xattrs = NULL;
g_autoptr(GInputStream) in = NULL;
if (!(flags & OSTREE_DIFF_FLAGS_IGNORE_XATTRS))
{
if (!glnx_dfd_name_get_all_xattrs (AT_FDCWD, gs_file_get_path_cached (f),
&xattrs, cancellable, error))
return FALSE;
}
if (g_file_info_get_file_type (f_info) == G_FILE_TYPE_REGULAR)
{
in = (GInputStream*)g_file_read (f, cancellable, error);
if (!in)
return FALSE;
}
g_autofree guchar *csum = NULL;
if (!ostree_checksum_file_from_input (f_info, xattrs, in,
OSTREE_OBJECT_TYPE_FILE,
&csum, cancellable, error))
return FALSE;
ret_checksum = ostree_checksum_from_bytes (csum);
}
ot_transfer_out_value(out_checksum, &ret_checksum);
return TRUE;
}
OstreeDiffItem *
ostree_diff_item_ref (OstreeDiffItem *diffitem)
{
g_atomic_int_inc (&diffitem->refcount);
return diffitem;
}
void
ostree_diff_item_unref (OstreeDiffItem *diffitem)
{
if (!g_atomic_int_dec_and_test (&diffitem->refcount))
return;
g_clear_object (&diffitem->src);
g_clear_object (&diffitem->target);
g_clear_object (&diffitem->src_info);
g_clear_object (&diffitem->target_info);
g_free (diffitem->src_checksum);
g_free (diffitem->target_checksum);
g_free (diffitem);
}
G_DEFINE_BOXED_TYPE(OstreeDiffItem, ostree_diff_item,
ostree_diff_item_ref,
ostree_diff_item_unref);
static OstreeDiffItem *
diff_item_new (GFile *a,
GFileInfo *a_info,
GFile *b,
GFileInfo *b_info,
char *checksum_a,
char *checksum_b)
{
OstreeDiffItem *ret = g_new0 (OstreeDiffItem, 1);
ret->refcount = 1;
ret->src = a ? g_object_ref (a) : NULL;
ret->src_info = a_info ? g_object_ref (a_info) : NULL;
ret->target = b ? g_object_ref (b) : NULL;
ret->target_info = b_info ? g_object_ref (b_info) : b_info;
ret->src_checksum = g_strdup (checksum_a);
ret->target_checksum = g_strdup (checksum_b);
return ret;
}
static gboolean
diff_files (OstreeDiffFlags flags,
GFile *a,
GFileInfo *a_info,
GFile *b,
GFileInfo *b_info,
OstreeDiffItem **out_item,
GCancellable *cancellable,
GError **error)
{
g_autofree char *checksum_a = NULL;
g_autofree char *checksum_b = NULL;
if (!get_file_checksum (flags, a, a_info, &checksum_a, cancellable, error))
return FALSE;
if (!get_file_checksum (flags, b, b_info, &checksum_b, cancellable, error))
return FALSE;
g_autoptr(OstreeDiffItem) ret_item = NULL;
if (strcmp (checksum_a, checksum_b) != 0)
{
ret_item = diff_item_new (a, a_info, b, b_info,
checksum_a, checksum_b);
}
ot_transfer_out_value(out_item, &ret_item);
return TRUE;
}
static gboolean
diff_add_dir_recurse (GFile *d,
GPtrArray *added,
GCancellable *cancellable,
GError **error)
{
g_autoptr(GFileEnumerator) dir_enum =
g_file_enumerate_children (d, OSTREE_GIO_FAST_QUERYINFO,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
cancellable,
error);
if (!dir_enum)
return FALSE;
while (TRUE)
{
GFileInfo *child_info;
const char *name;
if (!g_file_enumerator_iterate (dir_enum, &child_info, NULL,
cancellable, error))
return FALSE;
if (child_info == NULL)
break;
name = g_file_info_get_name (child_info);
g_autoptr(GFile) child = g_file_get_child (d, name);
g_ptr_array_add (added, g_object_ref (child));
if (g_file_info_get_file_type (child_info) == G_FILE_TYPE_DIRECTORY)
{
if (!diff_add_dir_recurse (child, added, cancellable, error))
return FALSE;
}
}
return TRUE;
}
/**
* ostree_diff_dirs:
* @flags: Flags
* @a: First directory path, or %NULL
* @b: First directory path
* @modified: (element-type OstreeDiffItem): Modified files
* @removed: (element-type Gio.File): Removed files
* @added: (element-type Gio.File): Added files
* @cancellable: Cancellable
* @error: Error
*
* Compute the difference between directory @a and @b as 3 separate
* sets of #OstreeDiffItem in @modified, @removed, and @added.
*/
gboolean
ostree_diff_dirs (OstreeDiffFlags flags,
GFile *a,
GFile *b,
GPtrArray *modified,
GPtrArray *removed,
GPtrArray *added,
GCancellable *cancellable,
GError **error)
{
return ostree_diff_dirs_with_options (flags, a, b, modified,
removed, added, NULL,
cancellable, error);
}
/**
* ostree_diff_dirs_with_options:
* @flags: Flags
* @a: First directory path, or %NULL
* @b: First directory path
* @modified: (element-type OstreeDiffItem): Modified files
* @removed: (element-type Gio.File): Removed files
* @added: (element-type Gio.File): Added files
* @cancellable: Cancellable
* @options: (allow-none): Options
* @error: Error
*
* Compute the difference between directory @a and @b as 3 separate
* sets of #OstreeDiffItem in @modified, @removed, and @added.
*/
gboolean
ostree_diff_dirs_with_options (OstreeDiffFlags flags,
GFile *a,
GFile *b,
GPtrArray *modified,
GPtrArray *removed,
GPtrArray *added,
OstreeDiffDirsOptions *options,
GCancellable *cancellable,
GError **error)
{
gboolean ret = FALSE;
GError *temp_error = NULL;
g_autoptr(GFileEnumerator) dir_enum = NULL;
g_autoptr(GFile) child_a = NULL;
g_autoptr(GFile) child_b = NULL;
g_autoptr(GFileInfo) child_a_info = NULL;
g_autoptr(GFileInfo) child_b_info = NULL;
OstreeDiffDirsOptions default_opts = OSTREE_DIFF_DIRS_OPTIONS_INIT;
if (!options)
options = &default_opts;
/* If we're diffing versus a repo, and either of them have xattrs disabled,
* then disable for both.
*/
OstreeRepo *repo;
if (OSTREE_IS_REPO_FILE (a))
repo = ostree_repo_file_get_repo ((OstreeRepoFile*)a);
else if (OSTREE_IS_REPO_FILE (b))
repo = ostree_repo_file_get_repo ((OstreeRepoFile*)b);
else
repo = NULL;
if (repo != NULL && repo->disable_xattrs)
flags |= OSTREE_DIFF_FLAGS_IGNORE_XATTRS;
if (a == NULL)
{
if (!diff_add_dir_recurse (b, added, cancellable, error))
goto out;
ret = TRUE;
goto out;
}
child_a_info = g_file_query_info (a, OSTREE_GIO_FAST_QUERYINFO,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
cancellable, error);
if (!child_a_info)
goto out;
child_b_info = g_file_query_info (b, OSTREE_GIO_FAST_QUERYINFO,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
cancellable, error);
if (!child_b_info)
goto out;
/* Fast path test for unmodified directories */
if (g_file_info_get_file_type (child_a_info) == G_FILE_TYPE_DIRECTORY
&& g_file_info_get_file_type (child_b_info) == G_FILE_TYPE_DIRECTORY
&& OSTREE_IS_REPO_FILE (a)
&& OSTREE_IS_REPO_FILE (b))
{
OstreeRepoFile *a_repof = (OstreeRepoFile*) a;
OstreeRepoFile *b_repof = (OstreeRepoFile*) b;
if (strcmp (ostree_repo_file_tree_get_contents_checksum (a_repof),
ostree_repo_file_tree_get_contents_checksum (b_repof)) == 0)
{
ret = TRUE;
goto out;
}
}
g_clear_object (&child_a_info);
g_clear_object (&child_b_info);
dir_enum = g_file_enumerate_children (a, OSTREE_GIO_FAST_QUERYINFO,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
cancellable, error);
if (!dir_enum)
goto out;
while ((child_a_info = g_file_enumerator_next_file (dir_enum, cancellable, &temp_error)) != NULL)
{
const char *name;
GFileType child_a_type;
GFileType child_b_type;
name = g_file_info_get_name (child_a_info);
g_clear_object (&child_a);
child_a = g_file_get_child (a, name);
child_a_type = g_file_info_get_file_type (child_a_info);
g_clear_object (&child_b);
child_b = g_file_get_child (b, name);
g_clear_object (&child_b_info);
child_b_info = g_file_query_info (child_b, OSTREE_GIO_FAST_QUERYINFO,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
cancellable,
&temp_error);
if (!child_b_info)
{
if (g_error_matches (temp_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND))
{
g_clear_error (&temp_error);
g_ptr_array_add (removed, g_object_ref (child_a));
}
else
{
g_propagate_error (error, temp_error);
goto out;
}
}
else
{
if (options->owner_uid >= 0)
g_file_info_set_attribute_uint32 (child_b_info, "unix::uid", options->owner_uid);
if (options->owner_gid >= 0)
g_file_info_set_attribute_uint32 (child_b_info, "unix::gid", options->owner_gid);
child_b_type = g_file_info_get_file_type (child_b_info);
if (child_a_type != child_b_type)
{
OstreeDiffItem *diff_item = diff_item_new (child_a, child_a_info,
child_b, child_b_info, NULL, NULL);
g_ptr_array_add (modified, diff_item);
}
else
{
OstreeDiffItem *diff_item = NULL;
if (!diff_files (flags, child_a, child_a_info, child_b, child_b_info, &diff_item,
cancellable, error))
goto out;
if (diff_item)
g_ptr_array_add (modified, diff_item); /* Transfer ownership */
if (child_a_type == G_FILE_TYPE_DIRECTORY)
{
if (!ostree_diff_dirs_with_options (flags, child_a, child_b, modified,
removed, added, options,
cancellable, error))
goto out;
}
}
}
g_clear_object (&child_a_info);
}
if (temp_error != NULL)
{
g_propagate_error (error, temp_error);
goto out;
}
g_clear_object (&dir_enum);
dir_enum = g_file_enumerate_children (b, OSTREE_GIO_FAST_QUERYINFO,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
cancellable, error);
if (!dir_enum)
goto out;
g_clear_object (&child_b_info);
while ((child_b_info = g_file_enumerator_next_file (dir_enum, cancellable, &temp_error)) != NULL)
{
const char *name;
name = g_file_info_get_name (child_b_info);
g_clear_object (&child_a);
child_a = g_file_get_child (a, name);
g_clear_object (&child_b);
child_b = g_file_get_child (b, name);
g_clear_object (&child_a_info);
child_a_info = g_file_query_info (child_a, OSTREE_GIO_FAST_QUERYINFO,
G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
cancellable,
&temp_error);
if (!child_a_info)
{
if (g_error_matches (temp_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND))
{
g_clear_error (&temp_error);
g_ptr_array_add (added, g_object_ref (child_b));
if (g_file_info_get_file_type (child_b_info) == G_FILE_TYPE_DIRECTORY)
{
if (!diff_add_dir_recurse (child_b, added, cancellable, error))
goto out;
}
}
else
{
g_propagate_error (error, temp_error);
goto out;
}
}
g_clear_object (&child_b_info);
}
if (temp_error != NULL)
{
g_propagate_error (error, temp_error);
goto out;
}
ret = TRUE;
out:
return ret;
}
static void
print_diff_item (char prefix,
GFile *base,
GFile *file)
{
if (g_file_is_native (file))
{
g_autofree char *relpath = g_file_get_relative_path (base, file);
g_print ("%c %s\n", prefix, relpath);
}
else
{
g_print ("%c %s\n", prefix, gs_file_get_path_cached (file));
}
}
/**
* ostree_diff_print:
* @a: First directory path
* @b: First directory path
* @modified: (element-type OstreeDiffItem): Modified files
* @removed: (element-type Gio.File): Removed files
* @added: (element-type Gio.File): Added files
*
* Print the contents of a diff to stdout.
*/
void
ostree_diff_print (GFile *a,
GFile *b,
GPtrArray *modified,
GPtrArray *removed,
GPtrArray *added)
{
guint i;
for (i = 0; i < modified->len; i++)
{
OstreeDiffItem *diff = modified->pdata[i];
print_diff_item ('M', a, diff->src);
}
for (i = 0; i < removed->len; i++)
{
GFile *removed_file = removed->pdata[i];
print_diff_item ('D', a, removed_file);
}
for (i = 0; i < added->len; i++)
{
GFile *added_f = added->pdata[i];
print_diff_item ('A', b, added_f);
}
}
| 1 | 12,716 | Can you add a mention here and in `ostree-repo.c` that the holes were found with `pahole`? | ostreedev-ostree | c |
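The mention the reviewer asks for could be a short comment next to the static assertion; something along these lines (the object path in the example invocation is illustrative):

/* The expected layout above, including the padding "hole" between the int
 * members and the pointers, was verified with pahole, e.g.:
 *   pahole -C OstreeDiffDirsOptions src/libostree/.libs/ostree-diff.o
 * See ostree-repo.c for more background on these ABI checks. */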
@@ -78,7 +78,17 @@ module Blacklight::SolrHelper
# Returns a two-element array (aka duple) with first the solr response object,
# and second an array of SolrDocuments representing the response.docs
def get_search_results(user_params = params || {}, extra_controller_params = {})
- solr_response = query_solr(user_params, extra_controller_params)
+ Deprecation.warn(self, "get_search_results is deprecated and will be removed in blacklight-6.0. Use `search_results' instead")
+ search_results(user_params, extra_controller_params, solr_search_params_logic)
+ end
+
+ # a solr query method
+ # @param [Hash,HashWithIndifferentAccess] user_params ({}) the user provided parameters (e.g. query, facets, sort, etc)
+ # @param [Hash,HashWithIndifferentAccess] extra_controller_params ({}) extra parameters to add to the search
+ # @param [List<Symbol] processor_chain a list of filter methods to run
+ # @return [Blacklight::SolrResponse] the solr response object
+ def search_results(user_params, extra_controller_params, solr_search_parms_logic)
+ solr_response = query_solr(user_params, extra_controller_params, solr_search_params_logic)
case
when (solr_response.grouped? && grouped_key_for_results) | 1 | # -*- encoding : utf-8 -*-
# SolrHelper is a controller layer mixin. It is in the controller scope: request params, session etc.
#
# NOTE: Be careful when creating variables here as they may be overriding something that already exists.
# The ActionController docs: http://api.rubyonrails.org/classes/ActionController/Base.html
#
# Override these methods in your own controller for customizations:
#
# class CatalogController < ActionController::Base
#
# include Blacklight::Catalog
#
# def solr_search_params
# super.merge :per_page=>10
# end
# end
#
# Or by including in local extensions:
# module LocalSolrHelperExtension
# [ local overrides ]
# end
#
# class CatalogController < ActionController::Base
#
# include Blacklight::Catalog
# include LocalSolrHelperExtension
#
# def solr_search_params
# super.merge :per_page=>10
# end
# end
#
# Or by using ActiveSupport::Concern:
#
# module LocalSolrHelperExtension
# extend ActiveSupport::Concern
# include Blacklight::SolrHelper
#
# [ local overrides ]
# end
#
# class CatalogController < ApplicationController
# include LocalSolrHelperExtension
# include Blacklight::Catalog
# end
module Blacklight::SolrHelper
extend ActiveSupport::Concern
extend Deprecation
self.deprecation_horizon = 'blacklight 6.0'
include Blacklight::RequestBuilders
##
# Execute a solr query
# @see [Blacklight::SolrRepository#send_and_receive]
# @return [Blacklight::SolrResponse] the solr response object
def find *args
solr_params = args.extract_options!
path = args.first || blacklight_config.solr_path
solr_params[:qt] ||= blacklight_config.qt
solr_repository.send_and_receive path, solr_params
end
deprecation_deprecate :find
# returns a params hash for finding a single solr document (CatalogController #show action)
def solr_doc_params(id=nil)
default_solr_doc_params(id)
end
deprecation_deprecate :solr_doc_params
# a solr query method
# given a user query, return a solr response containing both result docs and facets
# - mixes in the Blacklight::Solr::SpellingSuggestions module
# - the response will have a spelling_suggestions method
# Returns a two-element array (aka duple) with first the solr response object,
# and second an array of SolrDocuments representing the response.docs
def get_search_results(user_params = params || {}, extra_controller_params = {})
solr_response = query_solr(user_params, extra_controller_params)
case
when (solr_response.grouped? && grouped_key_for_results)
[solr_response.group(grouped_key_for_results), []]
when (solr_response.grouped? && solr_response.grouped.length == 1)
[solr_response.grouped.first, []]
else
[solr_response, solr_response.documents]
end
end
# a solr query method
# given a user query,
# @return [Blacklight::SolrResponse] the solr response object
def query_solr(user_params = params || {}, extra_controller_params = {})
solr_params = self.solr_search_params(user_params).merge(extra_controller_params)
solr_repository.search(solr_params)
end
# a solr query method
# retrieve a solr document, given the doc id
# @return [Blacklight::SolrResponse, Blacklight::SolrDocument] the solr response object and the first document
def get_solr_response_for_doc_id(id=nil, extra_controller_params={})
if id.nil?
Deprecation.warn Blacklight::SolrHelper, "Calling #get_solr_response_for_doc_id without an explicit id argument is deprecated"
id ||= params[:id]
end
old_solr_doc_params = Deprecation.silence(Blacklight::SolrHelper) do
solr_doc_params(id)
end
if default_solr_doc_params(id) != old_solr_doc_params
Deprecation.warn Blacklight::SolrHelper, "The #solr_doc_params method is deprecated. Instead, you should provide a custom SolrRepository implementation for the additional behavior you're offering"
extra_controller_params = extra_controller_params.merge(old_solr_doc_params)
end
solr_response = solr_repository.find id, extra_controller_params
[solr_response, solr_response.documents.first]
end
##
# Retrieve a set of documents by id
# @overload get_solr_response_for_document_ids(ids, extra_controller_params)
# @overload get_solr_response_for_document_ids(ids, user_params, extra_controller_params)
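#
# @example Illustrative usage sketch (added here, not from the original docs; the ids are hypothetical)
#   solr_response, documents = get_solr_response_for_document_ids(['doc-1', 'doc-2'])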
def get_solr_response_for_document_ids(ids=[], *args)
# user_params = params || {}, extra_controller_params = {}
if args.length == 1
user_params = params
extra_controller_params = args.first || {}
else
user_params, extra_controller_params = args
user_params ||= params
extra_controller_params ||= {}
end
solr_response = query_solr(user_params, extra_controller_params.merge(solr_document_ids_params(ids)))
[solr_response, solr_response.documents]
end
# given a field name and array of values, get the matching SOLR documents
# @return [Blacklight::SolrResponse, Array<Blacklight::SolrDocument>] the solr response object and a list of solr documents
def get_solr_response_for_field_values(field, values, extra_controller_params = {})
solr_response = query_solr(params, extra_controller_params.merge(solr_documents_by_field_values_params(field, values)))
[solr_response, solr_response.documents]
end
deprecation_deprecate :get_solr_response_for_field_values
##
# Get the solr response when retrieving only a single facet field
# @return [Blacklight::SolrResponse] the solr response
def get_facet_field_response(facet_field, user_params = params || {}, extra_controller_params = {})
solr_params = solr_facet_params(facet_field, user_params, extra_controller_params)
query_solr(user_params, extra_controller_params.merge(solr_facet_params(facet_field, user_params, extra_controller_params)))
end
# a solr query method
# used to paginate through a single facet field's values
# /catalog/facet/language_facet
def get_facet_pagination(facet_field, user_params=params || {}, extra_controller_params={})
# Make the solr call
response = get_facet_field_response(facet_field, user_params, extra_controller_params)
limit = response.params[:"f.#{facet_field}.facet.limit"].to_s.to_i - 1
# Actually create the paginator!
# NOTE: The sniffing of the proper sort from the solr response is not
# currently tested for, tricky to figure out how to test, since the
# default setup we test against doesn't use this feature.
return Blacklight::Solr::FacetPaginator.new(response.facets.first.items,
:offset => response.params[:"f.#{facet_field}.facet.offset"],
:limit => limit,
:sort => response.params[:"f.#{facet_field}.facet.sort"] || response.params["facet.sort"]
)
end
deprecation_deprecate :get_facet_pagination
# a solr query method
# this is used when selecting a search result: we have a query and a
# position in the search results and possibly some facets
# Pass in an index where 1 is the first document in the list, and
# the Blacklight app-level request params that define the search.
# @return [Blacklight::SolrDocument, nil] the found document or nil if not found
def get_single_doc_via_search(index, request_params)
solr_params = solr_search_params(request_params)
solr_params[:start] = (index - 1) # start at 0 to get 1st doc, 1 to get 2nd.
solr_params[:rows] = 1
solr_params[:fl] = '*'
solr_response = solr_repository.search(solr_params)
solr_response.documents.first
end
deprecation_deprecate :get_single_doc_via_search
# Get the previous and next document from a search result
# @return [Blacklight::SolrResponse, Array<Blacklight::SolrDocument>] the solr response and a list of the first and last document
def get_previous_and_next_documents_for_search(index, request_params, extra_controller_params={})
solr_response = query_solr(request_params, extra_controller_params.merge(previous_and_next_document_params(index)))
document_list = solr_response.documents
# only get the previous doc if there is one
prev_doc = document_list.first if index > 0
next_doc = document_list.last if (index + 1) < solr_response.total
[solr_response, [prev_doc, next_doc]]
end
# a solr query method
# does a standard search but returns a simplified object.
# an array is returned, the first item is the query string,
# the second item is another array. This second array contains
# all of the field values for each of the documents...
# where the field is the "field" argument passed in.
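#
# @example Illustrative usage sketch (added here; 'title_display' is a hypothetical field name)
#   query, titles = get_opensearch_response('title_display', :q => 'history')
#   # query  => the :q parameter echoed back in the response params
#   # titles => unique 'title_display' values across the matching documents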
def get_opensearch_response(field=nil, request_params = params || {}, extra_controller_params={})
field ||= blacklight_config.view_config('opensearch').title_field
response = query_solr(request_params, solr_opensearch_params(field).merge(extra_controller_params))
[response.params[:q], response.documents.flat_map {|doc| doc[field] }.uniq]
end
##
# The key to use to retrieve the grouped field to display
def grouped_key_for_results
blacklight_config.index.group
end
def solr_repository
@solr_repository ||= Blacklight::SolrRepository.new(blacklight_config)
end
def blacklight_solr
solr_repository.blacklight_solr
end
deprecation_deprecate :blacklight_solr
private
##
# @deprecated
def default_solr_doc_params(id=nil)
id ||= params[:id]
# add our document id to the document_unique_id_param query parameter
p = blacklight_config.default_document_solr_params.merge({
# this assumes the request handler will map the unique id param
# to the unique key field using either solr local params, the
# real-time get handler, etc.
blacklight_config.document_unique_id_param => id
})
p[:qt] ||= blacklight_config.document_solr_request_handler
p
end
end
| 1 | 5,676 | While we're changing this, I wonder if we can do away with `extra_controller_params`.. Maybe a new type of `solr_search_params_logic` that appends the attributes? | projectblacklight-blacklight | rb |
@@ -564,6 +564,8 @@ module Beaker
yield self if block_given?
+ rescue Beaker::DSL::Assertions, Minitest::Assertion => early_assertion
+ fail_test(early_assertion)
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception) | 1 | # -*- coding: utf-8 -*-
require 'resolv'
require 'inifile'
require 'timeout'
require 'beaker/dsl/outcomes'
require 'beaker/options'
require 'hocon'
require 'hocon/config_error'
module Beaker
module DSL
# This is the heart of the Puppet Acceptance DSL. Here you find a helper
# to proxy commands to hosts, more commands to move files between hosts
# and execute remote scripts, confine test cases to certain hosts and
# prepare the state of a test case.
#
# To mix this into a class you need the following:
# * a method *hosts* that yields any hosts implementing
# {Beaker::Host}'s interface to act upon.
# * a method *options* that provides an options hash, see {Beaker::Options::OptionsHash}
# * a method *logger* that yields a logger implementing
# {Beaker::Logger}'s interface.
# * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing
# {Beaker::Host}'s interface to act upon
# * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation
#
#
# @api dsl
module Helpers
# @!macro common_opts
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :silent (false) Do not produce log output
# @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array
# (or range) of integer exit codes that should be considered
# acceptable. An error will be thrown if the exit code does not
# match one of the values in this list.
# @option opts [Hash{String=>String}] :environment ({}) These will be
# treated as extra environment variables that should be set before
# running the command.
#
# The primary method for executing commands *on* some set of hosts.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# on hosts, 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# on agents, 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if on(host, 'ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# on agent, 'cat /etc/puppet/puppet.conf' do
# assert_match stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @example Using a role (defined in a String) to identify the host
# on "master", "echo hello"
#
# @example Using a role (defined in a Symbol) to identify the host
# on :dashboard, "echo hello"
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def on(host, command, opts = {}, &block)
block_on host do | host |
cur_command = command
if command.is_a? Command
cur_command = command.cmd_line(host)
end
cmd_opts = {}
#add any additional environment variables to the command
if opts[:environment]
cmd_opts['ENV'] = opts[:environment]
end
@result = host.exec(Command.new(cur_command.to_s, [], cmd_opts), opts)
# Also, let additional checking be performed by the caller.
if block_given?
case block.arity
#block with arity of 0, just hand back yourself
when 0
yield self
#block with arity of 1 or greater, hand back the result object
else
yield @result
end
end
@result
end
end
# The method for executing commands on the default host
#
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# shell 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# shell 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if shell('ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# shell('cat /etc/puppet/puppet.conf') do |result|
# assert_match result.stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def shell(command, opts = {}, &block)
on(default, command, opts, &block)
end
# @deprecated
# A proxy for the last {Beaker::Result#stdout} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stdout
return nil if @result.nil?
@result.stdout
end
# @deprecated
# A proxy for the last {Beaker::Result#stderr} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stderr
return nil if @result.nil?
@result.stderr
end
# @deprecated
# A proxy for the last {Beaker::Result#exit_code} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def exit_code
return nil if @result.nil?
@result.exit_code
end
# Move a file from a remote to a local path
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
# net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_from] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] from_path A remote path to a file.
# @param [String] to_path A local path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_from host, from_path, to_path, opts = {}
block_on host do | host |
@result = host.do_scp_from(from_path, to_path, opts)
@result.log logger
@result
end
end
# Move a local file to a remote host
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
# net-scp gem however is required (and specified in the gemspec).
# When using SCP with Windows, a path containing `cygpath` is now
# auto-expanded instead of failing or requiring the full path.
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_to}.
# @param [String] from_path A local path to a file.
# @param [String] to_path A remote path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_to host, from_path, to_path, opts = {}
block_on host do | host |
if host['platform'] =~ /windows/ && to_path.match('`cygpath')
result = on host, "echo #{to_path}"
to_path = result.raw_output.chomp
end
@result = host.do_scp_to(from_path, to_path, opts)
@result.log logger
@result
end
end
# Deploy packaging configurations generated by
# https://github.com/puppetlabs/packaging to a host.
#
# @note To ensure the repo configs are available for deployment,
# you should run `rake pl:jenkins:deb_repo_configs` and
# `rake pl:jenkins:rpm_repo_configs` on your project checkout
#
# @param [Host] host
# @param [String] path The path to the generated repository config
# files. ex: /myproject/pkg/repo_configs
# @param [String] name A human-readable name for the repository
# @param [String] version The version of the project, as used by the
# packaging tools. This can be determined with
# `rake pl:print_build_params` from the packaging
# repo.
def deploy_package_repo host, path, name, version
host.deploy_package_repo path, name, version
end
# Create a remote file out of a string
# @note This method uses Tempfile in Ruby's STDLIB as well as {#scp_to}.
#
# @param [Host, #do_scp_to] hosts One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] file_path A remote path to place *file_content* at.
# @param [String] file_content The contents of the file to be placed.
# @!macro common_opts
#
# @return [Result] Returns the result of the underlying SCP operation.
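#
# @example Illustrative usage sketch (added here; the path and content are hypothetical)
#   create_remote_file(hosts, '/etc/motd', "managed by beaker\n")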
def create_remote_file(hosts, file_path, file_content, opts = {})
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
scp_to hosts, tempfile.path, file_path, opts
end
end
# Create a temp directory on remote host owned by specified user.
#
# @param [Host] host A single remote host on which to create and adjust
# the ownership of a temp directory.
# @param [String] name A remote path prefix for the new temp
# directory. Default value is '/tmp/beaker'
# @param [String] user The name of the user that should own the temp
# directory. If no username is specified, use `puppet master
# --configprint user` to obtain the username from the master. Raise RuntimeError
# if this puppet command returns a non-zero exit code.
#
# @return [String] Returns the path of the newly-created temp directory.
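#
# @example Illustrative usage sketch (added here; assumes puppet is installed on the master)
#   dir = create_tmpdir_for_user(master, '/tmp/beaker')
#   on master, "ls -ld #{dir}"   # the directory is owned by the puppet user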
def create_tmpdir_for_user(host, name='/tmp/beaker', user=nil)
if not user
result = on host, puppet("master --configprint user")
if not result.exit_code == 0
raise "`puppet master --configprint` failed, check that puppet is installed on #{host} or explicitly pass in a user name."
end
user = result.stdout.strip
end
if not on(host, "getent passwd #{user}").exit_code == 0
raise "User #{user} does not exist on #{host}."
end
if defined? host.tmpdir
dir = host.tmpdir(name)
on host, "chown #{user}:#{user} #{dir}"
return dir
else
raise "Host platform not supported by `create_tmpdir_for_user`."
end
end
# Move a local script to a remote host and execute it
# @note this relies on {#on} and {#scp_to}
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] script A local path to find an executable script at.
# @!macro common_opts
# @param [Proc] block Additional tests to run after script has executed
#
# @return [Result] Returns the result of the underlying SCP operation.
def run_script_on(host, script, opts = {}, &block)
# this is unsafe as it uses File::SEPARATOR, which will be set to that
# of the coordinator node. This works for us because we use cygwin
# which will properly convert the paths. Otherwise this would not
# work for running tests on a windows machine when the coordinator
# that the harness is running on is *nix. We should use
# {Beaker::Host#temp_path} instead. TODO
remote_path = File.join("", "tmp", File.basename(script))
scp_to host, script, remote_path
on host, remote_path, opts, &block
end
# Move a local script to default host and execute it
# @see #run_script_on
def run_script(script, opts = {}, &block)
run_script_on(default, script, opts, &block)
end
# Limit the hosts a test case is run against
# @note This will modify the {Beaker::TestCase#hosts} member
# in place unless an array of hosts is passed into it and
# {Beaker::TestCase#logger} yielding an object that responds
# like {Beaker::Logger#warn}, as well as
# {Beaker::DSL::Outcomes#skip_test}, and optionally
# {Beaker::TestCase#hosts}.
#
# @param [Symbol] type The type of confinement to do. Valid parameters
# are *:to* to confine the hosts to only those that
# match *criteria* or *:except* to confine the test
# case to only those hosts that do not match
# criteria.
# @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}]
# criteria Specify the criteria with which a host should be
# considered for inclusion or exclusion. The key is any attribute
# of the host that will be yielded by {Beaker::Host#[]}.
# The value can be any string/regex or array of strings/regexp.
# The values are compared using [Enumerable#any?] so that if one
# value of an array matches the host is considered a match for that
# criteria.
# @param [Array<Host>] host_array This creatively named parameter is
# an optional array of hosts to confine to. If not passed in, this
# method will modify {Beaker::TestCase#hosts} in place.
# @param [Proc] block Additional checks to determine suitability of hosts
# for confinement. Each host that is still valid after checking
# *criteria* is then passed in turn into this block. The block
# should return true if the host matches this additional criteria.
#
# @example Basic usage to confine to debian OSes.
# confine :to, :platform => 'debian'
#
# @example Confining to anything but Windows and Solaris
# confine :except, :platform => ['windows', 'solaris']
#
# @example Using additional block to confine to Solaris global zone.
# confine :to, :platform => 'solaris' do |solaris|
# on( solaris, 'zonename' ) =~ /global/
# end
#
# @return [Array<Host>] Returns an array of hosts that are still valid
# targets for this tests case.
# @raise [SkipTest] Raises skip test if there are no valid hosts for
# this test case after confinement.
def confine(type, criteria, host_array = nil, &block)
hosts_to_modify = host_array || hosts
case type
when :except
hosts_to_modify = hosts_to_modify - select_hosts(criteria, hosts_to_modify, &block)
when :to
hosts_to_modify = select_hosts(criteria, hosts_to_modify, &block)
else
raise "Unknown option #{type}"
end
if hosts_to_modify.empty?
logger.warn "No suitable hosts with: #{criteria.inspect}"
skip_test 'No suitable hosts found'
end
self.hosts = hosts_to_modify
hosts_to_modify
end
# Ensures that host restrictions as specified by type, criteria and
# host_array are confined to activity within the passed block.
# TestCase#hosts is reset after block has executed.
#
# @see #confine
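#
# @example Illustrative usage sketch (added here): run a step only on debian hosts
#   confine_block :to, :platform => 'debian' do
#     on hosts, 'apt-get update'
#   end   # TestCase#hosts is restored afterwards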
def confine_block(type, criteria, host_array = nil, &block)
begin
original_hosts = self.hosts.dup
confine(type, criteria, host_array)
yield
ensure
self.hosts = original_hosts
end
end
#Return a set of hosts that meet the given criteria
# @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}]
# criteria Specify the criteria with which a host should be
# considered for inclusion. The key is any attribute
# of the host that will be yielded by {Beaker::Host#[]}.
# The value can be any string/regex or array of strings/regexp.
# The values are compared using [Enumerable#any?] so that if one
# value of an array matches the host is considered a match for that
# criteria.
# @param [Array<Host>] host_array This creatively named parameter is
# an optional array of hosts to select from. If not passed in, this
# method will select from {Beaker::TestCase#hosts}.
# @param [Proc] block Additional checks to determine suitability of hosts
# for selection. Each host that is still valid after checking
# *criteria* is then passed in turn into this block. The block
# should return true if the host matches this additional criteria.
#
# @return [Array<Host>] Returns an array of hosts that meet the provided criteria
def select_hosts(criteria, host_array = nil, &block)
hosts_to_select_from = host_array || hosts
criteria.each_pair do |property, value|
hosts_to_select_from = hosts_to_select_from.select do |host|
inspect_host host, property, value
end
end
if block_given?
hosts_to_select_from = hosts_to_select_from.select do |host|
yield host
end
end
hosts_to_select_from
end
# Return the name of the puppet user.
#
# @param [Host] host One object that acts like a Beaker::Host
#
# @note This method assumes puppet is installed on the host.
#
def puppet_user(host)
return host.puppet('master')['user']
end
# Return the name of the puppet group.
#
# @param [Host] host One object that acts like a Beaker::Host
#
# @note This method assumes puppet is installed on the host.
#
def puppet_group(host)
return host.puppet('master')['group']
end
# @!visibility private
def inspect_host(host, property, one_or_more_values)
values = Array(one_or_more_values)
return values.any? do |value|
true_false = false
case value
when String
true_false = host[property.to_s].include? value
when Regexp
true_false = host[property.to_s] =~ value
end
true_false
end
end
# Test Puppet running in a certain run mode with specific options.
# This ensures the following steps are performed:
# 1. The pre-test Puppet configuration is backed up
# 2. A new Puppet configuration file is laid down
# 3. Puppet is started or restarted in the specified run mode
# 4. Ensure Puppet has started correctly
# 5. Further tests are yielded to
# 6. Revert Puppet to the pre-test state
# 7. Testing artifacts are saved in a folder named for the test
#
# @param [Host] host One object that acts like Host
#
# @param [Hash{Symbol=>String}] conf_opts Represents puppet settings.
# Sections of the puppet.conf may be
# specified; if no section is specified,
# a puppet.conf file will be written with the
# options put in a section named after [mode]
# @option conf_opts [String] :__commandline_args__ A special setting for
# command_line arguments such as --debug or
# --logdest, which cannot be set in
# puppet.conf. For example:
#
# :__commandline_args__ => '--logdest /tmp/a.log'
#
# These will only be applied when starting a FOSS
# master, as a pe master is just bounced.
# @option conf_opts [Hash] :__service_args__ A special setting of options
# for controlling how the puppet master service is
# handled. The only setting currently is
# :bypass_service_script, which if set true will
# force stopping and starting a webrick master
# using the start_puppet_from_source_* methods,
# even if it seems the host has passenger.
# This is needed in FOSS tests to initialize
# SSL.
# @param [File] testdir The temporary directory which will hold backup
# configuration, and other test artifacts.
#
# @param [Block] block The point of this method, yields so
# tests may be run. After the block is finished
# puppet will revert to a previous state.
#
# @example A simple use case to ensure a master is running
# with_puppet_running_on( master ) do
# ...tests that require a master...
# end
#
# @example Fully utilizing the possibilities of config options
# with_puppet_running_on( master,
# :main => {:logdest => '/var/blah'},
# :master => {:masterlog => '/elswhere'},
# :agent => {:server => 'localhost'} ) do
#
# ...tests to be ran...
# end
#
# @api dsl
def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash)
cmdline_args = conf_opts[:__commandline_args__]
service_args = conf_opts[:__service_args__] || {}
conf_opts = conf_opts.reject { |k,v| [:__commandline_args__, :__service_args__].include?(k) }
curl_retries = host['master-start-curl-retries'] || options['master-start-curl-retries']
logger.debug "Setting curl retries to #{curl_retries}"
if options[:is_puppetserver]
confdir = host.puppet('master')['confdir']
vardir = host.puppet('master')['vardir']
if cmdline_args
split_args = cmdline_args.split()
split_args.each do |arg|
case arg
when /--confdir=(.*)/
confdir = $1
when /--vardir=(.*)/
vardir = $1
end
end
end
puppetserver_opts = { "jruby-puppet" => {
"master-conf-dir" => confdir,
"master-var-dir" => vardir,
}}
puppetserver_conf = File.join("#{host['puppetserver-confdir']}", "puppetserver.conf")
modify_tk_config(host, puppetserver_conf, puppetserver_opts)
end
begin
backup_file = backup_the_file(host, host['puppetconfdir'], testdir, 'puppet.conf')
lay_down_new_puppet_conf host, conf_opts, testdir
if host.use_service_scripts? && !service_args[:bypass_service_script]
bounce_service( host, host['puppetservice'], curl_retries )
else
puppet_master_started = start_puppet_from_source_on!( host, cmdline_args )
end
yield self if block_given?
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception)
ensure
begin
if host.use_service_scripts? && !service_args[:bypass_service_script]
restore_puppet_conf_from_backup( host, backup_file )
bounce_service( host, host['puppetservice'], curl_retries )
else
if puppet_master_started
stop_puppet_from_source_on( host )
else
dump_puppet_log(host)
end
restore_puppet_conf_from_backup( host, backup_file )
end
rescue Exception => teardown_exception
begin
if !host.is_pe?
dump_puppet_log(host)
end
rescue Exception => dumping_exception
logger.error("Raised during attempt to dump puppet logs: #{dumping_exception}")
end
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception
else
raise teardown_exception
end
end
end
end
# Test Puppet running in a certain run mode with specific options,
# on the default host
# @api dsl
# @see #with_puppet_running_on
def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
with_puppet_running_on(default, conf_opts, testdir, &block)
end
# @!visibility private
def restore_puppet_conf_from_backup( host, backup_file )
puppetpath = host['puppetconfdir']
puppet_conf = File.join(puppetpath, "puppet.conf")
if backup_file
host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " +
"cat '#{backup_file}' > " +
"'#{puppet_conf}'; " +
"rm -f '#{backup_file}'; " +
"fi" ) )
else
host.exec( Command.new( "rm -f '#{puppet_conf}'" ))
end
end
# Back up the given file in the current_dir to the new_dir
#
# @!visibility private
#
# @param host [Beaker::Host] The target host
# @param current_dir [String] The directory containing the file to back up
# @param new_dir [String] The directory to copy the file to
# @param filename [String] The file to back up. Defaults to 'puppet.conf'
#
# @return [String, nil] The path to the file if the file exists, nil if it
# doesn't exist.
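#
# @example Illustrative usage sketch (added here; the directories are hypothetical)
#   backup = backup_the_file(master, master['puppetconfdir'], testdir, 'puppet.conf')
#   # => "#{testdir}/puppet.conf.bak" when the file exists, nil otherwise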
def backup_the_file host, current_dir, new_dir, filename = 'puppet.conf'
old_location = current_dir + '/' + filename
new_location = new_dir + '/' + filename + '.bak'
if host.file_exist? old_location
host.exec( Command.new( "cp #{old_location} #{new_location}" ) )
return new_location
else
logger.warn "Could not backup file '#{old_location}': no such file"
nil
end
end
# @!visibility private
def start_puppet_from_source_on! host, args = ''
host.exec( puppet( 'master', args ) )
logger.debug 'Waiting for the puppet master to start'
unless port_open_within?( host, 8140, 10 )
raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion'
end
logger.debug 'The puppet master has started'
return true
end
# @!visibility private
def stop_puppet_from_source_on( host )
pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp
host.exec( Command.new( "kill #{pid}" ) )
Timeout.timeout(10) do
while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do
# until kill -0 finds no process and we know that puppet has finished cleaning up
sleep 1
end
end
end
# @!visibility private
def dump_puppet_log(host)
syslogfile = case host['platform']
when /fedora|centos|el|redhat|scientific/ then '/var/log/messages'
when /ubuntu|debian|cumulus/ then '/var/log/syslog'
else return
end
logger.notify "\n*************************"
logger.notify "* Dumping master log *"
logger.notify "*************************"
host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1])
logger.notify "*************************\n"
end
# @!visibility private
def lay_down_new_puppet_conf( host, configuration_options, testdir )
puppetconf_test = "#{testdir}/puppet.conf"
puppetconf_main = "#{host['puppetconfdir']}/puppet.conf"
new_conf = puppet_conf_for( host, configuration_options )
create_remote_file host, puppetconf_test, new_conf.to_s
host.exec(
Command.new( "cat #{puppetconf_test} > #{puppetconf_main}" ),
:silent => true
)
host.exec( Command.new( "cat #{puppetconf_main}" ) )
end
# @!visibility private
def puppet_conf_for host, conf_opts
puppetconf = host.exec( Command.new( "cat #{host['puppetconfdir']}/puppet.conf" ) ).stdout
new_conf = IniFile.new( puppetconf ).merge( conf_opts )
new_conf
end
# Modify the given TrapperKeeper config file.
#
# @param [Host] host A host object
# @param [OptionsHash] options_hash New hash which will be merged into
# the given TrapperKeeper config.
# @param [String] config_file_path Path to the TrapperKeeper config on
# the given host which is to be
# modified.
# @param [Bool] replace If set true, instead of updating the existing
# TrapperKeeper configuration, replace it entirely
# with the contents of the given hash.
#
# @note TrapperKeeper config files can be HOCON, JSON, or Ini. We don't
# particularly care which of these the file named by `config_file_path` on
# the SUT actually is, just that the contents can be parsed into a map.
#
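# @example Illustrative usage sketch (added here; the path and options are hypothetical)
#   opts = { 'jruby-puppet' => { 'max-active-instances' => 1 } }
#   modify_tk_config(master, '/etc/puppetserver/conf.d/puppetserver.conf', opts)
#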
def modify_tk_config(host, config_file_path, options_hash, replace=false)
if options_hash.empty?
return nil
end
new_hash = Beaker::Options::OptionsHash.new
if replace
new_hash.merge!(options_hash)
else
if not host.file_exist?( config_file_path )
raise "Error: #{config_file_path} does not exist on #{host}"
end
file_string = host.exec( Command.new( "cat #{config_file_path}" )).stdout
begin
tk_conf_hash = read_tk_config_string(file_string)
rescue RuntimeError
raise "Error reading trapperkeeper config: #{config_file_path} at host: #{host}"
end
new_hash.merge!(tk_conf_hash)
new_hash.merge!(options_hash)
end
file_string = JSON.dump(new_hash)
create_remote_file host, config_file_path, file_string
end
# The Trapperkeeper config service will accept HOCON (aka typesafe), JSON,
# or Ini configuration files which means we need to safely handle the
# exceptions that might come from parsing the given string with the wrong
# parser and fall back to the next valid parser in turn. We finally raise
# a RuntimeError if none of the parsers succeed.
#
# @!visibility private
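#
# @example Illustrative usage sketch (added here; the strings are hypothetical)
#   read_tk_config_string('{"webserver": {"port": 8140}}')   # parsed as HOCON/JSON
#   read_tk_config_string("[webserver]\nport = 8140")        # falls back to IniFile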
def read_tk_config_string( string )
begin
return Hocon.parse(string)
rescue Hocon::ConfigError
nil
end
begin
return JSON.parse(string)
rescue JSON::JSONError
nil
end
begin
return IniFile.new(string)
rescue IniFile::Error
nil
end
raise "Failed to read TrapperKeeper config!"
end
# @!visibility private
def bounce_service host, service, curl_retries = 120
if host.graceful_restarts?
apachectl_path = host.is_pe? ? "#{host['puppetsbindir']}/apache2ctl" : 'apache2ctl'
host.exec(Command.new("#{apachectl_path} graceful"))
else
host.exec puppet_resource('service', service, 'ensure=stopped')
host.exec puppet_resource('service', service, 'ensure=running')
end
curl_with_retries(" #{service} ", host, "https://localhost:8140", [35, 60], curl_retries)
end
# Blocks until the port is open on the host specified, returns false
# on failure
def port_open_within?( host, port = 8140, seconds = 120 )
repeat_for( seconds ) do
host.port_open?( port )
end
end
# Runs 'puppet apply' on a remote host, piping manifest through stdin
#
# @param [Host] host The host that this command should be run on
#
# @param [String] manifest The puppet manifest to apply
#
# @!macro common_opts
# @option opts [Boolean] :parseonly (false) If this key is true, the
# "--parseonly" command line parameter will
# be passed to the 'puppet apply' command.
#
# @option opts [Boolean] :trace (false) If this key exists in the Hash,
# the "--trace" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit
# codes that will NOT raise an error when found upon
# command completion. If provided, these values will
# be combined with those used in :catch_failures and
# :expect_failures to create the full list of
# passing exit codes.
#
# @option opts [Hash] :environment Additional environment variables to be
# passed to the 'puppet apply' command
#
# @option opts [Boolean] :catch_failures (false) By default `puppet
# --apply` will exit with 0, which does not count
# as a test failure, even if there were errors or
# changes when applying the manifest. This option
# enables detailed exit codes and causes a test
# failure if `puppet --apply` indicates there was
# a failure during its execution.
#
# @option opts [Boolean] :catch_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# changes or failures during its execution.
#
# @option opts [Boolean] :expect_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# no resource changes during its execution.
#
# @option opts [Boolean] :expect_failures (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates there were no
# failures during its execution.
#
# @option opts [Boolean] :future_parser (false) This option enables
# the future parser option that is available
# from Puppet version 3.2
# By default it will use the 'current' parser.
#
# @option opts [Boolean] :noop (false) If this option exists, the
# the "--noop" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [String] :modulepath The search path for modules, as
# a list of directories separated by the system
# path separator character. (The POSIX path separator
# is ‘:’, and the Windows path separator is ‘;’.)
#
# @option opts [String] :debug (false) If this option exists,
# the "--debug" command line parameter
# will be passed to the 'puppet apply' command.
#
# @param [Block] block This method will yield to a block of code passed
# by the caller; this can be used for additional
# validation, etc.
#
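# @example Illustrative usage sketch (added here; the manifest is hypothetical)
#   apply_manifest_on(agents, 'notify { "hello world": }', :catch_failures => true)
#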
def apply_manifest_on(host, manifest, opts = {}, &block)
block_on host do | host |
on_options = {}
on_options[:acceptable_exit_codes] = Array(opts[:acceptable_exit_codes])
puppet_apply_opts = {}
if opts[:debug]
puppet_apply_opts[:debug] = nil
else
puppet_apply_opts[:verbose] = nil
end
puppet_apply_opts[:parseonly] = nil if opts[:parseonly]
puppet_apply_opts[:trace] = nil if opts[:trace]
puppet_apply_opts[:parser] = 'future' if opts[:future_parser]
puppet_apply_opts[:modulepath] = opts[:modulepath] if opts[:modulepath]
puppet_apply_opts[:noop] = nil if opts[:noop]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of
# '4' means there were failures during the transaction, and an exit
# code of '6' means there were both changes and failures."
if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures],opts[:expect_changes]].compact.length > 1
raise(ArgumentError,
'Cannot specify more than one of `catch_failures`, ' +
'`catch_changes`, `expect_failures`, or `expect_changes` ' +
'for a single manifest')
end
if opts[:catch_changes]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after idempotency so allow exit code 0 only.
on_options[:acceptable_exit_codes] |= [0]
elsif opts[:catch_failures]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after only complete success so allow exit codes 0 and 2 only.
on_options[:acceptable_exit_codes] |= [0, 2]
elsif opts[:expect_failures]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after failures specifically so allow exit codes 1, 4, and 6 only.
on_options[:acceptable_exit_codes] |= [1, 4, 6]
elsif opts[:expect_changes]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after changes specifically so allow exit code 2 only.
on_options[:acceptable_exit_codes] |= [2]
else
# Either use the provided acceptable_exit_codes or default to [0]
on_options[:acceptable_exit_codes] |= [0]
end
# Not really thrilled with this implementation, might want to improve it
# later. Basically, there is a magic trick in the constructor of
# PuppetCommand which allows you to pass in a Hash for the last value in
# the *args Array; if you do so, it will be treated specially. So, here
# we check to see if our caller passed us a hash of environment variables
# that they want to set for the puppet command. If so, we set the final
# value of *args to a new hash with just one entry (the value of which
# is our environment variables hash)
if opts.has_key?(:environment)
puppet_apply_opts['ENV'] = opts[:environment]
end
file_path = host.tmpfile('apply_manifest.pp')
create_remote_file(host, file_path, manifest + "\n")
if host[:default_apply_opts].respond_to? :merge
puppet_apply_opts = host[:default_apply_opts].merge( puppet_apply_opts )
end
on host, puppet('apply', file_path, puppet_apply_opts), on_options, &block
end
end
# Runs 'puppet apply' on default host, piping manifest through stdin
# @see #apply_manifest_on
def apply_manifest(manifest, opts = {}, &block)
apply_manifest_on(default, manifest, opts, &block)
end
# @deprecated
def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test',
options={}, &block)
block_on host do | host |
on host, puppet_agent(arg), options, &block
end
end
# FIX: this should be moved into host/platform
# @visibility private
def run_cron_on(host, action, user, entry="", &block)
block_on host do | host |
platform = host['platform']
if platform.include?('solaris') || platform.include?('aix') then
case action
when :list then args = '-l'
when :remove then args = '-r'
when :add
on( host,
"echo '#{entry}' > /var/spool/cron/crontabs/#{user}",
&block )
end
else # default for GNU/Linux platforms
case action
when :list then args = '-l -u'
when :remove then args = '-r -u'
when :add
on( host,
"echo '#{entry}' > /tmp/#{user}.cron && " +
"crontab -u #{user} /tmp/#{user}.cron",
&block )
end
end
if args
case action
when :list, :remove then on(host, "crontab #{args} #{user}", &block)
end
end
end
end
# This method, using the puppet resource 'host', will set up host aliases
# and register the removal of those aliases via Beaker::TestCase#teardown
#
# A teardown step is also added to make sure the host stubs are
# always removed.
#
# @param [Host, Array<Host>, String, Symbol] machine One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1')
def stub_hosts_on(machine, ip_spec)
block_on machine do | host |
ip_spec.each do |address, ip|
logger.notify("Stubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=present', "ip=#{ip}") )
end
teardown do
ip_spec.each do |address, ip|
logger.notify("Unstubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=absent') )
end
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# set up host aliases before and after that block.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# with_host_stubbed_on(master, 'forgeapi.puppetlabs.com' => '127.0.0.1') do
# puppet( "module install puppetlabs-stdlib" )
# end
def with_host_stubbed_on(host, ip_spec, &block)
begin
block_on host do |host|
ip_spec.each_pair do |address, ip|
logger.notify("Stubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=present', "ip=#{ip}") )
end
end
block.call
ensure
ip_spec.each do |address, ip|
logger.notify("Unstubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=absent') )
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# set up host aliases before and after that block on the default host
#
# @example Stub puppetlabs.com on the default host to 127.0.0.1
# stub_hosts('puppetlabs.com' => '127.0.0.1')
# @see #stub_hosts_on
def stub_hosts(ip_spec)
stub_hosts_on(default, ip_spec)
end
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param machine [String] the host to perform the stub on
# @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the
# global options hash
def stub_forge_on(machine, forge_host = nil)
#use global options hash
forge_host ||= options[:forge_host]
@forge_ip ||= Resolv.getaddress(forge_host)
block_on machine do | host |
stub_hosts_on(host, 'forge.puppetlabs.com' => @forge_ip)
stub_hosts_on(host, 'forgeapi.puppetlabs.com' => @forge_ip)
end
end
# This wraps the method `with_host_stubbed_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param host [String] the host to perform the stub on
# @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the
# global options hash
def with_forge_stubbed_on( host, forge_host = nil, &block )
#use global options hash
forge_host ||= options[:forge_host]
@forge_ip ||= Resolv.getaddress(forge_host)
with_host_stubbed_on( host,
{'forge.puppetlabs.com' => @forge_ip,
'forgeapi.puppetlabs.com' => @forge_ip},
&block )
end
# This wraps `with_forge_stubbed_on` and provides it the default host
# @see with_forge_stubbed_on
def with_forge_stubbed( forge_host = nil, &block )
with_forge_stubbed_on( default, forge_host, &block )
end
# This wraps the method `stub_hosts` and makes the stub specific to
# the forge alias.
#
# @see #stub_forge_on
def stub_forge(forge_host = nil)
#use global options hash
forge_host ||= options[:forge_host]
stub_forge_on(default, forge_host)
end
def sleep_until_puppetdb_started(host)
curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120)
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:8081", [35, 60])
end
def sleep_until_puppetserver_started(host)
curl_with_retries("start puppetserver (ssl)",
host, "https://#{host.node_name}:8140", [35, 60])
end
def sleep_until_nc_started(host)
curl_with_retries("start nodeclassifier (ssl)",
host, "https://#{host.node_name}:4433", [35, 60])
end
def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1)
opts = {
:desired_exit_codes => desired_exit_codes,
:max_retries => max_retries,
:retry_interval => retry_interval
}
retry_on(host, "curl -m 1 #{url}", opts)
end
# This command will execute repeatedly until it succeeds or runs out of retries and fails with an error
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] command The command to execute on *host*.
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @param [Proc] block Additional actions or assertions.
#
# @option opts [Array<Fixnum>, Fixnum] :desired_exit_codes (0) An array
# or integer exit code(s) that should be considered
# acceptable. An error will be thrown if the exit code never
# matches one of the values in this list.
# @option opts [Fixnum] :max_retries (60) number of times the
# command will be tried before failing
# @option opts [Float] :retry_interval (1) number of seconds
# that we'll wait between tries
# @option opts [Boolean] :verbose (false)
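#
# @example Illustrative usage sketch (added here; the command is hypothetical)
#   retry_on(master, 'curl -m 1 https://localhost:8140',
#            :max_retries => 30, :retry_interval => 2)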
def retry_on(host, command, opts = {}, &block)
option_exit_codes = opts[:desired_exit_codes]
option_max_retries = opts[:max_retries].to_i
option_retry_interval = opts[:retry_interval].to_f
desired_exit_codes = option_exit_codes ? [option_exit_codes].flatten : [0]
desired_exit_codes = [0] if desired_exit_codes.empty?
max_retries = option_max_retries == 0 ? 60 : option_max_retries # nil & "" both return 0
retry_interval = option_retry_interval == 0 ? 1 : option_retry_interval
verbose = true.to_s == opts[:verbose]
log_prefix = host.log_prefix
logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{command}"
logger.debug " Trying command #{max_retries} times."
logger.debug ".", add_newline=false
result = on host, command, {:acceptable_exit_codes => (0...127), :silent => !verbose}, &block
num_retries = 0
until desired_exit_codes.include?(result.exit_code)
sleep retry_interval
result = on host, command, {:acceptable_exit_codes => (0...127), :silent => !verbose}, &block
num_retries += 1
logger.debug ".", add_newline=false
if (num_retries > max_retries)
logger.debug " Command \`#{command}\` failed."
fail("Command \`#{command}\` failed.")
end
end
logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{command} ostensibly successful."
result
end
#Is semver-ish version a less than semver-ish version b
#@param [String] a A version of the form '\d.\d.\d.*'
#@param [String] b A version of the form '\d.\d.\d.*'
#@return [Boolean] true if a is less than b, otherwise return false
#
#@note 3.0.0-160-gac44cfb is greater than 3.0.0, and 2.8.2
#@note -rc being less than final builds is not yet implemented.
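#
# @example Illustrative usage sketch (added here)
#   version_is_less('3.0.0', '3.0.0-160-gac44cfb') # => true
#   version_is_less('3.0.0', '2.8.2')              # => false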
def version_is_less a, b
a_nums = a.split('-')[0].split('.')
b_nums = b.split('-')[0].split('.')
(0...a_nums.length).each do |i|
if i < b_nums.length
if a_nums[i] < b_nums[i]
return true
elsif a_nums[i] > b_nums[i]
return false
end
else
return false
end
end
#checks all dots, they are equal so examine the rest
a_rest = a.split('-', 2)[1]
b_rest = b.split('-', 2)[1]
if a_rest and b_rest and a_rest < b_rest
return false
elsif a_rest and not b_rest
return false
elsif not a_rest and b_rest
return true
end
return false
end
#stops the puppet agent running on the host
# @param [Host, Array<Host>, String, Symbol] agent One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
def stop_agent_on(agent)
block_on agent do | host |
vardir = agent.puppet['vardir']
agent_running = true
while agent_running
result = on host, "[ -e '#{vardir}/state/agent_catalog_run.lock' ]", :acceptable_exit_codes => [0,1]
agent_running = (result.exit_code == 0)
sleep 2 unless agent_running
end
# The agent service is `pe-puppet` everywhere EXCEPT certain linux distros on PE 2.8
# In all the case that it is different, this init script will exist. So we can assume
# that if the script doesn't exist, we should just use `pe-puppet`
result = on agent, "[ -e /etc/init.d/pe-puppet-agent ]", :acceptable_exit_codes => [0,1]
agent_service = (result.exit_code == 0) ? 'pe-puppet-agent' : 'pe-puppet'
# Under a number of stupid circumstances, we can't stop the
# agent using puppet. This is usually because of issues with
# the init script or system on that particular configuration.
avoid_puppet_at_all_costs = false
avoid_puppet_at_all_costs ||= agent['platform'] =~ /el-4/
avoid_puppet_at_all_costs ||= agent['pe_ver'] && version_is_less(agent['pe_ver'], '3.2') && agent['platform'] =~ /sles/
if avoid_puppet_at_all_costs
# When upgrading, puppet is already stopped. On EL4, this causes an exit code of '1'
on agent, "/etc/init.d/#{agent_service} stop", :acceptable_exit_codes => [0, 1]
else
on agent, puppet_resource('service', agent_service, 'ensure=stopped')
end
end
end
#stops the puppet agent running on the default host
# @see #stop_agent_on
def stop_agent
stop_agent_on(default)
end
#wait for a given host to appear in the dashboard
def wait_for_host_in_dashboard(host)
hostname = host.node_name
retry_on(dashboard, "! curl --tlsv1 -k -I https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'")
end
# Ensure the host has requested a cert, then sign it
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
#
# @return nil
# @raise [FailTest] if process times out
def sign_certificate_for(host)
block_on host do | host |
if [master, dashboard, database].include? host
on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2]
on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24]
else
hostname = Regexp.escape host.node_name
last_sleep = 0
next_sleep = 1
(0..10).each do |i|
fail_test("Failed to sign cert for #{hostname}") if i == 10
on master, puppet("cert --sign --all --allow-dns-alt-names"), :acceptable_exit_codes => [0,24]
break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/
sleep next_sleep
(last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep
end
end
end
end
#prompt the master to sign certs then check to confirm the cert for the default host is signed
#@see #sign_certificate_for
def sign_certificate
sign_certificate_for(default)
end
# Get a facter fact from a provided host
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String] name The name of the fact to query for
# @!macro common_opts
#
# @return String The value of the fact 'name' on the provided host
# @raise [FailTest] Raises an exception if call to facter fails
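#
# @example Illustrative usage sketch (added here; the fact names are hypothetical)
#   fact_on(master, 'osfamily')      # => e.g. "RedHat"
#   fact_on(agents, 'kernelrelease') # => an Array with one value per host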
def fact_on(host, name, opts = {})
result = on host, facter(name, opts)
if result.kind_of?(Array)
result.map { |res| res.stdout.chomp }
else
result.stdout.chomp
end
end
# Get a facter fact from the default host
# @see #fact_on
def fact(name, opts = {})
fact_on(default, name, opts)
end
#Run a curl command on the provided host(s)
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] cmd The curl command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
def curl_on(host, cmd, opts = {}, &block)
if options.is_pe? #check global options hash
on host, "curl --tlsv1 %s" % cmd, opts, &block
else
on host, "curl %s" % cmd, opts, &block
end
end
# Write hiera config file on one or more provided hosts
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [Array] hierarchy One or more hierarchy paths
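#
# @example Illustrative usage sketch (added here; the hierarchy is hypothetical)
#   write_hiera_config_on(master, ['common', 'osfamily/%{::osfamily}'])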
def write_hiera_config_on(host, hierarchy)
block_on host do |host|
hiera_config=Hash.new
hiera_config[:backends] = 'yaml'
hiera_config[:yaml] = {}
hiera_config[:yaml][:datadir] = host[:hieradatadir]
hiera_config[:hierarchy] = hierarchy
hiera_config[:logger] = 'console'
create_remote_file host, host[:hieraconf], hiera_config.to_yaml
end
end
# Write hiera config file for the default host
# @see #write_hiera_config_on
def write_hiera_config(hierarchy)
write_hiera_config_on(default, hierarchy)
end
# Copy hiera data files to one or more provided hosts
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String] source Directory containing the hiera data files.
def copy_hiera_data_to(host, source)
scp_to host, File.expand_path(source), host[:hieradatadir]
end
# Copy hiera data files to the default host
# @see #copy_hiera_data_to
def copy_hiera_data(source)
copy_hiera_data_to(default, source)
end
end
end
end
| 1 | 8,829 | I believe that you only need to rescue Beaker::DSL::Assertions, as they include Minitest::Assertions. | voxpupuli-beaker | rb |
@@ -372,6 +372,7 @@ namespace OpenTelemetry.Trace
private void RunGetRequestedDataAlwaysOffSampler(Activity activity)
{
activity.IsAllDataRequested = false;
+ activity.ActivityTraceFlags &= ActivityTraceFlags.None;
}
private void RunGetRequestedDataOtherSampler(Activity activity) | 1 | // <copyright file="TracerProviderSdk.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text.RegularExpressions;
using OpenTelemetry.Internal;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Trace
{
internal class TracerProviderSdk : TracerProvider
{
internal int ShutdownCount;
private readonly List<object> instrumentations = new List<object>();
private readonly ActivityListener listener;
private readonly Sampler sampler;
private readonly Dictionary<string, bool> legacyActivityOperationNames;
private BaseProcessor<Activity> processor;
private Action<Activity> getRequestedDataAction;
private bool supportLegacyActivity;
internal TracerProviderSdk(
Resource resource,
IEnumerable<string> sources,
IEnumerable<TracerProviderBuilderSdk.InstrumentationFactory> instrumentationFactories,
Sampler sampler,
List<BaseProcessor<Activity>> processors,
Dictionary<string, bool> legacyActivityOperationNames)
{
this.Resource = resource;
this.sampler = sampler;
this.legacyActivityOperationNames = legacyActivityOperationNames;
this.supportLegacyActivity = legacyActivityOperationNames.Count > 0;
foreach (var processor in processors)
{
this.AddProcessor(processor);
}
if (instrumentationFactories.Any())
{
foreach (var instrumentationFactory in instrumentationFactories)
{
this.instrumentations.Add(instrumentationFactory.Factory());
}
}
var listener = new ActivityListener
{
// Callback when Activity is started.
ActivityStarted = (activity) =>
{
OpenTelemetrySdkEventSource.Log.ActivityStarted(activity);
if (this.supportLegacyActivity && string.IsNullOrEmpty(activity.Source.Name))
{
// We have a legacy activity in hand now
if (legacyActivityOperationNames.ContainsKey(activity.OperationName))
{
// Legacy activity matches the user configured list.
// Call sampler for the legacy activity
// unless suppressed.
if (!Sdk.SuppressInstrumentation)
{
this.getRequestedDataAction(activity);
}
else
{
activity.IsAllDataRequested = false;
}
}
else
{
// Legacy activity doesn't match the user configured list. No need to proceed further.
return;
}
}
if (!activity.IsAllDataRequested)
{
return;
}
if (SuppressInstrumentationScope.IncrementIfTriggered() == 0)
{
this.processor?.OnStart(activity);
}
},
// Callback when Activity is stopped.
ActivityStopped = (activity) =>
{
OpenTelemetrySdkEventSource.Log.ActivityStopped(activity);
if (this.supportLegacyActivity && string.IsNullOrEmpty(activity.Source.Name))
{
// We have a legacy activity in hand now
if (!legacyActivityOperationNames.ContainsKey(activity.OperationName))
{
// Legacy activity doesn't match the user configured list. No need to proceed further.
return;
}
}
if (!activity.IsAllDataRequested)
{
return;
}
// Spec says IsRecording must be false once span ends.
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#isrecording
// However, Activity has slightly different semantic
// than Span and we don't have strong reason to do this
// now, as Activity anyway allows read/write always.
// Intentionally commenting the following line.
// activity.IsAllDataRequested = false;
if (SuppressInstrumentationScope.DecrementIfTriggered() == 0)
{
this.processor?.OnEnd(activity);
}
},
};
if (sampler is AlwaysOnSampler)
{
listener.Sample = (ref ActivityCreationOptions<ActivityContext> options) =>
!Sdk.SuppressInstrumentation ? ActivitySamplingResult.AllDataAndRecorded : ActivitySamplingResult.None;
this.getRequestedDataAction = this.RunGetRequestedDataAlwaysOnSampler;
}
else if (sampler is AlwaysOffSampler)
{
listener.Sample = (ref ActivityCreationOptions<ActivityContext> options) =>
!Sdk.SuppressInstrumentation ? PropagateOrIgnoreData(options.Parent.TraceId) : ActivitySamplingResult.None;
this.getRequestedDataAction = this.RunGetRequestedDataAlwaysOffSampler;
}
else
{
// This delegate informs ActivitySource about sampling decision when the parent context is an ActivityContext.
listener.Sample = (ref ActivityCreationOptions<ActivityContext> options) =>
!Sdk.SuppressInstrumentation ? ComputeActivitySamplingResult(options, sampler) : ActivitySamplingResult.None;
this.getRequestedDataAction = this.RunGetRequestedDataOtherSampler;
}
if (sources.Any())
{
                // Sources can be null. This happens when the user
                // is only interested in InstrumentationLibraries
                // which do not depend on ActivitySources.
var wildcardMode = false;
// Validation of source name is already done in builder.
foreach (var name in sources)
{
if (name.Contains('*'))
{
wildcardMode = true;
}
}
if (wildcardMode)
{
var pattern = "^(" + string.Join("|", from name in sources select '(' + Regex.Escape(name).Replace("\\*", ".*") + ')') + ")$";
var regex = new Regex(pattern, RegexOptions.Compiled | RegexOptions.IgnoreCase);
// Function which takes ActivitySource and returns true/false to indicate if it should be subscribed to
// or not.
listener.ShouldListenTo = (activitySource) =>
this.supportLegacyActivity ?
string.IsNullOrEmpty(activitySource.Name) || regex.IsMatch(activitySource.Name) :
regex.IsMatch(activitySource.Name);
}
else
{
var activitySources = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase);
foreach (var name in sources)
{
activitySources[name] = true;
}
if (this.supportLegacyActivity)
{
activitySources[string.Empty] = true;
}
// Function which takes ActivitySource and returns true/false to indicate if it should be subscribed to
// or not.
listener.ShouldListenTo = (activitySource) => activitySources.ContainsKey(activitySource.Name);
}
}
else
{
if (this.supportLegacyActivity)
{
listener.ShouldListenTo = (activitySource) => string.IsNullOrEmpty(activitySource.Name);
}
}
ActivitySource.AddActivityListener(listener);
this.listener = listener;
}
internal Resource Resource { get; }
internal TracerProviderSdk AddProcessor(BaseProcessor<Activity> processor)
{
if (processor == null)
{
throw new ArgumentNullException(nameof(processor));
}
processor.SetParentProvider(this);
if (this.processor == null)
{
this.processor = processor;
}
else if (this.processor is CompositeProcessor<Activity> compositeProcessor)
{
compositeProcessor.AddProcessor(processor);
}
else
{
this.processor = new CompositeProcessor<Activity>(new[]
{
this.processor,
processor,
});
}
return this;
}
internal bool OnForceFlush(int timeoutMilliseconds)
{
return this.processor?.ForceFlush(timeoutMilliseconds) ?? true;
}
/// <summary>
/// Called by <c>Shutdown</c>. This function should block the current
/// thread until shutdown completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number of milliseconds to wait, or <c>Timeout.Infinite</c> to
/// wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when shutdown succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>Shutdown</c>. This function should not throw
/// exceptions.
/// </remarks>
internal bool OnShutdown(int timeoutMilliseconds)
{
            // TODO: Put OnShutdown logic in a task to run within the user-provided timeoutMilliseconds
bool? result;
if (this.instrumentations != null)
{
foreach (var item in this.instrumentations)
{
(item as IDisposable)?.Dispose();
}
this.instrumentations.Clear();
}
result = this.processor?.Shutdown(timeoutMilliseconds);
this.listener?.Dispose();
return result ?? true;
}
protected override void Dispose(bool disposing)
{
if (this.instrumentations != null)
{
foreach (var item in this.instrumentations)
{
(item as IDisposable)?.Dispose();
}
this.instrumentations.Clear();
}
(this.sampler as IDisposable)?.Dispose();
// Wait for up to 5 seconds grace period
this.processor?.Shutdown(5000);
this.processor?.Dispose();
// Shutdown the listener last so that anything created while instrumentation cleans up will still be processed.
// Redis instrumentation, for example, flushes during dispose which creates Activity objects for any profiling
// sessions that were open.
this.listener?.Dispose();
base.Dispose(disposing);
}
private static ActivitySamplingResult ComputeActivitySamplingResult(
in ActivityCreationOptions<ActivityContext> options,
Sampler sampler)
{
var samplingParameters = new SamplingParameters(
options.Parent,
options.TraceId,
options.Name,
options.Kind,
options.Tags,
options.Links);
var shouldSample = sampler.ShouldSample(samplingParameters);
var activitySamplingResult = shouldSample.Decision switch
{
SamplingDecision.RecordAndSample => ActivitySamplingResult.AllDataAndRecorded,
SamplingDecision.RecordOnly => ActivitySamplingResult.AllData,
_ => ActivitySamplingResult.PropagationData
};
if (activitySamplingResult != ActivitySamplingResult.PropagationData)
{
foreach (var att in shouldSample.Attributes)
{
options.SamplingTags.Add(att.Key, att.Value);
}
return activitySamplingResult;
}
return PropagateOrIgnoreData(options.Parent.TraceId);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static ActivitySamplingResult PropagateOrIgnoreData(ActivityTraceId traceId)
{
var isRootSpan = traceId == default;
// If it is the root span select PropagationData so the trace ID is preserved
// even if no activity of the trace is recorded (sampled per OpenTelemetry parlance).
return isRootSpan
? ActivitySamplingResult.PropagationData
: ActivitySamplingResult.None;
}
private void RunGetRequestedDataAlwaysOnSampler(Activity activity)
{
activity.IsAllDataRequested = true;
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
}
private void RunGetRequestedDataAlwaysOffSampler(Activity activity)
{
activity.IsAllDataRequested = false;
}
private void RunGetRequestedDataOtherSampler(Activity activity)
{
ActivityContext parentContext;
            // Checking activity.ParentId alone is normally sufficient to determine whether an activity is a root. But if one uses activity.SetParentId to override the TraceId (without intending to set an actual parent), then an additional check that ParentSpanId is empty is required to confirm whether an activity is a root.
            // This check can be removed once Activity exposes an API to customize ID generation (https://github.com/dotnet/runtime/issues/46704) or issue https://github.com/dotnet/runtime/issues/46706 is addressed.
if (string.IsNullOrEmpty(activity.ParentId) || activity.ParentSpanId.ToHexString() == "0000000000000000")
{
parentContext = default;
}
else if (activity.Parent != null)
{
parentContext = activity.Parent.Context;
}
else
{
parentContext = new ActivityContext(
activity.TraceId,
activity.ParentSpanId,
activity.ActivityTraceFlags,
activity.TraceStateString,
isRemote: true);
}
var samplingParameters = new SamplingParameters(
parentContext,
activity.TraceId,
activity.DisplayName,
activity.Kind,
activity.TagObjects,
activity.Links);
var samplingResult = this.sampler.ShouldSample(samplingParameters);
switch (samplingResult.Decision)
{
case SamplingDecision.Drop:
activity.IsAllDataRequested = false;
break;
case SamplingDecision.RecordOnly:
activity.IsAllDataRequested = true;
break;
case SamplingDecision.RecordAndSample:
activity.IsAllDataRequested = true;
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
break;
}
if (samplingResult.Decision != SamplingDecision.Drop)
{
foreach (var att in samplingResult.Attributes)
{
activity.SetTag(att.Key, att.Value);
}
}
}
}
}
| 1 | 19,750 | Do we need `&=` or `=` is sufficient? | open-telemetry-opentelemetry-dotnet | .cs |
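For the question above: ActivityTraceFlags.None is the zero value of the flags enum, so AND-ing with it clears every bit and ends in exactly the same state as a plain assignment; `&=` differs only in the intent it signals (masking rather than overwriting). A minimal stand-alone sketch, separate from the PR itself, that demonstrates the equivalence:

using System;
using System.Diagnostics;

class TraceFlagsDemo
{
    static void Main()
    {
        // Both start with the Recorded bit set, as a sampled activity would have.
        var viaAnd = ActivityTraceFlags.Recorded;
        var viaAssign = ActivityTraceFlags.Recorded;

        // ActivityTraceFlags.None == 0, so AND-ing with it clears every bit...
        viaAnd &= ActivityTraceFlags.None;

        // ...which leaves the same end state as assigning None directly.
        viaAssign = ActivityTraceFlags.None;

        Console.WriteLine(viaAnd == viaAssign);               // True
        Console.WriteLine(viaAnd == ActivityTraceFlags.None); // True
    }
}

Either spelling would therefore behave the same in RunGetRequestedDataAlwaysOffSampler; the `&=` form simply mirrors the `|= ActivityTraceFlags.Recorded` used on the recording path.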
@@ -161,8 +161,8 @@ $settings['trusted_host_patterns'] = ['.*'];
$settings['class_loader_auto_detect'] = FALSE;
// This specifies the default configuration sync directory.
-if (empty($config_directories[CONFIG_SYNC_DIRECTORY])) {
- $config_directories[CONFIG_SYNC_DIRECTORY] = '{{ joinPath $config.SitePath $config.SyncDir }}';
+if (empty($settings['config_sync_directory'])) {
+ $settings['config_sync_directory'] = '{{ joinPath $config.SitePath $config.SyncDir }}';
}
`
)
| 1 | package ddevapp
import (
"fmt"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/nodeps"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
"io/ioutil"
"os"
"path"
"path/filepath"
"text/template"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/archive"
)
// DrupalSettings encapsulates all the configurations for a Drupal site.
type DrupalSettings struct {
DeployName string
DeployURL string
DatabaseName string
DatabaseUsername string
DatabasePassword string
DatabaseHost string
DatabaseDriver string
DatabasePort string
DatabasePrefix string
HashSalt string
Signature string
SitePath string
SiteSettings string
SiteSettingsDdev string
SyncDir string
DockerIP string
DBPublishedPort int
}
// NewDrupalSettings produces a DrupalSettings object with default.
func NewDrupalSettings(app *DdevApp) *DrupalSettings {
dockerIP, _ := dockerutil.GetDockerIP()
dbPublishedPort, _ := app.GetPublishedPort("db")
return &DrupalSettings{
DatabaseName: "db",
DatabaseUsername: "db",
DatabasePassword: "db",
DatabaseHost: "db",
DatabaseDriver: "mysql",
DatabasePort: GetPort("db"),
DatabasePrefix: "",
HashSalt: util.RandString(64),
Signature: DdevFileSignature,
SitePath: path.Join("sites", "default"),
SiteSettings: "settings.php",
SiteSettingsDdev: "settings.ddev.php",
SyncDir: path.Join("files", "sync"),
DockerIP: dockerIP,
DBPublishedPort: dbPublishedPort,
}
}
// drupal8SettingsTemplate defines the template that will become a Drupal 8 app's settings.php
// in the event that one does not already exist.
const drupal8SettingsTemplate = `<?php
{{ $config := . }}
// {{ $config.Signature }}: Automatically generated Drupal settings file.
if (file_exists($app_root . '/' . $site_path . '/{{ $config.SiteSettingsDdev }}')) {
include $app_root . '/' . $site_path . '/{{ $config.SiteSettingsDdev }}';
}
`
// drupal8SettingsAppendTemplate defines the template that will be appended to
// a Drupal 8 app's settings.php in the event that one exists.
const drupal8SettingsAppendTemplate = `{{ $config := . }}
// Automatically generated include for settings managed by ddev.
if (file_exists($app_root . '/' . $site_path . '/{{ $config.SiteSettingsDdev }}')) {
include $app_root . '/' . $site_path . '/{{ $config.SiteSettingsDdev }}';
}
`
// drupal7SettingsTemplate defines the template that will become a Drupal 7
// app's settings.php in the event that one does not already exist.
const drupal7SettingsTemplate = `<?php
{{ $config := . }}
// {{ $config.Signature }}: Automatically generated Drupal settings file.
$ddev_settings = dirname(__FILE__) . '/{{ $config.SiteSettingsDdev }}';
if (is_readable($ddev_settings)) {
require $ddev_settings;
}
`
// drupal7SettingsAppendTemplate defines the template that will be appended to
// a Drupal 7 app's settings.php in the event that one exists.
const drupal7SettingsAppendTemplate = `{{ $config := . }}
// Automatically generated include for settings managed by ddev.
$ddev_settings = dirname(__FILE__) . '/{{ $config.SiteSettingsDdev }}';
if (is_readable($ddev_settings)) {
require $ddev_settings;
}
`
// drupal6SettingsTemplate defines the template that will become a Drupal 6
// app's settings.php in the event that one does not already exist.
const drupal6SettingsTemplate = drupal7SettingsTemplate
// drupal6SettingsAppendTemplate defines the template that will be appended to
// a Drupal 6 app's settings.php in the event that one exists.
const drupal6SettingsAppendTemplate = drupal7SettingsAppendTemplate
const (
drupal8DdevSettingsTemplate = `<?php
{{ $config := . }}
/**
* @file
* {{ $config.Signature }}: Automatically generated Drupal settings file.
* ddev manages this file and may delete or overwrite the file unless this
* comment is removed.
*/
$host = "{{ $config.DatabaseHost }}";
$port = {{ $config.DatabasePort }};
// If DDEV_PHP_VERSION is not set, it means we're running on the host,
// so use the host-side bind port on docker IP
if (empty(getenv('DDEV_PHP_VERSION'))) {
$host = "{{ $config.DockerIP }}";
$port = {{ $config.DBPublishedPort }};
}
$databases['default']['default'] = array(
'database' => "{{ $config.DatabaseName }}",
'username' => "{{ $config.DatabaseUsername }}",
'password' => "{{ $config.DatabasePassword }}",
'host' => $host,
'driver' => "{{ $config.DatabaseDriver }}",
'port' => $port,
'prefix' => "{{ $config.DatabasePrefix }}",
);
ini_set('session.gc_probability', 1);
ini_set('session.gc_divisor', 100);
ini_set('session.gc_maxlifetime', 200000);
ini_set('session.cookie_lifetime', 2000000);
$settings['hash_salt'] = '{{ $config.HashSalt }}';
// This will prevent Drupal from setting read-only permissions on sites/default.
$settings['skip_permissions_hardening'] = TRUE;
// This will ensure the site can only be accessed through the intended host
// names. Additional host patterns can be added for custom configurations.
$settings['trusted_host_patterns'] = ['.*'];
// Don't use Symfony's APCLoader. ddev includes APCu; Composer's APCu loader has
// better performance.
$settings['class_loader_auto_detect'] = FALSE;
// This specifies the default configuration sync directory.
if (empty($config_directories[CONFIG_SYNC_DIRECTORY])) {
$config_directories[CONFIG_SYNC_DIRECTORY] = '{{ joinPath $config.SitePath $config.SyncDir }}';
}
`
)
const (
drupal7DdevSettingsTemplate = `<?php
{{ $config := . }}
/**
* @file
* {{ $config.Signature }}: Automatically generated Drupal settings file.
* ddev manages this file and may delete or overwrite the file unless this
* comment is removed.
*/
$host = "{{ $config.DatabaseHost }}";
$port = {{ $config.DatabasePort }};
// If DDEV_PHP_VERSION is not set, it means we're running on the host,
// so use the host-side bind port on docker IP
if (empty(getenv('DDEV_PHP_VERSION'))) {
$host = "{{ $config.DockerIP }}";
$port = {{ $config.DBPublishedPort }};
}
$databases['default']['default'] = array(
'database' => "{{ $config.DatabaseName }}",
'username' => "{{ $config.DatabaseUsername }}",
'password' => "{{ $config.DatabasePassword }}",
'host' => $host,
'driver' => "{{ $config.DatabaseDriver }}",
'port' => $port,
'prefix' => "{{ $config.DatabasePrefix }}",
);
ini_set('session.gc_probability', 1);
ini_set('session.gc_divisor', 100);
ini_set('session.gc_maxlifetime', 200000);
ini_set('session.cookie_lifetime', 2000000);
$drupal_hash_salt = '{{ $config.HashSalt }}';
`
)
const (
drupal6DdevSettingsTemplate = `<?php
{{ $config := . }}
/**
* @file
* {{ $config.Signature }}: Automatically generated Drupal settings file.
* ddev manages this file and may delete or overwrite the file unless this
* comment is removed.
*/
$host = "{{ $config.DatabaseHost }}";
$port = {{ $config.DatabasePort }};
// If DDEV_PHP_VERSION is not set, it means we're running on the host,
// so use the host-side bind port on docker IP
if (empty(getenv('DDEV_PHP_VERSION'))) {
$host = "{{ $config.DockerIP }}";
$port = {{ $config.DBPublishedPort }};
}
$db_url = "{{ $config.DatabaseDriver }}://{{ $config.DatabaseUsername }}:{{ $config.DatabasePassword }}@$host:$port/{{ $config.DatabaseName }}";
ini_set('session.gc_probability', 1);
ini_set('session.gc_divisor', 100);
ini_set('session.gc_maxlifetime', 200000);
ini_set('session.cookie_lifetime', 2000000);
`
)
// manageDrupalSettingsFile will direct inspecting and writing of settings.php.
func manageDrupalSettingsFile(app *DdevApp, drupalConfig *DrupalSettings, settingsTemplate, appendTemplate string) error {
// We'll be writing/appending to the settings files and parent directory, make sure we have permissions to do so
if err := drupalEnsureWritePerms(app); err != nil {
return err
}
if !fileutil.FileExists(app.SiteSettingsPath) {
output.UserOut.Printf("No %s file exists, creating one", drupalConfig.SiteSettings)
if err := writeDrupalSettingsFile(drupalConfig, app.SiteSettingsPath, settingsTemplate); err != nil {
return fmt.Errorf("failed to write: %v", err)
}
}
included, err := settingsHasInclude(drupalConfig, app.SiteSettingsPath)
if err != nil {
return fmt.Errorf("failed to check for include: %v", err)
}
if included {
output.UserOut.Printf("Existing %s file includes %s", drupalConfig.SiteSettings, drupalConfig.SiteSettingsDdev)
} else {
output.UserOut.Printf("Existing %s file does not include %s, modifying to include ddev settings", drupalConfig.SiteSettings, drupalConfig.SiteSettingsDdev)
if err := appendIncludeToDrupalSettingsFile(drupalConfig, app.SiteSettingsPath, appendTemplate); err != nil {
return fmt.Errorf("failed to include %s in %s: %v", drupalConfig.SiteSettingsDdev, drupalConfig.SiteSettings, err)
}
}
return nil
}
// writeDrupalSettingsFile creates the app's settings.php or equivalent,
// which does nothing more than import the ddev-managed settings.ddev.php.
func writeDrupalSettingsFile(drupalConfig *DrupalSettings, filePath string, versionTemplate string) error {
tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(versionTemplate)
if err != nil {
return err
}
// Ensure target directory exists and is writable
dir := filepath.Dir(filePath)
if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
if err = os.MkdirAll(dir, 0755); err != nil {
return err
}
} else if err != nil {
return err
}
// Create file
file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
}
defer util.CheckClose(file)
if err := tmpl.Execute(file, drupalConfig); err != nil {
return err
}
return nil
}
// createDrupal7SettingsFile manages creation and modification of settings.php and settings.ddev.php.
// If a settings.php file already exists, it will be modified to ensure that it includes
// settings.ddev.php, which contains ddev-specific configuration.
func createDrupal7SettingsFile(app *DdevApp) (string, error) {
// Currently there isn't any customization done for the drupal config, but
// we may want to do some kind of customization in the future.
drupalConfig := NewDrupalSettings(app)
if err := manageDrupalSettingsFile(app, drupalConfig, drupal7SettingsTemplate, drupal7SettingsAppendTemplate); err != nil {
return "", err
}
if err := writeDrupal7DdevSettingsFile(drupalConfig, app.SiteDdevSettingsFile); err != nil {
return "", fmt.Errorf("`failed to write` Drupal settings file %s: %v", app.SiteDdevSettingsFile, err)
}
return app.SiteDdevSettingsFile, nil
}
// createDrupal8SettingsFile manages creation and modification of settings.php and settings.ddev.php.
// If a settings.php file already exists, it will be modified to ensure that it includes
// settings.ddev.php, which contains ddev-specific configuration.
func createDrupal8SettingsFile(app *DdevApp) (string, error) {
// Currently there isn't any customization done for the drupal config, but
// we may want to do some kind of customization in the future.
drupalConfig := NewDrupalSettings(app)
if err := manageDrupalSettingsFile(app, drupalConfig, drupal8SettingsTemplate, drupal8SettingsAppendTemplate); err != nil {
return "", err
}
if err := writeDrupal8DdevSettingsFile(drupalConfig, app.SiteDdevSettingsFile); err != nil {
return "", fmt.Errorf("failed to write Drupal settings file %s: %v", app.SiteDdevSettingsFile, err)
}
return app.SiteDdevSettingsFile, nil
}
// createDrupal6SettingsFile manages creation and modification of settings.php and settings.ddev.php.
// If a settings.php file already exists, it will be modified to ensure that it includes
// settings.ddev.php, which contains ddev-specific configuration.
func createDrupal6SettingsFile(app *DdevApp) (string, error) {
// Currently there isn't any customization done for the drupal config, but
// we may want to do some kind of customization in the future.
drupalConfig := NewDrupalSettings(app)
// mysqli is required in latest D6LTS and works fine in ddev in old D6
drupalConfig.DatabaseDriver = "mysqli"
if err := manageDrupalSettingsFile(app, drupalConfig, drupal6SettingsTemplate, drupal6SettingsAppendTemplate); err != nil {
return "", err
}
if err := writeDrupal6DdevSettingsFile(drupalConfig, app.SiteDdevSettingsFile); err != nil {
return "", fmt.Errorf("failed to write Drupal settings file %s: %v", app.SiteDdevSettingsFile, err)
}
return app.SiteDdevSettingsFile, nil
}
// writeDrupal8DdevSettingsFile dynamically produces valid settings.ddev.php file by combining a configuration
// object with a data-driven template.
func writeDrupal8DdevSettingsFile(settings *DrupalSettings, filePath string) error {
if fileutil.FileExists(filePath) {
// Check if the file is managed by ddev.
signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
if err != nil {
return err
}
// If the signature wasn't found, warn the user and return.
if !signatureFound {
util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
return nil
}
}
tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(drupal8DdevSettingsTemplate)
if err != nil {
return err
}
// Ensure target directory exists and is writable
dir := filepath.Dir(filePath)
if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
if err = os.MkdirAll(dir, 0755); err != nil {
return err
}
} else if err != nil {
return err
}
file, err := os.Create(filePath)
if err != nil {
return err
}
defer util.CheckClose(file)
if err := tmpl.Execute(file, settings); err != nil {
return err
}
return nil
}
// writeDrupal7DdevSettingsFile dynamically produces valid settings.ddev.php file by combining a configuration
// object with a data-driven template.
func writeDrupal7DdevSettingsFile(settings *DrupalSettings, filePath string) error {
if fileutil.FileExists(filePath) {
// Check if the file is managed by ddev.
signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
if err != nil {
return err
}
// If the signature wasn't found, warn the user and return.
if !signatureFound {
util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
return nil
}
}
tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(drupal7DdevSettingsTemplate)
if err != nil {
return err
}
// Ensure target directory exists and is writable
dir := filepath.Dir(filePath)
if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
if err = os.MkdirAll(dir, 0755); err != nil {
return err
}
} else if err != nil {
return err
}
file, err := os.Create(filePath)
if err != nil {
return err
}
err = tmpl.Execute(file, settings)
if err != nil {
return err
}
util.CheckClose(file)
return nil
}
// writeDrupal6DdevSettingsFile dynamically produces valid settings.ddev.php file by combining a configuration
// object with a data-driven template.
func writeDrupal6DdevSettingsFile(settings *DrupalSettings, filePath string) error {
if fileutil.FileExists(filePath) {
// Check if the file is managed by ddev.
signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
if err != nil {
return err
}
// If the signature wasn't found, warn the user and return.
if !signatureFound {
util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
return nil
}
}
tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(drupal6DdevSettingsTemplate)
if err != nil {
return err
}
// Ensure target directory exists and is writable
dir := filepath.Dir(filePath)
if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
if err = os.MkdirAll(dir, 0755); err != nil {
return err
}
} else if err != nil {
return err
}
file, err := os.Create(filePath)
if err != nil {
return err
}
err = tmpl.Execute(file, settings)
if err != nil {
return err
}
util.CheckClose(file)
return nil
}
// WriteDrushrc writes out drushrc.php based on passed-in values.
// This works on Drupal 6 and Drupal 7 or with drush8 and older
func WriteDrushrc(app *DdevApp, filePath string) error {
if fileutil.FileExists(filePath) {
// Check if the file is managed by ddev.
signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
if err != nil {
return err
}
// If the signature wasn't found, warn the user and return.
if !signatureFound {
util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
return nil
}
}
uri := app.GetHTTPSURL()
if GetCAROOT() == "" {
uri = app.GetHTTPURL()
}
drushContents := []byte(`<?php
/** ` + DdevFileSignature + `: Automatically generated drushrc.php file.
ddev manages this file and may delete or overwrite the file unless this comment is removed.
*/
$options['l'] = "` + uri + `";
`)
// Ensure target directory exists and is writable
dir := filepath.Dir(filePath)
if err := os.Chmod(dir, 0755); os.IsNotExist(err) {
if err = os.MkdirAll(dir, 0755); err != nil {
return err
}
} else if err != nil {
return err
}
err := ioutil.WriteFile(filePath, drushContents, 0666)
if err != nil {
return err
}
return nil
}
// WriteDrushYML writes a drush.yaml to set the default uri
func WriteDrushYML(app *DdevApp, filePath string) error {
if fileutil.FileExists(filePath) {
// Check if the file is managed by ddev.
signatureFound, err := fileutil.FgrepStringInFile(filePath, DdevFileSignature)
if err != nil {
return err
}
// If the signature wasn't found, warn the user and return.
if !signatureFound {
util.Warning("%s already exists and is managed by the user.", filepath.Base(filePath))
return nil
}
}
uri := app.GetHTTPSURL()
if GetCAROOT() == "" {
uri = app.GetHTTPURL()
}
drushContents := []byte(`
#` + DdevFileSignature + `: Automatically generated drush.yml file.
# ddev manages this file and may delete or overwrite the file unless this comment is removed.
options:
uri: "` + uri + `"
`)
// Ensure target directory exists and is writable
dir := filepath.Dir(filePath)
if err := os.Chmod(dir, 0755); os.IsNotExist(err) {
if err = os.MkdirAll(dir, 0755); err != nil {
return err
}
} else if err != nil {
return err
}
err := ioutil.WriteFile(filePath, drushContents, 0666)
if err != nil {
return err
}
return nil
}
// getDrupalUploadDir will return a custom upload dir if defined, returning a default path if not.
func getDrupalUploadDir(app *DdevApp) string {
if app.UploadDir == "" {
return "sites/default/files"
}
return app.UploadDir
}
// Drupal8Hooks adds a d8-specific hooks example for post-import-db
const Drupal8Hooks = `# post-import-db:
# - exec: drush cr
# - exec: drush updb
`
// Drupal7Hooks adds a d7-specific hooks example for post-import-db
const Drupal7Hooks = `# post-import-db:
# - exec: drush cc all
`
// getDrupal7Hooks for appending as byte array
func getDrupal7Hooks() []byte {
return []byte(Drupal7Hooks)
}
// getDrupal6Hooks for appending as byte array
func getDrupal6Hooks() []byte {
// We don't have anything new to add yet, so just use Drupal7 version
return []byte(Drupal7Hooks)
}
// getDrupal8Hooks for appending as byte array
func getDrupal8Hooks() []byte {
return []byte(Drupal8Hooks)
}
// setDrupalSiteSettingsPaths sets the paths to settings.php/settings.ddev.php
// for templating.
func setDrupalSiteSettingsPaths(app *DdevApp) {
drupalConfig := NewDrupalSettings(app)
settingsFileBasePath := filepath.Join(app.AppRoot, app.Docroot)
app.SiteSettingsPath = filepath.Join(settingsFileBasePath, drupalConfig.SitePath, drupalConfig.SiteSettings)
app.SiteDdevSettingsFile = filepath.Join(settingsFileBasePath, drupalConfig.SitePath, drupalConfig.SiteSettingsDdev)
}
// isDrupal7App returns true if the app is of type drupal7
func isDrupal7App(app *DdevApp) bool {
if _, err := os.Stat(filepath.Join(app.AppRoot, app.Docroot, "misc/ajax.js")); err == nil {
return true
}
return false
}
// isDrupal8App returns true if the app is of type drupal8
func isDrupal8App(app *DdevApp) bool {
if _, err := os.Stat(filepath.Join(app.AppRoot, app.Docroot, "core/scripts/drupal.sh")); err == nil {
return true
}
return false
}
// isDrupal6App returns true if the app is of type Drupal6
func isDrupal6App(app *DdevApp) bool {
if _, err := os.Stat(filepath.Join(app.AppRoot, app.Docroot, "misc/ahah.js")); err == nil {
return true
}
return false
}
// drupal6ConfigOverrideAction overrides php_version for D6, since it is incompatible
// with php7+
func drupal6ConfigOverrideAction(app *DdevApp) error {
app.PHPVersion = nodeps.PHP56
return nil
}
// drupal7ConfigOverrideAction overrides php_version for D7,
// since it is not yet compatible with php7.3
func drupal7ConfigOverrideAction(app *DdevApp) error {
app.PHPVersion = nodeps.PHP72
return nil
}
// drupal8PostStartAction handles default post-start actions for D8 apps, like ensuring
// useful permissions settings on sites/default.
func drupal8PostStartAction(app *DdevApp) error {
if err := createDrupal8SyncDir(app); err != nil {
return err
}
if err := drupalEnsureWritePerms(app); err != nil {
return err
}
// Write both drush.yml and drushrc.php for Drupal 8, because we can't know
// what version of drush may be in use. drush8 is happy with drushrc.php
// drush9 wants drush.yml
err := WriteDrushYML(app, filepath.Join(filepath.Dir(app.SiteSettingsPath), "..", "all", "drush", "drush.yml"))
if err != nil {
util.Warning("Failed to WriteDrushYML: %v", err)
}
err = WriteDrushrc(app, filepath.Join(filepath.Dir(app.SiteSettingsPath), "drushrc.php"))
if err != nil {
util.Warning("Failed to WriteDrushrc: %v", err)
}
if _, err = app.CreateSettingsFile(); err != nil {
return fmt.Errorf("failed to write settings file %s: %v", app.SiteDdevSettingsFile, err)
}
return nil
}
// drupal7PostStartAction handles default post-start actions for D7 apps, like ensuring
// useful permissions settings on sites/default.
func drupal7PostStartAction(app *DdevApp) error {
if err := drupalEnsureWritePerms(app); err != nil {
return err
}
err := WriteDrushrc(app, filepath.Join(filepath.Dir(app.SiteSettingsPath), "drushrc.php"))
if err != nil {
util.Warning("Failed to WriteDrushrc: %v", err)
}
if _, err = app.CreateSettingsFile(); err != nil {
return fmt.Errorf("failed to write settings file %s: %v", app.SiteDdevSettingsFile, err)
}
return nil
}
// drupal6PostStartAction handles default post-start actions for D6 apps, like ensuring
// useful permissions settings on sites/default.
func drupal6PostStartAction(app *DdevApp) error {
if err := drupalEnsureWritePerms(app); err != nil {
return err
}
err := WriteDrushrc(app, filepath.Join(filepath.Dir(app.SiteSettingsPath), "drushrc.php"))
if err != nil {
util.Warning("Failed to WriteDrushrc: %v", err)
}
if _, err = app.CreateSettingsFile(); err != nil {
return fmt.Errorf("failed to write settings file %s: %v", app.SiteDdevSettingsFile, err)
}
return nil
}
// drupalEnsureWritePerms will ensure sites/default and sites/default/settings.php will
// have the appropriate permissions for development.
func drupalEnsureWritePerms(app *DdevApp) error {
output.UserOut.Printf("Ensuring write permissions for %s", app.GetName())
var writePerms os.FileMode = 0200
settingsDir := path.Dir(app.SiteSettingsPath)
makeWritable := []string{
settingsDir,
app.SiteSettingsPath,
app.SiteDdevSettingsFile,
path.Join(settingsDir, "services.yml"),
}
for _, o := range makeWritable {
stat, err := os.Stat(o)
if err != nil {
if !os.IsNotExist(err) {
util.Warning("Unable to ensure write permissions: %v", err)
}
continue
}
if err := os.Chmod(o, stat.Mode()|writePerms); err != nil {
// Warn the user, but continue.
util.Warning("Unable to set permissions: %v", err)
}
}
return nil
}
// createDrupal8SyncDir creates a Drupal 8 app's sync directory
func createDrupal8SyncDir(app *DdevApp) error {
// Currently there isn't any customization done for the drupal config, but
// we may want to do some kind of customization in the future.
drupalConfig := NewDrupalSettings(app)
syncDirPath := path.Join(app.GetAppRoot(), app.GetDocroot(), drupalConfig.SyncDir)
if fileutil.FileExists(syncDirPath) {
return nil
}
if err := os.MkdirAll(syncDirPath, 0755); err != nil {
return fmt.Errorf("failed to create sync directory (%s): %v", syncDirPath, err)
}
return nil
}
// settingsHasInclude determines if the settings.php or equivalent includes settings.ddev.php or equivalent.
// This is done by looking for the ddev settings file (settings.ddev.php) in settings.php.
func settingsHasInclude(drupalConfig *DrupalSettings, siteSettingsPath string) (bool, error) {
included, err := fileutil.FgrepStringInFile(siteSettingsPath, drupalConfig.SiteSettingsDdev)
if err != nil {
return false, err
}
return included, nil
}
// appendIncludeToDrupalSettingsFile modifies the settings.php file to include the settings.ddev.php
// file, which contains ddev-specific configuration.
func appendIncludeToDrupalSettingsFile(drupalConfig *DrupalSettings, siteSettingsPath string, appendTemplate string) error {
// Check if file is empty
contents, err := ioutil.ReadFile(siteSettingsPath)
if err != nil {
return err
}
// If the file is empty, write the complete settings template and return
if len(contents) == 0 {
return writeDrupalSettingsFile(drupalConfig, siteSettingsPath, appendTemplate)
}
// The file is not empty, open it for appending
file, err := os.OpenFile(siteSettingsPath, os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
defer util.CheckClose(file)
tmpl, err := template.New("settings").Funcs(getTemplateFuncMap()).Parse(appendTemplate)
if err != nil {
return err
}
// Write the template to the file
if err := tmpl.Execute(file, drupalConfig); err != nil {
return err
}
return nil
}
// drupalImportFilesAction defines the Drupal workflow for importing project files.
func drupalImportFilesAction(app *DdevApp, importPath, extPath string) error {
destPath := filepath.Join(app.GetAppRoot(), app.GetDocroot(), app.GetUploadDir())
// parent of destination dir should exist
if !fileutil.FileExists(filepath.Dir(destPath)) {
return fmt.Errorf("unable to import to %s: parent directory does not exist", destPath)
}
// parent of destination dir should be writable.
if err := os.Chmod(filepath.Dir(destPath), 0755); err != nil {
return err
}
// If the destination path exists, remove it as was warned
if fileutil.FileExists(destPath) {
if err := os.RemoveAll(destPath); err != nil {
return fmt.Errorf("failed to cleanup %s before import: %v", destPath, err)
}
}
if isTar(importPath) {
if err := archive.Untar(importPath, destPath, extPath); err != nil {
return fmt.Errorf("failed to extract provided archive: %v", err)
}
return nil
}
if isZip(importPath) {
if err := archive.Unzip(importPath, destPath, extPath); err != nil {
return fmt.Errorf("failed to extract provided archive: %v", err)
}
return nil
}
if err := fileutil.CopyDir(importPath, destPath); err != nil {
return err
}
return nil
}
| 1 | 13,951 | Let's keep both of these here. It should work on most any version of Drupal 8 then true? | drud-ddev | go |
@@ -699,7 +699,7 @@ namespace OpenTelemetry.Trace.Test
private void AssertApproxSameTimestamp(DateTime one, DateTime two)
{
var timeShift = Math.Abs((one - two).TotalMilliseconds);
- Assert.InRange(timeShift, double.Epsilon, 10);
+ Assert.InRange(timeShift, double.Epsilon, 20);
}
}
}
| 1 | // <copyright file="SpanTest.cs" company="OpenTelemetry Authors">
// Copyright 2018, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using OpenTelemetry.Tests;
namespace OpenTelemetry.Trace.Test
{
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Threading.Tasks;
using Moq;
using OpenTelemetry.Abstractions.Utils;
using OpenTelemetry.Trace;
using OpenTelemetry.Trace.Config;
using Xunit;
public class SpanTest : IDisposable
{
private const string SpanName = "MySpanName";
private const string EventDescription = "MyEvent";
private readonly IDictionary<string, object> attributes = new Dictionary<String, object>();
private readonly List<KeyValuePair<string, object>> expectedAttributes;
private readonly IStartEndHandler startEndHandler = Mock.Of<IStartEndHandler>();
public SpanTest()
{
attributes.Add("MyStringAttributeKey", "MyStringAttributeValue");
attributes.Add("MyLongAttributeKey", 123L);
attributes.Add("MyBooleanAttributeKey", false);
expectedAttributes = new List<KeyValuePair<string, object>>(attributes)
{
new KeyValuePair<string, object>("MySingleStringAttributeKey", "MySingleStringAttributeValue"),
};
}
[Fact]
public void ToSpanData_NoRecordEvents()
{
var activityLink = new Activity(SpanName)
.SetIdFormat(ActivityIdFormat.W3C)
.Start();
activityLink.Stop();
var activity = new Activity(SpanName)
.SetIdFormat(ActivityIdFormat.W3C)
.Start();
Assert.False(activity.Recorded);
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
// Check that adding trace events after Span#End() does not throw any exception.
foreach (var attribute in attributes)
{
span.SetAttribute(attribute);
}
span.AddEvent(Event.Create(EventDescription));
span.AddEvent(EventDescription, attributes);
span.AddLink(Link.FromActivity(activityLink));
span.End();
// exception.expect(IllegalStateException);
Assert.Throws<InvalidOperationException>(() => ((Span)span).ToSpanData());
}
[Fact]
public void GetSpanContextFromActivity()
{
var tracestate = Tracestate.Builder.Set("k1", "v1").Build();
var activity = new Activity(SpanName).Start();
activity.TraceStateString = tracestate.ToString();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var span =
Span.StartSpan(
activity,
tracestate,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
Assert.NotNull(span.Context);
Assert.Equal(activity.TraceId, span.Context.TraceId);
Assert.Equal(activity.SpanId, span.Context.SpanId);
Assert.Equal(activity.ParentSpanId, ((Span)span).ParentSpanId);
Assert.Equal(activity.ActivityTraceFlags, span.Context.TraceOptions);
Assert.Same(tracestate, span.Context.Tracestate);
}
[Fact]
public void GetSpanContextFromActivityRecordedWithParent()
{
var tracestate = Tracestate.Builder.Set("k1", "v1").Build();
var parent = new Activity(SpanName).Start();
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var span =
Span.StartSpan(
activity,
tracestate,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
Assert.NotNull(span.Context);
Assert.Equal(activity.TraceId, span.Context.TraceId);
Assert.Equal(activity.SpanId, span.Context.SpanId);
Assert.Equal(activity.ParentSpanId, ((Span)span).ParentSpanId);
Assert.Equal(activity.ActivityTraceFlags, span.Context.TraceOptions);
Assert.Same(tracestate, span.Context.Tracestate);
}
[Fact]
public void NoEventsRecordedAfterEnd()
{
var activityLink = new Activity(SpanName)
.SetIdFormat(ActivityIdFormat.W3C)
.Start();
activityLink.Stop();
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var spanStartTime = PreciseTimestamp.GetUtcNow();
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
            var spanEndTime = PreciseTimestamp.GetUtcNow();
span.End();
            // Check that adding trace events after Span#End() does not throw any exception
            // and that they are not recorded.
foreach (var attribute in attributes)
{
span.SetAttribute(attribute);
}
span.SetAttribute(
"MySingleStringAttributeKey",
"MySingleStringAttributeValue");
span.AddEvent(Event.Create(EventDescription));
span.AddEvent(EventDescription, attributes);
span.AddLink(Link.FromActivity(activityLink));
var spanData = ((Span)span).ToSpanData();
AssertApproxSameTimestamp(spanData.StartTimestamp, spanStartTime);
Assert.Empty(spanData.Attributes.AttributeMap);
Assert.Empty(spanData.Events.Events);
Assert.Empty(spanData.Links.Links);
Assert.Equal(Status.Ok, spanData.Status);
            AssertApproxSameTimestamp(spanEndTime, spanData.EndTimestamp);
}
[Fact]
public async Task ToSpanData_ActiveSpan()
{
var activityLink = new Activity(SpanName)
.SetIdFormat(ActivityIdFormat.W3C)
.Start();
activityLink.Stop();
var activity = new Activity(SpanName)
.SetParentId(ActivityTraceId.CreateRandom(), ActivitySpanId.CreateRandom())
.Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var spanStartTime = PreciseTimestamp.GetUtcNow();
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
span.SetAttribute(
"MySingleStringAttributeKey",
"MySingleStringAttributeValue");
foreach (var attribute in attributes)
{
span.SetAttribute(attribute);
}
var firstEventTime = PreciseTimestamp.GetUtcNow();
span.AddEvent(Event.Create(EventDescription));
await Task.Delay(TimeSpan.FromMilliseconds(100));
var secondEventTime = PreciseTimestamp.GetUtcNow();
span.AddEvent(EventDescription, attributes);
var link = Link.FromActivity(activityLink);
span.AddLink(link);
var spanData = ((Span)span).ToSpanData();
Assert.Equal(activity.TraceId, spanData.Context.TraceId);
Assert.Equal(activity.SpanId, spanData.Context.SpanId);
Assert.Equal(activity.ParentSpanId, spanData.ParentSpanId);
Assert.Equal(activity.ActivityTraceFlags, spanData.Context.TraceOptions);
Assert.Same(Tracestate.Empty, spanData.Context.Tracestate);
Assert.Equal(SpanName, spanData.Name);
Assert.Equal(activity.ParentSpanId, spanData.ParentSpanId);
Assert.Equal(0, spanData.Attributes.DroppedAttributesCount);
spanData.Attributes.AssertAreSame(expectedAttributes);
Assert.Equal(0, spanData.Events.DroppedEventsCount);
Assert.Equal(2, spanData.Events.Events.Count());
AssertApproxSameTimestamp(spanData.Events.Events.ToList()[0].Timestamp, firstEventTime);
AssertApproxSameTimestamp(spanData.Events.Events.ToList()[1].Timestamp, secondEventTime);
Assert.Equal(Event.Create(EventDescription), spanData.Events.Events.ToList()[0].Event);
Assert.Equal(Event.Create(EventDescription, attributes), spanData.Events.Events.ToList()[1].Event);
Assert.Equal(0, spanData.Links.DroppedLinksCount);
Assert.Single(spanData.Links.Links);
Assert.Equal(link, spanData.Links.Links.First());
AssertApproxSameTimestamp(spanStartTime, spanData.StartTimestamp);
Assert.False(spanData.Status.IsValid);
Assert.Equal(default, spanData.EndTimestamp);
var startEndMock = Mock.Get<IStartEndHandler>(startEndHandler);
startEndMock.Verify(s => s.OnStart(span), Times.Once);
}
[Fact]
        public async Task ToSpanData_EndedSpan()
{
var activityLink = new Activity(SpanName)
.SetIdFormat(ActivityIdFormat.W3C)
.Start();
activityLink.Stop();
var activity = new Activity(SpanName)
.SetParentId(ActivityTraceId.CreateRandom(), ActivitySpanId.CreateRandom())
.Start();
var spanStartTime = PreciseTimestamp.GetUtcNow();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var span =
(Span)Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
span.SetAttribute(
"MySingleStringAttributeKey",
"MySingleStringAttributeValue");
foreach (var attribute in attributes)
{
span.SetAttribute(attribute);
}
await Task.Delay(TimeSpan.FromMilliseconds(100));
var firstEventTime = PreciseTimestamp.GetUtcNow();
span.AddEvent(Event.Create(EventDescription));
await Task.Delay(TimeSpan.FromMilliseconds(100));
var secondEventTime = PreciseTimestamp.GetUtcNow();
span.AddEvent(EventDescription, attributes);
var link = Link.FromActivity(activityLink);
span.AddLink(link);
span.Status = Status.Cancelled;
var spanEndTime = PreciseTimestamp.GetUtcNow();
span.End();
var spanData = ((Span)span).ToSpanData();
Assert.Equal(activity.TraceId, spanData.Context.TraceId);
Assert.Equal(activity.SpanId, spanData.Context.SpanId);
Assert.Equal(activity.ParentSpanId, spanData.ParentSpanId);
Assert.Equal(activity.ActivityTraceFlags, spanData.Context.TraceOptions);
Assert.Equal(SpanName, spanData.Name);
Assert.Equal(activity.ParentSpanId, spanData.ParentSpanId);
Assert.Equal(0, spanData.Attributes.DroppedAttributesCount);
spanData.Attributes.AssertAreSame(expectedAttributes);
Assert.Equal(0, spanData.Events.DroppedEventsCount);
Assert.Equal(2, spanData.Events.Events.Count());
AssertApproxSameTimestamp(spanData.Events.Events.ToList()[0].Timestamp, firstEventTime);
AssertApproxSameTimestamp(spanData.Events.Events.ToList()[1].Timestamp, secondEventTime);
Assert.Equal(Event.Create(EventDescription), spanData.Events.Events.ToList()[0].Event);
Assert.Equal(0, spanData.Links.DroppedLinksCount);
Assert.Single(spanData.Links.Links);
Assert.Equal(link, spanData.Links.Links.First());
AssertApproxSameTimestamp(spanData.StartTimestamp, spanStartTime);
Assert.Equal(Status.Cancelled, spanData.Status);
AssertApproxSameTimestamp(spanEndTime, spanData.EndTimestamp);
var startEndMock = Mock.Get<IStartEndHandler>(startEndHandler);
startEndMock.Verify(s => s.OnStart(span), Times.Once);
startEndMock.Verify(s => s.OnEnd(span), Times.Once);
}
[Fact]
public void Status_ViaSetStatus()
{
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var span =
(Span)Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
Assert.Equal(Status.Ok, span.Status);
((Span)span).Status = Status.Cancelled;
Assert.Equal(Status.Cancelled, span.Status);
span.End();
Assert.Equal(Status.Cancelled, span.Status);
var startEndMock = Mock.Get<IStartEndHandler>(startEndHandler);
startEndMock.Verify(s => s.OnStart(span), Times.Once);
}
[Fact]
        public void Status_ViaEndSpanOptions()
{
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var span =
(Span)Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
Assert.Equal(Status.Ok, span.Status);
((Span)span).Status = Status.Cancelled;
Assert.Equal(Status.Cancelled, span.Status);
span.Status = Status.Aborted;
span.End();
Assert.Equal(Status.Aborted, span.Status);
var startEndMock = Mock.Get<IStartEndHandler>(startEndHandler);
startEndMock.Verify(s => s.OnStart(span), Times.Once);
}
[Fact]
public void DroppingAttributes()
{
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var maxNumberOfAttributes = 8;
var traceParams =
TraceParams.Default.ToBuilder().SetMaxNumberOfAttributes(maxNumberOfAttributes).Build();
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
traceParams,
startEndHandler);
for (var i = 0; i < 2 * maxNumberOfAttributes; i++)
{
IDictionary<string, object> attributes = new Dictionary<string, object>();
attributes.Add("MyStringAttributeKey" + i, i);
foreach (var attribute in attributes)
{
span.SetAttribute(attribute);
}
}
var spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfAttributes, spanData.Attributes.DroppedAttributesCount);
Assert.Equal(maxNumberOfAttributes, spanData.Attributes.AttributeMap.Count());
for (var i = 0; i < maxNumberOfAttributes; i++)
{
Assert.Equal(
i + maxNumberOfAttributes,
spanData
.Attributes
.GetValue("MyStringAttributeKey" + (i + maxNumberOfAttributes)));
}
span.End();
spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfAttributes, spanData.Attributes.DroppedAttributesCount);
Assert.Equal(maxNumberOfAttributes, spanData.Attributes.AttributeMap.Count());
for (var i = 0; i < maxNumberOfAttributes; i++)
{
Assert.Equal(
i + maxNumberOfAttributes,
spanData
.Attributes
.GetValue("MyStringAttributeKey" + (i + maxNumberOfAttributes)));
}
}
[Fact]
public void DroppingAndAddingAttributes()
{
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var maxNumberOfAttributes = 8;
var traceParams =
TraceParams.Default.ToBuilder().SetMaxNumberOfAttributes(maxNumberOfAttributes).Build();
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
traceParams,
startEndHandler);
for (var i = 0; i < 2 * maxNumberOfAttributes; i++)
{
IDictionary<String, object> attributes = new Dictionary<String, object>();
attributes.Add("MyStringAttributeKey" + i, i);
foreach (var attribute in attributes)
{
span.SetAttribute(attribute);
}
}
var spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfAttributes, spanData.Attributes.DroppedAttributesCount);
Assert.Equal(maxNumberOfAttributes, spanData.Attributes.AttributeMap.Count());
for (var i = 0; i < maxNumberOfAttributes; i++)
{
Assert.Equal(
i + maxNumberOfAttributes,
spanData
.Attributes
.GetValue("MyStringAttributeKey" + (i + maxNumberOfAttributes)));
}
for (var i = 0; i < maxNumberOfAttributes / 2; i++)
{
IDictionary<String, object> attributes = new Dictionary<String, object>();
attributes.Add("MyStringAttributeKey" + i, i);
foreach (var attribute in attributes)
{
span.SetAttribute(attribute);
}
}
spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfAttributes * 3 / 2, spanData.Attributes.DroppedAttributesCount);
Assert.Equal(maxNumberOfAttributes, spanData.Attributes.AttributeMap.Count());
// Test that we still have in the attributes map the latest maxNumberOfAttributes / 2 entries.
for (var i = 0; i < maxNumberOfAttributes / 2; i++)
{
Assert.Equal(
i + maxNumberOfAttributes * 3 / 2,
spanData
.Attributes
.GetValue("MyStringAttributeKey" + (i + maxNumberOfAttributes * 3 / 2)));
}
// Test that we have the newest re-added initial entries.
for (var i = 0; i < maxNumberOfAttributes / 2; i++)
{
Assert.Equal(i,
spanData.Attributes.GetValue("MyStringAttributeKey" + i));
}
}
[Fact]
public async Task DroppingEvents()
{
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var maxNumberOfEvents = 8;
var traceParams =
TraceParams.Default.ToBuilder().SetMaxNumberOfEvents(maxNumberOfEvents).Build();
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
traceParams,
startEndHandler);
var testEvent = Event.Create(EventDescription);
var eventTimestamps = new DateTime[2 * maxNumberOfEvents];
for (int i = 0; i < 2 * maxNumberOfEvents; i++)
{
eventTimestamps[i] = PreciseTimestamp.GetUtcNow();
span.AddEvent(testEvent);
await Task.Delay(10);
}
var spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfEvents, spanData.Events.DroppedEventsCount);
Assert.Equal(maxNumberOfEvents, spanData.Events.Events.Count());
var events = spanData.Events.Events.ToArray();
for (int i = 0; i < maxNumberOfEvents; i++)
{
AssertApproxSameTimestamp(events[i].Timestamp, eventTimestamps[i + maxNumberOfEvents]);
}
span.End();
spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfEvents, spanData.Events.DroppedEventsCount);
Assert.Equal(maxNumberOfEvents, spanData.Events.Events.Count());
}
[Fact]
public void DroppingLinks()
{
var activityLink = new Activity(SpanName)
.SetIdFormat(ActivityIdFormat.W3C)
.Start();
activityLink.Stop();
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var maxNumberOfLinks = 8;
var traceParams =
TraceParams.Default.ToBuilder().SetMaxNumberOfLinks(maxNumberOfLinks).Build();
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
traceParams,
startEndHandler);
var link = Link.FromActivity(activityLink);
for (var i = 0; i < 2 * maxNumberOfLinks; i++)
{
span.AddLink(link);
}
var spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfLinks, spanData.Links.DroppedLinksCount);
Assert.Equal(maxNumberOfLinks, spanData.Links.Links.Count());
foreach (var actualLink in spanData.Links.Links)
{
Assert.Equal(link, actualLink);
}
span.End();
spanData = ((Span)span).ToSpanData();
Assert.Equal(maxNumberOfLinks, spanData.Links.DroppedLinksCount);
Assert.Equal(maxNumberOfLinks, spanData.Links.Links.Count());
foreach (var actualLink in spanData.Links.Links)
{
Assert.Equal(link, actualLink);
}
}
[Fact]
public void BadArguments()
{
var activity = new Activity(SpanName).Start();
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler);
Assert.Throws<ArgumentException>(() => span.Status = new Status());
Assert.Throws<ArgumentNullException>(() => span.UpdateName(null));
Assert.Throws<ArgumentNullException>(() => span.SetAttribute(null, string.Empty));
Assert.Throws<ArgumentNullException>(() => span.SetAttribute(string.Empty, null));
Assert.Throws<ArgumentNullException>(() =>
span.SetAttribute(null, "foo"));
Assert.Throws<ArgumentNullException>(() => span.SetAttribute(null, 1L));
Assert.Throws<ArgumentNullException>(() => span.SetAttribute(null, 0.1d));
Assert.Throws<ArgumentNullException>(() => span.SetAttribute(null, true));
Assert.Throws<ArgumentNullException>(() => span.AddEvent((string)null));
Assert.Throws<ArgumentNullException>(() => span.AddEvent((IEvent)null));
Assert.Throws<ArgumentNullException>(() => span.AddLink(null));
}
[Theory]
[InlineData(true)]
[InlineData(false)]
public void EndSpanStopsActivity(bool recordEvents)
{
var parentActivity = new Activity(SpanName).Start();
var activity = new Activity(SpanName).Start();
if (recordEvents)
{
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
}
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler,
ownsActivity: true);
span.End();
Assert.Same(parentActivity, Activity.Current);
}
[Theory]
[InlineData(true)]
[InlineData(false)]
public void EndSpanDoesNotStopActivityWhenDoesNotOwnIt(bool recordEvents)
{
var activity = new Activity(SpanName).Start();
if (recordEvents)
{
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
}
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler,
ownsActivity: false);
span.End();
Assert.Equal(recordEvents, span.HasEnded);
Assert.Same(activity, Activity.Current);
}
[Theory]
[InlineData(true, true)]
[InlineData(true, false)]
[InlineData(false, true)]
[InlineData(false, false)]
public void EndSpanStopActivity_NotCurrentActivity(bool recordEvents, bool ownsActivity)
{
var activity = new Activity(SpanName).Start();
if (recordEvents)
{
activity.ActivityTraceFlags |= ActivityTraceFlags.Recorded;
}
var span =
Span.StartSpan(
activity,
Tracestate.Empty,
SpanKind.Internal,
TraceParams.Default,
startEndHandler,
ownsActivity: ownsActivity);
var anotherActivity = new Activity(SpanName).Start();
span.End();
Assert.Equal(recordEvents, span.HasEnded);
Assert.Same(anotherActivity, Activity.Current);
}
public void Dispose()
{
Activity.Current = null;
}
private void AssertApproxSameTimestamp(DateTime one, DateTime two)
{
var timeShift = Math.Abs((one - two).TotalMilliseconds);
Assert.InRange(timeShift, double.Epsilon, 10);
}
}
}
| 1 | 12,092 | @lmolkova This ok? I'm getting random failures from the build checks that don't happen locally from a few non-deterministic time related comparisons. | open-telemetry-opentelemetry-dotnet | .cs |
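For context on the flakiness mentioned above: AssertApproxSameTimestamp fails whenever the two captured timestamps drift apart by more than the hard-coded bound, and on a busy CI agent the gap between the test's own PreciseTimestamp.GetUtcNow() call and the timestamp the span records can exceed 10 ms, so the diff simply widens the bound to 20 ms. (The double.Epsilon lower bound also means two exactly identical timestamps would fail the range check.) An alternative, sketched here only as an illustration and not what the PR does, is to make the tolerance an explicit parameter so CI runs can be configured more loosely than local ones:

using System;
using Xunit;

internal static class TimestampAssert
{
    // Hypothetical helper, not part of SpanTest: the tolerance is a parameter so a
    // CI environment with more scheduling jitter can pass a larger value than the
    // 20 ms default used locally.
    public static void ApproxEqual(DateTime one, DateTime two, double toleranceMs = 20)
    {
        var timeShift = Math.Abs((one - two).TotalMilliseconds);
        Assert.True(
            timeShift <= toleranceMs,
            $"Timestamps differ by {timeShift} ms, which exceeds the allowed {toleranceMs} ms.");
    }
}

Tests would then call something like TimestampAssert.ApproxEqual(spanStartTime, spanData.StartTimestamp) in place of the private helper.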
@@ -1,5 +1,5 @@
/*
- * Copyright ConsenSys AG.
+ * Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.config;
import java.math.BigInteger;
import java.util.Map;
import java.util.Optional;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.ImmutableMap;
import org.apache.tuweni.bytes.Bytes;
public class BftConfigOptions {
public static final BftConfigOptions DEFAULT =
new BftConfigOptions(JsonUtil.createEmptyObjectNode());
private static final long DEFAULT_EPOCH_LENGTH = 30_000;
private static final int DEFAULT_BLOCK_PERIOD_SECONDS = 1;
private static final int DEFAULT_ROUND_EXPIRY_SECONDS = 1;
  // In a healthy network this can be very small. This default limit will allow for suitable
  // protection on a typical 20-node validator network with multiple rounds.
private static final int DEFAULT_GOSSIPED_HISTORY_LIMIT = 1000;
private static final int DEFAULT_MESSAGE_QUEUE_LIMIT = 1000;
private static final int DEFAULT_DUPLICATE_MESSAGE_LIMIT = 100;
private static final int DEFAULT_FUTURE_MESSAGES_LIMIT = 1000;
private static final int DEFAULT_FUTURE_MESSAGES_MAX_DISTANCE = 10;
protected final ObjectNode bftConfigRoot;
BftConfigOptions(final ObjectNode bftConfigRoot) {
this.bftConfigRoot = bftConfigRoot;
}
public long getEpochLength() {
return JsonUtil.getLong(bftConfigRoot, "epochlength", DEFAULT_EPOCH_LENGTH);
}
public int getBlockPeriodSeconds() {
return JsonUtil.getInt(bftConfigRoot, "blockperiodseconds", DEFAULT_BLOCK_PERIOD_SECONDS);
}
public int getRequestTimeoutSeconds() {
return JsonUtil.getInt(bftConfigRoot, "requesttimeoutseconds", DEFAULT_ROUND_EXPIRY_SECONDS);
}
public int getGossipedHistoryLimit() {
return JsonUtil.getInt(bftConfigRoot, "gossipedhistorylimit", DEFAULT_GOSSIPED_HISTORY_LIMIT);
}
public int getMessageQueueLimit() {
return JsonUtil.getInt(bftConfigRoot, "messagequeuelimit", DEFAULT_MESSAGE_QUEUE_LIMIT);
}
public int getDuplicateMessageLimit() {
return JsonUtil.getInt(bftConfigRoot, "duplicatemessagelimit", DEFAULT_DUPLICATE_MESSAGE_LIMIT);
}
public int getFutureMessagesLimit() {
return JsonUtil.getInt(bftConfigRoot, "futuremessageslimit", DEFAULT_FUTURE_MESSAGES_LIMIT);
}
public int getFutureMessagesMaxDistance() {
return JsonUtil.getInt(
bftConfigRoot, "futuremessagesmaxdistance", DEFAULT_FUTURE_MESSAGES_MAX_DISTANCE);
}
public Optional<String> getMiningBeneficiary() {
return JsonUtil.getString(bftConfigRoot, "miningbeneficiary");
}
public BigInteger getBlockRewardWei() {
final Optional<String> configFileContent = JsonUtil.getString(bftConfigRoot, "blockreward");
if (configFileContent.isEmpty()) {
return BigInteger.ZERO;
}
final String weiStr = configFileContent.get();
if (weiStr.startsWith("0x")) {
return new BigInteger(1, Bytes.fromHexStringLenient(weiStr).toArrayUnsafe());
}
return new BigInteger(weiStr);
}
Map<String, Object> asMap() {
final ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
if (bftConfigRoot.has("epochlength")) {
builder.put("epochLength", getEpochLength());
}
if (bftConfigRoot.has("blockperiodseconds")) {
builder.put("blockPeriodSeconds", getBlockPeriodSeconds());
}
if (bftConfigRoot.has("requesttimeoutseconds")) {
builder.put("requestTimeoutSeconds", getRequestTimeoutSeconds());
}
if (bftConfigRoot.has("gossipedhistorylimit")) {
builder.put("gossipedHistoryLimit", getGossipedHistoryLimit());
}
if (bftConfigRoot.has("messagequeuelimit")) {
builder.put("messageQueueLimit", getMessageQueueLimit());
}
if (bftConfigRoot.has("duplicatemessagelimit")) {
builder.put("duplicateMessageLimit", getDuplicateMessageLimit());
}
if (bftConfigRoot.has("futuremessageslimit")) {
builder.put("futureMessagesLimit", getFutureMessagesLimit());
}
if (bftConfigRoot.has("futuremessagesmaxdistance")) {
builder.put("futureMessagesMaxDistance", getFutureMessagesMaxDistance());
}
return builder.build();
}
}
| 1 | 26,205 | Q: are we supposed to change this for files that already exist? | hyperledger-besu | java |
@@ -477,7 +477,7 @@ def initialize_unbounded(obj, dimensions, key):
"""
select = dict(zip([d.name for d in dimensions], key))
try:
- obj.select([DynamicMap], **select)
+ obj.select(selection_specs=[DynamicMap], **select)
except KeyError:
pass
| 1 | from __future__ import unicode_literals, absolute_import, division
from collections import defaultdict, namedtuple
import re
import traceback
import warnings
import bisect
import numpy as np
import param
from ..core import (HoloMap, DynamicMap, CompositeOverlay, Layout,
Overlay, GridSpace, NdLayout, NdOverlay)
from ..core.options import Cycle
from ..core.ndmapping import item_check
from ..core.spaces import get_nested_streams
from ..core.util import (match_spec, wrap_tuple, basestring, get_overlay_spec,
unique_iterator, closest_match, is_number, isfinite,
python2sort, disable_constant, arraylike_types)
from ..streams import LinkedStream
from ..util.transform import dim
def displayable(obj):
"""
    Predicate that returns whether the object is displayable or not
    (i.e. whether the object obeys the nesting hierarchy).
"""
if isinstance(obj, Overlay) and any(isinstance(o, (HoloMap, GridSpace))
for o in obj):
return False
if isinstance(obj, HoloMap):
return not (obj.type in [Layout, GridSpace, NdLayout, DynamicMap])
if isinstance(obj, (GridSpace, Layout, NdLayout)):
for el in obj.values():
if not displayable(el):
return False
return True
return True
class Warning(param.Parameterized): pass
display_warning = Warning(name='Warning')
def collate(obj):
if isinstance(obj, Overlay):
nested_type = [type(o).__name__ for o in obj
if isinstance(o, (HoloMap, GridSpace))][0]
display_warning.param.warning(
"Nesting %ss within an Overlay makes it difficult to "
"access your data or control how it appears; we recommend "
"calling .collate() on the Overlay in order to follow the "
"recommended nesting structure shown in the Composing Data "
"user guide (http://goo.gl/2YS8LJ)" % nested_type)
return obj.collate()
if isinstance(obj, DynamicMap):
if obj.type in [DynamicMap, HoloMap]:
obj_name = obj.type.__name__
raise Exception("Nesting a %s inside a DynamicMap is not "
"supported. Ensure that the DynamicMap callback "
"returns an Element or (Nd)Overlay. If you have "
"applied an operation ensure it is not dynamic by "
"setting dynamic=False." % obj_name)
return obj.collate()
if isinstance(obj, HoloMap):
display_warning.param.warning(
"Nesting {0}s within a {1} makes it difficult to access "
"your data or control how it appears; we recommend "
"calling .collate() on the {1} in order to follow the "
"recommended nesting structure shown in the Composing "
"Data user guide (https://goo.gl/2YS8LJ)".format(
obj.type.__name__, type(obj).__name__))
return obj.collate()
elif isinstance(obj, (Layout, NdLayout)):
try:
display_warning.param.warning(
"Layout contains HoloMaps which are not nested in the "
"recommended format for accessing your data; calling "
".collate() on these objects will resolve any violations "
"of the recommended nesting presented in the Composing Data "
"tutorial (https://goo.gl/2YS8LJ)")
expanded = []
for el in obj.values():
if isinstance(el, HoloMap) and not displayable(el):
collated_layout = Layout.from_values(el.collate())
expanded.extend(collated_layout.values())
return Layout(expanded)
except:
raise Exception(undisplayable_info(obj))
else:
raise Exception(undisplayable_info(obj))
def isoverlay_fn(obj):
"""
Determines whether object is a DynamicMap returning (Nd)Overlay types.
"""
return isinstance(obj, DynamicMap) and (isinstance(obj.last, CompositeOverlay))
def overlay_depth(obj):
"""
Computes the depth of a DynamicMap overlay if it can be determined
otherwise return None.
"""
if isinstance(obj, DynamicMap):
if isinstance(obj.last, CompositeOverlay):
return len(obj.last)
elif obj.last is None:
return None
return 1
else:
return 1
def compute_overlayable_zorders(obj, path=[]):
"""
Traverses an overlayable composite container to determine which
objects are associated with specific (Nd)Overlay layers by
z-order, making sure to take DynamicMap Callables into
    account. Returns a mapping between the zorders of each layer and the
corresponding lists of objects.
Used to determine which overlaid subplots should be linked with
Stream callbacks.
"""
path = path+[obj]
zorder_map = defaultdict(list)
# Process non-dynamic layers
if not isinstance(obj, DynamicMap):
if isinstance(obj, CompositeOverlay):
for z, o in enumerate(obj):
zorder_map[z] = [o, obj]
elif isinstance(obj, HoloMap):
for el in obj.values():
if isinstance(el, CompositeOverlay):
for k, v in compute_overlayable_zorders(el, path).items():
zorder_map[k] += v + [obj]
else:
zorder_map[0] += [obj, el]
else:
if obj not in zorder_map[0]:
zorder_map[0].append(obj)
return zorder_map
isoverlay = isinstance(obj.last, CompositeOverlay)
isdynoverlay = obj.callback._is_overlay
if obj not in zorder_map[0] and not isoverlay:
zorder_map[0].append(obj)
depth = overlay_depth(obj)
# Process the inputs of the DynamicMap callback
dmap_inputs = obj.callback.inputs if obj.callback.link_inputs else []
for z, inp in enumerate(dmap_inputs):
no_zorder_increment = False
if any(not (isoverlay_fn(p) or p.last is None) for p in path) and isoverlay_fn(inp):
# If overlay has been collapsed do not increment zorder
no_zorder_increment = True
input_depth = overlay_depth(inp)
if depth is not None and input_depth is not None and depth < input_depth:
# Skips branch of graph where the number of elements in an
# overlay has been reduced but still contains more than one layer
if depth > 1:
continue
else:
no_zorder_increment = True
# Recurse into DynamicMap.callback.inputs and update zorder_map
z = z if isdynoverlay else 0
deep_zorders = compute_overlayable_zorders(inp, path=path)
offset = max(zorder_map.keys())
for dz, objs in deep_zorders.items():
global_z = offset+z if no_zorder_increment else offset+dz+z
zorder_map[global_z] = list(unique_iterator(zorder_map[global_z]+objs))
# If object branches but does not declare inputs (e.g. user defined
# DynamicMaps returning (Nd)Overlay) add the items on the DynamicMap.last
found = any(isinstance(p, DynamicMap) and p.callback._is_overlay for p in path)
linked = any(isinstance(s, LinkedStream) and s.linked for s in obj.streams)
if (found or linked) and isoverlay and not isdynoverlay:
offset = max(zorder_map.keys())
for z, o in enumerate(obj.last):
if isoverlay and linked:
zorder_map[offset+z].append(obj)
if o not in zorder_map[offset+z]:
zorder_map[offset+z].append(o)
return zorder_map
def is_dynamic_overlay(dmap):
"""
Traverses a DynamicMap graph and determines if any components
were overlaid dynamically (i.e. by * on a DynamicMap).
"""
if not isinstance(dmap, DynamicMap):
return False
elif dmap.callback._is_overlay:
return True
else:
return any(is_dynamic_overlay(dm) for dm in dmap.callback.inputs)
def split_dmap_overlay(obj, depth=0):
"""
Splits a DynamicMap into the original component layers it was
constructed from by traversing the graph to search for dynamically
overlaid components (i.e. constructed by using * on a DynamicMap).
Useful for assigning subplots of an OverlayPlot the streams that
are responsible for driving their updates. Allows the OverlayPlot
to determine if a stream update should redraw a particular
subplot.
"""
layers = []
if isinstance(obj, DynamicMap):
if issubclass(obj.type, NdOverlay) and not depth:
for v in obj.last.values():
layers.append(obj)
elif issubclass(obj.type, Overlay):
if obj.callback.inputs and is_dynamic_overlay(obj):
for inp in obj.callback.inputs:
layers += split_dmap_overlay(inp, depth+1)
else:
for v in obj.last.values():
layers.append(obj)
else:
layers.append(obj)
return layers
if isinstance(obj, Overlay):
for k, v in obj.items():
layers.append(v)
else:
layers.append(obj)
return layers
def initialize_dynamic(obj):
"""
Initializes all DynamicMap objects contained by the object
"""
dmaps = obj.traverse(lambda x: x, specs=[DynamicMap])
for dmap in dmaps:
if dmap.unbounded:
# Skip initialization until plotting code
continue
if not len(dmap):
dmap[dmap._initial_key()]
def get_plot_frame(map_obj, key_map, cached=False):
"""Returns the current frame in a mapping given a key mapping.
Args:
        map_obj: Nested Dimensioned object
key_map: Dictionary mapping between dimensions and key value
cached: Whether to allow looking up key in cache
Returns:
The item in the mapping corresponding to the supplied key.
"""
if map_obj.kdims and len(map_obj.kdims) == 1 and map_obj.kdims[0] == 'Frame':
# Special handling for static plots
return map_obj.last
key = tuple(key_map[kd.name] for kd in map_obj.kdims if kd.name in key_map)
if key in map_obj.data and cached:
return map_obj.data[key]
else:
try:
return map_obj[key]
except KeyError:
return None
except StopIteration as e:
raise e
except Exception:
print(traceback.format_exc())
return None
def get_nested_plot_frame(obj, key_map, cached=False):
"""Extracts a single frame from a nested object.
Replaces any HoloMap or DynamicMap in the nested data structure,
with the item corresponding to the supplied key.
Args:
obj: Nested Dimensioned object
key_map: Dictionary mapping between dimensions and key value
cached: Whether to allow looking up key in cache
Returns:
Nested datastructure where maps are replaced with single frames
"""
clone = obj.map(lambda x: x)
# Ensure that DynamicMaps in the cloned frame have
# identical callback inputs to allow memoization to work
for it1, it2 in zip(obj.traverse(lambda x: x), clone.traverse(lambda x: x)):
if isinstance(it1, DynamicMap):
with disable_constant(it2.callback):
it2.callback.inputs = it1.callback.inputs
with item_check(False):
return clone.map(lambda x: get_plot_frame(x, key_map, cached=cached),
[DynamicMap, HoloMap], clone=False)
def undisplayable_info(obj, html=False):
"Generate helpful message regarding an undisplayable object"
collate = '<tt>collate</tt>' if html else 'collate'
info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)"
if isinstance(obj, HoloMap):
error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__
remedy = "Please call the %s method to generate a displayable object" % collate
elif isinstance(obj, Layout):
error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
elif isinstance(obj, GridSpace):
error = "GridSpace containing HoloMaps of Layouts cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
if not html:
return '\n'.join([error, remedy, info])
else:
return "<center>{msg}</center>".format(msg=('<br>'.join(
['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
def compute_sizes(sizes, size_fn, scaling_factor, scaling_method, base_size):
"""
Scales point sizes according to a scaling factor,
base size and size_fn, which will be applied before
scaling.
"""
if sizes.dtype.kind not in ('i', 'f'):
return None
if scaling_method == 'area':
pass
elif scaling_method == 'width':
scaling_factor = scaling_factor**2
else:
raise ValueError(
'Invalid value for argument "scaling_method": "{}". '
'Valid values are: "width", "area".'.format(scaling_method))
sizes = size_fn(sizes)
return (base_size*scaling_factor*sizes)
def get_axis_padding(padding):
"""
Process a padding value supplied as a tuple or number and returns
padding values for x-, y- and z-axis.
"""
if isinstance(padding, tuple):
if len(padding) == 2:
xpad, ypad = padding
zpad = 0
elif len(padding) == 3:
xpad, ypad, zpad = padding
else:
            raise ValueError('Padding must be supplied as a number applied '
'to all axes or a length two or three tuple '
'corresponding to the x-, y- and optionally z-axis')
else:
xpad, ypad, zpad = (padding,)*3
return (xpad, ypad, zpad)
def get_minimum_span(low, high, span):
"""
If lower and high values are equal ensures they are separated by
the defined span.
"""
if is_number(low) and low == high:
if isinstance(low, np.datetime64):
span = span * np.timedelta64(1, 's')
low, high = low-span, high+span
return low, high
def get_range(element, ranges, dimension):
"""
Computes the data, soft- and hard-range along a dimension given
an element and a dictionary of ranges.
"""
if dimension and dimension != 'categorical':
if ranges and dimension.name in ranges:
drange = ranges[dimension.name]['data']
srange = ranges[dimension.name]['soft']
hrange = ranges[dimension.name]['hard']
else:
drange = element.range(dimension, dimension_range=False)
srange = dimension.soft_range
hrange = dimension.range
else:
drange = srange = hrange = (np.NaN, np.NaN)
return drange, srange, hrange
def get_sideplot_ranges(plot, element, main, ranges):
"""
Utility to find the range for an adjoined
plot given the plot, the element, the
Element the plot is adjoined to and the
dictionary of ranges.
"""
key = plot.current_key
dims = element.dimensions()
dim = dims[0] if 'frequency' in dims[1].name else dims[1]
range_item = main
if isinstance(main, HoloMap):
if issubclass(main.type, CompositeOverlay):
range_item = [hm for hm in main._split_overlays()[1]
if dim in hm.dimensions('all')][0]
else:
range_item = HoloMap({0: main}, kdims=['Frame'])
ranges = match_spec(range_item.last, ranges)
if dim.name in ranges:
main_range = ranges[dim.name]['combined']
else:
framewise = plot.lookup_options(range_item.last, 'norm').options.get('framewise')
if framewise and range_item.get(key, False):
main_range = range_item[key].range(dim)
else:
main_range = range_item.range(dim)
# If .main is an NdOverlay or a HoloMap of Overlays get the correct style
if isinstance(range_item, HoloMap):
range_item = range_item.last
if isinstance(range_item, CompositeOverlay):
range_item = [ov for ov in range_item
if dim in ov.dimensions('all')][0]
return range_item, main_range, dim
def within_range(range1, range2):
"""Checks whether range1 is within the range specified by range2."""
range1 = [r if isfinite(r) else None for r in range1]
range2 = [r if isfinite(r) else None for r in range2]
return ((range1[0] is None or range2[0] is None or range1[0] >= range2[0]) and
(range1[1] is None or range2[1] is None or range1[1] <= range2[1]))
def validate_unbounded_mode(holomaps, dynmaps):
composite = HoloMap(enumerate(holomaps), kdims=['testing_kdim'])
holomap_kdims = set(unique_iterator([kd.name for dm in holomaps for kd in dm.kdims]))
hmranges = {d: composite.range(d) for d in holomap_kdims}
if any(not set(d.name for d in dm.kdims) <= holomap_kdims
for dm in dynmaps):
        raise Exception('DynamicMaps that are unbounded must have key dimensions that are a '
'subset of dimensions of the HoloMap(s) defining the keys.')
elif not all(within_range(hmrange, dm.range(d)) for dm in dynmaps
for d, hmrange in hmranges.items() if d in dm.kdims):
raise Exception('HoloMap(s) have keys outside the ranges specified on '
'the DynamicMap(s).')
def get_dynamic_mode(composite):
"Returns the common mode of the dynamic maps in given composite object"
dynmaps = composite.traverse(lambda x: x, [DynamicMap])
holomaps = composite.traverse(lambda x: x, ['HoloMap'])
dynamic_unbounded = any(m.unbounded for m in dynmaps)
if holomaps:
validate_unbounded_mode(holomaps, dynmaps)
elif dynamic_unbounded and not holomaps:
raise Exception("DynamicMaps in unbounded mode must be displayed alongside "
"a HoloMap to define the sampling.")
return dynmaps and not holomaps, dynamic_unbounded
def initialize_unbounded(obj, dimensions, key):
"""
Initializes any DynamicMaps in unbounded mode.
"""
select = dict(zip([d.name for d in dimensions], key))
try:
obj.select([DynamicMap], **select)
except KeyError:
pass
def dynamic_update(plot, subplot, key, overlay, items):
"""
Given a plot, subplot and dynamically generated (Nd)Overlay
find the closest matching Element for that plot.
"""
match_spec = get_overlay_spec(overlay,
wrap_tuple(key),
subplot.current_frame)
specs = [(i, get_overlay_spec(overlay, wrap_tuple(k), el))
for i, (k, el) in enumerate(items)]
closest = closest_match(match_spec, specs)
if closest is None:
return closest, None, False
matched = specs[closest][1]
return closest, matched, match_spec == matched
def map_colors(arr, crange, cmap, hex=True):
"""
Maps an array of values to RGB hex strings, given
a color range and colormap.
"""
if isinstance(crange, arraylike_types):
xsorted = np.argsort(crange)
ypos = np.searchsorted(crange, arr)
arr = xsorted[ypos]
else:
if isinstance(crange, tuple):
cmin, cmax = crange
else:
cmin, cmax = np.nanmin(arr), np.nanmax(arr)
arr = (arr - cmin) / (cmax-cmin)
arr = np.ma.array(arr, mask=np.logical_not(np.isfinite(arr)))
arr = cmap(arr)
if hex:
return rgb2hex(arr)
else:
return arr
def mplcmap_to_palette(cmap, ncolors=None, categorical=False):
"""
    Converts a matplotlib colormap to a palette of RGB hex strings.
"""
from matplotlib.colors import Colormap, ListedColormap
ncolors = ncolors or 256
if not isinstance(cmap, Colormap):
import matplotlib.cm as cm
# Alias bokeh Category cmaps with mpl tab cmaps
if cmap.startswith('Category'):
cmap = cmap.replace('Category', 'tab')
try:
cmap = cm.get_cmap(cmap)
except:
cmap = cm.get_cmap(cmap.lower())
if isinstance(cmap, ListedColormap):
if categorical:
palette = [rgb2hex(cmap.colors[i%cmap.N]) for i in range(ncolors)]
return palette
elif cmap.N > ncolors:
palette = [rgb2hex(c) for c in cmap(np.arange(cmap.N))]
if len(palette) != ncolors:
palette = [palette[int(v)] for v in np.linspace(0, len(palette)-1, ncolors)]
return palette
return [rgb2hex(c) for c in cmap(np.linspace(0, 1, ncolors))]
def bokeh_palette_to_palette(cmap, ncolors=None, categorical=False):
from bokeh import palettes
# Handle categorical colormaps to avoid interpolation
categories = ['accent', 'category', 'dark', 'colorblind', 'pastel',
'set1', 'set2', 'set3', 'paired']
cmap_categorical = any(cat in cmap.lower() for cat in categories)
reverse = False
if cmap.endswith('_r'):
cmap = cmap[:-2]
reverse = True
# Some colormaps are inverted compared to matplotlib
inverted = (not cmap_categorical and not cmap.capitalize() in palettes.mpl)
if inverted:
reverse=not reverse
ncolors = ncolors or 256
# Alias mpl tab cmaps with bokeh Category cmaps
if cmap.startswith('tab'):
cmap = cmap.replace('tab', 'Category')
# Process as bokeh palette
palette = getattr(palettes, cmap, getattr(palettes, cmap.capitalize(), None))
if palette is None:
raise ValueError("Supplied palette %s not found among bokeh palettes" % cmap)
elif isinstance(palette, dict) and (cmap in palette or cmap.capitalize() in palette):
# Some bokeh palettes are doubly nested
palette = palette.get(cmap, palette.get(cmap.capitalize()))
if isinstance(palette, dict):
palette = palette[max(palette)]
if not cmap_categorical:
if len(palette) < ncolors:
palette = polylinear_gradient(palette, ncolors)
elif callable(palette):
palette = palette(ncolors)
if reverse: palette = palette[::-1]
if len(palette) != ncolors:
if categorical and cmap_categorical:
palette = [palette[i%len(palette)] for i in range(ncolors)]
else:
lpad, rpad = -0.5, 0.49999999999
indexes = np.linspace(lpad, (len(palette)-1)+rpad, ncolors)
palette = [palette[int(np.round(v))] for v in indexes]
return palette
def linear_gradient(start_hex, finish_hex, n=10):
"""
    Interpolates the color gradient between two hex colors.
"""
s = hex2rgb(start_hex)
f = hex2rgb(finish_hex)
gradient = [s]
for t in range(1, n):
curr_vector = [int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
gradient.append(curr_vector)
return [rgb2hex([c/255. for c in rgb]) for rgb in gradient]
def polylinear_gradient(colors, n):
"""
Interpolates the color gradients between a list of hex colors.
"""
n_out = int(float(n) / (len(colors)-1))
gradient = linear_gradient(colors[0], colors[1], n_out)
if len(colors) == len(gradient):
return gradient
for col in range(1, len(colors) - 1):
next_colors = linear_gradient(colors[col], colors[col+1], n_out+1)
gradient += next_colors[1:] if len(next_colors) > 1 else next_colors
return gradient
cmap_info=[]
CMapInfo=namedtuple('CMapInfo',['name','provider','category','source','bg'])
providers = ['matplotlib', 'bokeh', 'colorcet']
def _list_cmaps(provider=None, records=False):
"""
List available colormaps by combining matplotlib, bokeh, and
colorcet colormaps or palettes if available. May also be
narrowed down to a particular provider or list of providers.
"""
if provider is None:
provider = providers
elif isinstance(provider, basestring):
if provider not in providers:
raise ValueError('Colormap provider %r not recognized, must '
'be one of %r' % (provider, providers))
provider = [provider]
cmaps = []
def info(provider,names):
return [CMapInfo(name=n,provider=provider,category=None,source=None,bg=None) for n in names] \
if records else list(names)
if 'matplotlib' in provider:
try:
import matplotlib.cm as cm
cmaps += info('matplotlib',
[cmap for cmap in cm.cmap_d if not
(cmap.startswith('cet_') or # duplicates list below
cmap.startswith('Vega') or # deprecated in matplotlib=2.1
cmap.startswith('spectral') )]) # deprecated in matplotlib=2.1
except:
pass
if 'bokeh' in provider:
try:
from bokeh import palettes
cmaps += info('bokeh', palettes.all_palettes)
cmaps += info('bokeh', [p+'_r' for p in palettes.all_palettes])
except:
pass
if 'colorcet' in provider:
try:
from colorcet import palette_n, glasbey_hv
cet_maps = palette_n.copy()
cet_maps['glasbey_hv'] = glasbey_hv # Add special hv-specific map
cmaps += info('colorcet', cet_maps)
cmaps += info('colorcet', [p+'_r' for p in cet_maps])
except:
pass
return sorted(unique_iterator(cmaps))
def register_cmaps(category, provider, source, bg, names):
"""
Maintain descriptions of colormaps that include the following information:
name - string name for the colormap
category - intended use or purpose, mostly following matplotlib
provider - package providing the colormap directly
source - original source or creator of the colormaps
bg - base/background color expected for the map
('light','dark','medium','any' (unknown or N/A))
"""
for name in names:
bisect.insort(cmap_info, CMapInfo(name=name, provider=provider,
category=category, source=source,
bg=bg))
def list_cmaps(provider=None, records=False, name=None, category=None, source=None,
bg=None, reverse=None):
"""
Return colormap names matching the specified filters.
"""
# Only uses names actually imported and currently available
available = _list_cmaps(provider=provider, records=True)
matches = set()
for avail in available:
aname=avail.name
matched=False
basename=aname[:-2] if aname.endswith('_r') else aname
if (reverse is None or
(reverse==True and aname.endswith('_r')) or
(reverse==False and not aname.endswith('_r'))):
for r in cmap_info:
if (r.name==basename):
matched=True
# cmap_info stores only non-reversed info, so construct
# suitable values for reversed version if appropriate
r=r._replace(name=aname)
if aname.endswith('_r') and (r.category != 'Diverging'):
if r.bg=='light':
r=r._replace(bg='dark')
elif r.bg=='dark':
r=r._replace(bg='light')
if (( name is None or name in r.name) and
(provider is None or provider in r.provider) and
(category is None or category in r.category) and
( source is None or source in r.source) and
( bg is None or bg in r.bg)):
matches.add(r)
if not matched and (category is None or category=='Miscellaneous'):
# Return colormaps that exist but are not found in cmap_info
# under the 'Miscellaneous' category, with no source or bg
r = CMapInfo(aname,provider=avail.provider,category='Miscellaneous',source=None,bg=None)
matches.add(r)
# Return results sorted by category if category information is provided
if records:
return list(unique_iterator(python2sort(matches,
key=lambda r: (r.category.split(" ")[-1],r.bg,r.name.lower(),r.provider,r.source))))
else:
return list(unique_iterator(sorted([rec.name for rec in matches], key=lambda n:n.lower())))
register_cmaps('Uniform Sequential', 'matplotlib', 'bids', 'dark',
['viridis', 'plasma', 'inferno', 'magma', 'cividis'])
register_cmaps('Mono Sequential', 'matplotlib', 'colorbrewer', 'light',
['Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'light',
['gist_yarg', 'binary'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'dark',
['afmhot', 'gray', 'bone', 'gist_gray', 'gist_heat',
'hot', 'pink'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'any',
['copper', 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia'])
register_cmaps('Diverging', 'matplotlib', 'colorbrewer', 'light',
['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy',
'RdYlBu', 'RdYlGn', 'Spectral'])
register_cmaps('Diverging', 'matplotlib', 'misc', 'light',
['coolwarm', 'bwr', 'seismic'])
register_cmaps('Categorical', 'matplotlib', 'colorbrewer', 'any',
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
'Set1', 'Set2', 'Set3'])
register_cmaps('Categorical', 'matplotlib', 'd3', 'any',
['tab10', 'tab20', 'tab20b', 'tab20c'])
register_cmaps('Rainbow', 'matplotlib', 'misc', 'dark',
['nipy_spectral', 'gist_ncar'])
register_cmaps('Rainbow', 'matplotlib', 'misc', 'any',
['brg', 'hsv', 'gist_rainbow', 'rainbow', 'jet'])
register_cmaps('Miscellaneous', 'matplotlib', 'misc', 'dark',
['CMRmap', 'cubehelix', 'gist_earth', 'gist_stern',
'gnuplot', 'gnuplot2', 'ocean', 'terrain'])
register_cmaps('Miscellaneous', 'matplotlib', 'misc', 'any',
['flag', 'prism'])
register_cmaps('Uniform Sequential', 'colorcet', 'cet', 'dark',
['bgyw', 'bgy', 'kbc', 'bmw', 'bmy', 'kgy', 'gray',
'dimgray', 'fire'])
register_cmaps('Uniform Sequential', 'colorcet', 'cet', 'any',
['blues', 'kr', 'kg', 'kb'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'light',
['coolwarm', 'gwv', 'bwy', 'cwr'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'dark',
['bkr', 'bky'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'medium',
['bjy'])
register_cmaps('Uniform Rainbow', 'colorcet', 'cet', 'any',
['rainbow', 'colorwheel','isolum'])
register_cmaps('Uniform Sequential', 'bokeh', 'bids', 'dark',
['Viridis', 'Plasma', 'Inferno', 'Magma'])
register_cmaps('Mono Sequential', 'bokeh', 'colorbrewer', 'light',
['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys',
'OrRd', 'Oranges', 'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'])
register_cmaps('Diverging', 'bokeh', 'colorbrewer', 'light',
['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy',
'RdYlBu', 'RdYlGn', 'Spectral'])
register_cmaps('Categorical', 'bokeh', 'd3', 'any',
['Category10', 'Category20', 'Category20b', 'Category20c'])
register_cmaps('Categorical', 'bokeh', 'colorbrewer', 'any',
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
'Set1', 'Set2', 'Set3'])
register_cmaps('Categorical', 'bokeh', 'misc', 'any',
['Colorblind'])
register_cmaps('Uniform Categorical', 'colorcet', 'cet', 'any',
['glasbey', 'glasbey_cool', 'glasbey_warm', 'glasbey_hv'])
register_cmaps('Uniform Categorical', 'colorcet', 'cet', 'dark',
['glasbey_light'])
register_cmaps('Uniform Categorical', 'colorcet', 'cet', 'light',
['glasbey_dark'])
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
"""
Convert valid colormap specifications to a list of colors.
"""
providers_checked="matplotlib, bokeh, or colorcet" if provider is None else provider
if isinstance(cmap, Cycle):
palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
elif isinstance(cmap, list):
palette = cmap
elif isinstance(cmap, basestring):
mpl_cmaps = _list_cmaps('matplotlib')
bk_cmaps = _list_cmaps('bokeh')
cet_cmaps = _list_cmaps('colorcet')
if provider=='matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
palette = mplcmap_to_palette(cmap, ncolors, categorical)
elif provider=='bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
elif provider=='colorcet' or (provider is None and cmap in cet_cmaps):
from colorcet import palette
if cmap.endswith('_r'):
palette = list(reversed(palette[cmap[:-2]]))
else:
palette = palette[cmap]
else:
raise ValueError("Supplied cmap %s not found among %s colormaps." %
(cmap,providers_checked))
else:
try:
# Try processing as matplotlib colormap
palette = mplcmap_to_palette(cmap, ncolors)
except:
palette = None
if not isinstance(palette, list):
raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette."
% (cmap,providers_checked))
if ncolors and len(palette) != ncolors:
return [palette[i%len(palette)] for i in range(ncolors)]
return palette
def color_intervals(colors, levels, clip=None, N=255):
"""
Maps the supplied colors into bins defined by the supplied levels.
If a clip tuple is defined the bins are clipped to the defined
range otherwise the range is computed from the levels and returned.
Arguments
---------
colors: list
List of colors (usually hex string or named colors)
levels: list or array_like
Levels specifying the bins to map the colors to
clip: tuple (optional)
Lower and upper limits of the color range
N: int
Number of discrete colors to map the range onto
Returns
-------
cmap: list
List of colors
clip: tuple
Lower and upper bounds of the color range
"""
if len(colors) != len(levels)-1:
raise ValueError('The number of colors in the colormap '
'must match the intervals defined in the '
'color_levels, expected %d colors found %d.'
                         % (len(levels)-1, len(colors)))
intervals = np.diff(levels)
cmin, cmax = min(levels), max(levels)
interval = cmax-cmin
cmap = []
for intv, c in zip(intervals, colors):
cmap += [c]*int(round(N*(intv/interval)))
if clip is not None:
clmin, clmax = clip
lidx = int(round(N*((clmin-cmin)/interval)))
uidx = int(round(N*((cmax-clmax)/interval)))
uidx = N-uidx
if lidx == uidx:
uidx = lidx+1
cmap = cmap[lidx:uidx]
if clmin == clmax:
idx = np.argmin(np.abs(np.array(levels)-clmin))
clip = levels[idx: idx+2] if len(levels) > idx+2 else levels[idx-1: idx+1]
return cmap, clip
def dim_axis_label(dimensions, separator=', '):
"""
Returns an axis label for one or more dimensions.
"""
if not isinstance(dimensions, list): dimensions = [dimensions]
return separator.join([d.pprint_label for d in dimensions])
def attach_streams(plot, obj, precedence=1.1):
"""
Attaches plot refresh to all streams on the object.
"""
def append_refresh(dmap):
for stream in get_nested_streams(dmap):
if plot.refresh not in stream._subscribers:
stream.add_subscriber(plot.refresh, precedence)
return obj.traverse(append_refresh, [DynamicMap])
def traverse_setter(obj, attribute, value):
"""
Traverses the object and sets the supplied attribute on the
object. Supports Dimensioned and DimensionedPlot types.
"""
obj.traverse(lambda x: setattr(x, attribute, value))
def _get_min_distance_numpy(element):
"""
NumPy based implementation of get_min_distance
"""
xys = element.array([0, 1])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered in')
xys = xys.astype('float32').view(np.complex64)
distances = np.abs(xys.T-xys)
np.fill_diagonal(distances, np.inf)
distances = distances[distances>0]
if len(distances):
return distances.min()
return 0
def get_min_distance(element):
"""
Gets the minimum sampling distance of the x- and y-coordinates
in a grid.
"""
try:
from scipy.spatial.distance import pdist
return pdist(element.array([0, 1])).min()
except:
return _get_min_distance_numpy(element)
def get_directed_graph_paths(element, arrow_length):
"""
Computes paths for a directed path which include an arrow to
indicate the directionality of each edge.
"""
edgepaths = element._split_edgepaths
edges = edgepaths.split(datatype='array', dimensions=edgepaths.kdims)
arrows = []
for e in edges:
sx, sy = e[0]
ex, ey = e[1]
rad = np.arctan2(ey-sy, ex-sx)
xa0 = ex - np.cos(rad+np.pi/8)*arrow_length
ya0 = ey - np.sin(rad+np.pi/8)*arrow_length
xa1 = ex - np.cos(rad-np.pi/8)*arrow_length
ya1 = ey - np.sin(rad-np.pi/8)*arrow_length
arrow = np.array([(sx, sy), (ex, ey), (np.nan, np.nan),
(xa0, ya0), (ex, ey), (xa1, ya1)])
arrows.append(arrow)
return arrows
def rgb2hex(rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1]
return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb))
def dim_range_key(eldim):
"""
Returns the key to look up a dimension range.
"""
if isinstance(eldim, dim):
dim_name = repr(eldim)
if dim_name.startswith("'") and dim_name.endswith("'"):
dim_name = dim_name[1:-1]
else:
dim_name = eldim.name
return dim_name
def hex2rgb(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
RGB_HEX_REGEX = re.compile(r'^#(?:[0-9a-fA-F]{3}){1,2}$')
COLOR_ALIASES = {
'b': (0, 0, 1),
'c': (0, 0.75, 0.75),
'g': (0, 0.5, 0),
'k': (0, 0, 0),
'm': (0.75, 0, 0.75),
'r': (1, 0, 0),
'w': (1, 1, 1),
'y': (0.75, 0.75, 0),
'transparent': (0, 0, 0, 0)
}
# linear_kryw_0_100_c71 (aka "fire"):
# A perceptually uniform equivalent of matplotlib's "hot" colormap, from
# http://peterkovesi.com/projects/colourmaps
fire_colors = linear_kryw_0_100_c71 = [\
[0, 0, 0 ], [0.027065, 2.143e-05, 0 ],
[0.052054, 7.4728e-05, 0 ], [0.071511, 0.00013914, 0 ],
[0.08742, 0.0002088, 0 ], [0.10109, 0.00028141, 0 ],
[0.11337, 0.000356, 2.4266e-17], [0.12439, 0.00043134, 3.3615e-17],
[0.13463, 0.00050796, 2.1604e-17], [0.14411, 0.0005856, 0 ],
[0.15292, 0.00070304, 0 ], [0.16073, 0.0013432, 0 ],
[0.16871, 0.0014516, 0 ], [0.17657, 0.0012408, 0 ],
[0.18364, 0.0015336, 0 ], [0.19052, 0.0017515, 0 ],
[0.19751, 0.0015146, 0 ], [0.20401, 0.0015249, 0 ],
[0.20994, 0.0019639, 0 ], [0.21605, 0.002031, 0 ],
[0.22215, 0.0017559, 0 ], [0.22808, 0.001546, 1.8755e-05],
[0.23378, 0.0016315, 3.5012e-05], [0.23955, 0.0017194, 3.3352e-05],
[0.24531, 0.0018097, 1.8559e-05], [0.25113, 0.0019038, 1.9139e-05],
[0.25694, 0.0020015, 3.5308e-05], [0.26278, 0.0021017, 3.2613e-05],
[0.26864, 0.0022048, 2.0338e-05], [0.27451, 0.0023119, 2.2453e-05],
[0.28041, 0.0024227, 3.6003e-05], [0.28633, 0.0025363, 2.9817e-05],
[0.29229, 0.0026532, 1.9559e-05], [0.29824, 0.0027747, 2.7666e-05],
[0.30423, 0.0028999, 3.5752e-05], [0.31026, 0.0030279, 2.3231e-05],
[0.31628, 0.0031599, 1.2902e-05], [0.32232, 0.0032974, 3.2915e-05],
[0.32838, 0.0034379, 3.2803e-05], [0.33447, 0.0035819, 2.0757e-05],
[0.34057, 0.003731, 2.3831e-05], [0.34668, 0.0038848, 3.502e-05 ],
[0.35283, 0.0040418, 2.4468e-05], [0.35897, 0.0042032, 1.1444e-05],
[0.36515, 0.0043708, 3.2793e-05], [0.37134, 0.0045418, 3.012e-05 ],
[0.37756, 0.0047169, 1.4846e-05], [0.38379, 0.0048986, 2.796e-05 ],
[0.39003, 0.0050848, 3.2782e-05], [0.3963, 0.0052751, 1.9244e-05],
[0.40258, 0.0054715, 2.2667e-05], [0.40888, 0.0056736, 3.3223e-05],
[0.41519, 0.0058798, 2.159e-05 ], [0.42152, 0.0060922, 1.8214e-05],
[0.42788, 0.0063116, 3.2525e-05], [0.43424, 0.0065353, 2.2247e-05],
[0.44062, 0.006765, 1.5852e-05], [0.44702, 0.0070024, 3.1769e-05],
[0.45344, 0.0072442, 2.1245e-05], [0.45987, 0.0074929, 1.5726e-05],
[0.46631, 0.0077499, 3.0976e-05], [0.47277, 0.0080108, 1.8722e-05],
[0.47926, 0.0082789, 1.9285e-05], [0.48574, 0.0085553, 3.0063e-05],
[0.49225, 0.0088392, 1.4313e-05], [0.49878, 0.0091356, 2.3404e-05],
[0.50531, 0.0094374, 2.8099e-05], [0.51187, 0.0097365, 6.4695e-06],
[0.51844, 0.010039, 2.5791e-05], [0.52501, 0.010354, 2.4393e-05],
[0.53162, 0.010689, 1.6037e-05], [0.53825, 0.011031, 2.7295e-05],
[0.54489, 0.011393, 1.5848e-05], [0.55154, 0.011789, 2.3111e-05],
[0.55818, 0.012159, 2.5416e-05], [0.56485, 0.012508, 1.5064e-05],
[0.57154, 0.012881, 2.541e-05 ], [0.57823, 0.013283, 1.6166e-05],
[0.58494, 0.013701, 2.263e-05 ], [0.59166, 0.014122, 2.3316e-05],
[0.59839, 0.014551, 1.9432e-05], [0.60514, 0.014994, 2.4323e-05],
[0.6119, 0.01545, 1.3929e-05], [0.61868, 0.01592, 2.1615e-05],
[0.62546, 0.016401, 1.5846e-05], [0.63226, 0.016897, 2.0838e-05],
[0.63907, 0.017407, 1.9549e-05], [0.64589, 0.017931, 2.0961e-05],
[0.65273, 0.018471, 2.0737e-05], [0.65958, 0.019026, 2.0621e-05],
[0.66644, 0.019598, 2.0675e-05], [0.67332, 0.020187, 2.0301e-05],
[0.68019, 0.020793, 2.0029e-05], [0.68709, 0.021418, 2.0088e-05],
[0.69399, 0.022062, 1.9102e-05], [0.70092, 0.022727, 1.9662e-05],
[0.70784, 0.023412, 1.7757e-05], [0.71478, 0.024121, 1.8236e-05],
[0.72173, 0.024852, 1.4944e-05], [0.7287, 0.025608, 2.0245e-06],
[0.73567, 0.02639, 1.5013e-07], [0.74266, 0.027199, 0 ],
[0.74964, 0.028038, 0 ], [0.75665, 0.028906, 0 ],
[0.76365, 0.029806, 0 ], [0.77068, 0.030743, 0 ],
[0.77771, 0.031711, 0 ], [0.78474, 0.032732, 0 ],
[0.79179, 0.033741, 0 ], [0.79886, 0.034936, 0 ],
[0.80593, 0.036031, 0 ], [0.81299, 0.03723, 0 ],
[0.82007, 0.038493, 0 ], [0.82715, 0.039819, 0 ],
[0.83423, 0.041236, 0 ], [0.84131, 0.042647, 0 ],
[0.84838, 0.044235, 0 ], [0.85545, 0.045857, 0 ],
[0.86252, 0.047645, 0 ], [0.86958, 0.049578, 0 ],
[0.87661, 0.051541, 0 ], [0.88365, 0.053735, 0 ],
[0.89064, 0.056168, 0 ], [0.89761, 0.058852, 0 ],
[0.90451, 0.061777, 0 ], [0.91131, 0.065281, 0 ],
[0.91796, 0.069448, 0 ], [0.92445, 0.074684, 0 ],
[0.93061, 0.08131, 0 ], [0.93648, 0.088878, 0 ],
[0.94205, 0.097336, 0 ], [0.9473, 0.10665, 0 ],
[0.9522, 0.1166, 0 ], [0.95674, 0.12716, 0 ],
[0.96094, 0.13824, 0 ], [0.96479, 0.14963, 0 ],
[0.96829, 0.16128, 0 ], [0.97147, 0.17303, 0 ],
[0.97436, 0.18489, 0 ], [0.97698, 0.19672, 0 ],
[0.97934, 0.20846, 0 ], [0.98148, 0.22013, 0 ],
[0.9834, 0.23167, 0 ], [0.98515, 0.24301, 0 ],
[0.98672, 0.25425, 0 ], [0.98815, 0.26525, 0 ],
[0.98944, 0.27614, 0 ], [0.99061, 0.28679, 0 ],
[0.99167, 0.29731, 0 ], [0.99263, 0.30764, 0 ],
[0.9935, 0.31781, 0 ], [0.99428, 0.3278, 0 ],
[0.995, 0.33764, 0 ], [0.99564, 0.34735, 0 ],
[0.99623, 0.35689, 0 ], [0.99675, 0.3663, 0 ],
[0.99722, 0.37556, 0 ], [0.99765, 0.38471, 0 ],
[0.99803, 0.39374, 0 ], [0.99836, 0.40265, 0 ],
[0.99866, 0.41145, 0 ], [0.99892, 0.42015, 0 ],
[0.99915, 0.42874, 0 ], [0.99935, 0.43724, 0 ],
[0.99952, 0.44563, 0 ], [0.99966, 0.45395, 0 ],
[0.99977, 0.46217, 0 ], [0.99986, 0.47032, 0 ],
[0.99993, 0.47838, 0 ], [0.99997, 0.48638, 0 ],
[1, 0.4943, 0 ], [1, 0.50214, 0 ],
[1, 0.50991, 1.2756e-05], [1, 0.51761, 4.5388e-05],
[1, 0.52523, 9.6977e-05], [1, 0.5328, 0.00016858],
[1, 0.54028, 0.0002582 ], [1, 0.54771, 0.00036528],
[1, 0.55508, 0.00049276], [1, 0.5624, 0.00063955],
[1, 0.56965, 0.00080443], [1, 0.57687, 0.00098902],
[1, 0.58402, 0.0011943 ], [1, 0.59113, 0.0014189 ],
[1, 0.59819, 0.0016626 ], [1, 0.60521, 0.0019281 ],
[1, 0.61219, 0.0022145 ], [1, 0.61914, 0.0025213 ],
[1, 0.62603, 0.0028496 ], [1, 0.6329, 0.0032006 ],
[1, 0.63972, 0.0035741 ], [1, 0.64651, 0.0039701 ],
[1, 0.65327, 0.0043898 ], [1, 0.66, 0.0048341 ],
[1, 0.66669, 0.005303 ], [1, 0.67336, 0.0057969 ],
[1, 0.67999, 0.006317 ], [1, 0.68661, 0.0068648 ],
[1, 0.69319, 0.0074406 ], [1, 0.69974, 0.0080433 ],
[1, 0.70628, 0.0086756 ], [1, 0.71278, 0.0093486 ],
[1, 0.71927, 0.010023 ], [1, 0.72573, 0.010724 ],
[1, 0.73217, 0.011565 ], [1, 0.73859, 0.012339 ],
[1, 0.74499, 0.01316 ], [1, 0.75137, 0.014042 ],
[1, 0.75772, 0.014955 ], [1, 0.76406, 0.015913 ],
[1, 0.77039, 0.016915 ], [1, 0.77669, 0.017964 ],
[1, 0.78298, 0.019062 ], [1, 0.78925, 0.020212 ],
[1, 0.7955, 0.021417 ], [1, 0.80174, 0.02268 ],
[1, 0.80797, 0.024005 ], [1, 0.81418, 0.025396 ],
[1, 0.82038, 0.026858 ], [1, 0.82656, 0.028394 ],
[1, 0.83273, 0.030013 ], [1, 0.83889, 0.031717 ],
[1, 0.84503, 0.03348 ], [1, 0.85116, 0.035488 ],
[1, 0.85728, 0.037452 ], [1, 0.8634, 0.039592 ],
[1, 0.86949, 0.041898 ], [1, 0.87557, 0.044392 ],
[1, 0.88165, 0.046958 ], [1, 0.88771, 0.04977 ],
[1, 0.89376, 0.052828 ], [1, 0.8998, 0.056209 ],
[1, 0.90584, 0.059919 ], [1, 0.91185, 0.063925 ],
[1, 0.91783, 0.068579 ], [1, 0.92384, 0.073948 ],
[1, 0.92981, 0.080899 ], [1, 0.93576, 0.090648 ],
[1, 0.94166, 0.10377 ], [1, 0.94752, 0.12051 ],
[1, 0.9533, 0.14149 ], [1, 0.959, 0.1672 ],
[1, 0.96456, 0.19823 ], [1, 0.96995, 0.23514 ],
[1, 0.9751, 0.2786 ], [1, 0.97992, 0.32883 ],
[1, 0.98432, 0.38571 ], [1, 0.9882, 0.44866 ],
[1, 0.9915, 0.51653 ], [1, 0.99417, 0.58754 ],
[1, 0.99625, 0.65985 ], [1, 0.99778, 0.73194 ],
[1, 0.99885, 0.80259 ], [1, 0.99953, 0.87115 ],
[1, 0.99989, 0.93683 ], [1, 1, 1 ]]
# Bokeh palette
fire = [str('#{0:02x}{1:02x}{2:02x}'.format(int(r*255),int(g*255),int(b*255)))
for r,g,b in fire_colors]
# Matplotlib colormap
try:
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.cm import register_cmap
fire_cmap = LinearSegmentedColormap.from_list("fire", fire_colors, N=len(fire_colors))
fire_r_cmap = LinearSegmentedColormap.from_list("fire_r", list(reversed(fire_colors)), N=len(fire_colors))
register_cmap("fire", cmap=fire_cmap)
register_cmap("fire_r", cmap=fire_r_cmap)
except ImportError:
pass
 | 1 | 23,030 | I count only four times where `selection_specs` had to be specified as a keyword instead of by position! If that is how often that positional argument was used in our own codebase, I am pretty certain users barely used it (if at all). | holoviz-holoviews | py
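The review comment above reasons that the positional form of `selection_specs` was barely used, which is what makes a keyword-only transition safe. As a purely illustrative aside — this is not the holoviews implementation, and the function name, signature and warning text below are hypothetical — a soft transition in Python could keep accepting the old positional call while warning about it:

import warnings

def select(obj, *args, selection_specs=None, **selection):
    # Hypothetical sketch of a soft keyword-only transition: the first extra
    # positional argument is still treated as selection_specs, but callers are
    # warned so the argument can become keyword-only in a later release.
    if args:
        if selection_specs is not None:
            raise TypeError("selection_specs passed both positionally and by keyword")
        if len(args) > 1:
            raise TypeError("select() takes at most one positional selection spec")
        selection_specs = args[0]
        warnings.warn(
            "Passing selection_specs positionally is deprecated; "
            "use select(..., selection_specs=[...]) instead.",
            FutureWarning, stacklevel=2)
    # A real implementation would filter `obj` here; returning the parsed
    # arguments keeps this sketch self-contained and runnable.
    return selection_specs, selection

Under this sketch, select(obj, [SomeType], x=1) still works but emits a warning, while select(obj, selection_specs=[SomeType], x=1) stays silent — consistent with the low positional usage the comment estimates.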
@@ -24,5 +24,10 @@ describe HashDiffDecorator do
output = HashDiffDecorator.html_for(['~', 'foo', '', 'bar'])
expect(output).to eq("<code>foo</code> was changed from <code>[empty]</code> to <code>"bar"</code>")
end
+
+ it "renders numeric events" do
+ output = HashDiffDecorator.html_for(['~', 'a_number', 456, 123])
+ expect(output).to eq("<code>a_number</code> was changed from <code>456.00</code> to <code>123.00</code>")
+ end
end
end | 1 | describe HashDiffDecorator do
describe '.html_for' do
it "renders add events" do
output = HashDiffDecorator.html_for(['+', 'foo', 'bar'])
expect(output).to eq("<code>foo</code> was set to <code>"bar"</code>.")
end
it "renders modification events" do
output = HashDiffDecorator.html_for(['~', 'foo', 'bar', 'baz'])
expect(output).to eq("<code>foo</code> was changed from <code>"bar"</code> to <code>"baz"</code>")
end
it "renders removal events" do
output = HashDiffDecorator.html_for(['-', 'foo'])
expect(output).to eq("<code>foo</code> was removed.")
end
it "renders original-was-nil events" do
output = HashDiffDecorator.html_for(['~', 'foo', nil, 'bar'])
expect(output).to eq("<code>foo</code> was changed from <code>[nil]</code> to <code>"bar"</code>")
end
it "renders original-was-empty-string events" do
output = HashDiffDecorator.html_for(['~', 'foo', '', 'bar'])
expect(output).to eq("<code>foo</code> was changed from <code>[empty]</code> to <code>"bar"</code>")
end
end
end
| 1 | 15,309 | is this test for the case above? seems to cover a numeric rather than empty val? | 18F-C2 | rb |
@@ -245,6 +245,9 @@ class FlowHandler(RequestHandler):
request.port = int(v)
elif k == "headers":
request.headers.set_state(v)
+ elif k == "content":
+ print(v)
+ response.content = str(v)
else:
print("Warning: Unknown update {}.{}: {}".format(a, k, v))
| 1 | from __future__ import absolute_import, print_function, division
import base64
import json
import logging
import os.path
import re
import six
import tornado.websocket
from io import BytesIO
from mitmproxy.flow import FlowWriter, FlowReader
from mitmproxy import filt
from mitmproxy import models
from netlib import version
def convert_flow_to_json_dict(flow):
# type: (models.Flow) -> dict
"""
Remove flow message content and cert to save transmission space.
Args:
flow: The original flow.
"""
f = {
"id": flow.id,
"intercepted": flow.intercepted,
"client_conn": flow.client_conn.get_state(),
"server_conn": flow.server_conn.get_state(),
"type": flow.type
}
if flow.error:
f["error"] = flow.error.get_state()
if isinstance(flow, models.HTTPFlow):
if flow.request:
f["request"] = {
"method": flow.request.method,
"scheme": flow.request.scheme,
"host": flow.request.host,
"port": flow.request.port,
"path": flow.request.path,
"http_version": flow.request.http_version,
"headers": tuple(flow.request.headers.items(True)),
"contentLength": len(flow.request.content) if flow.request.content is not None else None,
"timestamp_start": flow.request.timestamp_start,
"timestamp_end": flow.request.timestamp_end,
"is_replay": flow.request.is_replay,
}
if flow.response:
f["response"] = {
"http_version": flow.response.http_version,
"status_code": flow.response.status_code,
"reason": flow.response.reason,
"headers": tuple(flow.response.headers.items(True)),
"contentLength": len(flow.response.content) if flow.response.content is not None else None,
"timestamp_start": flow.response.timestamp_start,
"timestamp_end": flow.response.timestamp_end,
"is_replay": flow.response.is_replay,
}
f.get("server_conn", {}).pop("cert", None)
return f
class APIError(tornado.web.HTTPError):
pass
class BasicAuth(object):
def set_auth_headers(self):
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm=MITMWeb')
self._transforms = []
self.finish()
def prepare(self):
wauthenticator = self.application.settings['wauthenticator']
if wauthenticator:
auth_header = self.request.headers.get('Authorization')
if auth_header is None or not auth_header.startswith('Basic '):
self.set_auth_headers()
else:
self.auth_decoded = base64.decodestring(auth_header[6:])
self.username, self.password = self.auth_decoded.split(':', 2)
if not wauthenticator.test(self.username, self.password):
self.set_auth_headers()
raise APIError(401, "Invalid username or password.")
class RequestHandler(BasicAuth, tornado.web.RequestHandler):
def set_default_headers(self):
super(RequestHandler, self).set_default_headers()
self.set_header("Server", version.MITMPROXY)
self.set_header("X-Frame-Options", "DENY")
self.add_header("X-XSS-Protection", "1; mode=block")
self.add_header("X-Content-Type-Options", "nosniff")
self.add_header(
"Content-Security-Policy",
"default-src 'self'; "
"connect-src 'self' ws://* ; "
"style-src 'self' 'unsafe-inline'"
)
@property
def json(self):
if not self.request.headers.get("Content-Type").startswith("application/json"):
return None
return json.loads(self.request.body)
@property
def state(self):
return self.application.master.state
@property
def master(self):
return self.application.master
@property
def flow(self):
flow_id = str(self.path_kwargs["flow_id"])
flow = self.state.flows.get(flow_id)
if flow:
return flow
else:
raise APIError(400, "Flow not found.")
def write_error(self, status_code, **kwargs):
if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
self.finish(kwargs["exc_info"][1].log_message)
else:
super(RequestHandler, self).write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
def get(self):
token = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645
assert token
self.render("index.html")
class FiltHelp(RequestHandler):
def get(self):
self.write(dict(
commands=filt.help
))
class WebSocketEventBroadcaster(BasicAuth, tornado.websocket.WebSocketHandler):
# raise an error if inherited class doesn't specify its own instance.
connections = None
def open(self):
self.connections.add(self)
def on_close(self):
self.connections.remove(self)
@classmethod
def broadcast(cls, **kwargs):
message = json.dumps(kwargs, ensure_ascii=False)
for conn in cls.connections:
try:
conn.write_message(message)
except:
logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
connections = set()
class Flows(RequestHandler):
def get(self):
self.write(dict(
data=[convert_flow_to_json_dict(f) for f in self.state.flows]
))
class DumpFlows(RequestHandler):
def get(self):
self.set_header("Content-Disposition", "attachment; filename=flows")
self.set_header("Content-Type", "application/octet-stream")
bio = BytesIO()
fw = FlowWriter(bio)
for f in self.state.flows:
fw.add(f)
self.write(bio.getvalue())
bio.close()
def post(self):
self.state.clear()
content = self.request.files.values()[0][0]["body"]
bio = BytesIO(content)
self.state.load_flows(FlowReader(bio).stream())
bio.close()
class ClearAll(RequestHandler):
def post(self):
self.state.clear()
class AcceptFlows(RequestHandler):
def post(self):
self.state.flows.accept_all(self.master)
class AcceptFlow(RequestHandler):
def post(self, flow_id):
self.flow.accept_intercept(self.master)
class FlowHandler(RequestHandler):
def delete(self, flow_id):
self.flow.kill(self.master)
self.state.delete_flow(self.flow)
def put(self, flow_id):
flow = self.flow
flow.backup()
for a, b in six.iteritems(self.json):
if a == "request":
request = flow.request
for k, v in six.iteritems(b):
if k in ["method", "scheme", "host", "path", "http_version"]:
setattr(request, k, str(v))
elif k == "port":
request.port = int(v)
elif k == "headers":
request.headers.set_state(v)
else:
print("Warning: Unknown update {}.{}: {}".format(a, k, v))
elif a == "response":
response = flow.response
for k, v in six.iteritems(b):
if k == "msg":
response.msg = str(v)
elif k == "code":
response.status_code = int(v)
elif k == "http_version":
response.http_version = str(v)
elif k == "headers":
response.headers.set_state(v)
else:
print("Warning: Unknown update {}.{}: {}".format(a, k, v))
else:
print("Warning: Unknown update {}: {}".format(a, b))
self.state.update_flow(flow)
class DuplicateFlow(RequestHandler):
def post(self, flow_id):
self.master.duplicate_flow(self.flow)
class RevertFlow(RequestHandler):
def post(self, flow_id):
self.state.revert(self.flow)
class ReplayFlow(RequestHandler):
def post(self, flow_id):
self.flow.backup()
self.flow.response = None
self.state.update_flow(self.flow)
r = self.master.replay_request(self.flow)
if r:
raise APIError(400, r)
class FlowContent(RequestHandler):
def get(self, flow_id, message):
message = getattr(self.flow, message)
if not message.content:
raise APIError(400, "No content.")
content_encoding = message.headers.get("Content-Encoding", None)
if content_encoding:
content_encoding = re.sub(r"[^\w]", "", content_encoding)
self.set_header("Content-Encoding", content_encoding)
original_cd = message.headers.get("Content-Disposition", None)
filename = None
if original_cd:
filename = re.search("filename=([\w\" \.\-\(\)]+)", original_cd)
if filename:
filename = filename.group(1)
if not filename:
filename = self.flow.request.path.split("?")[0].split("/")[-1]
filename = re.sub(r"[^\w\" \.\-\(\)]", "", filename)
cd = "attachment; filename={}".format(filename)
self.set_header("Content-Disposition", cd)
self.set_header("Content-Type", "application/text")
self.set_header("X-Content-Type-Options", "nosniff")
self.set_header("X-Frame-Options", "DENY")
self.write(message.content)
class Events(RequestHandler):
def get(self):
self.write(dict(
data=list(self.state.events)
))
class Settings(RequestHandler):
def get(self):
self.write(dict(
data=dict(
version=version.VERSION,
mode=str(self.master.server.config.mode),
intercept=self.state.intercept_txt,
showhost=self.master.options.showhost,
no_upstream_cert=self.master.server.config.no_upstream_cert,
rawtcp=self.master.server.config.rawtcp,
http2=self.master.server.config.http2,
anticache=self.master.options.anticache,
anticomp=self.master.options.anticomp,
stickyauth=self.master.stickyauth_txt,
stickycookie=self.master.stickycookie_txt,
stream= self.master.stream_large_bodies.max_size if self.master.stream_large_bodies else False
)
))
def put(self):
update = {}
for k, v in six.iteritems(self.json):
if k == "intercept":
self.state.set_intercept(v)
update[k] = v
elif k == "showhost":
self.master.options.showhost = v
update[k] = v
elif k == "no_upstream_cert":
self.master.server.config.no_upstream_cert = v
update[k] = v
elif k == "rawtcp":
self.master.server.config.rawtcp = v
update[k] = v
elif k == "http2":
self.master.server.config.http2 = v
update[k] = v
elif k == "anticache":
self.master.options.anticache = v
update[k] = v
elif k == "anticomp":
self.master.options.anticomp = v
update[k] = v
elif k == "stickycookie":
self.master.set_stickycookie(v)
update[k] = v
elif k == "stickyauth":
self.master.set_stickyauth(v)
update[k] = v
elif k == "stream":
self.master.set_stream_large_bodies(v)
update[k] = v
else:
print("Warning: Unknown setting {}: {}".format(k, v))
ClientConnection.broadcast(
type="UPDATE_SETTINGS",
cmd="update",
data=update
)
class Application(tornado.web.Application):
    def __init__(self, master, debug, wauthenticator):
        self.master = master
        handlers = [
            (r"/", IndexHandler),
            (r"/filter-help", FiltHelp),
            (r"/updates", ClientConnection),
            (r"/events", Events),
            (r"/flows", Flows),
            (r"/flows/dump", DumpFlows),
            (r"/flows/accept", AcceptFlows),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/accept", AcceptFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content", FlowContent),
            (r"/settings", Settings),
            (r"/clear", ClearAll),
        ]
        settings = dict(
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            cookie_secret=os.urandom(256),
            debug=debug,
            wauthenticator=wauthenticator,
        )
        super(Application, self).__init__(handlers, **settings)
| 1 | 11,885 | I like the general idea, but this will break: - JSON is not binary-safe, so anything binary will break this. - JSON is super slow for multiple-MB things - We want to have drag-and-drop upload - the easiest way to implement this is FormData upload, so we should have a multipart/formdata endpoint. Can we put to /flow/content? | mitmproxy-mitmproxy | py |
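A rough sketch of the multipart/form-data upload that this review comment asks for, reusing the RequestHandler plumbing seen above (self.flow, self.state, APIError) plus flow.backup() from mitmproxy's flow model; the FlowContentUpload name, the POST method, and the field handling are illustrative assumptions, not mitmproxy's actual implementation:

```python
# Hypothetical sketch only, not mitmproxy's actual code.
class FlowContentUpload(FlowContent):
    def post(self, flow_id, message):
        # Tornado parses multipart/form-data bodies into self.request.files:
        # {field_name: [HTTPFile(filename=..., body=..., content_type=...)]}
        if not self.request.files:
            raise APIError(400, "No file uploaded.")
        uploaded = next(iter(self.request.files.values()))[0]
        self.flow.backup()
        # Assign raw bytes so binary request/response bodies survive unmodified
        # (the binary-safety concern with a JSON round-trip).
        getattr(self.flow, message).content = uploaded.body
        self.state.update_flow(self.flow)
```

Registering such a class in place of FlowContent on the existing /flows/.../content route would keep download (GET, inherited) and upload (POST) on one URL, and a browser FormData upload maps directly onto it.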
@@ -121,6 +121,7 @@ bool dr_preinjected = false;
static bool dynamo_exiting = false;
#endif
bool dynamo_exited = false;
+bool dynamo_exited_synched = false;
bool dynamo_exited_and_cleaned = false;
#ifdef DEBUG
bool dynamo_exited_log_and_stats = false; | 1 | /* **********************************************************
* Copyright (c) 2010-2017 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* dynamo.c -- initialization and cleanup routines for DynamoRIO
*/
#include "globals.h"
#include "configure_defines.h"
#include "link.h"
#include "fragment.h"
#include "fcache.h"
#include "emit.h"
#include "dispatch.h"
#include "utils.h"
#include "monitor.h"
#include "vmareas.h"
#ifdef SIDELINE
# include "sideline.h"
#endif
#ifdef PAPI
# include "perfctr.h"
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#include "hotpatch.h"
#include "moduledb.h"
#include "module_shared.h"
#include "synch.h"
#include "native_exec.h"
#include "jit_opt.h"
#ifdef ANNOTATIONS
# include "annotations.h"
#endif
#include <string.h>
#ifdef WINDOWS
/* for close handle, duplicate handle, free memory and constants associated with them */
/* also for nt_terminate_process_for_app() */
#include "ntdll.h"
#include "nudge.h" /* to get generic_nudge_target() address for an assert */
#endif
#ifdef RCT_IND_BRANCH
# include "rct.h"
#endif
#include "perscache.h"
#ifdef VMX86_SERVER
# include "vmkuw.h"
#endif
/* global thread-shared variables */
bool dynamo_initialized = false;
bool dynamo_heap_initialized = false;
bool automatic_startup = false;
bool control_all_threads = false;
#ifdef WINDOWS
bool dr_early_injected = false;
int dr_early_injected_location = INJECT_LOCATION_Invalid;
bool dr_earliest_injected = false;
static void * dr_earliest_inject_args;
/* should be set if we are controlling the primary thread, either by
* injecting initially (!dr_injected_secondary_thread), or by retaking
* over (dr_late_injected_primary_thread). Used only for debugging
* purposes, yet can't rely on !dr_injected_secondary_thread very
* early in the process
*/
bool dr_injected_primary_thread = false;
bool dr_injected_secondary_thread = false;
/* should be set once we retakeover the primary thread for -inject_primary */
bool dr_late_injected_primary_thread = false;
#endif /* WINDOWS */
/* flags to indicate when DR is being initialized / exited using the API */
bool dr_api_entry = false;
bool dr_api_exit = false;
#ifdef RETURN_AFTER_CALL
bool dr_preinjected = false;
#endif /* RETURN_AFTER_CALL */
#ifdef UNIX
static bool dynamo_exiting = false;
#endif
bool dynamo_exited = false;
bool dynamo_exited_and_cleaned = false;
#ifdef DEBUG
bool dynamo_exited_log_and_stats = false;
#endif
/* Only used in release build to decide whether synch is needed, justifying
* its placement in .nspdata. If we use it for more we should protect it.
*/
DECLARE_NEVERPROT_VAR(bool dynamo_all_threads_synched, false);
bool dynamo_resetting = false;
#if defined(CLIENT_INTERFACE) || defined(STANDALONE_UNIT_TEST)
bool standalone_library = false;
#endif
#ifdef UNIX
bool post_execve = false;
#endif
/* initial stack so we don't have to use app's */
byte * initstack;
#if defined(WINDOWS) && defined(STACK_GUARD_PAGE)
/* PR203701: separate stack for error reporting when the dstack is exhausted */
#define EXCEPTION_STACK_SIZE (2 * PAGE_SIZE)
DECLARE_NEVERPROT_VAR(byte *exception_stack, NULL);
#endif
/*******************************************************/
/* separate segment of Non-Self-Protected data to avoid data section
* protection issues -- we need to write to these vars in bootstrapping
* spots where we cannot unprotect first
*/
START_DATA_SECTION(NEVER_PROTECTED_SECTION, "w");
/* spinlock used in assembly trampolines when we can't spare registers for more */
mutex_t initstack_mutex VAR_IN_SECTION(NEVER_PROTECTED_SECTION)
= INIT_SPINLOCK_FREE(initstack_mutex);
byte * initstack_app_xsp VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0;
/* keeps track of how many threads are in cleanup_and_terminate */
int exiting_thread_count VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0;
/* This is unprotected to allow stats to be written while the data
* segment is still protected (right now the only ones are selfmod stats)
*/
static dr_statistics_t nonshared_stats VAR_IN_SECTION(NEVER_PROTECTED_SECTION)
= {{0},};
/* Each lock protects its corresponding datasec_start, datasec_end, and
* datasec_writable variables.
*/
static mutex_t datasec_lock[DATASEC_NUM] VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {{0}};
/* back to normal section */
END_DATA_SECTION()
/*******************************************************/
/* Like a recursive lock: 0==readonly, 1+=writable.
* This would be a simple array, but we need each in its own protected
* section, as this could be exploited.
*/
const uint datasec_writable_neverprot = 1; /* always writable */
uint datasec_writable_rareprot = 1;
DECLARE_FREQPROT_VAR(uint datasec_writable_freqprot, 1);
DECLARE_CXTSWPROT_VAR(uint datasec_writable_cxtswprot, 1);
static app_pc datasec_start[DATASEC_NUM];
static app_pc datasec_end[DATASEC_NUM];
const uint DATASEC_SELFPROT[] = {
0,
SELFPROT_DATA_RARE,
SELFPROT_DATA_FREQ,
SELFPROT_DATA_CXTSW,
};
const char * const DATASEC_NAMES[] = {
NEVER_PROTECTED_SECTION,
RARELY_PROTECTED_SECTION,
FREQ_PROTECTED_SECTION,
CXTSW_PROTECTED_SECTION,
};
/* kept in unprotected heap to avoid issues w/ data segment being RO */
typedef struct _protect_info_t {
/* FIXME: this needs to be a recursive lock to handle signals
* and exceptions!
*/
mutex_t lock;
int num_threads_unprot; /* # threads in DR code */
int num_threads_suspended;
} protect_info_t;
static protect_info_t *protect_info;
static void data_section_init(void);
static void data_section_exit(void);
#ifdef DEBUG /*************************/
#include <time.h>
/* FIXME: not all dynamo_options references are #ifdef DEBUG
* are we trying to hardcode the options for a release build?
*/
# ifdef UNIX
/* linux include files for mmap stuff*/
# include <sys/ipc.h>
# include <sys/types.h>
# include <unistd.h>
# endif
static uint starttime;
file_t main_logfile = INVALID_FILE;
#endif /* DEBUG ****************************/
dr_statistics_t *stats = NULL;
DECLARE_FREQPROT_VAR(static int num_known_threads, 0);
#ifdef UNIX
/* i#237/PR 498284: vfork threads that execve need to be separately delay-freed */
DECLARE_FREQPROT_VAR(int num_execve_threads, 0);
#endif
DECLARE_FREQPROT_VAR(static uint threads_ever_count, 0);
/* FIXME : not static so os.c can hand walk it for dump core */
/* FIXME: use new generic_table_t and generic_hash_* routines */
thread_record_t ** all_threads; /* ALL_THREADS_HASH_BITS-bit addressed hash table */
/* these locks are used often enough that we put them in .cspdata: */
/* not static so can be referenced in win32/os.c for SuspendThread handling,
* FIXME : is almost completely redundant in usage with thread_initexit_lock
* maybe replace this lock with thread_initexit_lock? */
DECLARE_CXTSWPROT_VAR(mutex_t all_threads_lock, INIT_LOCK_FREE(all_threads_lock));
/* used for synch to prevent thread creation/deletion in critical periods
* due to its use for flushing, this lock cannot be held while couldbelinking!
*/
DECLARE_CXTSWPROT_VAR(mutex_t thread_initexit_lock,
INIT_LOCK_FREE(thread_initexit_lock));
/* recursive to handle signals/exceptions while in DR code */
DECLARE_CXTSWPROT_VAR(static recursive_lock_t thread_in_DR_exclusion,
INIT_RECURSIVE_LOCK(thread_in_DR_exclusion));
/****************************************************************************/
#ifdef DEBUG
static const char *
main_logfile_name(void)
{
return get_app_name_for_path();
}
static const char *
thread_logfile_name(void)
{
return "log";
}
#endif /* DEBUG */
/****************************************************************************/
static void
statistics_pre_init(void)
{
/* until it's set up for real, point at static var
* really only logmask and loglevel are meaningful, so be careful!
* statistics_init and create_log_directory are the only routines that
* use stats before it's set up for real, currently
*/
/* The indirection here is left over from when we used to allow alternative
* locations for stats (namely shared memory for the old MIT gui). */
stats = &nonshared_stats;
stats->process_id = get_process_id();
strncpy(stats->process_name, get_application_name(), MAXIMUM_PATH);
stats->process_name[MAXIMUM_PATH-1] = '\0';
ASSERT(strlen(stats->process_name) > 0);
stats->num_stats = 0;
}
static void
statistics_init(void)
{
/* should have called statistics_pre_init() first */
ASSERT(stats == &nonshared_stats);
ASSERT(stats->num_stats == 0);
#ifndef DEBUG
if (!DYNAMO_OPTION(global_rstats)) {
/* references to stat values should return 0 (static var) */
return;
}
#endif
stats->num_stats = 0
#ifdef DEBUG
# define STATS_DEF(desc, name) +1
#else
# define RSTATS_DEF(desc, name) +1
#endif
# include "statsx.h"
#undef STATS_DEF
#undef RSTATS_DEF
;
/* We inline the stat description to make it easy for external processes
* to view our stats: they don't have to chase pointers, and we could put
* this in shared memory easily. However, we do waste some memory, but
* not much in release build.
*/
#ifdef DEBUG
# define STATS_DEF(desc, statname) \
strncpy(stats->statname##_pair.name, desc, \
BUFFER_SIZE_ELEMENTS(stats->statname##_pair.name)); \
NULL_TERMINATE_BUFFER(stats->statname##_pair.name);
#else
# define RSTATS_DEF(desc, statname) \
strncpy(stats->statname##_pair.name, desc, \
BUFFER_SIZE_ELEMENTS(stats->statname##_pair.name)); \
NULL_TERMINATE_BUFFER(stats->statname##_pair.name);
#endif
# include "statsx.h"
#undef STATS_DEF
#undef RSTATS_DEF
}
static void
statistics_exit(void)
{
stats = NULL;
}
dr_statistics_t *
get_dr_stats(void)
{
return stats;
}
/* initialize per-process dynamo state; this must be called before any
* threads are created and before any other API calls are made;
* returns zero on success, non-zero on failure
*/
DYNAMORIO_EXPORT int
dynamorio_app_init(void)
{
int size;
if (!dynamo_initialized /* we do enter if nullcalls is on */) {
#ifdef UNIX
os_page_size_init((const char **)our_environ);
#endif
#ifdef WINDOWS
/* MUST do this before making any system calls */
syscalls_init();
#endif
/* avoid time() for libc independence */
DODEBUG(starttime = query_time_seconds(););
#ifdef UNIX
if (getenv(DYNAMORIO_VAR_EXECVE) != NULL) {
post_execve = true;
# ifdef VMX86_SERVER
/* PR 458917: our gdt slot was not cleared on exec so we need to
* clear it now to ensure we don't leak it and eventually run out of
* slots. We could alternatively call os_tls_exit() prior to
* execve, since syscalls use thread-private fcache_enter, but
* complex to recover from execve failure, so instead we pass which
* TLS index we had.
*/
os_tls_pre_init(atoi(getenv(DYNAMORIO_VAR_EXECVE)));
# endif
/* important to remove it, don't want to propagate to forked children, etc. */
/* i#909: unsetenv is unsafe as it messes up auxv access, so we disable */
disable_env(DYNAMORIO_VAR_EXECVE);
/* check that it's gone: we've had problems with unsetenv */
ASSERT(getenv(DYNAMORIO_VAR_EXECVE) == NULL);
} else
post_execve = false;
#endif
/* default non-zero dynamo settings (options structure is
* initialized to 0 automatically)
*/
#ifdef DEBUG
# ifndef INTERNAL
nonshared_stats.logmask = LOG_ALL_RELEASE;
# else
nonshared_stats.logmask = LOG_ALL;
# endif
statistics_pre_init();
#endif
config_init();
options_init();
#ifdef WINDOWS
syscalls_init_options_read(); /* must be called after options_init
* but before init_syscall_trampolines */
#endif
utils_init();
data_section_init();
#ifdef DEBUG
/* decision: nullcalls WILL create a dynamorio.log file and
* fill it with perfctr stats!
*/
if (stats->loglevel > 0) {
main_logfile = open_log_file(main_logfile_name(), NULL, 0);
LOG(GLOBAL, LOG_TOP, 1, "global log file fd=%d\n", main_logfile);
} else {
/* loglevel 0 means we don't create a log file!
* if the loglevel is later raised, too bad! it all goes to stderr!
* N.B.: when checking for no logdir, we check for empty string or
* first char '<'!
*/
strncpy(stats->logdir, "<none (loglevel was 0 on startup)>", MAXIMUM_PATH-1);
stats->logdir[MAXIMUM_PATH-1] = '\0'; /* if max no null */
main_logfile = INVALID_FILE;
}
# ifdef PAPI
/* setup hardware performance counting */
hardware_perfctr_init();
# endif
DOLOG(1, LOG_TOP, {
print_version_and_app_info(GLOBAL);
});
/* now exit if nullcalls, now that perfctrs are set up */
if (INTERNAL_OPTION(nullcalls)) {
print_file(main_logfile,
"** nullcalls is set, NOT taking over execution **\n\n");
return SUCCESS;
}
LOG(GLOBAL, LOG_TOP, 1, PRODUCT_NAME"'s stack size: %d Kb\n",
DYNAMORIO_STACK_SIZE/1024);
#endif /* !DEBUG */
/* set up exported statistics struct */
#ifndef DEBUG
statistics_pre_init();
#endif
statistics_init();
#ifdef VMX86_SERVER
/* Must be before {vmm_,}heap_init() */
vmk_init_lib();
#endif
vmm_heap_init_constraints(); /* before client libs are loaded! */
#ifdef CLIENT_INTERFACE
/* PR 200207: load the client lib before callback_interception_init
* since the client library load would hit our own hooks (xref hotpatch
* cases about that) -- though -private_loader removes that issue. */
/* Must be before [vmm_]heap_init() so we can register the client lib as
* reachable from the dr heap. Xref PR 215395. */
instrument_load_client_libs();
#endif
/* initialize components (CAUTION: order is important here) */
vmm_heap_init(); /* must be called even if not using vmm heap */
heap_init();
dynamo_heap_initialized = true;
/* The process start event should be done after os_init() but before
* process_control_int() because the former initializes event logging
* and the latter can kill the process if a violation occurs.
*/
SYSLOG(SYSLOG_INFORMATION,
IF_CLIENT_INTERFACE_ELSE(INFO_PROCESS_START_CLIENT, INFO_PROCESS_START),
IF_CLIENT_INTERFACE_ELSE(2, 3),
get_application_name(), get_application_pid()
_IF_NOT_CLIENT_INTERFACE(get_application_md5()));
#ifdef PROCESS_CONTROL
if (IS_PROCESS_CONTROL_ON()) /* Case 8594. */
process_control_init();
#endif
#ifdef WINDOWS
/* Now that DR is set up, perform any final clean-up, before
* we do our address space scans.
*/
if (dr_earliest_injected)
earliest_inject_cleanup(dr_earliest_inject_args);
#endif
dynamo_vm_areas_init();
decode_init();
proc_init();
modules_init(); /* before vm_areas_init() */
os_init();
config_heap_init(); /* after heap_init */
/* Setup for handling faults in loader_init() */
/* initial stack so we don't have to use app's
* N.B.: we never de-allocate initstack (see comments in app_exit)
*/
initstack = (byte *) stack_alloc(DYNAMORIO_STACK_SIZE, NULL);
LOG(GLOBAL, LOG_SYNCH, 2, "initstack is "PFX"-"PFX"\n",
initstack - DYNAMORIO_STACK_SIZE, initstack);
#if defined(WINDOWS) && defined(STACK_GUARD_PAGE)
/* PR203701: separate stack for error reporting when the
* dstack is exhausted
*/
exception_stack = (byte *) stack_alloc(EXCEPTION_STACK_SIZE, NULL);
#endif
#ifdef WINDOWS
if (!INTERNAL_OPTION(noasynch)) {
/* We split the hooks up: first we put in just Ki* to catch
* exceptions in client init routines (PR 200207), but we don't want
* syscall hooks so client init can scan syscalls.
* Xref PR 216934 where this was originally down below 1st thread init,
* before we had GLOBAL_DCONTEXT.
*/
callback_interception_init_start();
}
#endif /* WINDOWS */
/* loader initialization, finalize the private lib load.
* FIXME i#338: this must be before arch_init() for Windows, but Linux
* wants it later.
*/
loader_init();
arch_init();
synch_init();
#ifdef KSTATS
kstat_init();
#endif
monitor_init();
fcache_init();
link_init();
fragment_init();
moduledb_init(); /* before vm_areas_init, after heap_init */
perscache_init(); /* before vm_areas_init */
native_exec_init(); /* before vm_areas_init, after arch_init */
if (!DYNAMO_OPTION(thin_client)) {
#ifdef HOT_PATCHING_INTERFACE
/* must init hotp before vm_areas_init() calls find_executable_vm_areas() */
if (DYNAMO_OPTION(hot_patching))
hotp_init();
#endif
}
#ifdef INTERNAL
{
char initial_options[MAX_OPTIONS_STRING];
get_dynamo_options_string(&dynamo_options,
initial_options, sizeof(initial_options), true);
SYSLOG_INTERNAL_INFO("Initial options = %s", initial_options);
DOLOG(1, LOG_TOP, {
get_pcache_dynamo_options_string(&dynamo_options, initial_options,
sizeof(initial_options),
OP_PCACHE_LOCAL);
LOG(GLOBAL, LOG_TOP, 1, "Initial pcache-affecting options = %s\n",
initial_options);
});
}
#endif /* INTERNAL */
LOG(GLOBAL, LOG_TOP, 1, "\n");
/* initialize thread hashtable */
/* Note: for thin_client, this isn't needed if it is only going to
* look for spawned processes; however, if we plan to promote from
* thin_client to hotp_only mode (highly likely), this would be needed.
* For now, leave it in there unless thin_client footprint becomes an
* issue.
*/
size = HASHTABLE_SIZE(ALL_THREADS_HASH_BITS) * sizeof(thread_record_t*);
all_threads = (thread_record_t**) global_heap_alloc(size HEAPACCT(ACCT_THREAD_MGT));
memset(all_threads, 0, size);
if (!INTERNAL_OPTION(nop_initial_bblock)
IF_WINDOWS(|| !check_sole_thread())) /* some other thread is already here! */
bb_lock_start = true;
#ifdef SIDELINE
/* initialize sideline thread after thread table is set up */
if (dynamo_options.sideline)
sideline_init();
#endif
/* thread-specific initialization for the first thread we inject in
* (in a race with injected threads, sometimes it is not the primary thread)
*/
/* i#117/PR 395156: it'd be nice to have mc here but would
* require changing start/stop API
*/
dynamo_thread_init(NULL, NULL _IF_CLIENT_INTERFACE(false));
#ifdef UNIX
/* i#27: we need to special-case the 1st thread */
signal_thread_inherit(get_thread_private_dcontext(), NULL);
#endif
/* We move vm_areas_init() below dynamo_thread_init() so we can have
* two things: 1) a dcontext and 2) a SIGSEGV handler, for TRY/EXCEPT
* inside vm_areas_init() for PR 361594's probes and for safe_read().
* This means vm_areas_thread_init() runs before vm_areas_init().
*/
if (!DYNAMO_OPTION(thin_client)) {
vm_areas_init();
#ifdef RCT_IND_BRANCH
/* relies on is_in_dynamo_dll() which needs vm_areas_init */
rct_init();
#endif
} else {
/* This is needed to handle exceptions in thin_client mode, mostly
* internal ones, but can be app ones too. */
dynamo_vm_areas_lock();
find_dynamo_library_vm_areas();
dynamo_vm_areas_unlock();
}
#ifdef ANNOTATIONS
annotation_init();
#endif
jitopt_init();
#ifdef CLIENT_INTERFACE
/* client last, in case it depends on other inits: must be after
* dynamo_thread_init so the client can use a dcontext (PR 216936).
* Note that we *load* the client library before installing our hooks,
* but call the client's init routine afterward so that we correctly
* report crashes (PR 200207).
* Note: DllMain in client libraries can crash and we still won't
* report; better document that client libraries shouldn't have
* DllMain.
*/
instrument_init();
/* To give clients a chance to process pcaches as we load them, we
* delay the loading until we've initialized the clients.
*/
vm_area_delay_load_coarse_units();
#endif
#ifdef WINDOWS
if (!INTERNAL_OPTION(noasynch))
callback_interception_init_finish(); /* split for PR 200207: see above */
#endif
if (SELF_PROTECT_ON_CXT_SWITCH) {
protect_info = (protect_info_t *)
global_unprotected_heap_alloc(sizeof(protect_info_t) HEAPACCT(ACCT_OTHER));
ASSIGN_INIT_LOCK_FREE(protect_info->lock, protect_info);
protect_info->num_threads_unprot = 0; /* ENTERING_DR() below will inc to 1 */
protect_info->num_threads_suspended = 0;
if (INTERNAL_OPTION(single_privileged_thread)) {
/* FIXME: thread_initexit_lock must be a recursive lock! */
ASSERT_NOT_IMPLEMENTED(false);
/* grab the lock now -- the thread that is in dynamo must be holding
* the lock, and we are the initial thread in dynamo!
*/
mutex_lock(&thread_initexit_lock);
}
/* ENTERING_DR will increment, so decrement first
* FIXME: waste of protection change since will nop-unprotect!
*/
if (TEST(SELFPROT_DATA_CXTSW, DYNAMO_OPTION(protect_mask)))
datasec_writable_cxtswprot = 0;
/* FIXME case 8073: remove once freqprot not every cxt sw */
if (TEST(SELFPROT_DATA_FREQ, DYNAMO_OPTION(protect_mask)))
datasec_writable_freqprot = 0;
}
/* this thread is now entering DR */
ENTERING_DR();
#ifdef WINDOWS
if (DYNAMO_OPTION(early_inject)) {
/* AFTER callback_interception_init and self protect init and
* ENTERING_DR() */
early_inject_init();
}
#endif
}
dynamo_initialized = true;
/* Protect .data, assuming all vars there have been initialized. */
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
/* internal-only options for testing run-once (case 3990) */
if (INTERNAL_OPTION(unsafe_crash_process)) {
SYSLOG_INTERNAL_ERROR("Crashing the process deliberately!");
*((int *)PTR_UINT_MINUS_1) = 0;
}
if (INTERNAL_OPTION(unsafe_hang_process)) {
event_t never_signaled = create_event();
SYSLOG_INTERNAL_ERROR("Hanging the process deliberately!");
wait_for_event(never_signaled);
destroy_event(never_signaled);
}
return SUCCESS;
}
#ifdef UNIX
void
dynamorio_fork_init(dcontext_t *dcontext)
{
/* on a fork we want to re-initialize some data structures, especially
* log files, which we want a separate directory for
*/
thread_record_t **threads;
int i, num_threads;
# ifdef DEBUG
char parent_logdir[MAXIMUM_PATH];
# endif
/* re-cache app name, etc. that are using parent pid before we
* create log dirs (xref i#189/PR 452168)
*/
os_fork_init(dcontext);
/* sanity check, plus need to set this for statistics_init:
* even if parent did an execve, env var should be reset by now
*/
post_execve = (getenv(DYNAMORIO_VAR_EXECVE) != NULL);
ASSERT(!post_execve);
# ifdef DEBUG
/* copy stats->logdir
* stats->logdir is static, so current copy is fine, don't need
* frozen copy
*/
strncpy(parent_logdir, stats->logdir, MAXIMUM_PATH-1);
stats->logdir[MAXIMUM_PATH-1] = '\0'; /* if max no null */
# endif
if (get_log_dir(PROCESS_DIR, NULL, NULL)) {
/* we want brand new log dir */
enable_new_log_dir();
create_log_dir(PROCESS_DIR);
}
# ifdef DEBUG
/* just like dynamorio_app_init, create main_logfile before stats */
if (stats->loglevel > 0) {
/* we want brand new log files. os_fork_init() closed inherited files. */
main_logfile = open_log_file(main_logfile_name(), NULL, 0);
print_file(main_logfile, "%s\n", dynamorio_version_string);
print_file(main_logfile, "New log file for child %d forked by parent %d\n",
get_thread_id(), get_parent_id());
print_file(main_logfile, "Parent's log dir: %s\n", parent_logdir);
}
stats->process_id = get_process_id();
if (stats->loglevel > 0) {
/* FIXME: share these few lines of code w/ dynamorio_app_init? */
LOG(GLOBAL, LOG_TOP, 1, "Running: %s\n", stats->process_name);
# ifndef _WIN32_WCE
LOG(GLOBAL, LOG_TOP, 1, "DYNAMORIO_OPTIONS: %s\n", option_string);
# endif
}
# endif /* DEBUG */
/* must re-hash parent entry in threads table, plus no longer have any
* other threads (fork -> we're alone in address space), so clear
* out entire thread table, then add child
*/
mutex_lock(&thread_initexit_lock);
get_list_of_threads_ex(&threads, &num_threads, true/*include execve*/);
for (i=0; i<num_threads; i++) {
if (threads[i] == dcontext->thread_record)
remove_thread(threads[i]->id);
else
dynamo_other_thread_exit(threads[i]);
}
mutex_unlock(&thread_initexit_lock);
global_heap_free(threads, num_threads*sizeof(thread_record_t*)
HEAPACCT(ACCT_THREAD_MGT));
add_thread(get_process_id(), get_thread_id(), true/*under dynamo control*/,
dcontext);
GLOBAL_STAT(num_threads) = 1;
# ifdef DEBUG
if (stats->loglevel > 0) {
/* need a new thread-local logfile */
dcontext->logfile = open_log_file(thread_logfile_name(), NULL, 0);
print_file(dcontext->logfile, "%s\n", dynamorio_version_string);
print_file(dcontext->logfile, "New log file for child %d forked by parent %d\n",
get_thread_id(), get_parent_id());
LOG(THREAD, LOG_TOP|LOG_THREADS, 1,
"THREAD %d (dcontext "PFX")\n\n", get_thread_id(), dcontext);
}
# endif
num_threads = 1;
/* FIXME: maybe should have a callback list for who wants to be notified
* on a fork -- probably everyone who makes a log file on init.
*/
fragment_fork_init(dcontext);
/* this must be called after dynamo_other_thread_exit() above */
signal_fork_init(dcontext);
# ifdef CLIENT_INTERFACE
if (CLIENTS_EXIST()) {
instrument_fork_init(dcontext);
}
# endif
}
#endif /* UNIX */
#if defined(CLIENT_INTERFACE) || defined(STANDALONE_UNIT_TEST)
/* To make DynamoRIO useful as a library for a standalone client
* application (as opposed to a client library that works with
* DynamoRIO in executing a target application). This makes DynamoRIO
* useful as an IA-32 disassembly library, etc.
*/
dcontext_t *
standalone_init(void)
{
dcontext_t *dcontext;
standalone_library = true;
/* We have release-build stats now so this is not just DEBUG */
stats = &nonshared_stats;
#if defined(INTERNAL) && defined(DEADLOCK_AVOIDANCE)
/* avoid issues w/ GLOBAL_DCONTEXT instead of thread dcontext */
dynamo_options.deadlock_avoidance = false;
#endif
#ifdef UNIX
os_page_size_init((const char **)our_environ);
#endif
#ifdef WINDOWS
/* MUST do this before making any system calls */
if (!syscalls_init())
return NULL; /* typically b/c of unsupported OS version */
#endif
config_init();
options_init();
vmm_heap_init();
heap_init();
dynamo_heap_initialized = true;
dynamo_vm_areas_init();
decode_init();
proc_init();
os_init();
config_heap_init();
#ifdef STANDALONE_UNIT_TEST
os_tls_init();
dcontext = create_new_dynamo_context(true/*initial*/, NULL, NULL);
set_thread_private_dcontext(dcontext);
/* sanity check */
ASSERT(get_thread_private_dcontext() == dcontext);
heap_thread_init(dcontext);
# ifdef DEBUG
/* FIXME: share code w/ main init routine? */
nonshared_stats.logmask = LOG_ALL;
options_init();
if (stats->loglevel > 0) {
char initial_options[MAX_OPTIONS_STRING];
main_logfile = open_log_file(main_logfile_name(), NULL, 0);
print_file(main_logfile, "%s\n", dynamorio_version_string);
print_file(main_logfile, "Log file for standalone unit test\n");
get_dynamo_options_string(&dynamo_options,
initial_options, sizeof(initial_options), true);
SYSLOG_INTERNAL_INFO("Initial options = %s", initial_options);
print_file(main_logfile, "\n");
}
# endif /* DEBUG */
#else
/* rather than ask the user to call some thread-init routine in
* every thread, we just use global dcontext everywhere (i#548)
*/
dcontext = GLOBAL_DCONTEXT;
#endif
/* since we do not export any dr_standalone_exit(), we clean up any .1config
* file right now. the only loss is that we can't synch options: but that
* should be less important for standalone. we disable synching.
*/
/* options are never made read-only for standalone */
dynamo_options.dynamic_options = false;
dynamo_initialized = true;
return dcontext;
}
void
standalone_exit(void)
{
/* should clean up here */
config_exit();
}
#endif
/* Perform exit tasks that require full thread data structs, which we have
* already cleaned up by the time we reach dynamo_shared_exit() for both
* debug and detach paths.
*/
void
dynamo_process_exit_with_thread_info(void)
{
perscache_fast_exit(); /* "fast" b/c called in release as well */
}
/* shared between app_exit and detach */
int
dynamo_shared_exit(thread_record_t *toexit /* must ==cur thread for Linux */
_IF_WINDOWS(bool detach_stacked_callbacks))
{
DEBUG_DECLARE(uint endtime);
/* set this now, could already be set */
dynamo_exited = true;
/* avoid time() for libc independence */
DODEBUG(endtime = query_time_seconds(););
LOG(GLOBAL, LOG_STATS, 1, "\n#### Statistics for entire process:\n");
LOG(GLOBAL, LOG_STATS, 1, "Total running time: %d seconds\n",
endtime - starttime);
#ifdef PAPI
hardware_perfctr_exit();
#endif
#ifdef DEBUG
# if defined(INTERNAL) && defined(X86)
print_optimization_stats();
# endif /* INTERNAL && X86 */
DOLOG(1, LOG_STATS, {
dump_global_stats(false);
});
#endif /* DEBUG */
if (SELF_PROTECT_ON_CXT_SWITCH) {
DELETE_LOCK(protect_info->lock);
global_unprotected_heap_free(protect_info, sizeof(protect_info_t) HEAPACCT(ACCT_OTHER));
}
dynamo_exited_and_cleaned = true;
/* call all component exit routines (CAUTION: order is important here) */
DELETE_RECURSIVE_LOCK(thread_in_DR_exclusion);
DOSTATS({
LOG(GLOBAL, LOG_TOP|LOG_THREADS, 1,
"fcache_stats_exit: before fragment cleanup\n");
DOLOG(1, LOG_CACHE, fcache_stats_exit(););
});
#ifdef RCT_IND_BRANCH
if (!DYNAMO_OPTION(thin_client))
rct_exit();
#endif
fragment_exit();
#ifdef ANNOTATIONS
annotation_exit();
#endif
jitopt_exit();
#ifdef CLIENT_INTERFACE
/* We tell the client as soon as possible in case it wants to use services from other
* components. Must be after fragment_exit() so that the client gets all the
* fragment_deleted() callbacks (xref PR 228156). FIXME - might be issues with the
* client trying to use api routines that depend on fragment state.
*/
instrument_exit();
#endif
/* we want dcontext around for loader_exit() */
if (get_thread_private_dcontext() != NULL)
loader_thread_exit(get_thread_private_dcontext());
loader_exit();
if (toexit != NULL) {
/* free detaching thread's dcontext */
#ifdef WINDOWS
/* If we use dynamo_thread_exit() when toexit is the current thread,
* it results in asserts in the win32.tls test, so we stick with this.
*/
mutex_lock(&thread_initexit_lock);
dynamo_other_thread_exit(toexit, false);
mutex_unlock(&thread_initexit_lock);
#else
/* On Linux, restoring segment registers can only be done
* on the current thread, which must be toexit.
*/
ASSERT(toexit->id == get_thread_id());
dynamo_thread_exit();
#endif
}
if (IF_WINDOWS_ELSE(!detach_stacked_callbacks, true)) {
/* We don't fully free cur thread until after client exit event (PR 536058) */
if (thread_lookup(get_thread_id()) == NULL) {
LOG(GLOBAL, LOG_TOP|LOG_THREADS, 1,
"Current thread never under DynamoRIO control, not exiting it\n");
} else {
/* call thread_exit even if !under_dynamo_control, could have
* been at one time
*/
/* exit this thread now */
dynamo_thread_exit();
}
}
/* now that the final thread is exited, free the all_threads memory */
mutex_lock(&all_threads_lock);
global_heap_free(all_threads,
HASHTABLE_SIZE(ALL_THREADS_HASH_BITS) * sizeof(thread_record_t*)
HEAPACCT(ACCT_THREAD_MGT));
all_threads = NULL;
mutex_unlock(&all_threads_lock);
#ifdef WINDOWS
# ifdef CLIENT_INTERFACE
/* for -private_loader we do this here to catch more exit-time crashes */
if (!INTERNAL_OPTION(noasynch) && INTERNAL_OPTION(private_loader) && !doing_detach)
callback_interception_unintercept();
# endif
/* callback_interception_exit must be after fragment exit for CLIENT_INTERFACE so
* that fragment_exit->frees fragments->instrument_fragment_deleted->
* hide_tag_from_fragment->is_intercepted_app_pc won't crash. xref PR 228156 */
if (!INTERNAL_OPTION(noasynch)) {
callback_interception_exit();
}
#endif
link_exit();
fcache_exit();
monitor_exit();
synch_exit();
arch_exit(IF_WINDOWS(detach_stacked_callbacks));
#ifdef CALL_PROFILE
/* above os_exit to avoid eventlog_mutex trigger if we're the first to
* create a log file
*/
profile_callers_exit();
#endif
os_fast_exit();
os_slow_exit();
native_exec_exit(); /* before vm_areas_exit for using dynamo_areas */
vm_areas_exit();
perscache_slow_exit(); /* fast called in dynamo_process_exit_with_thread_info() */
modules_exit(); /* after aslr_exit() from os_slow_exit(),
* after vm_areas & perscache exits */
moduledb_exit(); /* before heap_exit */
#ifdef HOT_PATCHING_INTERFACE
if (DYNAMO_OPTION(hot_patching))
hotp_exit();
#endif
#if defined(WINDOWS) && defined(STACK_GUARD_PAGE) && defined(DEBUG)
/* Free exception stack before calling heap_exit */
stack_free(exception_stack, EXCEPTION_STACK_SIZE);
exception_stack = NULL;
#endif
config_heap_exit();
heap_exit();
vmm_heap_exit();
diagnost_exit();
data_section_exit();
/* funny dependences: options exit just frees lock, not destroying
* any options that are needed for other exits, so do it prior to
* checking locks in debug build
*/
options_exit();
utils_exit();
config_exit();
#ifdef KSTATS
kstat_exit();
#endif
DELETE_LOCK(all_threads_lock);
DELETE_LOCK(thread_initexit_lock);
DOLOG(1, LOG_STATS, {
/* dump after cleaning up to make it easy to check if stats that
* are inc-ed and dec-ed actually come down to 0
*/
dump_global_stats(false);
});
statistics_exit();
#ifdef DEBUG
# ifdef DEADLOCK_AVOIDANCE
ASSERT(locks_not_closed() == 0);
# endif
dynamo_exited_log_and_stats = true;
if (main_logfile != STDERR) {
/* do it this way just in case someone tries to log to the global file
* right now */
file_t file_temp = main_logfile;
main_logfile = INVALID_FILE;
close_log_file(file_temp);
}
#else
# ifdef DEADLOCK_AVOIDANCE
ASSERT(locks_not_closed() == 0);
# endif
#endif /* DEBUG */
dynamo_initialized = false;
return SUCCESS;
}
int
dynamorio_app_exit(void)
{
return dynamo_process_exit();
}
/* synchs with all threads using synch type synch_res.
* also sets dynamo_exited to true.
* does not resume the threads but does release the thread_initexit_lock.
*/
static void
synch_with_threads_at_exit(thread_synch_state_t synch_res)
{
int num_threads;
thread_record_t **threads;
DEBUG_DECLARE(bool ok;)
LOG(GLOBAL, LOG_TOP|LOG_THREADS, 1,
"\nsynch_with_threads_at_exit: cleaning up %d un-terminated threads\n",
get_num_threads());
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
/* make sure client nudges are finished */
wait_for_outstanding_nudges();
#endif
/* xref case 8747, requesting suspended is preferable to terminated and it
* doesn't make a difference here which we use (since the process is about
* to die).
* On Linux, however, we do not have dependencies on OS thread
* properties like we do on Windows (TEB, etc.), and our suspended
* threads use their sigstacks and ostd data structs, making cleanup
* while still catching other leaks more difficult: thus it's
* simpler to terminate and then clean up. FIXME: by terminating
* we'll raise SIGCHLD that may not have been raised natively if the
* whole group went down in a single SYS_exit_group. Instead we
* could have the suspended thread move from the sigstack-reliant
* loop to a stack-free loop (xref i#95).
*/
IF_UNIX(dynamo_exiting = true;) /* include execve-exited vfork threads */
DEBUG_DECLARE(ok =)
synch_with_all_threads(synch_res,
&threads, &num_threads,
/* Case 6821: other synch-all-thread uses that
* only care about threads carrying fcache
* state can ignore us
*/
THREAD_SYNCH_NO_LOCKS_NO_XFER,
/* if we fail to suspend a thread (e.g., privilege
* problems) ignore it. FIXME: retry instead? */
THREAD_SYNCH_SUSPEND_FAILURE_IGNORE);
ASSERT(ok);
ASSERT(threads == NULL && num_threads == 0); /* We asked for CLEANED */
/* the synch_with_all_threads function grabbed the
* thread_initexit_lock for us! */
/* do this now after all threads we know about are killed and
* while we hold the thread_initexit_lock so any new threads that
* are waiting on it won't get in our way (see thread_init()) */
dynamo_exited = true;
end_synch_with_all_threads(threads, num_threads, false/*don't resume*/);
}
static thread_synch_state_t
exit_synch_state(void)
{
thread_synch_state_t synch_res =
IF_WINDOWS_ELSE(THREAD_SYNCH_SUSPENDED_AND_CLEANED,
THREAD_SYNCH_TERMINATED_AND_CLEANED);
#if defined(DR_APP_EXPORTS) && defined(UNIX)
if (dr_api_exit) {
/* Don't terminate the app's threads in case the app plans to continue
* after dr_app_cleanup(). Note that today we don't fully support that
* anyway: the app should use dr_app_stop_and_cleanup() whose detach
* code won't come here.
*/
synch_res = THREAD_SYNCH_SUSPENDED_AND_CLEANED;
}
#endif
return synch_res;
}
#ifdef DEBUG
/* cleanup after the application has exited */
static int
dynamo_process_exit_cleanup(void)
{
/* CAUTION: this should only be invoked after all app threads have stopped */
if (!dynamo_exited && !INTERNAL_OPTION(nullcalls)) {
dcontext_t *dcontext;
APP_EXPORT_ASSERT(dynamo_initialized, "Improper DynamoRIO initialization");
dcontext = get_thread_private_dcontext();
/* we deliberately do NOT clean up initstack (which was
* allocated using a separate mmap and so is not part of some
* large unit that is de-allocated), as it is used in special
* circumstances to call us...FIXME: is this memory leak ok?
* is there a better solution besides assuming the app stack?
*/
#ifdef SIDELINE
if (dynamo_options.sideline) {
/* exit now to make thread cleanup simpler */
sideline_exit();
}
#endif
/* perform exit tasks that require full thread data structs */
dynamo_process_exit_with_thread_info();
if (INTERNAL_OPTION(single_privileged_thread)) {
mutex_unlock(&thread_initexit_lock);
}
/* if ExitProcess called before all threads terminated, they won't
* all have gone through dynamo_thread_exit, so clean them up now
* so we can get stats about them
*
* we don't check control_all_threads b/c we're just killing
* the threads we know about here
*/
synch_with_threads_at_exit(exit_synch_state());
/* now that APC interception point is unpatched and
* dynamo_exited is set and we've killed all the threads we know
* about, assumption is that no other threads will be running in
* dynamorio code from here on out (esp. when we get into shared exit)
* that will do anything that could be dangerous (could possibly be
* a thread in the APC interception code prior to reaching thread_init
* but it will only global log and do thread_lookup which should be
* safe throughout) */
/* In order to pass the client a dcontext in the process exit event
* we do some thread cleanup early for the final thread so we can delay
* the rest (PR 536058). This is a little risky in that we
* clean up dcontext->fragment_field, which is used for lots of
* things like couldbelinking (and thus we have to disable some API
* routines in the thread exit event: i#1989).
*/
dynamo_thread_exit_pre_client(get_thread_private_dcontext(), get_thread_id());
#ifdef WINDOWS
/* FIXME : our call un-interception isn't atomic so (miniscule) chance
* of something going wrong if new thread is just hitting its init APC
*/
/* w/ the app's loader we must remove our LdrUnloadDll hook
* before we unload the client lib (and thus we miss client
* exit crashes): xref PR 200207.
*/
if (!INTERNAL_OPTION(noasynch)
IF_CLIENT_INTERFACE(&& !INTERNAL_OPTION(private_loader))) {
callback_interception_unintercept();
}
#else /* UNIX */
unhook_vsyscall();
#endif /* UNIX */
return dynamo_shared_exit(NULL /* not detaching */
_IF_WINDOWS(false /* not detaching */));
}
return SUCCESS;
}
#endif /* DEBUG */
int
dynamo_nullcalls_exit(void)
{
/* this routine is used when nullcalls is turned on
* simply to get perfctr numbers in a log file
*/
ASSERT(INTERNAL_OPTION(nullcalls));
#ifdef PAPI
hardware_perfctr_exit();
#endif
#ifdef DEBUG
if (main_logfile != STDERR) {
close_log_file(main_logfile);
main_logfile = INVALID_FILE;
}
#endif /* DEBUG */
dynamo_exited = true;
return SUCCESS;
}
/* called when we see that the process is about to exit */
int
dynamo_process_exit(void)
{
#ifndef DEBUG
bool each_thread;
#endif
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
synchronize_dynamic_options();
SYSLOG(SYSLOG_INFORMATION, INFO_PROCESS_STOP,
2, get_application_name(), get_application_pid());
#ifdef DEBUG
if (!dynamo_exited) {
if (INTERNAL_OPTION(nullcalls)) {
/* if nullcalls is on we still do perfctr stats, and this is
* the only place we can print them out and exit
*/
dynamo_nullcalls_exit();
}
else {
/* we don't check automatic_startup -- even if the app_
* interface is used, we are about to be gone from the process
* address space, so we clean up now
*/
LOG(GLOBAL, LOG_TOP, 1,
"\ndynamo_process_exit from thread "TIDFMT" -- cleaning up dynamo\n",
get_thread_id());
dynamo_process_exit_cleanup();
}
}
return SUCCESS;
#else
if (dynamo_exited)
return SUCCESS;
/* don't need to do much!
* we didn't create any IPC objects or anything that might be persistent
* beyond our death, we're not holding any systemwide locks, etc.
*/
/* It is not clear whether the Event Log service can handle unterminated connections well */
/* Do we need profile data for each thread?
* Note that windows prof_pcs duplicates the thread walk in os_exit()
* FIXME: should combine that thread walk with this one
*/
each_thread = TRACEDUMP_ENABLED();
# ifdef UNIX
each_thread = each_thread || INTERNAL_OPTION(profile_pcs);
# endif
# ifdef KSTATS
each_thread = each_thread || DYNAMO_OPTION(kstats);
# endif
# ifdef CLIENT_INTERFACE
each_thread = each_thread ||
/* If we don't need a thread exit event, avoid the possibility of
* racy crashes (PR 470957) by not calling instrument_thread_exit()
*/
(!INTERNAL_OPTION(nullcalls) && dr_thread_exit_hook_exists() &&
!DYNAMO_OPTION(skip_thread_exit_at_exit));
# endif
if (DYNAMO_OPTION(synch_at_exit)
/* by default we synch if any exit event exists */
IF_CLIENT_INTERFACE(|| (!DYNAMO_OPTION(multi_thread_exit) &&
dr_exit_hook_exists())
|| (!DYNAMO_OPTION(skip_thread_exit_at_exit) &&
dr_thread_exit_hook_exists()))) {
/* needed primarily for CLIENT_INTERFACE but technically all configurations
* can have racy crashes at exit time (xref PR 470957)
*/
synch_with_threads_at_exit(exit_synch_state());
} else
dynamo_exited = true;
if (each_thread) {
thread_record_t **threads;
int num, i;
mutex_lock(&thread_initexit_lock);
get_list_of_threads(&threads, &num);
for (i = 0; i < num; i++) {
#ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(threads[i]->dcontext))
continue;
#endif
/* FIXME: separate trace dump from rest of fragment cleanup code */
if (TRACEDUMP_ENABLED() IF_CLIENT_INTERFACE(|| true))
/* We always want to call this for CI builds so we can get the
* dr_fragment_deleted() callbacks.
*/
fragment_thread_exit(threads[i]->dcontext);
# ifdef UNIX
if (INTERNAL_OPTION(profile_pcs))
pcprofile_thread_exit(threads[i]->dcontext);
# endif
# ifdef KSTATS
if (DYNAMO_OPTION(kstats))
kstat_thread_exit(threads[i]->dcontext);
# endif
# ifdef CLIENT_INTERFACE
/* Inform client of all thread exits */
if (!INTERNAL_OPTION(nullcalls) && !DYNAMO_OPTION(skip_thread_exit_at_exit)) {
instrument_thread_exit_event(threads[i]->dcontext);
/* i#1617: ensure we do all cleanup of priv libs */
if (threads[i]->id != get_thread_id()) /* i#1617: must delay this */
loader_thread_exit(threads[i]->dcontext);
}
# endif
}
global_heap_free(threads, num*sizeof(thread_record_t*)
HEAPACCT(ACCT_THREAD_MGT));
mutex_unlock(&thread_initexit_lock);
}
/* PR 522783: must be before we clear dcontext (if CLIENT_INTERFACE)! */
/* must also be prior to fragment_exit so we actually freeze pcaches (i#703) */
dynamo_process_exit_with_thread_info();
/* FIXME: separate trace dump from rest of fragment cleanup code. For client
* interface we need to call fragment_exit to get all the fragment deleted events. */
if (TRACEDUMP_ENABLED() IF_CLIENT_INTERFACE(|| dr_fragment_deleted_hook_exists()))
fragment_exit();
/* Inform client of process exit */
#ifdef CLIENT_INTERFACE
if (!INTERNAL_OPTION(nullcalls)) {
# ifdef WINDOWS
/* instrument_exit() unloads the client library, so make sure
* LdrUnloadDll isn't hooked if using the app loader.
*/
if (!INTERNAL_OPTION(noasynch)
IF_CLIENT_INTERFACE(&& !INTERNAL_OPTION(private_loader))) {
callback_interception_unintercept();
}
# endif
/* Must be after fragment_exit() so that the client gets all the
* fragment_deleted() callbacks (xref PR 228156). FIXME - might be issues
* with the client trying to use api routines that depend on fragment state.
*/
instrument_exit();
# ifdef CLIENT_INTERFACE
/* i#1617: We need to call client library fini routines for global
* destructors, etc.
*/
if (!INTERNAL_OPTION(nullcalls) && !DYNAMO_OPTION(skip_thread_exit_at_exit))
loader_thread_exit(get_thread_private_dcontext());
loader_exit();
# endif
/* for -private_loader we do this here to catch more exit-time crashes */
# ifdef WINDOWS
if (!INTERNAL_OPTION(noasynch)
IF_CLIENT_INTERFACE(&& INTERNAL_OPTION(private_loader)))
callback_interception_unintercept();
# endif
}
#endif
#ifdef CALL_PROFILE
profile_callers_exit();
#endif
# ifdef KSTATS
if (DYNAMO_OPTION(kstats))
kstat_exit();
# endif
/* so make sure eventlog connection is terminated (if present) */
os_fast_exit();
return SUCCESS;
#endif /* !DEBUG */
}
void
dynamo_exit_post_detach(void)
{
/* i#2157: best-effort re-init in case of re-attach */
do_once_generation++; /* Increment the generation in case we re-attach */
dynamo_initialized = false;
dynamo_heap_initialized = false;
automatic_startup = false;
control_all_threads = false;
dr_api_entry = false;
dr_api_exit = false;
#ifdef UNIX
dynamo_exiting = false;
#endif
dynamo_exited = false;
dynamo_exited_and_cleaned = false;
#ifdef DEBUG
dynamo_exited_log_and_stats = false;
#endif
dynamo_resetting = false;
#ifdef UNIX
post_execve = false;
#endif
}
dcontext_t *
create_new_dynamo_context(bool initial, byte *dstack_in, priv_mcontext_t *mc)
{
dcontext_t *dcontext;
size_t alloc = sizeof(dcontext_t) + proc_get_cache_line_size();
void *alloc_start = (void *)
((TEST(SELFPROT_GLOBAL, dynamo_options.protect_mask) &&
!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) ?
/* if protecting global but not dcontext, put whole thing in unprot mem */
global_unprotected_heap_alloc(alloc HEAPACCT(ACCT_OTHER)) :
global_heap_alloc(alloc HEAPACCT(ACCT_OTHER)));
dcontext = (dcontext_t*) proc_bump_to_end_of_cache_line((ptr_uint_t)alloc_start);
ASSERT(proc_is_cache_aligned(dcontext));
#ifdef X86
/* 264138: ensure xmm/ymm slots are aligned so we can use vmovdqa */
ASSERT(ALIGNED(get_mcontext(dcontext)->ymm, YMM_REG_SIZE));
/* also ensure we don't have extra padding beyond x86.asm defines */
ASSERT(sizeof(priv_mcontext_t) == IF_X64_ELSE(18,10)*sizeof(reg_t) +
PRE_XMM_PADDING + XMM_SLOTS_SIZE);
#elif defined(ARM)
/* FIXME i#1551: add arm alignment check if any */
#endif /* X86/ARM */
/* Put here all one-time dcontext field initialization
* Make sure to update create_callback_dcontext to share
* fields across callback dcontexts for the same thread.
*/
/* must set to 0 so can tell if initialized for callbacks! */
memset(dcontext, 0x0, sizeof(dcontext_t));
dcontext->allocated_start = alloc_start;
/* we share a single dstack across all callbacks */
if (initial) {
/* DrMi#1723: our dstack needs to be at a higher address than the app
* stack. If mc passed, use its xsp; else use cur xsp (initial thread
* is on the app stack here: xref i#1105), for lower bound for dstack.
*/
byte *app_xsp;
if (mc == NULL)
GET_STACK_PTR(app_xsp);
else
app_xsp = (byte *) mc->xsp;
if (dstack_in == NULL) {
dcontext->dstack = (byte *) stack_alloc(DYNAMORIO_STACK_SIZE, app_xsp);
} else
dcontext->dstack = dstack_in; /* xref i#149/PR 403015 */
#ifdef WINDOWS
DOCHECK(1, {
if (dcontext->dstack < app_xsp)
SYSLOG_INTERNAL_WARNING_ONCE("dstack is below app xsp");
});
#endif
} else {
/* dstack may be pre-allocated only at thread init, not at callback */
ASSERT(dstack_in == NULL);
}
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
dcontext->upcontext.separate_upcontext =
global_unprotected_heap_alloc(sizeof(unprotected_context_t) HEAPACCT(ACCT_OTHER));
/* don't need to initialize upcontext */
LOG(GLOBAL, LOG_TOP, 2, "new dcontext="PFX", dcontext->upcontext="PFX"\n",
dcontext, dcontext->upcontext.separate_upcontext);
dcontext->upcontext_ptr = dcontext->upcontext.separate_upcontext;
} else
dcontext->upcontext_ptr = &(dcontext->upcontext.upcontext);
#ifdef HOT_PATCHING_INTERFACE
/* Set the hot patch exception state to be empty/unused. */
DODEBUG(memset(&dcontext->hotp_excpt_state, -1, sizeof(dr_jmp_buf_t)););
#endif
ASSERT(dcontext->try_except.try_except_state == NULL);
DODEBUG({dcontext->logfile = INVALID_FILE;});
dcontext->owning_thread = get_thread_id();
#ifdef UNIX
dcontext->owning_process = get_process_id();
#endif
/* thread_record is set in add_thread */
/* all of the thread-private fcache and hashtable fields are shared
* among all dcontext instances of a thread, so the caller must
* set those fields
*/
/* rest of dcontext initialization happens in initialize_dynamo_context(),
* which is executed for each dr_app_start() and each
* callback start
*/
return dcontext;
}
static void
delete_dynamo_context(dcontext_t *dcontext, bool free_stack)
{
if (free_stack) {
ASSERT(dcontext->dstack != NULL);
ASSERT(!is_currently_on_dstack(dcontext));
stack_free(dcontext->dstack, DYNAMORIO_STACK_SIZE);
} /* else will be cleaned up by caller */
ASSERT(dcontext->try_except.try_except_state == NULL);
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
global_unprotected_heap_free(dcontext->upcontext.separate_upcontext,
sizeof(unprotected_context_t) HEAPACCT(ACCT_OTHER));
}
if (TEST(SELFPROT_GLOBAL, dynamo_options.protect_mask) &&
!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
/* if protecting global but not dcontext, we put whole thing in unprot mem */
global_unprotected_heap_free(dcontext->allocated_start,
sizeof(dcontext_t) + proc_get_cache_line_size()
HEAPACCT(ACCT_OTHER));
} else {
global_heap_free(dcontext->allocated_start,
sizeof(dcontext_t) + proc_get_cache_line_size()
HEAPACCT(ACCT_OTHER));
}
}
/* This routine is called not only at thread initialization,
* but for every callback, etc. that gets a fresh execution
* environment!
*/
void
initialize_dynamo_context(dcontext_t *dcontext)
{
/* we can't just zero out the whole thing b/c we have persistent state
* (fields kept across callbacks, like dstack, module-private fields, next & prev, etc.)
*/
memset(dcontext->upcontext_ptr, 0, sizeof(unprotected_context_t));
dcontext->initialized = true;
dcontext->whereami = WHERE_APP;
dcontext->next_tag = NULL;
dcontext->native_exec_postsyscall = NULL;
memset(dcontext->native_retstack, 0, sizeof(dcontext->native_retstack));
dcontext->native_retstack_cur = 0;
dcontext->isa_mode = DEFAULT_ISA_MODE;
#ifdef ARM
dcontext->encode_state[0] = 0;
dcontext->encode_state[1] = 0;
dcontext->decode_state[0] = 0;
dcontext->decode_state[1] = 0;
#endif
dcontext->sys_num = 0;
#ifdef WINDOWS
#ifdef CLIENT_INTERFACE
dcontext->app_errno = 0;
# ifdef DEBUG
dcontext->is_client_thread_exiting = false;
# endif
#endif
dcontext->sys_param_base = NULL;
/* always initialize aslr_context */
dcontext->aslr_context.sys_aslr_clobbered = 0;
dcontext->aslr_context.randomized_section_handle = INVALID_HANDLE_VALUE;
dcontext->aslr_context.original_image_section_handle = INVALID_HANDLE_VALUE;
dcontext->aslr_context.original_section_base = ASLR_INVALID_SECTION_BASE;
# ifdef DEBUG
dcontext->aslr_context.last_app_section_handle = INVALID_HANDLE_VALUE;
# endif
/* note that aslr_context.last_child_padded is preserved across callbacks */
dcontext->ignore_enterexit = false;
#else
dcontext->sys_param0 = 0;
dcontext->sys_param1 = 0;
dcontext->sys_param2 = 0;
#endif
#ifdef UNIX
dcontext->signals_pending = false;
#endif
/* all thread-private fields are initialized in dynamo_thread_init
* or in create_callback_dcontext because they must be initialized differently
* in those two cases
*/
set_last_exit(dcontext, (linkstub_t *) get_starting_linkstub());
#ifdef PROFILE_RDTSC
dcontext->start_time = (uint64) 0;
dcontext->prev_fragment = NULL;
dcontext->cache_frag_count = (uint64) 0;
{
int i;
for (i=0; i<10; i++) {
dcontext->cache_time[i] = (uint64) 0;
dcontext->cache_count[i] = (uint64) 0;
}
}
#endif
#ifdef DEBUG
dcontext->in_opnd_disassemble = false;
#endif
#ifdef WINDOWS
/* Other pieces of DR -- callback & APC handling, detach -- test
* asynch_target to determine where the next app pc to execute is
* stored. Init it to 0 to indicate that this context's most recent
* syscall was not executed from handle_system_call().
*/
dcontext->asynch_target = NULL;
/* next_saved and prev_unused are zeroed out when dcontext is
* created; we shouldn't zero them here, they may have valid data
*/
dcontext->valid = true;
#endif
#ifdef HOT_PATCHING_INTERFACE
dcontext->nudge_thread = false; /* Fix for case 5367. */
#endif
#ifdef CHECK_RETURNS_SSE2
/* initialize sse2 index with 0
* go ahead and use eax, it's dead (about to return)
*/
# ifdef UNIX
asm("movl $0, %eax");
asm("pinsrw $7,%eax,%xmm7");
# else
# error NYI
# endif
#endif
/* We don't need to initialize dcontext->coarse_exit as it is only
* read when last_exit indicates a coarse exit, which sets the fields.
*/
dcontext->go_native = false;
}
#ifdef WINDOWS
/* on windows we use a new dcontext for each callback context */
dcontext_t *
create_callback_dcontext(dcontext_t *old_dcontext)
{
dcontext_t *new_dcontext = create_new_dynamo_context(false, NULL, NULL);
new_dcontext->valid = false;
/* all of these fields are shared among all dcontexts of a thread: */
new_dcontext->owning_thread = old_dcontext->owning_thread;
#ifdef UNIX
new_dcontext->owning_process = old_dcontext->owning_process;
#endif
new_dcontext->thread_record = old_dcontext->thread_record;
/* now that we have clean stack usage we can share a single stack */
ASSERT(old_dcontext->dstack != NULL);
new_dcontext->dstack = old_dcontext->dstack;
new_dcontext->isa_mode = old_dcontext->isa_mode;
new_dcontext->link_field = old_dcontext->link_field;
new_dcontext->monitor_field = old_dcontext->monitor_field;
new_dcontext->fcache_field = old_dcontext->fcache_field;
new_dcontext->fragment_field = old_dcontext->fragment_field;
new_dcontext->heap_field = old_dcontext->heap_field;
new_dcontext->vm_areas_field = old_dcontext->vm_areas_field;
new_dcontext->os_field = old_dcontext->os_field;
new_dcontext->synch_field = old_dcontext->synch_field;
/* case 8958: copy win32_start_addr in case we produce a forensics file
* from within a callback.
*/
new_dcontext->win32_start_addr = old_dcontext->win32_start_addr;
#ifdef CLIENT_INTERFACE
/* FlsData is persistent across callbacks */
new_dcontext->app_fls_data = old_dcontext->app_fls_data;
new_dcontext->priv_fls_data = old_dcontext->priv_fls_data;
new_dcontext->app_nt_rpc = old_dcontext->app_nt_rpc;
new_dcontext->priv_nt_rpc = old_dcontext->priv_nt_rpc;
new_dcontext->app_nls_cache = old_dcontext->app_nls_cache;
new_dcontext->priv_nls_cache = old_dcontext->priv_nls_cache;
#endif
new_dcontext->app_stack_limit = old_dcontext->app_stack_limit;
new_dcontext->app_stack_base = old_dcontext->app_stack_base;
new_dcontext->teb_base = old_dcontext->teb_base;
#ifdef UNIX
new_dcontext->signal_field = old_dcontext->signal_field;
new_dcontext->pcprofile_field = old_dcontext->pcprofile_field;
#endif
new_dcontext->private_code = old_dcontext->private_code;
#ifdef CLIENT_INTERFACE
new_dcontext->client_data = old_dcontext->client_data;
#endif
#ifdef DEBUG
new_dcontext->logfile = old_dcontext->logfile;
new_dcontext->thread_stats = old_dcontext->thread_stats;
#endif
#ifdef DEADLOCK_AVOIDANCE
new_dcontext->thread_owned_locks = old_dcontext->thread_owned_locks;
#endif
#ifdef KSTATS
new_dcontext->thread_kstats = old_dcontext->thread_kstats;
#endif
/* at_syscall is real time based, not app context based, so shared
*
* FIXME: Yes need to share when swapping at NtCallbackReturn, but
* want to keep old so when return from cb will do post-syscall for
* syscall that triggered cb in the first place!
* Plus, new cb calls initialize_dynamo_context(), which clears this field
* anyway! This all works now b/c we don't have alertable syscalls
* that we do post-syscall processing on.
*/
new_dcontext->upcontext_ptr->at_syscall = old_dcontext->upcontext_ptr->at_syscall;
#ifdef HOT_PATCHING_INTERFACE /* Fix for case 5367. */
/* hotp_excpt_state should be unused at this point. If it is used, it can
* be only because a hot patch made a system call with a callback. This is
* a bug because hot patches can't do system calls, let alone one with
* callbacks.
*/
DOCHECK(1, {
dr_jmp_buf_t empty;
memset(&empty, -1, sizeof(dr_jmp_buf_t));
ASSERT(memcmp(&old_dcontext->hotp_excpt_state, &empty,
sizeof(dr_jmp_buf_t)) == 0);
});
new_dcontext->nudge_thread = old_dcontext->nudge_thread;
#endif
/* our exceptions should be handled within one DR context switch */
ASSERT(old_dcontext->try_except.try_except_state == NULL);
new_dcontext->local_state = old_dcontext->local_state;
#ifdef WINDOWS
new_dcontext->aslr_context.last_child_padded =
old_dcontext->aslr_context.last_child_padded;
#endif
LOG(new_dcontext->logfile, LOG_TOP, 2,
"made new dcontext "PFX" (old="PFX")\n", new_dcontext, old_dcontext);
return new_dcontext;
}
#endif
bool
is_thread_initialized(void)
{
#if defined(UNIX) && defined(HAVE_TLS)
/* We don't want to pay the get_thread_id() cost on every
* get_thread_private_dcontext() when we only really need the
* check for this call here, so we explicitly check.
*/
if (get_tls_thread_id() != get_sys_thread_id())
return false;
#endif
return (get_thread_private_dcontext() != NULL);
}
bool
is_thread_known(thread_id_t tid)
{
return (thread_lookup(tid) != NULL);
}
#ifdef UNIX
/* i#237/PR 498284: a thread about to execute SYS_execve should be considered
 * exited, but we can't easily clean it up for real immediately
*/
void
mark_thread_execve(thread_record_t *tr, bool execve)
{
ASSERT((execve && !tr->execve) || (!execve && tr->execve));
tr->execve = execve;
mutex_lock(&all_threads_lock);
if (execve) {
/* since we free on a second vfork we should never accumulate
* more than one
*/
ASSERT(num_execve_threads == 0);
num_execve_threads++;
} else {
ASSERT(num_execve_threads > 0);
num_execve_threads--;
}
mutex_unlock(&all_threads_lock);
}
#endif /* UNIX */
int
get_num_threads(void)
{
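    /* On UNIX this excludes threads that already exited via execve (tracked by
     * num_execve_threads above); elsewhere it is simply num_known_threads.
     */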
return num_known_threads IF_UNIX(- num_execve_threads);
}
bool
is_last_app_thread(void)
{
return (get_num_threads() ==
IF_CLIENT_INTERFACE(get_num_client_threads() +) 1);
}
/* This routine takes a snapshot of all the threads known to DR,
* NOT LIMITED to those currently under DR control!
* It returns an array of thread_record_t* and the length of the array
* The caller must free the array using global_heap_free
* The caller must hold the thread_initexit_lock to ensure that threads
* are not created or destroyed before the caller is done with the list
* The caller CANNOT be could_be_linking, else a deadlock with flushing
* can occur (unless the caller is the one flushing)
*/
static void
get_list_of_threads_common(thread_record_t ***list, int *num
_IF_UNIX(bool include_execve))
{
int i, cur = 0, max_num;
thread_record_t *tr;
thread_record_t **mylist;
/* Only a flushing thread can get the thread snapshot while being
* couldbelinking -- else a deadlock w/ flush!
* FIXME: this assert should be on any acquisition of thread_initexit_lock!
*/
ASSERT(is_self_flushing() || !is_self_couldbelinking());
ASSERT(all_threads != NULL);
ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
mutex_lock(&all_threads_lock);
/* Do not include vfork threads that exited via execve, unless we're exiting */
max_num = IF_UNIX_ELSE((include_execve || dynamo_exiting) ?
num_known_threads : get_num_threads(),
get_num_threads());
mylist = (thread_record_t **) global_heap_alloc(max_num*sizeof(thread_record_t*)
HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
for (tr = all_threads[i]; tr != NULL; tr = tr->next) {
/* include those for which !tr->under_dynamo_control */
/* don't include those that exited for execve. there should be
* no race b/c vfork suspends the parent. xref i#237/PR 498284.
*/
if (IF_UNIX_ELSE(!tr->execve || include_execve || dynamo_exiting, true)) {
mylist[cur] = tr;
cur++;
}
}
}
ASSERT(cur > 0);
IF_WINDOWS(ASSERT(cur == max_num));
if (cur < max_num) {
mylist = (thread_record_t **)
global_heap_realloc(mylist, max_num, cur, sizeof(thread_record_t*)
HEAPACCT(ACCT_THREAD_MGT));
}
*num = cur;
*list = mylist;
mutex_unlock(&all_threads_lock);
}
void
get_list_of_threads(thread_record_t ***list, int *num)
{
get_list_of_threads_common(list, num _IF_UNIX(false));
}
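/* A minimal caller-side sketch of the contract documented above (variable names
 * here are hypothetical; the lock usage, iteration, and free size follow how the
 * list is built in get_list_of_threads_common):
 *   thread_record_t **threads;
 *   int num_threads, i;
 *   mutex_lock(&thread_initexit_lock);
 *   get_list_of_threads(&threads, &num_threads);
 *   for (i = 0; i < num_threads; i++) {
 *       ... inspect threads[i] ...
 *   }
 *   global_heap_free(threads, num_threads * sizeof(thread_record_t *)
 *                    HEAPACCT(ACCT_THREAD_MGT));
 *   mutex_unlock(&thread_initexit_lock);
 */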
#ifdef UNIX
void
get_list_of_threads_ex(thread_record_t ***list, int *num, bool include_execve)
{
get_list_of_threads_common(list, num, include_execve);
}
#endif
/* assumes caller can ensure that thread is either suspended or self to
* avoid races
*/
thread_record_t *
thread_lookup(thread_id_t tid)
{
thread_record_t *tr;
uint hindex;
/* check that caller is self or has initexit_lock
* FIXME: no way to tell who has initexit_lock
*/
ASSERT(mutex_testlock(&thread_initexit_lock) || tid == get_thread_id());
hindex = HASH_FUNC_BITS(tid, ALL_THREADS_HASH_BITS);
mutex_lock(&all_threads_lock);
if (all_threads == NULL) {
tr = NULL;
} else {
tr = all_threads[hindex];
}
while (tr != NULL) {
if (tr->id == tid) {
mutex_unlock(&all_threads_lock);
return tr;
}
tr = tr->next;
}
mutex_unlock(&all_threads_lock);
return NULL;
}
/* assumes caller can ensure that thread is either suspended or self to
* avoid races
*/
uint
get_thread_num(thread_id_t tid)
{
thread_record_t *tr = thread_lookup(tid);
if (tr != NULL)
return tr->num;
else
return 0; /* yes can't distinguish from 1st thread, who cares */
}
void
add_thread(IF_WINDOWS_ELSE_NP(HANDLE hthread, process_id_t pid),
thread_id_t tid, bool under_dynamo_control,
dcontext_t *dcontext)
{
thread_record_t *tr;
uint hindex;
ASSERT(all_threads != NULL);
/* add entry to thread hashtable */
tr = (thread_record_t *) global_heap_alloc(sizeof(thread_record_t)
HEAPACCT(ACCT_THREAD_MGT));
#ifdef WINDOWS
/* we duplicate the thread pseudo-handle, this should give us full rights
* Note that instead asking explicitly for THREAD_ALL_ACCESS or just for
* THREAD_TERMINATE|THREAD_SUSPEND_RESUME|THREAD_GET_CONTEXT|THREAD_SET_CONTEXT
* does not seem able to acquire more rights than simply duplicating the
* app handle gives.
*/
LOG(GLOBAL, LOG_THREADS, 1, "Thread %d app handle rights: "PFX"\n",
tid, nt_get_handle_access_rights(hthread));
duplicate_handle(NT_CURRENT_PROCESS, hthread, NT_CURRENT_PROCESS,
&tr->handle, 0, 0,
DUPLICATE_SAME_ACCESS|DUPLICATE_SAME_ATTRIBUTES);
/* We prob. only need TERMINATE (for kill thread), SUSPEND/RESUME/GET_CONTEXT
* (for synchronizing), and SET_CONTEXT (+ synchronizing requirements, for
* detach). All access includes this and quite a bit more. */
# if 0
/* eventually should be a real assert, but until we have a story for the
* injected detach threads, have to ifdef out even the ASSERT_CURIOSITY
 * (even a syslog internal warning is prob. too noisy for QA) */
ASSERT_CURIOSITY(TESTALL(THREAD_ALL_ACCESS, nt_get_handle_access_rights(tr->handle)));
# endif
LOG(GLOBAL, LOG_THREADS, 1, "Thread %d our handle rights: "PFX"\n",
tid, nt_get_handle_access_rights(tr->handle));
tr->retakeover = false;
#else
tr->pid = pid;
tr->execve = false;
#endif
tr->id = tid;
ASSERT(tid != INVALID_THREAD_ID); /* ensure os never assigns invalid id to a thread */
tr->under_dynamo_control = under_dynamo_control;
tr->dcontext = dcontext;
if (dcontext != NULL) /* we allow NULL for dr_create_client_thread() */
dcontext->thread_record = tr;
mutex_lock(&all_threads_lock);
tr->num = threads_ever_count++;
hindex = HASH_FUNC_BITS(tr->id, ALL_THREADS_HASH_BITS);
tr->next = all_threads[hindex];
all_threads[hindex] = tr;
/* must be inside all_threads_lock to avoid race w/ get_list_of_threads */
RSTATS_ADD_PEAK(num_threads, 1);
RSTATS_INC(num_threads_created);
num_known_threads++;
mutex_unlock(&all_threads_lock);
}
/* return false if couldn't find the thread */
bool
remove_thread(IF_WINDOWS_(HANDLE hthread) thread_id_t tid)
{
thread_record_t *tr = NULL, *prevtr;
uint hindex = HASH_FUNC_BITS(tid, ALL_THREADS_HASH_BITS);
ASSERT(all_threads != NULL);
mutex_lock(&all_threads_lock);
for (tr = all_threads[hindex], prevtr = NULL; tr; prevtr = tr, tr = tr->next) {
if (tr->id == tid) {
if (prevtr)
prevtr->next = tr->next;
else
all_threads[hindex] = tr->next;
/* must be inside all_threads_lock to avoid race w/ get_list_of_threads */
RSTATS_DEC(num_threads);
#ifdef UNIX
if (tr->execve) {
ASSERT(num_execve_threads > 0);
num_execve_threads--;
}
#endif
num_known_threads--;
#ifdef WINDOWS
close_handle(tr->handle);
#endif
global_heap_free(tr, sizeof(thread_record_t) HEAPACCT(ACCT_THREAD_MGT));
break;
}
}
mutex_unlock(&all_threads_lock);
return (tr != NULL);
}
/* this bool is protected by reset_pending_lock */
DECLARE_FREQPROT_VAR(static bool reset_at_nth_thread_triggered, false);
#ifdef DEBUG
bool dynamo_thread_init_during_process_exit = false;
#endif
/* thread-specific initialization
* if dstack_in is NULL, then a dstack is allocated; else dstack_in is used
* as the thread's dstack
* mc can be NULL for the initial thread
* returns -1 if current thread has already been initialized
*/
int
dynamo_thread_init(byte *dstack_in, priv_mcontext_t *mc
_IF_CLIENT_INTERFACE(bool client_thread))
{
dcontext_t *dcontext;
/* due to lock issues (see below) we need another var */
bool reset_at_nth_thread_pending = false;
bool under_dynamo_control = true;
APP_EXPORT_ASSERT(dynamo_initialized || dynamo_exited ||
get_num_threads() == 0 IF_CLIENT_INTERFACE(|| client_thread),
PRODUCT_NAME" not initialized");
if (INTERNAL_OPTION(nullcalls))
return SUCCESS;
/* note that ENTERING_DR is assumed to have already happened: in apc handler
* for win32, in new_thread_setup for linux, in main init for 1st thread
*/
#if defined(WINDOWS) && defined(DR_APP_EXPORTS)
/* We need to identify a thread we intercepted in its APC when we
* take over all threads on dr_app_start(). Stack and pc checks aren't
* simple b/c it can be in ntdll waiting on a lock.
*/
if (dr_api_entry)
os_take_over_mark_thread(get_thread_id());
#endif
/* Try to handle externally injected threads */
if (dynamo_initialized && !bb_lock_start)
pre_second_thread();
/* synch point so thread creation can be prevented for critical periods */
mutex_lock(&thread_initexit_lock);
/* The assumption is that if dynamo_exited, then we are about to exit and
 * clean up; initializing this thread now would be dangerous, so it is better
 * to wait here for the app to die. This is safe with detach, since a thread
 * should never reach here when dynamo_exited is true during detach */
/* under current implementation of process exit, can happen only under
* debug build, or app_start app_exit interface */
while (dynamo_exited) {
/* FIXME i#2075: free the dstack. */
DODEBUG({dynamo_thread_init_during_process_exit = true; });
/* logging should be safe, though might not actually result in log
* message */
DODEBUG_ONCE(LOG(GLOBAL, LOG_THREADS, 1,
"Thread %d reached initialization point while dynamo exiting, "
"waiting for app to exit\n", get_thread_id()););
mutex_unlock(&thread_initexit_lock);
os_thread_yield();
/* just in case we want to support exited and then restarted at some
* point */
mutex_lock(&thread_initexit_lock);
}
if (is_thread_initialized()) {
mutex_unlock(&thread_initexit_lock);
#if defined(WINDOWS) && defined(DR_APP_EXPORTS)
if (dr_api_entry)
os_take_over_unmark_thread(get_thread_id());
#endif
return -1;
}
os_tls_init();
dcontext = create_new_dynamo_context(true/*initial*/, dstack_in, mc);
initialize_dynamo_context(dcontext);
set_thread_private_dcontext(dcontext);
/* sanity check */
ASSERT(get_thread_private_dcontext() == dcontext);
/* set local state pointer for access from other threads */
dcontext->local_state = get_local_state();
/* set initial mcontext, if known */
if (mc != NULL)
*get_mcontext(dcontext) = *mc;
/* For hotp_only, the thread should run native, not under dr. However,
* the core should still get control of the thread at hook points to track
* what the application is doing & at patched points to execute hot patches.
* It is the same for thin_client except that there are fewer hooks, only to
* follow children.
*/
if (RUNNING_WITHOUT_CODE_CACHE())
under_dynamo_control = false;
/* add entry to thread hashtable before creating logdir so we have the thread num.
* otherwise we'd like to do this only after we'd fully initialized the thread, but we
* hold the thread_initexit_lock, so nobody should be listing us -- thread_lookup
* on other than self, or a thread list, should only be done while the initexit_lock
* is held. CHECK: is this always correct? thread_lookup does have an assert
* to try and enforce but cannot tell who has the lock.
*/
add_thread(IF_WINDOWS_ELSE(NT_CURRENT_THREAD, get_process_id()), get_thread_id(),
under_dynamo_control, dcontext);
#if defined(WINDOWS) && defined(DR_APP_EXPORTS)
/* Now that the thread is in the main thread table we don't need to remember it */
if (dr_api_entry)
os_take_over_unmark_thread(get_thread_id());
#endif
LOG(GLOBAL, LOG_TOP|LOG_THREADS, 1,
"\ndynamo_thread_init: %d thread(s) now, dcontext="PFX", #=%d, id="
TIDFMT", pid="PIDFMT"\n\n",
GLOBAL_STAT(num_threads), dcontext, get_thread_num(get_thread_id()),
get_thread_id(), get_process_id());
DOLOG(1, LOG_STATS, {
dump_global_stats(false);
});
#ifdef DEBUG
if (stats->loglevel > 0) {
dcontext->logfile = open_log_file(thread_logfile_name(), NULL, 0);
print_file(dcontext->logfile, "%s\n", dynamorio_version_string);
} else {
dcontext->logfile = INVALID_FILE;
}
DOLOG(1, LOG_TOP|LOG_THREADS, {
LOG(THREAD, LOG_TOP|LOG_THREADS, 1, PRODUCT_NAME" built with: %s\n", DYNAMORIO_DEFINES);
LOG(THREAD, LOG_TOP|LOG_THREADS, 1, PRODUCT_NAME" built on: %s\n", dynamorio_buildmark);
});
LOG(THREAD, LOG_TOP|LOG_THREADS, 1,
"%sTHREAD %d (dcontext "PFX")\n\n",
IF_CLIENT_INTERFACE_ELSE(client_thread ? "CLIENT " : "", ""),
get_thread_id(), dcontext);
LOG(THREAD, LOG_TOP|LOG_THREADS, 1,
"DR stack is "PFX"-"PFX"\n", dcontext->dstack - DYNAMORIO_STACK_SIZE,
dcontext->dstack);
#endif
#ifdef DEADLOCK_AVOIDANCE
locks_thread_init(dcontext);
#endif
heap_thread_init(dcontext);
DOSTATS({ stats_thread_init(dcontext); });
#ifdef KSTATS
kstat_thread_init(dcontext);
#endif
os_thread_init(dcontext);
arch_thread_init(dcontext);
synch_thread_init(dcontext);
if (!DYNAMO_OPTION(thin_client))
vm_areas_thread_init(dcontext);
monitor_thread_init(dcontext);
fcache_thread_init(dcontext);
link_thread_init(dcontext);
fragment_thread_init(dcontext);
/* This lock has served its purposes: A) a barrier to thread creation for those
* iterating over threads, B) mutex for add_thread, and C) mutex for synch_field
* to be set up.
* So we release it to shrink the time spent w/ this big lock, in particular
* to avoid holding it while running private lib thread init code (i#875).
*/
mutex_unlock(&thread_initexit_lock);
#ifdef CLIENT_INTERFACE
/* Set up client data needed in loader_thread_init for IS_CLIENT_THREAD */
instrument_client_thread_init(dcontext, client_thread);
#endif
loader_thread_init(dcontext);
if (!DYNAMO_OPTION(thin_client)) {
#ifdef CLIENT_INTERFACE
/* put client last, may depend on other thread inits.
* Note that we are calling this prior to instrument_init()
* now (PR 216936), which is required to initialize
* the client dcontext field prior to instrument_init().
*/
instrument_thread_init(dcontext, client_thread, mc != NULL);
#endif
#ifdef SIDELINE
if (dynamo_options.sideline) {
/* wake up sideline thread -- ok to call if thread already awake */
sideline_start();
}
#endif
}
/* must check # threads while holding thread_initexit_lock, yet cannot
* call fcache_reset_all_caches_proactively while holding it due to
* rank order of reset_pending_lock which we must also hold -- so we
* set a local bool reset_at_nth_thread_pending
*/
if (DYNAMO_OPTION(reset_at_nth_thread) != 0 && !reset_at_nth_thread_triggered
&& (uint) get_num_threads() == DYNAMO_OPTION(reset_at_nth_thread)) {
mutex_lock(&reset_pending_lock);
if (!reset_at_nth_thread_triggered) {
reset_at_nth_thread_triggered = true;
reset_at_nth_thread_pending = true;
}
mutex_unlock(&reset_pending_lock);
}
DOLOG(1, LOG_STATS, {
dump_thread_stats(dcontext, false);
});
if (reset_at_nth_thread_pending) {
mutex_lock(&reset_pending_lock);
/* fcache_reset_all_caches_proactively() will unlock */
fcache_reset_all_caches_proactively(RESET_ALL);
}
return SUCCESS;
}
/* We don't free cur thread until after client exit event (PR 536058) except for
* fragment_thread_exit(). Since this is called outside of dynamo_thread_exit()
* on process exit we assume fine to skip enter_threadexit().
*/
void
dynamo_thread_exit_pre_client(dcontext_t *dcontext, thread_id_t id)
{
/* fcache stats needs to examine fragment state, so run it before
* fragment exit, but real fcache exit needs to be after fragment exit
*/
#ifdef DEBUG
fcache_thread_exit_stats(dcontext);
#endif
/* must abort now to avoid deleting possibly un-deletable fragments
* monitor_thread_exit remains later b/c of monitor_remove_fragment calls
*/
trace_abort_and_delete(dcontext);
fragment_thread_exit(dcontext);
#ifdef CLIENT_INTERFACE
IF_WINDOWS(loader_pre_client_thread_exit(dcontext));
instrument_thread_exit_event(dcontext);
#endif
}
/* thread-specific cleanup */
/* Note: if this routine is not called by the thread whose id is passed in, then
 * other_thread should be true and the calling thread should hold the thread_initexit_lock
*/
static int
dynamo_thread_exit_common(dcontext_t *dcontext, thread_id_t id,
IF_WINDOWS_(bool detach_stacked_callbacks) bool other_thread)
{
dcontext_t *dcontext_tmp;
#ifdef WINDOWS
dcontext_t *dcontext_next;
int num_dcontext;
#endif
bool on_dstack = !other_thread && is_currently_on_dstack(dcontext);
/* cache this now for use after freeing dcontext */
local_state_t *local_state = dcontext->local_state;
if (INTERNAL_OPTION(nullcalls) || dcontext == NULL)
return SUCCESS;
/* make sure don't get into deadlock w/ flusher */
enter_threadexit(dcontext);
/* synch point so thread exiting can be prevented for critical periods */
/* see comment at start of method for other thread exit */
if (!other_thread)
mutex_lock(&thread_initexit_lock);
ASSERT_OWN_MUTEX(true, &thread_initexit_lock);
#ifdef WINDOWS
/* need to clean up thread stack before clean up other thread data, but
* after we're made nolinking
*/
os_thread_stack_exit(dcontext);
/* free the thread's application stack if requested */
if (dcontext->free_app_stack) {
byte *base;
/* only used for nudge threads currently */
ASSERT(dcontext->nudge_target == generic_nudge_target);
if (get_stack_bounds(dcontext, &base, NULL)) {
NTSTATUS res;
ASSERT(base != NULL);
res = nt_free_virtual_memory(base);
ASSERT(NT_SUCCESS(res));
} else {
/* stack should be available here */
ASSERT_NOT_REACHED();
}
}
#endif
#ifdef SIDELINE
/* N.B.: do not clean up any data structures while sideline thread
* is still running! put it to sleep for duration of this routine!
*/
if (!DYNAMO_OPTION(thin_client)) {
if (dynamo_options.sideline) {
/* put sideline thread to sleep */
sideline_stop();
/* sideline_stop will not return until sideline thread is asleep */
}
}
#endif
LOG(GLOBAL, LOG_TOP|LOG_THREADS, 1,
"\ndynamo_thread_exit (thread #%d id="TIDFMT"): %d thread(s) now\n\n",
get_thread_num(id), id, GLOBAL_STAT(num_threads)-1);
DOLOG(1, LOG_STATS, {
dump_global_stats(false);
});
LOG(THREAD, LOG_STATS|LOG_THREADS, 1, "\n## Statistics for this thread:\n");
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
int i;
ASSERT(dcontext);
LOG(THREAD, LOG_STATS|LOG_THREADS, 1, "\nTop ten cache times:\n");
for (i=0; i<10; i++) {
if (dcontext->cache_time[i] > (uint64) 0) {
uint top_part, bottom_part;
divide_int64_print(dcontext->cache_time[i], kilo_hertz, false,
3, &top_part, &bottom_part);
LOG(THREAD, LOG_STATS|LOG_THREADS, 1,
"\t#%2d = %6u.%.3u ms, %9d hits\n",
i+1, top_part, bottom_part, (int)dcontext->cache_count[i]);
}
}
LOG(THREAD, LOG_STATS|LOG_THREADS, 1, "\n");
}
#endif
/* In order to pass the client a dcontext in the process exit event
* we do some thread cleanup early for the final thread so we can delay
* the rest (PR 536058)
*/
if (!dynamo_exited_and_cleaned)
dynamo_thread_exit_pre_client(dcontext, id);
#ifdef CLIENT_INTERFACE
/* PR 243759: don't free client_data until after all fragment deletion events */
if (!DYNAMO_OPTION(thin_client))
instrument_thread_exit(dcontext);
#endif
/* i#920: we can't take segment/timer/asynch actions for other threads.
* This must be called after dynamo_thread_exit_pre_client where
* we called event callbacks.
*/
if (!other_thread)
dynamo_thread_not_under_dynamo(dcontext);
/* We clean up priv libs prior to setting tls dc to NULL so we can use
* TRY_EXCEPT when calling the priv lib entry routine
*/
if (!dynamo_exited ||
(other_thread &&
(IF_WINDOWS_ELSE(!doing_detach, true) ||
dcontext->owning_thread != get_thread_id()))) /* else already did this */
loader_thread_exit(dcontext);
/* set tls dc to NULL prior to cleanup, to avoid problems handling
* alarm signals received during cleanup (we'll suppress if tls
* dc==NULL which seems the right thing to do: not worth our
* effort to pass to another thread if thread-group-shared alarm,
* and if thread-private then thread would have exited soon
* anyway). see PR 596127.
*/
/* make sure we invalidate the dcontext before releasing the memory */
/* when cleaning up other threads, we cannot set their dcs to null,
* but we only do this at dynamorio_app_exit so who cares
*/
/* This must be called after instrument_thread_exit, which uses
* get_thread_private_dcontext for app/dr state checks.
*/
if (id == get_thread_id())
set_thread_private_dcontext(NULL);
fcache_thread_exit(dcontext);
link_thread_exit(dcontext);
monitor_thread_exit(dcontext);
if (!DYNAMO_OPTION(thin_client))
vm_areas_thread_exit(dcontext);
synch_thread_exit(dcontext);
arch_thread_exit(dcontext _IF_WINDOWS(detach_stacked_callbacks));
os_thread_exit(dcontext, other_thread);
DOLOG(1, LOG_STATS, {
dump_thread_stats(dcontext, false);
});
#ifdef KSTATS
kstat_thread_exit(dcontext);
#endif
DOSTATS({ stats_thread_exit(dcontext); });
heap_thread_exit(dcontext);
#ifdef DEADLOCK_AVOIDANCE
locks_thread_exit(dcontext);
#endif
#ifdef DEBUG
if (dcontext->logfile != INVALID_FILE) {
os_flush(dcontext->logfile);
close_log_file(dcontext->logfile);
}
#endif
/* remove thread from threads hashtable */
remove_thread(IF_WINDOWS_(NT_CURRENT_THREAD) id);
dcontext_tmp = dcontext;
#ifdef WINDOWS
/* clean up all the dcs */
num_dcontext = 0;
# ifdef DCONTEXT_IN_EDI
/* go to one end of list */
while (dcontext_tmp->next_saved)
dcontext_tmp = dcontext_tmp->next_saved;
# else
/* already at one end of list */
# endif
/* delete through to other end */
while (dcontext_tmp) {
num_dcontext++;
dcontext_next = dcontext_tmp->prev_unused;
delete_dynamo_context(dcontext_tmp,
dcontext_tmp == dcontext/*do not free dup cb stacks*/
&& !on_dstack/*do not free own stack*/);
dcontext_tmp = dcontext_next;
}
LOG(GLOBAL, LOG_STATS|LOG_THREADS, 1, "\tdynamo contexts used: %d\n",
num_dcontext);
#else /* UNIX */
delete_dynamo_context(dcontext_tmp, !on_dstack/*do not free own stack*/);
#endif /* UNIX */
os_tls_exit(local_state, other_thread);
#ifdef SIDELINE
/* see notes above -- we can now wake up sideline thread */
if (dynamo_options.sideline && get_num_threads() > 0) {
sideline_start();
}
#endif
if (!other_thread) {
mutex_unlock(&thread_initexit_lock);
/* FIXME: once thread_initexit_lock is released, we're not on
* thread list, and a terminate targeting us could kill us in the middle
* of this call -- but this can't come before the unlock b/c the lock's
* in the data segment! (see case 3121)
* (note we do not re-protect for process exit, see !dynamo_exited check
* in exiting_dynamorio)
*/
if (!on_dstack) {
EXITING_DR();
/* else, caller will clean up stack and then call EXITING_DR(),
* probably via dynamo_thread_stack_free_and_exit(), as the stack free
* must be done before the exit
*/
}
}
return SUCCESS;
}
int
dynamo_thread_exit(void)
{
dcontext_t *dcontext = get_thread_private_dcontext();
return dynamo_thread_exit_common(dcontext, get_thread_id(), IF_WINDOWS_(false) false);
}
/* NOTE : you must hold thread_initexit_lock to call this function! */
int
dynamo_other_thread_exit(thread_record_t *tr _IF_WINDOWS(bool detach_stacked_callbacks))
{
/* FIXME: Usually a safe spot for cleaning other threads should be
* under num_exits_dir_syscall, but for now rewinding all the way
*/
KSTOP_REWIND_DC(tr->dcontext, thread_measured);
KSTART_DC(tr->dcontext, thread_measured);
return dynamo_thread_exit_common(tr->dcontext, tr->id,
IF_WINDOWS_(detach_stacked_callbacks) true);
}
/* Called from another stack to finish cleaning up a thread.
* The final steps are to free the stack and perform the exit hook.
*/
void
dynamo_thread_stack_free_and_exit(byte *stack)
{
if (stack != NULL) {
stack_free(stack, DYNAMORIO_STACK_SIZE);
/* ASSUMPTION: if stack is NULL here, the exit was done earlier
* (fixes case 6967)
*/
EXITING_DR();
}
}
#ifdef DR_APP_EXPORTS
/* API routine to initialize DR */
DR_APP_API int
dr_app_setup(void)
{
/* FIXME: we either have to disallow the client calling this with
* more than one thread running, or we have to suspend all the threads.
* We should share the suspend-and-takeover loop (and for dr_app_setup_and_start
* share the takeover portion) from dr_app_start().
*/
int res;
dcontext_t *dcontext;
dr_api_entry = true;
res = dynamorio_app_init();
/* It would be more efficient to avoid setting up signal handlers and
* avoid hooking vsyscall during init, but the code is simpler this way.
*/
dcontext = get_thread_private_dcontext();
os_process_not_under_dynamorio(dcontext);
dynamo_thread_not_under_dynamo(dcontext);
return res;
}
/* API routine to exit DR */
DR_APP_API int
dr_app_cleanup(void)
{
thread_record_t *tr;
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
dr_api_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */
/* XXX: The dynamo_thread_[not_]under_dynamo() routines are not idempotent,
* and must be balanced! On Linux, they track the shared itimer refcount,
* so a mismatch will lead to a refleak or negative refcount.
* dynamorio_app_exit() will call dynamo_thread_not_under_dynamo(), so we
* must ensure that we are under DR before calling it. Therefore, we
* require that the caller call dr_app_stop() before calling
* dr_app_cleanup(). However, we cannot make a usage assertion to that
* effect without addressing the FIXME comments in
* dynamo_thread_not_under_dynamo() about updating tr->under_dynamo_control.
*/
tr = thread_lookup(get_thread_id());
if (tr != NULL && tr->dcontext != NULL) {
os_process_under_dynamorio_initiate(tr->dcontext);
os_process_under_dynamorio_complete(tr->dcontext);
dynamo_thread_under_dynamo(tr->dcontext);
}
return dynamorio_app_exit();
}
/* Called by dr_app_start in arch-specific assembly file */
void
dr_app_start_helper(priv_mcontext_t *mc)
{
apicheck(dynamo_initialized, PRODUCT_NAME" not initialized");
LOG(GLOBAL, LOG_TOP, 1, "dr_app_start in thread "TIDFMT"\n", get_thread_id());
LOG(THREAD_GET, LOG_TOP, 1, "dr_app_start\n");
if (!INTERNAL_OPTION(nullcalls)) {
/* Adjust the app stack to account for the return address + alignment.
* See dr_app_start in x86.asm.
*/
mc->xsp += DYNAMO_START_XSP_ADJUST;
dynamo_start(mc);
/* the interpreter takes over from here */
}
}
/* dummy routine that returns control to the app if it is currently
* under dynamo control
*/
DR_APP_API void
dr_app_stop(void)
{
/* the application regains control in here */
}
DR_APP_API void
dr_app_stop_and_cleanup(void)
{
/* XXX i#95: today this is a full detach, while a separated dr_app_cleanup()
* is not. We should try and have dr_app_cleanup() take this detach path
* here (and then we can simplify exit_synch_state()) but it's more complicated
* and we need to resolve the unbounded dr_app_stop() time.
*/
if (dynamo_initialized && !dynamo_exited && !doing_detach) {
detach_on_permanent_stack(true/*internal*/, true/*do cleanup*/);
}
/* the application regains control in here */
}
DR_APP_API int
dr_app_setup_and_start(void)
{
int r = dr_app_setup();
if (r == SUCCESS)
dr_app_start();
return r;
}
#endif
/* For use by threads that start and stop whether or not dynamo controls them.
*/
void
dynamo_thread_under_dynamo(dcontext_t *dcontext)
{
LOG(THREAD, LOG_ASYNCH, 2, "thread %d under DR control\n",
dcontext->owning_thread);
ASSERT(dcontext != NULL);
/* FIXME: mark under_dynamo_control?
* see comments in not routine below
*/
os_thread_under_dynamo(dcontext);
#ifdef SIDELINE
if (dynamo_options.sideline) {
/* wake up sideline thread -- ok to call if thread already awake */
sideline_start();
}
#endif
dcontext->currently_stopped = false;
dcontext->go_native = false;
}
/* For use by threads that start and stop whether or not dynamo controls them.
* This must be called by the owner of dcontext and not another
* non-executing thread.
*/
void
dynamo_thread_not_under_dynamo(dcontext_t *dcontext)
{
ASSERT_MESSAGE(CHKLVL_ASSERTS+1/*expensive*/, "can only act on executing thread",
dcontext == get_thread_private_dcontext());
if (dcontext == NULL)
return;
LOG(THREAD, LOG_ASYNCH, 2, "thread %d not under DR control\n",
dcontext->owning_thread);
dcontext->currently_stopped = true;
os_thread_not_under_dynamo(dcontext);
#ifdef SIDELINE
/* FIXME: if # active threads is 0, then put sideline thread to sleep! */
if (dynamo_options.sideline) {
/* put sideline thread to sleep */
sideline_stop();
}
#endif
#ifdef DEBUG
os_flush(dcontext->logfile);
#endif
}
#define MAX_TAKE_OVER_ATTEMPTS 4
/* Take over other threads in the current process.
*/
void
dynamorio_take_over_threads(dcontext_t *dcontext)
{
/* We repeatedly check if there are other threads in the process, since
* while we're checking one may be spawning additional threads.
*/
bool found_threads;
uint attempts = 0;
os_process_under_dynamorio_initiate(dcontext);
/* XXX i#1305: we should suspend all the other threads for DR init to
* satisfy the parts of the init process that assume there are no races.
*/
do {
found_threads = os_take_over_all_unknown_threads(dcontext);
attempts++;
if (found_threads && !bb_lock_start)
bb_lock_start = true;
} while (found_threads && attempts < MAX_TAKE_OVER_ATTEMPTS);
os_process_under_dynamorio_complete(dcontext);
if (found_threads) {
SYSLOG(SYSLOG_WARNING, INTERNAL_SYSLOG_WARNING,
3, get_application_name(), get_application_pid(),
"Failed to take over all threads after multiple attempts");
ASSERT_NOT_REACHED();
}
DO_ONCE({
char buf[16];
int num_threads = get_num_threads();
if (num_threads > 1) { /* avoid for early injection */
snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%d", num_threads);
NULL_TERMINATE_BUFFER(buf);
SYSLOG(SYSLOG_INFORMATION, INFO_ATTACHED, 3, buf, get_application_name(),
get_application_pid());
}
});
}
/* Called by dynamorio_app_take_over in arch-specific assembly file */
void
dynamorio_app_take_over_helper(priv_mcontext_t *mc)
{
static bool have_taken_over = false; /* ASSUMPTION: not an actual write */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
APP_EXPORT_ASSERT(dynamo_initialized, PRODUCT_NAME" not initialized");
#ifdef RETURN_AFTER_CALL
/* FIXME : this is set after dynamo_initialized, so a slight race with
* an injected thread turning on .C protection before the main thread
* sets this. */
dr_preinjected = true; /* currently only relevant on Win32 */
#endif
LOG(GLOBAL, LOG_TOP, 1, "taking over via preinject in %s\n", __FUNCTION__);
if (!INTERNAL_OPTION(nullcalls) && !have_taken_over) {
have_taken_over = true;
LOG(GLOBAL, LOG_TOP, 1, "dynamorio_app_take_over\n");
/* set this flag to indicate that we should run until the program dies: */
automatic_startup = true;
if (DYNAMO_OPTION(inject_primary))
take_over_primary_thread();
/* who knows when this was called -- no guarantee we control all threads --
* unless we were auto-injected (preinject library calls this routine)
*/
control_all_threads = automatic_startup;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
/* Adjust the app stack to account for the return address + alignment.
* See dynamorio_app_take_over in x86.asm.
*/
mc->xsp += DYNAMO_START_XSP_ADJUST;
/* For hotp_only and thin_client, the app should run native, except
* for our hooks.
* This is where apps hooked using appinit key are let go native.
* Even though control is going to native app code, we want
* automatic_startup and control_all_threads set.
*/
if (!RUNNING_WITHOUT_CODE_CACHE())
dynamo_start(mc);
/* the interpreter takes over from here */
} else
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
#ifdef WINDOWS
extern app_pc parent_early_inject_address; /* from os.c */
/* in arch-specific assembly file */
void dynamorio_app_take_over(void);
DYNAMORIO_EXPORT void
dynamorio_app_init_and_early_takeover(uint inject_location, void *restore_code)
{
int res;
ASSERT(!dynamo_initialized && !dynamo_exited);
/* This routine combines dynamorio_app_init() and dynamrio_app_takeover into
* a single routine that also handles any early injection cleanup needed. */
ASSERT_NOT_IMPLEMENTED(inject_location != INJECT_LOCATION_KiUserApc);
/* currently only Ldr* hook points are known to work */
ASSERT_CURIOSITY(INJECT_LOCATION_IS_LDR(inject_location));
/* See notes in os.c DLLMain. When early injected we are unable to find
* the address of LdrpLoadDll so we use the parent's value which is passed
* to us at the start of restore_code. FIXME - if we start using multiple
* inject locations we'll probably have to ensure we always pass this.
*/
if (INJECT_LOCATION_IS_LDR(inject_location)) {
parent_early_inject_address = *(app_pc *)restore_code;
}
dr_early_injected = true;
dr_early_injected_location = inject_location;
res = dynamorio_app_init();
ASSERT(res == SUCCESS);
ASSERT(dynamo_initialized && !dynamo_exited);
LOG(GLOBAL, LOG_TOP, 1, "taking over via early injection in %s\n", __FUNCTION__);
/* FIXME - restore code needs to be freed, but we have to return through it
* first... could instead duplicate its tail here if we wrap this
* routine in asm or eqv. pass the continuation state in as args. */
ASSERT(inject_location != INJECT_LOCATION_KiUserApc);
dynamorio_app_take_over();
}
/* Called with DR library mapped in but without its imports processed.
*/
void
dynamorio_earliest_init_takeover_C(byte *arg_ptr)
{
int res;
bool earliest_inject;
/* Windows-specific code for the most part */
earliest_inject = earliest_inject_init(arg_ptr);
/* Initialize now that DR dll imports are hooked up */
if (earliest_inject) {
dr_earliest_injected = true;
dr_earliest_inject_args = arg_ptr;
} else
dr_early_injected = true;
res = dynamorio_app_init();
ASSERT(res == SUCCESS);
ASSERT(dynamo_initialized && !dynamo_exited);
LOG(GLOBAL, LOG_TOP, 1, "taking over via earliest injection in %s\n", __FUNCTION__);
/* earliest_inject_cleanup() is called within dynamorio_app_init() to avoid
* confusing the exec areas scan
*/
/* Take over at retaddr
*
* XXX i#626: app_takeover sets preinjected for rct (should prob. rename)
* which needs to be done whenever we takeover not at the bottom of the
* callstack. For earliest won't need to set this if we takeover
* in such a way as to handle the return back to our hook code without a
* violation -- though currently we will see 3 rets (return from
* dynamorio_app_take_over(), return from here, and return from
* dynamorio_earliest_init_takeover() to app hook code).
* Should we have dynamorio_earliest_init_takeover() set up an
* mcontext that we can go to directly instead of interpreting
* the returns in our own code? That would make tools that shadow
* callstacks simpler too.
*/
dynamorio_app_take_over();
}
#endif /* WINDOWS */
/***************************************************************************
* SELF-PROTECTION
*/
/* FIXME: even with -single_privileged_thread, we aren't fully protected,
* because there's a window between us resuming the other threads and
* returning to our caller where another thread could clobber our return
* address or something.
*/
static void
dynamorio_protect(void)
{
ASSERT(SELF_PROTECT_ON_CXT_SWITCH);
LOG(GLOBAL, LOG_DISPATCH, 4, "dynamorio_protect thread="TIDFMT"\n", get_thread_id());
/* we don't protect local heap here, that's done lazily */
mutex_lock(&protect_info->lock);
ASSERT(protect_info->num_threads_unprot > 0);
/* FIXME: nice to also catch double enters but would need to track more info */
if (protect_info->num_threads_unprot <= 0) {
/* Defensive code to prevent crashes from double exits (the theory
* for case 7631/8030). However, this precludes an extra exit+enter
* pair from working properly (though an extra enter+exit will continue
* to work), though such a pair would have crashed if another thread
* had entered in the interim anyway.
*/
protect_info->num_threads_unprot = 0;
mutex_unlock(&protect_info->lock);
return;
}
protect_info->num_threads_unprot--;
if (protect_info->num_threads_unprot > 0) {
/* other threads still in DR, cannot protect global memory */
LOG(GLOBAL, LOG_DISPATCH, 4, "dynamorio_protect: not last thread => nop\n");
mutex_unlock(&protect_info->lock);
return;
}
SELF_PROTECT_GLOBAL(READONLY);
if (INTERNAL_OPTION(single_privileged_thread)) {
/* FIXME: want to resume threads and allow thread creation only
* _after_ protect data segment, but lock is in data segment!
*/
if (protect_info->num_threads_suspended > 0) {
thread_record_t *tr;
int i, num = 0;
/* we do not need to grab the all_threads_lock because
* no threads can be added or removed so who cares if we
* access the data structure simultaneously with another
* reader of it
*/
for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
for (tr = all_threads[i]; tr; tr = tr->next) {
if (tr->under_dynamo_control) {
os_thread_resume(all_threads[i]);
num++;
}
}
}
ASSERT(num == protect_info->num_threads_suspended);
protect_info->num_threads_suspended = 0;
}
/* thread init/exit can proceed now */
mutex_unlock(&thread_initexit_lock);
}
/* FIXME case 8073: temporary until we put in unprots in the
* right places. if we were to leave this here we'd want to combine
* .fspdata and .cspdata for more efficient prot changes.
*/
SELF_PROTECT_DATASEC(DATASEC_FREQ_PROT);
SELF_PROTECT_DATASEC(DATASEC_CXTSW_PROT);
mutex_unlock(&protect_info->lock);
}
static void
dynamorio_unprotect(void)
{
ASSERT(SELF_PROTECT_ON_CXT_SWITCH);
mutex_lock(&protect_info->lock); /* lock in unprot heap, not data segment, so safe! */
protect_info->num_threads_unprot++;
if (protect_info->num_threads_unprot == 1) {
/* was protected, so we need to do the unprotection */
SELF_UNPROTECT_DATASEC(DATASEC_CXTSW_PROT);
/* FIXME case 8073: temporary until we put in unprots in the
* right places. if we were to leave this here we'd want to combine
* .fspdata and .cspdata for more efficient prot changes.
*/
SELF_UNPROTECT_DATASEC(DATASEC_FREQ_PROT);
if (INTERNAL_OPTION(single_privileged_thread)) {
/* FIXME: want to suspend all other threads _before_ unprotecting anything,
* but need to guarantee no new threads while we're suspending them,
* and can't do that without setting a lock => need data segment!
*/
mutex_lock(&thread_initexit_lock);
if (get_num_threads() > 1) {
thread_record_t *tr;
int i;
/* current multiple-thread solution: suspend all other threads! */
ASSERT(protect_info->num_threads_suspended == 0);
/* we do not need to grab the all_threads_lock because
* no threads can be added or removed so who cares if we
* access the data structure simultaneously with another
* reader of it
*/
for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
for (tr = all_threads[i]; tr; tr = tr->next) {
if (tr->under_dynamo_control) {
DEBUG_DECLARE(bool ok =)
os_thread_suspend(all_threads[i]);
ASSERT(ok);
protect_info->num_threads_suspended++;
}
}
}
}
/* we don't unlock or resume threads until we re-enter cache */
}
SELF_PROTECT_GLOBAL(WRITABLE);
}
/* we don't re-protect local heap here, that's done at points where
* it was protected lazily
*/
mutex_unlock(&protect_info->lock);
LOG(GLOBAL, LOG_DISPATCH, 4, "dynamorio_unprotect thread="TIDFMT"\n", get_thread_id());
}
#ifdef DEBUG
const char *
get_data_section_name(app_pc pc)
{
uint i;
for (i=0; i<DATASEC_NUM; i++) {
if (pc >= datasec_start[i] && pc < datasec_end[i])
return DATASEC_NAMES[i];
}
return NULL;
}
bool
check_should_be_protected(uint sec)
{
/* Blindly asserting that a data section is protected is racy as
* another thread could be in an unprot window. We use some
* heuristics to try and identify bugs where a section is left
* unprot, but it's not easy.
*/
if (/* case 8107: for INJECT_LOCATION_LdrpLoadImportModule we
* load a helper library and end up in dispatch() for
* syscall_while_native before DR is initialized.
*/
!dynamo_initialized ||
#ifdef WINDOWS
/* case 8113: detach currently unprots .data prior to its
* thread synch, so don't count anything after that
*/
doing_detach ||
#endif
!TEST(DATASEC_SELFPROT[sec], DYNAMO_OPTION(protect_mask)) ||
DATASEC_PROTECTED(sec))
return true;
STATS_INC(datasec_not_prot);
/* FIXME: even checking get_num_threads()==1 is still racy as a thread could
* exit, and it's not worth grabbing thread_initexit_lock here..
*/
if (threads_ever_count == 1
#ifdef DR_APP_EXPORTS
/* For start/stop, can be other threads running around so we bail on
* perfect protection
*/
&& !dr_api_entry
#endif
)
return false;
/* FIXME: no count of threads in DR or anything so can't conclude much
* Just return true and hope developer looks at datasec_not_prot stats.
* We do have an ASSERT_CURIOSITY on the stat in data_section_exit().
*/
return true;
}
# ifdef WINDOWS
/* Assumed to only be called about DR dll writable regions */
bool
data_sections_enclose_region(app_pc start, app_pc end)
{
/* Rather than solve the general enclose problem by sorting,
* we subtract each piece we find.
* It used to be that on 32-bit .data|.fspdata|.cspdata|.nspdata formed
* the only writable region, with .pdata between .data and .fspdata on 64.
* But building with VS2012, I'm seeing the sections in other orders (i#1075).
* And with x64 reachability we moved the interception buffer in .data,
* and marking it +rx results in sub-section calls to here.
*/
int i;
bool found_start = false, found_end = false;
ssize_t sz = end - start;
for (i = 0; i < DATASEC_NUM; i++) {
if (datasec_start[i] <= end && datasec_end[i] >= start) {
byte *overlap_start = MAX(datasec_start[i], start);
byte *overlap_end = MIN(datasec_end[i], end);
sz -= overlap_end - overlap_start;
}
}
return sz == 0;
}
# endif /* WINDOWS */
#endif /* DEBUG */
static void
get_data_section_bounds(uint sec)
{
/* FIXME: on linux we should include .got and .dynamic in one of our
* sections, requiring specifying the order of sections (case 3789)!
* Should use an ld script to ensure that .nspdata is last, or find a unique
* attribute to force separation (perhaps mark as rwx, then
* remove the x at init time?) ld 2.15 puts it at the end, but
* ld 2.13 puts .got and .dynamic after it! For now we simply
* don't protect subsequent guys.
* On win32 there are no other rw sections, fortunately.
*/
ASSERT(sec >=0 && sec < DATASEC_NUM);
/* for DEBUG we use for data_sections_enclose_region() */
ASSERT(IF_WINDOWS(IF_DEBUG(true ||))
TEST(DATASEC_SELFPROT[sec], dynamo_options.protect_mask));
mutex_lock(&datasec_lock[sec]);
ASSERT(datasec_start[sec] == NULL);
get_named_section_bounds(get_dynamorio_dll_start(), DATASEC_NAMES[sec],
&datasec_start[sec], &datasec_end[sec]);
mutex_unlock(&datasec_lock[sec]);
ASSERT(ALIGNED(datasec_start[sec], PAGE_SIZE));
ASSERT(ALIGNED(datasec_end[sec], PAGE_SIZE));
ASSERT(datasec_start[sec] < datasec_end[sec]);
#ifdef WINDOWS
if (IF_DEBUG(true ||) TEST(DATASEC_SELFPROT[sec], dynamo_options.protect_mask))
merge_writecopy_pages(datasec_start[sec], datasec_end[sec]);
#endif
}
#ifdef UNIX
/* We get into problems if we keep a .section open across string literals, etc.
* (such as when wrapping a function to get its local-scope statics in that section),
* but the VAR_IN_SECTION does the real work for us, just so long as we have one
* .section decl somewhere.
*/
DECLARE_DATA_SECTION(RARELY_PROTECTED_SECTION, "w")
DECLARE_DATA_SECTION(FREQ_PROTECTED_SECTION, "w")
DECLARE_DATA_SECTION(NEVER_PROTECTED_SECTION, "w")
END_DATA_SECTION_DECLARATIONS()
#endif
static void
data_section_init(void)
{
uint i;
for (i=0; i<DATASEC_NUM; i++) {
if (datasec_start[i] != NULL) {
/* We were called early due to an early syslog.
* We still retain our slightly later normal init position so we can
* log, etc. in normal runs.
*/
return;
}
ASSIGN_INIT_LOCK_FREE(datasec_lock[i], datasec_selfprot_lock);
/* for DEBUG we use for data_sections_enclose_region() */
if (IF_WINDOWS(IF_DEBUG(true ||))
TEST(DATASEC_SELFPROT[i], dynamo_options.protect_mask)) {
get_data_section_bounds(i);
}
}
DOCHECK(1, {
/* ensure no overlaps */
uint j;
for (i=0; i<DATASEC_NUM; i++) {
for (j=i+1; j<DATASEC_NUM; j++) {
ASSERT(datasec_start[i] >= datasec_end[j] ||
datasec_start[j] >= datasec_end[i]);
}
}
});
}
static void
data_section_exit(void)
{
uint i;
DOSTATS({
/* There can't have been that many races.
* A failure to re-protect should result in a ton of dispatch
* entrances w/ .data unprot, so should show up here.
* However, an app with threads that are initializing in DR and thus
* unprotected .data while other threads are running new code (such as
* on attach) can easily rack up hundreds of unprot cache entrances.
*/
ASSERT_CURIOSITY(GLOBAL_STAT(datasec_not_prot) < 5000);
});
for (i=0; i<DATASEC_NUM; i++)
DELETE_LOCK(datasec_lock[i]);
}
#define DATASEC_WRITABLE_MOD(which, op) \
((which) == DATASEC_RARELY_PROT ? (datasec_writable_rareprot op) : \
((which) == DATASEC_CXTSW_PROT ? (datasec_writable_cxtswprot op) : \
((which) == DATASEC_FREQ_PROT ? (datasec_writable_freqprot op) : \
(ASSERT_NOT_REACHED(), datasec_writable_neverprot))))
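/* For example, DATASEC_WRITABLE_MOD(DATASEC_FREQ_PROT, ++) evaluates to
 * (datasec_writable_freqprot ++): the macro applies "op" to the per-section
 * writability counter selected by "which".
 */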
/* WARNING: any DO_ONCE will call this routine, so don't call anything here
* that has a DO_ONCE, to avoid deadlock!
*/
void
protect_data_section(uint sec, bool writable)
{
ASSERT(sec >=0 && sec < DATASEC_NUM);
ASSERT(TEST(DATASEC_SELFPROT[sec], dynamo_options.protect_mask));
/* We can be called very early before data_section_init() so init here
* (data_section_init() has no dependences).
*/
if (datasec_start[sec] == NULL) {
/* should only happen early in init */
ASSERT(!dynamo_initialized);
data_section_init();
}
mutex_lock(&datasec_lock[sec]);
ASSERT(datasec_start[sec] != NULL);
/* if using libc, we cannot print while data segment is read-only!
* thus, if making it writable, do that first, otherwise do it last.
* w/ ntdll this is not a problem.
*/
/* Remember that multiple threads can be doing (unprotect,protect) pairs of
* calls simultaneously. The datasec_lock makes each individual call atomic,
* and if all calls are properly nested, our use of counters should result in
* the proper protection only after the final protect call and not in the
* middle of some other thread's writes to the data section.
*/
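/* For example: with two properly nested writers A and B, DATASEC_WRITABLE(sec)
 * goes 0 -> 1 -> 2 -> 1 -> 0; make_writable() runs only on the 0 -> 1 transition
 * and make_unwritable() only when the count drops back to 0, so the section
 * stays writable across the entire overlap.
 */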
if (writable) {
/* On-context-switch protection has a separate mechanism for
* only protecting when the final thread leaves DR
*/
ASSERT_CURIOSITY(DATASEC_WRITABLE(sec) <= 2); /* shouldn't nest too deep! */
if (DATASEC_WRITABLE(sec) == 0) {
make_writable(datasec_start[sec], datasec_end[sec] - datasec_start[sec]);
STATS_INC(datasec_prot_changes);
} else
STATS_INC(datasec_prot_wasted_calls);
(void)DATASEC_WRITABLE_MOD(sec, ++);
}
LOG(TEST(DATASEC_SELFPROT[sec], SELFPROT_ON_CXT_SWITCH) ? THREAD_GET : GLOBAL,
LOG_VMAREAS, TEST(DATASEC_SELFPROT[sec], SELFPROT_ON_CXT_SWITCH) ? 3U : 2U,
"protect_data_section: thread "TIDFMT" %s (recur %d, stat %d) %s %s %d\n",
get_thread_id(), DATASEC_WRITABLE(sec) == 1 ? "changing" : "nop",
DATASEC_WRITABLE(sec), GLOBAL_STAT(datasec_not_prot),
DATASEC_NAMES[sec], writable ? "rw" : "r", DATASEC_WRITABLE(sec));
if (!writable) {
ASSERT(DATASEC_WRITABLE(sec) > 0);
(void)DATASEC_WRITABLE_MOD(sec, --);
if (DATASEC_WRITABLE(sec) == 0) {
make_unwritable(datasec_start[sec], datasec_end[sec] - datasec_start[sec]);
STATS_INC(datasec_prot_changes);
} else
STATS_INC(datasec_prot_wasted_calls);
}
mutex_unlock(&datasec_lock[sec]);
}
/* enter/exit DR hooks */
void
entering_dynamorio(void)
{
if (SELF_PROTECT_ON_CXT_SWITCH)
dynamorio_unprotect();
ASSERT(HOOK_ENABLED);
LOG(GLOBAL, LOG_DISPATCH, 3, "entering_dynamorio thread="TIDFMT"\n", get_thread_id());
STATS_INC(num_entering_DR);
if (INTERNAL_OPTION(single_thread_in_DR)) {
acquire_recursive_lock(&thread_in_DR_exclusion);
LOG(GLOBAL, LOG_DISPATCH, 3, "entering_dynamorio thread="TIDFMT" count=%d\n",
get_thread_id(), thread_in_DR_exclusion.count);
}
}
void
exiting_dynamorio(void)
{
ASSERT(HOOK_ENABLED);
LOG(GLOBAL, LOG_DISPATCH, 3, "exiting_dynamorio thread="TIDFMT"\n", get_thread_id());
STATS_INC(num_exiting_DR);
if (INTERNAL_OPTION(single_thread_in_DR)) {
/* thread init/exit can proceed now */
LOG(GLOBAL, LOG_DISPATCH, 3, "exiting_dynamorio thread="TIDFMT" count=%d\n",
get_thread_id(), thread_in_DR_exclusion.count - 1);
release_recursive_lock(&thread_in_DR_exclusion);
}
if (SELF_PROTECT_ON_CXT_SWITCH && !dynamo_exited)
dynamorio_protect();
}
/* Note this includes any stack guard pages */
bool
is_on_initstack(byte *esp)
{
return (esp <= initstack && esp > initstack - DYNAMORIO_STACK_SIZE);
}
/* Note this includes any stack guard pages */
bool
is_on_dstack(dcontext_t *dcontext, byte *esp)
{
return (esp <= dcontext->dstack &&
esp > dcontext->dstack - DYNAMORIO_STACK_SIZE);
}
bool
is_currently_on_dstack(dcontext_t *dcontext)
{
byte *cur_esp;
GET_STACK_PTR(cur_esp);
return is_on_dstack(dcontext, cur_esp);
}
void
pre_second_thread(void)
{
/* i#1111: nop-out bb_building_lock until 2nd thread created.
* While normally we'll call this in the primary thread while not holding
* the lock, it's possible on Windows for an externally injected thread
* (or for a thread sneakily created by some native_exec code w/o going
* through ntdll wrappers) to appear. We solve the problem of the main
* thread currently holding bb_building_lock and us turning its
* unlock into an error by the bb_lock_would_have bool in
* SHARED_BB_UNLOCK().
*/
if (!bb_lock_start) {
mutex_lock(&bb_building_lock);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
bb_lock_start = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
mutex_unlock(&bb_building_lock);
}
}
| 1 | 11,366 | _and_synched seems to be more consistent w/ exited_and_cleaned | DynamoRIO-dynamorio | c |
@@ -13,6 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
+#if NETCOREAPP3_1
using Moq;
using Newtonsoft.Json;
using OpenTelemetry.Trace; | 1 | // <copyright file="DurationTest.cs" company="OpenTelemetry Authors">
// Copyright 2018, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using Moq;
using Newtonsoft.Json;
using OpenTelemetry.Trace;
using OpenTelemetry.Trace.Configuration;
using OpenTelemetry.Trace.Export;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Http;
using System.Reflection;
using System.Threading.Tasks;
using Xunit;
namespace OpenTelemetry.Collector.Dependencies.Tests
{
public partial class HttpClientTests
{
public class HttpOutTestCase
{
public string Name { get; set; }
public string Method { get; set; }
public string Url { get; set; }
public Dictionary<string, string> Headers { get; set; }
public int ResponseCode { get; set; }
public string SpanName { get; set; }
public string SpanKind { get; set; }
public string SpanStatus { get; set; }
public bool? SpanStatusHasDescription { get; set; }
public Dictionary<string, string> SpanAttributes { get; set; }
public bool SetHttpFlavor { get; set; }
}
private static IEnumerable<object[]> ReadTestCases()
{
var assembly = Assembly.GetExecutingAssembly();
var serializer = new JsonSerializer();
var input = serializer.Deserialize<HttpOutTestCase[]>(new JsonTextReader(new StreamReader(assembly.GetManifestResourceStream("OpenTelemetry.Collector.Dependencies.Tests.http-out-test-cases.json"))));
return GetArgumentsFromTestCaseObject(input);
}
private static IEnumerable<object[]> GetArgumentsFromTestCaseObject(IEnumerable<HttpOutTestCase> input)
{
var result = new List<object[]>();
foreach (var testCase in input)
{
result.Add(new object[] {
testCase,
});
}
return result;
}
public static IEnumerable<object[]> TestData => ReadTestCases();
[Theory]
[MemberData(nameof(TestData))]
public async Task HttpOutCallsAreCollectedSuccessfullyAsync(HttpOutTestCase tc)
{
var serverLifeTime = TestServer.RunServer(
(ctx) =>
{
ctx.Response.StatusCode = tc.ResponseCode == 0 ? 200 : tc.ResponseCode;
ctx.Response.OutputStream.Close();
},
out var host,
out var port);
var spanProcessor = new Mock<SpanProcessor>();
var tracer = TracerFactory.Create(b => b
.AddProcessorPipeline(p => p.AddProcessor(_ => spanProcessor.Object)))
.GetTracer(null);
tc.Url = NormalizeValues(tc.Url, host, port);
using (serverLifeTime)
using (new HttpClientCollector(tracer, new HttpClientCollectorOptions() { SetHttpFlavor = tc.SetHttpFlavor }))
{
try
{
using var c = new HttpClient();
var request = new HttpRequestMessage
{
RequestUri = new Uri(tc.Url),
Method = new HttpMethod(tc.Method),
Version = new Version(2, 0),
};
if (tc.Headers != null)
{
foreach (var header in tc.Headers)
{
request.Headers.Add(header.Key, header.Value);
}
}
await c.SendAsync(request);
}
catch (Exception)
{
//test case can intentionally send request that will result in exception
}
}
Assert.Equal(2, spanProcessor.Invocations.Count); // begin and end was called
var span = (SpanData)spanProcessor.Invocations[1].Arguments[0];
Assert.Equal(tc.SpanName, span.Name);
Assert.Equal(tc.SpanKind, span.Kind.ToString());
var d = new Dictionary<CanonicalCode, string>()
{
{ CanonicalCode.Ok, "OK"},
{ CanonicalCode.Cancelled, "CANCELLED"},
{ CanonicalCode.Unknown, "UNKNOWN"},
{ CanonicalCode.InvalidArgument, "INVALID_ARGUMENT"},
{ CanonicalCode.DeadlineExceeded, "DEADLINE_EXCEEDED"},
{ CanonicalCode.NotFound, "NOT_FOUND"},
{ CanonicalCode.AlreadyExists, "ALREADY_EXISTS"},
{ CanonicalCode.PermissionDenied, "PERMISSION_DENIED"},
{ CanonicalCode.ResourceExhausted, "RESOURCE_EXHAUSTED"},
{ CanonicalCode.FailedPrecondition, "FAILED_PRECONDITION"},
{ CanonicalCode.Aborted, "ABORTED"},
{ CanonicalCode.OutOfRange, "OUT_OF_RANGE"},
{ CanonicalCode.Unimplemented, "UNIMPLEMENTED"},
{ CanonicalCode.Internal, "INTERNAL"},
{ CanonicalCode.Unavailable, "UNAVAILABLE"},
{ CanonicalCode.DataLoss, "DATA_LOSS"},
{ CanonicalCode.Unauthenticated, "UNAUTHENTICATED"},
};
Assert.Equal(tc.SpanStatus, d[span.Status.CanonicalCode]);
if (tc.SpanStatusHasDescription.HasValue)
Assert.Equal(tc.SpanStatusHasDescription.Value, !string.IsNullOrEmpty(span.Status.Description));
var normalizedAttributes = span.Attributes.ToDictionary(x => x.Key, x => x.Value.ToString());
tc.SpanAttributes = tc.SpanAttributes.ToDictionary(x => x.Key, x => NormalizeValues(x.Value, host, port));
Assert.Equal(tc.SpanAttributes.ToHashSet(), normalizedAttributes.ToHashSet());
}
[Fact]
public async Task DebugIndividualTestAsync()
{
var serializer = new JsonSerializer();
var input = serializer.Deserialize<HttpOutTestCase[]>(new JsonTextReader(new StringReader(@"
[ {
""name"": ""Response code 404"",
""method"": ""GET"",
""url"": ""http://{host}:{port}/path/12314/?q=ddds#123"",
""responseCode"": 404,
""spanName"": ""/path/12314/"",
""spanStatus"": ""NOT_FOUND"",
""spanKind"": ""Client"",
""spanAttributes"": {
""component"": ""http"",
""http.method"": ""GET"",
""http.host"": ""{host}:{port}"",
""http.status_code"": ""404"",
""http.url"": ""http://{host}:{port}/path/12314/?q=ddds#123""
}
}
]
")));
var t = (Task)GetType().InvokeMember(nameof(HttpOutCallsAreCollectedSuccessfullyAsync), BindingFlags.InvokeMethod, null, this, GetArgumentsFromTestCaseObject(input).First());
await t;
}
private string NormalizeValues(string value, string host, int port)
{
return value.Replace("{host}", host).Replace("{port}", port.ToString());
}
}
}
| 1 | 13,384 | nit: in case of a whole-file `ifdef` it may be helpful to have a `_netcore31` suffix in the name of the file as well. | open-telemetry-opentelemetry-dotnet | .cs |
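A sketch of what the reviewer's nit above could look like in practice (the file
name shown is hypothetical; only the NETCOREAPP3_1 guard comes from the patch and
the _netcore31 suffix from the review):
    // HttpClientTests_netcore31.cs  (hypothetical file name)
    #if NETCOREAPP3_1
    // ... whole-file test code ...
    #endif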
@@ -35,12 +35,9 @@ public class SetNetworkConnection extends WebDriverHandler<Number> implements Js
@SuppressWarnings("unchecked")
@Override
public void setJsonParameters(Map<String, Object> allParameters) throws Exception {
- Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters");
- Map<String, Object> typeMap = parameters.get("type");
-
- type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()),
- Boolean.parseBoolean(typeMap.get("dataEnabled").toString()),
- Boolean.parseBoolean(typeMap.get("airplaneMode").toString()));
+ Map<String, Object> parameters = (Map<String, Object>)allParameters.get("parameters");
+ Long bitmask = (Long) parameters.get("type");
+ type = new ConnectionType(bitmask.intValue());
}
@Override | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.server.handler.mobile;
import java.util.Map;
import org.openqa.selenium.mobile.NetworkConnection.ConnectionType;
import org.openqa.selenium.remote.server.JsonParametersAware;
import org.openqa.selenium.remote.server.Session;
import org.openqa.selenium.remote.server.handler.WebDriverHandler;
import org.openqa.selenium.remote.server.handler.html5.Utils;
public class SetNetworkConnection extends WebDriverHandler<Number> implements JsonParametersAware {
private volatile ConnectionType type;
public SetNetworkConnection(Session session) {
super(session);
}
@SuppressWarnings("unchecked")
@Override
public void setJsonParameters(Map<String, Object> allParameters) throws Exception {
Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters");
Map<String, Object> typeMap = parameters.get("type");
type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()),
Boolean.parseBoolean(typeMap.get("dataEnabled").toString()),
Boolean.parseBoolean(typeMap.get("airplaneMode").toString()));
}
@Override
public Number call() throws Exception {
return Integer.parseInt(Utils.getNetworkConnection(getUnwrappedDriver()).setNetworkConnection(type).toString());
}
@Override
public String toString() {
return String.format("[set network connection : %s]", type.toString());
}
}
| 1 | 13,952 | should use Number instead of Long | SeleniumHQ-selenium | js |
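The comment above targets the `(Long)` cast in the patch. Casting the deserialized value to the supertype `Number` avoids tying the handler to one concrete boxed type; a minimal sketch of that version (same method as in the patch, only the cast changed):

```java
@SuppressWarnings("unchecked")
@Override
public void setJsonParameters(Map<String, Object> allParameters) throws Exception {
  Map<String, Object> parameters = (Map<String, Object>) allParameters.get("parameters");
  // Number covers Integer, Long, etc., so intValue() works no matter which
  // boxed numeric type the JSON layer happened to produce for "type".
  Number bitmask = (Number) parameters.get("type");
  type = new ConnectionType(bitmask.intValue());
}
```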
@@ -10,7 +10,8 @@ class CartDecorator < Draper::Decorator
end
def approvals_by_status
- object.approvals.order(
+ # Override default scope
+ object.approvals.reorder(
# http://stackoverflow.com/a/6332081/358804
<<-SQL
CASE approvals.status | 1 | class CartDecorator < Draper::Decorator
delegate_all
def number_approved
object.approved_approvals.count
end
def total_approvers
object.approvals.count
end
def approvals_by_status
object.approvals.order(
# http://stackoverflow.com/a/6332081/358804
<<-SQL
CASE approvals.status
WHEN 'approved' THEN 1
WHEN 'rejected' THEN 2
WHEN 'pending' THEN 3
ELSE 4
END
SQL
)
end
def approvals_in_list_order
if object.flow == 'linear'
object.ordered_approvals
else
self.approvals_by_status
end
end
def display_status
if cart.pending?
'pending approval'
else
cart.status
end
end
def generate_status_message
if self.all_approvals_received?
completed_status_message
else
progress_status_message
end
end
def completed_status_message
"All #{number_approved} of #{total_approvers} approvals have been received. Please move forward with the purchase of Cart ##{object.proposal.public_identifier}."
end
def progress_status_message
"#{number_approved} of #{total_approvers} approved."
end
# @TODO: remove in favor of client_partial or similar
def cart_template_name
origin_name = self.proposal.client
if Cart::ORIGINS.include? origin_name
"#{origin_name}_cart"
else
'cart_mail'
end
end
def prefix_template_name
if self.client == 'navigator'
'navigator_prefix'
else
nil
end
end
end
| 1 | 12,794 | Had no idea that method existed! | 18F-C2 | rb |
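The reaction above is to `reorder` in the patch. For context, a tiny self-contained sketch of the difference between `order` and `reorder` when a default scope already orders the relation (model and columns are made up for illustration):

```ruby
class Approval < ActiveRecord::Base
  default_scope { order(:created_at) }
end

Approval.order(:status)    # appends:  ORDER BY created_at, status
Approval.reorder(:status)  # replaces: ORDER BY status
```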
@@ -38,6 +38,7 @@ class TestTabWidget:
qtbot.addWidget(w)
monkeypatch.setattr(tabwidget.objects, 'backend',
usertypes.Backend.QtWebKit)
+ monkeypatch.setattr(w.tabBar(), 'width', w.width)
return w
@pytest.fixture | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Daniel Schadt
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the custom TabWidget/TabBar."""
import functools
import pytest
from PyQt5.QtGui import QIcon, QPixmap
from qutebrowser.mainwindow import tabwidget, tabbedbrowser
from qutebrowser.utils import usertypes
class TestTabWidget:
"""Tests for TabWidget."""
@pytest.fixture
def widget(self, qtbot, monkeypatch, config_stub):
w = tabwidget.TabWidget(0)
qtbot.addWidget(w)
monkeypatch.setattr(tabwidget.objects, 'backend',
usertypes.Backend.QtWebKit)
return w
@pytest.fixture
def browser(self, qtbot, monkeypatch, config_stub):
w = tabbedbrowser.TabbedBrowser(win_id=0, private=False)
qtbot.addWidget(w)
monkeypatch.setattr(tabwidget.objects, 'backend',
usertypes.Backend.QtWebKit)
return w
def test_small_icon_doesnt_crash(self, widget, qtbot, fake_web_tab):
"""Test that setting a small icon doesn't produce a crash.
Regression test for #1015.
"""
# Size taken from issue report
pixmap = QPixmap(72, 1)
icon = QIcon(pixmap)
tab = fake_web_tab()
widget.addTab(tab, icon, 'foobar')
with qtbot.waitExposed(widget):
widget.show()
# Sizing tests
def test_tab_size_same(self, widget, fake_web_tab):
"""Ensure by default, all tab sizes are the same."""
num_tabs = 10
for i in range(num_tabs):
widget.addTab(fake_web_tab(), 'foobar' + str(i))
first_size = widget.tabBar().tabSizeHint(0)
first_size_min = widget.tabBar().minimumTabSizeHint(0)
for i in range(num_tabs):
assert first_size == widget.tabBar().tabSizeHint(i)
assert first_size_min == widget.tabBar().minimumTabSizeHint(i)
@pytest.mark.parametrize("shrink_pinned", [True, False])
@pytest.mark.parametrize("vertical", [True, False])
def test_pinned_size(self, widget, fake_web_tab, config_stub,
shrink_pinned, vertical):
"""Ensure by default, pinned min sizes are forced to title.
If pinned.shrink is not true, then all tabs should be the same
If tabs are vertical, all tabs should be the same"""
num_tabs = 10
for i in range(num_tabs):
widget.addTab(fake_web_tab(), 'foobar' + str(i))
# Set pinned title format longer than unpinned
config_stub.val.tabs.title.format_pinned = "_" * 20
config_stub.val.tabs.title.format = "_" * 2
config_stub.val.tabs.pinned.shrink = shrink_pinned
if vertical:
# Use pixel width so we don't need to mock main-window
config_stub.val.tabs.width = 50
config_stub.val.tabs.position = "left"
pinned_num = [1, num_tabs - 1]
for tab in pinned_num:
widget.set_tab_pinned(widget.widget(tab), True)
first_size = widget.tabBar().tabSizeHint(0)
first_size_min = widget.tabBar().minimumTabSizeHint(0)
for i in range(num_tabs):
if i in pinned_num and shrink_pinned and not vertical:
assert (first_size.width() <
widget.tabBar().tabSizeHint(i).width())
assert (first_size_min.width() <
widget.tabBar().minimumTabSizeHint(i).width())
else:
assert first_size == widget.tabBar().tabSizeHint(i)
assert first_size_min == widget.tabBar().minimumTabSizeHint(i)
@pytest.mark.parametrize("num_tabs", [4, 10])
def test_update_tab_titles_benchmark(self, benchmark, widget,
qtbot, fake_web_tab, num_tabs):
"""Benchmark for update_tab_titles."""
for i in range(num_tabs):
widget.addTab(fake_web_tab(), 'foobar' + str(i))
with qtbot.waitExposed(widget):
widget.show()
benchmark(widget.update_tab_titles)
@pytest.mark.parametrize("num_tabs", [4, 10])
def test_add_remove_tab_benchmark(self, benchmark, browser,
qtbot, fake_web_tab, num_tabs):
"""Benchmark for addTab and removeTab."""
def _run_bench():
for i in range(num_tabs):
browser.widget.addTab(fake_web_tab(), 'foobar' + str(i))
with qtbot.waitExposed(browser):
browser.show()
browser.shutdown()
benchmark(_run_bench)
def test_tab_pinned_benchmark(self, benchmark, widget, fake_web_tab):
"""Benchmark for _tab_pinned."""
widget.addTab(fake_web_tab(), 'foobar')
tab_bar = widget.tabBar()
benchmark(functools.partial(tab_bar._tab_pinned, 0))
| 1 | 22,307 | I wonder if we shouldn't just do `w.show()` here, which causes Qt to correctly calculate the sizes. | qutebrowser-qutebrowser | py |
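The reviewer above wonders whether the fixture could just show the widget so Qt computes real geometry, instead of monkeypatching `tabBar().width`. A rough sketch of that alternative, reusing the imports already present in the test file; whether it behaves identically under the test runner is exactly what the comment leaves open:

```python
@pytest.fixture
def widget(self, qtbot, monkeypatch, config_stub):
    w = tabwidget.TabWidget(0)
    qtbot.addWidget(w)
    monkeypatch.setattr(tabwidget.objects, 'backend',
                        usertypes.Backend.QtWebKit)
    # Showing the widget lets Qt calculate the tab bar width for real,
    # instead of aliasing it to the (unshown) widget's width.
    w.show()
    return w
```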
@@ -24,12 +24,14 @@ class SparkDataFrameS3StoragePlugin(TypeStoragePlugin): # pylint: disable=no-in
@classmethod
def set_object(cls, object_store, obj, _context, _runtime_type, paths):
target_path = object_store.key_for_paths(paths)
- obj.write.parquet('s3a://' + target_path)
+ obj.write.parquet('s3a://' + object_store.bucket + '/' + target_path)
return target_path
@classmethod
def get_object(cls, object_store, context, _runtime_type, paths):
- return context.resources.spark.read.parquet('s3a://' + object_store.key_for_paths(paths))
+ return context.resources.spark.read.parquet(
+ 's3a://' + object_store.bucket + '/' + object_store.key_for_paths(paths)
+ )
class SparkDataFrameFilesystemStoragePlugin(TypeStoragePlugin): # pylint: disable=no-init | 1 | """Type definitions for the airline_demo."""
from collections import namedtuple
import sqlalchemy
from pyspark.sql import DataFrame
from dagster import as_dagster_type, Dict, Field, String
from dagster.core.object_store import get_valid_target_path, TypeStoragePlugin
from dagster.core.runs import RunStorageMode
from dagster.core.types.runtime import Stringish
from dagster.utils import safe_isfile
AirlineDemoResources = namedtuple(
'AirlineDemoResources',
('spark', 's3', 'db_url', 'db_engine', 'db_dialect', 'redshift_s3_temp_dir', 'db_load'),
)
class SparkDataFrameS3StoragePlugin(TypeStoragePlugin): # pylint: disable=no-init
@classmethod
def set_object(cls, object_store, obj, _context, _runtime_type, paths):
target_path = object_store.key_for_paths(paths)
obj.write.parquet('s3a://' + target_path)
return target_path
@classmethod
def get_object(cls, object_store, context, _runtime_type, paths):
return context.resources.spark.read.parquet('s3a://' + object_store.key_for_paths(paths))
class SparkDataFrameFilesystemStoragePlugin(TypeStoragePlugin): # pylint: disable=no-init
@classmethod
def set_object(cls, object_store, obj, _context, _runtime_type, paths):
target_path = get_valid_target_path(object_store.root, paths)
obj.write.parquet('file://' + target_path)
return target_path
@classmethod
def get_object(cls, object_store, context, _runtime_type, paths):
return context.resources.spark.read.parquet(get_valid_target_path(object_store.root, paths))
SparkDataFrameType = as_dagster_type(
DataFrame,
name='SparkDataFrameType',
description='A Pyspark data frame.',
storage_plugins={
RunStorageMode.S3: SparkDataFrameS3StoragePlugin,
RunStorageMode.FILESYSTEM: SparkDataFrameFilesystemStoragePlugin,
},
)
SqlAlchemyEngineType = as_dagster_type(
sqlalchemy.engine.Connectable,
name='SqlAlchemyEngineType',
description='A SqlAlchemy Connectable',
)
class SqlTableName(Stringish):
def __init__(self):
super(SqlTableName, self).__init__(description='The name of a database table')
class FileExistsAtPath(Stringish):
def __init__(self):
super(FileExistsAtPath, self).__init__(description='A path at which a file actually exists')
def coerce_runtime_value(self, value):
value = super(FileExistsAtPath, self).coerce_runtime_value(value)
return self.throw_if_false(safe_isfile, value)
RedshiftConfigData = Dict(
{
'redshift_username': Field(String),
'redshift_password': Field(String),
'redshift_hostname': Field(String),
'redshift_db_name': Field(String),
's3_temp_dir': Field(String),
}
)
DbInfo = namedtuple('DbInfo', 'engine url jdbc_url dialect load_table')
PostgresConfigData = Dict(
{
'postgres_username': Field(String),
'postgres_password': Field(String),
'postgres_hostname': Field(String),
'postgres_db_name': Field(String),
}
)
| 1 | 12,928 | may be nice to have helper method to generate s3 paths rather than the minor code dup | dagster-io-dagster | py |
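Following the suggestion above, the repeated `'s3a://' + bucket + '/' + key` construction could move into one helper. A small sketch — the helper name `_s3a_url` is invented for illustration and assumes the same `object_store` interface used in the file above:

```python
def _s3a_url(object_store, paths):
    '''Build the fully qualified s3a:// URL for a key, in one place.'''
    return 's3a://' + object_store.bucket + '/' + object_store.key_for_paths(paths)


class SparkDataFrameS3StoragePlugin(TypeStoragePlugin):  # pylint: disable=no-init
    @classmethod
    def set_object(cls, object_store, obj, _context, _runtime_type, paths):
        obj.write.parquet(_s3a_url(object_store, paths))
        return object_store.key_for_paths(paths)

    @classmethod
    def get_object(cls, object_store, context, _runtime_type, paths):
        return context.resources.spark.read.parquet(_s3a_url(object_store, paths))
```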
@@ -169,11 +169,15 @@ export default Controller.extend(ValidationEngine, {
_oauthSetup() {
let blogTitle = this.get('blogTitle');
let config = this.get('config');
+ let promises = [];
+
+ promises.pushObject(this.get('settings').fetch());
+ promises.pushObject(this.get('config').fetchPrivate());
this.get('hasValidated').addObjects(['blogTitle', 'session']);
return this.validate().then(() => {
- return this.get('settings').fetch()
+ return RSVP.all(promises)
.then((settings) => {
settings.set('title', blogTitle);
| 1 | /* eslint-disable camelcase */
import Controller from 'ember-controller';
import RSVP from 'rsvp';
import ValidationEngine from 'ghost-admin/mixins/validation-engine';
import injectController from 'ember-controller/inject';
import injectService from 'ember-service/inject';
import {isInvalidError} from 'ember-ajax/errors';
import {isVersionMismatchError} from 'ghost-admin/services/ajax';
import {task} from 'ember-concurrency';
const {Promise} = RSVP;
export default Controller.extend(ValidationEngine, {
ajax: injectService(),
application: injectController(),
config: injectService(),
ghostPaths: injectService(),
notifications: injectService(),
session: injectService(),
settings: injectService(),
torii: injectService(),
// ValidationEngine settings
validationType: 'setup',
blogCreated: false,
blogTitle: null,
email: '',
flowErrors: '',
profileImage: null,
name: null,
password: null,
setup: task(function* () {
if (this.get('config.ghostOAuth')) {
return yield this._oauthSetup();
} else {
return yield this._passwordSetup();
}
}),
// TODO: remove duplication with controllers/signin
authenticateWithGhostOrg: task(function* () {
let authStrategy = 'authenticator:oauth2-ghost';
this.set('flowErrors', '');
try {
let authentication = yield this.get('torii')
.open('ghost-oauth2', {type: 'setup'});
yield this.get('authenticate').perform(authStrategy, [authentication]);
return true;
} catch (error) {
this.set('flowErrors', 'Authentication with Ghost.org denied or failed');
throw error;
}
}).drop(),
authenticate: task(function* (authStrategy, authentication) {
// we don't want to redirect after sign-in during setup
this.set('session.skipAuthSuccessHandler', true);
try {
let authResult = yield this.get('session')
.authenticate(authStrategy, ...authentication);
this.get('errors').remove('session');
return authResult;
} catch (error) {
if (error && error.errors) {
if (isVersionMismatchError(error)) {
return this.get('notifications').showAPIError(error);
}
error.errors.forEach((err) => {
err.message = err.message.htmlSafe();
});
this.set('flowErrors', error.errors[0].message.string);
} else {
// Connection errors don't return proper status message, only req.body
this.get('notifications').showAlert('There was a problem on the server.', {type: 'error', key: 'session.authenticate.failed'});
}
}
}),
/**
* Uploads the given data image, then sends the changed user image property to the server
* @param {Object} user User object, returned from the 'setup' api call
* @return {Ember.RSVP.Promise} A promise that takes care of both calls
*/
_sendImage(user) {
let image = this.get('profileImage');
return new Promise((resolve, reject) => {
image.formData = {};
return image.submit()
.done((response) => {
let usersUrl = this.get('ghostPaths.url').api('users', user.id.toString());
user.profile_image = response;
return this.get('ajax').put(usersUrl, {
data: {
users: [user]
}
}).then(resolve).catch(reject);
})
.fail(reject);
});
},
_passwordSetup() {
let setupProperties = ['blogTitle', 'name', 'email', 'password'];
let data = this.getProperties(setupProperties);
let config = this.get('config');
let method = this.get('blogCreated') ? 'put' : 'post';
this.set('flowErrors', '');
this.get('hasValidated').addObjects(setupProperties);
return this.validate().then(() => {
let authUrl = this.get('ghostPaths.url').api('authentication', 'setup');
return this.get('ajax')[method](authUrl, {
data: {
setup: [{
name: data.name,
email: data.email,
password: data.password,
blogTitle: data.blogTitle
}]
}
}).then((result) => {
config.set('blogTitle', data.blogTitle);
// don't try to login again if we are already logged in
if (this.get('session.isAuthenticated')) {
return this._afterAuthentication(result);
}
// Don't call the success handler, otherwise we will be redirected to admin
this.set('session.skipAuthSuccessHandler', true);
return this.get('session').authenticate('authenticator:oauth2', this.get('email'), this.get('password')).then(() => {
this.set('blogCreated', true);
return this._afterAuthentication(result);
}).catch((error) => {
this._handleAuthenticationError(error);
}).finally(() => {
this.set('session.skipAuthSuccessHandler', undefined);
});
}).catch((error) => {
this._handleSaveError(error);
});
}).catch(() => {
this.set('flowErrors', 'Please fill out the form to setup your blog.');
});
},
// NOTE: for OAuth ghost is in the "setup completed" step as soon
// as a user has been authenticated so we need to use the standard settings
// update to set the blog title before redirecting
_oauthSetup() {
let blogTitle = this.get('blogTitle');
let config = this.get('config');
this.get('hasValidated').addObjects(['blogTitle', 'session']);
return this.validate().then(() => {
return this.get('settings').fetch()
.then((settings) => {
settings.set('title', blogTitle);
return settings.save()
.then((settings) => {
// update the config so that the blog title shown in
// the nav bar is also updated
config.set('blogTitle', settings.get('title'));
// this.blogCreated is used by step 3 to check if step 2
// has been completed
this.set('blogCreated', true);
return this._afterAuthentication(settings);
})
.catch((error) => {
this._handleSaveError(error);
});
})
.finally(() => {
this.set('session.skipAuthSuccessHandler', undefined);
});
});
},
_handleSaveError(resp) {
if (isInvalidError(resp)) {
this.set('flowErrors', resp.errors[0].message);
} else {
this.get('notifications').showAPIError(resp, {key: 'setup.blog-details'});
}
},
_handleAuthenticationError(error) {
if (error && error.errors) {
this.set('flowErrors', error.errors[0].message);
} else {
// Connection errors don't return proper status message, only req.body
this.get('notifications').showAlert('There was a problem on the server.', {type: 'error', key: 'setup.authenticate.failed'});
}
},
_afterAuthentication(result) {
if (this.get('profileImage')) {
return this._sendImage(result.users[0])
.then(() => {
// fetch settings for synchronous access before transitioning
return this.get('settings').fetch().then(() => {
return this.transitionToRoute('setup.three');
});
}).catch((resp) => {
this.get('notifications').showAPIError(resp, {key: 'setup.blog-details'});
});
} else {
// fetch settings for synchronous access before transitioning
return this.get('settings').fetch().then(() => {
return this.transitionToRoute('setup.three');
});
}
},
actions: {
setup() {
this.get('setup').perform();
},
preValidate(model) {
// Only triggers validation if a value has been entered, preventing empty errors on focusOut
if (this.get(model)) {
return this.validate({property: model});
}
},
setImage(image) {
this.set('profileImage', image);
}
}
});
| 1 | 8,533 | This isn't needed here, we still run `this._afterAuthentication` which loads settings & config - the reason the settings fetch is here is to make sure we have all the settings before saving the blog title rather than fetching everything once auth has completed. Probably moot anyway as the oauth code will be removed shortly | TryGhost-Admin | js |
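The reviewer above argues the added `fetchPrivate()` is unnecessary. As a side note on the API the patch uses: `RSVP.all` resolves to an array, so a callback parameter named `settings` would actually receive `[settings, config]`. If the two fetches were ever kept together, `RSVP.hash` preserves named results — a hedged sketch, assuming the same service methods as the file above:

```javascript
return this.validate().then(() => {
    return RSVP.hash({
        settings: this.get('settings').fetch(),
        config: this.get('config').fetchPrivate()
    }).then(({settings}) => {
        settings.set('title', blogTitle);
        return settings.save();
    });
});
```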
@@ -397,7 +397,7 @@ func assertConfigsCompatible(cfg1, cfg2 *Config) error {
if c1.ClientAuth != c2.ClientAuth {
return fmt.Errorf("client authentication policy mismatch")
}
- if c1.ClientAuth != tls.NoClientCert && c2.ClientAuth != tls.NoClientCert && c1.ClientCAs != c2.ClientCAs {
+ if c1.ClientAuth != tls.NoClientCert && c2.ClientAuth != tls.NoClientCert && !StringSliceEqual(cfg1.ClientCerts, cfg2.ClientCerts) {
// Two hosts defined on the same listener are not compatible if they
// have ClientAuth enabled, because there's no guarantee beyond the
// hostname which config will be used (because SNI only has server name). | 1 | // Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"sync/atomic"
"time"
"github.com/go-acme/lego/challenge/tlsalpn01"
"github.com/go-acme/lego/certcrypto"
"github.com/klauspost/cpuid"
"github.com/mholt/caddy"
"github.com/mholt/certmagic"
)
// Config describes how TLS should be configured and used.
type Config struct {
// The hostname or class of hostnames this config is
// designated for; can contain wildcard characters
// according to RFC 6125 §6.4.3 - this field MUST
// be set in order for things to work as expected,
// must be normalized, and if an IP address, must
// be normalized
Hostname string
// Whether TLS is enabled
Enabled bool
// Minimum and maximum protocol versions to allow
ProtocolMinVersion uint16
ProtocolMaxVersion uint16
// The list of cipher suites; first should be
// TLS_FALLBACK_SCSV to prevent degrade attacks
Ciphers []uint16
// Whether to prefer server cipher suites
PreferServerCipherSuites bool
// The list of preferred curves
CurvePreferences []tls.CurveID
// Client authentication policy
ClientAuth tls.ClientAuthType
// List of client CA certificates to allow, if
// client authentication is enabled
ClientCerts []string
// Manual means user provides own certs and keys
Manual bool
// Managed means this config should be managed
// by the CertMagic Config (Manager field)
Managed bool
// Manager is how certificates are managed
Manager *certmagic.Config
// SelfSigned means that this hostname is
// served with a self-signed certificate
// that we generated in memory for convenience
SelfSigned bool
// The email address to use when creating or
// using an ACME account (fun fact: if this
// is set to "off" then this config will not
// qualify for managed TLS)
ACMEEmail string
// The list of protocols to choose from for Application Layer
// Protocol Negotiation (ALPN).
ALPN []string
// The final tls.Config created with
// buildStandardTLSConfig()
tlsConfig *tls.Config
}
// NewConfig returns a new Config with a pointer to the instance's
// certificate cache. You will usually need to set other fields on
// the returned Config for successful practical use.
func NewConfig(inst *caddy.Instance) (*Config, error) {
inst.StorageMu.RLock()
certCache, ok := inst.Storage[CertCacheInstStorageKey].(*certmagic.Cache)
inst.StorageMu.RUnlock()
if !ok || certCache == nil {
// set up the clustering plugin, if there is one (and there should always
// be one since this tls plugin requires it) -- this should be done exactly
// once, but we can't do it during init while plugins are still registering,
// so do it as soon as we run a setup)
if atomic.CompareAndSwapInt32(&clusterPluginSetup, 0, 1) {
clusterPluginName := os.Getenv("CADDY_CLUSTERING")
if clusterPluginName == "" {
clusterPluginName = "file" // name of default storage plugin
}
clusterFn, ok := clusterProviders[clusterPluginName]
if ok {
storage, err := clusterFn()
if err != nil {
return nil, fmt.Errorf("constructing cluster plugin %s: %v", clusterPluginName, err)
}
certmagic.Default.Storage = storage
} else {
return nil, fmt.Errorf("unrecognized cluster plugin (was it included in the Caddy build?): %s", clusterPluginName)
}
}
certCache = certmagic.NewCache(certmagic.CacheOptions{
GetConfigForCert: func(cert certmagic.Certificate) (certmagic.Config, error) {
inst.StorageMu.Lock()
cfgMap, ok := inst.Storage[configMapKey].(map[string]*Config)
inst.StorageMu.Unlock()
if ok {
for hostname, cfg := range cfgMap {
if cfg.Manager != nil && hostname == cert.Names[0] {
return *cfg.Manager, nil
}
}
}
// returning Default not strictly necessary, since Default is used as template
// anyway; but this makes it clear that that's what we fall back to
return certmagic.Default, nil
},
})
storageCleaningTicker := time.NewTicker(12 * time.Hour)
done := make(chan bool)
go func() {
for {
select {
case <-done:
storageCleaningTicker.Stop()
return
case <-storageCleaningTicker.C:
certmagic.CleanStorage(certmagic.Default.Storage, certmagic.CleanStorageOptions{
OCSPStaples: true,
})
}
}
}()
inst.OnShutdown = append(inst.OnShutdown, func() error {
certCache.Stop()
done <- true
close(done)
return nil
})
inst.StorageMu.Lock()
inst.Storage[CertCacheInstStorageKey] = certCache
inst.StorageMu.Unlock()
}
return &Config{
Manager: certmagic.New(certCache, certmagic.Config{}),
}, nil
}
// buildStandardTLSConfig converts cfg (*caddytls.Config) to a *tls.Config
// and stores it in cfg so it can be used in servers. If TLS is disabled,
// no tls.Config is created.
func (c *Config) buildStandardTLSConfig() error {
if !c.Enabled {
return nil
}
config := new(tls.Config)
ciphersAdded := make(map[uint16]struct{})
curvesAdded := make(map[tls.CurveID]struct{})
// add cipher suites
for _, ciph := range c.Ciphers {
if _, ok := ciphersAdded[ciph]; !ok {
ciphersAdded[ciph] = struct{}{}
config.CipherSuites = append(config.CipherSuites, ciph)
}
}
config.PreferServerCipherSuites = c.PreferServerCipherSuites
// add curve preferences
for _, curv := range c.CurvePreferences {
if _, ok := curvesAdded[curv]; !ok {
curvesAdded[curv] = struct{}{}
config.CurvePreferences = append(config.CurvePreferences, curv)
}
}
// ensure ALPN includes the ACME TLS-ALPN protocol
var alpnFound bool
for _, a := range c.ALPN {
if a == tlsalpn01.ACMETLS1Protocol {
alpnFound = true
break
}
}
if !alpnFound {
c.ALPN = append(c.ALPN, tlsalpn01.ACMETLS1Protocol)
}
config.MinVersion = c.ProtocolMinVersion
config.MaxVersion = c.ProtocolMaxVersion
config.ClientAuth = c.ClientAuth
config.NextProtos = c.ALPN
config.GetCertificate = c.Manager.GetCertificate
// set up client authentication if enabled
if config.ClientAuth != tls.NoClientCert {
pool := x509.NewCertPool()
clientCertsAdded := make(map[string]struct{})
for _, caFile := range c.ClientCerts {
// don't add cert to pool more than once
if _, ok := clientCertsAdded[caFile]; ok {
continue
}
clientCertsAdded[caFile] = struct{}{}
// Any client with a certificate from this CA will be allowed to connect
caCrt, err := ioutil.ReadFile(caFile)
if err != nil {
return err
}
if !pool.AppendCertsFromPEM(caCrt) {
return fmt.Errorf("error loading client certificate '%s': no certificates were successfully parsed", caFile)
}
}
config.ClientCAs = pool
}
// default cipher suites
if len(config.CipherSuites) == 0 {
config.CipherSuites = getPreferredDefaultCiphers()
}
// for security, ensure TLS_FALLBACK_SCSV is always included first
if len(config.CipherSuites) == 0 || config.CipherSuites[0] != tls.TLS_FALLBACK_SCSV {
config.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, config.CipherSuites...)
}
// store the resulting new tls.Config
c.tlsConfig = config
return nil
}
// MakeTLSConfig makes a tls.Config from configs. The returned
// tls.Config is programmed to load the matching caddytls.Config
// based on the hostname in SNI, but that's all. This is used
// to create a single TLS configuration for a listener (a group
// of sites).
func MakeTLSConfig(configs []*Config) (*tls.Config, error) {
if len(configs) == 0 {
return nil, nil
}
configMap := make(configGroup)
for i, cfg := range configs {
if cfg == nil {
// avoid nil pointer dereference below this loop
configs[i] = new(Config)
continue
}
// can't serve TLS and non-TLS on same port
if i > 0 && cfg.Enabled != configs[i-1].Enabled {
thisConfProto, lastConfProto := "not TLS", "not TLS"
if cfg.Enabled {
thisConfProto = "TLS"
}
if configs[i-1].Enabled {
lastConfProto = "TLS"
}
return nil, fmt.Errorf("cannot multiplex %s (%s) and %s (%s) on same listener",
configs[i-1].Hostname, lastConfProto, cfg.Hostname, thisConfProto)
}
// convert this caddytls.Config into a tls.Config
if err := cfg.buildStandardTLSConfig(); err != nil {
return nil, err
}
// if an existing config with this hostname was already
// configured, then they must be identical (or at least
// compatible), otherwise that is a configuration error
if otherConfig, ok := configMap[cfg.Hostname]; ok {
if err := assertConfigsCompatible(cfg, otherConfig); err != nil {
return nil, fmt.Errorf("incompatible TLS configurations for the same SNI "+
"name (%s) on the same listener: %v",
cfg.Hostname, err)
}
}
// key this config by its hostname (overwrites
// configs with the same hostname pattern; should
// be OK since we already asserted they are roughly
// the same); during TLS handshakes, configs are
// loaded based on the hostname pattern according
// to client's ServerName (SNI) value
if cfg.Hostname == "0.0.0.0" || cfg.Hostname == "::" {
configMap[""] = cfg
} else {
configMap[cfg.Hostname] = cfg
}
}
// Is TLS disabled? By now, we know that all
// configs agree whether it is or not, so we
// can just look at the first one. If so,
// we're done here.
if len(configs) == 0 || !configs[0].Enabled {
return nil, nil
}
return &tls.Config{
// A tls.Config must have Certificates or GetCertificate
// set, in order to be accepted by tls.Listen and quic.Listen.
// TODO: remove this once the standard library allows a tls.Config with
// only GetConfigForClient set. https://github.com/mholt/caddy/pull/2404
GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
return nil, fmt.Errorf("all certificates configured via GetConfigForClient")
},
GetConfigForClient: configMap.GetConfigForClient,
}, nil
}
// assertConfigsCompatible returns an error if the two Configs
// do not have the same (or roughly compatible) configurations.
// If one of the tlsConfig pointers on either Config is nil,
// an error will be returned. If both are nil, no error.
func assertConfigsCompatible(cfg1, cfg2 *Config) error {
c1, c2 := cfg1.tlsConfig, cfg2.tlsConfig
if (c1 == nil && c2 != nil) || (c1 != nil && c2 == nil) {
return fmt.Errorf("one config is not made")
}
if c1 == nil && c2 == nil {
return nil
}
if len(c1.CipherSuites) != len(c2.CipherSuites) {
return fmt.Errorf("different number of allowed cipher suites")
}
for i, ciph := range c1.CipherSuites {
if c2.CipherSuites[i] != ciph {
return fmt.Errorf("different cipher suites or different order")
}
}
if len(c1.CurvePreferences) != len(c2.CurvePreferences) {
return fmt.Errorf("different number of allowed cipher suites")
}
for i, curve := range c1.CurvePreferences {
if c2.CurvePreferences[i] != curve {
return fmt.Errorf("different curve preferences or different order")
}
}
if len(c1.NextProtos) != len(c2.NextProtos) {
return fmt.Errorf("different number of ALPN (NextProtos) values")
}
for i, proto := range c1.NextProtos {
if c2.NextProtos[i] != proto {
return fmt.Errorf("different ALPN (NextProtos) values or different order")
}
}
if c1.PreferServerCipherSuites != c2.PreferServerCipherSuites {
return fmt.Errorf("one prefers server cipher suites, the other does not")
}
if c1.MinVersion != c2.MinVersion {
return fmt.Errorf("minimum TLS version mismatch")
}
if c1.MaxVersion != c2.MaxVersion {
return fmt.Errorf("maximum TLS version mismatch")
}
if c1.ClientAuth != c2.ClientAuth {
return fmt.Errorf("client authentication policy mismatch")
}
if c1.ClientAuth != tls.NoClientCert && c2.ClientAuth != tls.NoClientCert && c1.ClientCAs != c2.ClientCAs {
// Two hosts defined on the same listener are not compatible if they
// have ClientAuth enabled, because there's no guarantee beyond the
// hostname which config will be used (because SNI only has server name).
// To prevent clients from bypassing authentication, require that
// ClientAuth be configured in an unambiguous manner.
return fmt.Errorf("multiple hosts requiring client authentication ambiguously configured")
}
return nil
}
// ConfigGetter gets a Config keyed by key.
type ConfigGetter func(c *caddy.Controller) *Config
var configGetters = make(map[string]ConfigGetter)
// RegisterConfigGetter registers fn as the way to get a
// Config for server type serverType.
func RegisterConfigGetter(serverType string, fn ConfigGetter) {
configGetters[serverType] = fn
}
// SetDefaultTLSParams sets the default TLS cipher suites, protocol versions,
// and server preferences of a server.Config if they were not previously set
// (it does not overwrite; only fills in missing values).
func SetDefaultTLSParams(config *Config) {
// If no ciphers provided, use default list
if len(config.Ciphers) == 0 {
config.Ciphers = getPreferredDefaultCiphers()
}
// Not a cipher suite, but still important for mitigating protocol downgrade attacks
// (prepend since having it at end breaks http2 due to non-h2-approved suites before it)
config.Ciphers = append([]uint16{tls.TLS_FALLBACK_SCSV}, config.Ciphers...)
// If no curves provided, use default list
if len(config.CurvePreferences) == 0 {
config.CurvePreferences = defaultCurves
}
// Set default protocol min and max versions - must balance compatibility and security
if config.ProtocolMinVersion == 0 {
config.ProtocolMinVersion = tls.VersionTLS12
}
if config.ProtocolMaxVersion == 0 {
config.ProtocolMaxVersion = tls.VersionTLS13
}
// Prefer server cipher suites
config.PreferServerCipherSuites = true
}
// Map of supported key types
var supportedKeyTypes = map[string]certcrypto.KeyType{
"P384": certcrypto.EC384,
"P256": certcrypto.EC256,
"RSA4096": certcrypto.RSA4096,
"RSA2048": certcrypto.RSA2048,
}
// SupportedProtocols is a map of supported protocols.
// HTTP/2 only supports TLS 1.2 and higher.
// If updating this map, also update tlsProtocolStringToMap in caddyhttp/fastcgi/fastcgi.go
var SupportedProtocols = map[string]uint16{
"tls1.0": tls.VersionTLS10,
"tls1.1": tls.VersionTLS11,
"tls1.2": tls.VersionTLS12,
"tls1.3": tls.VersionTLS13,
}
// GetSupportedProtocolName returns the protocol name
func GetSupportedProtocolName(protocol uint16) (string, error) {
for k, v := range SupportedProtocols {
if v == protocol {
return k, nil
}
}
return "", fmt.Errorf("name: unsupported protocol")
}
// SupportedCiphersMap has supported ciphers, used only for parsing config.
//
// Note that, at time of writing, HTTP/2 blacklists 276 cipher suites,
// including all but four of the suites below (the four GCM suites).
// See https://http2.github.io/http2-spec/#BadCipherSuites
//
// TLS_FALLBACK_SCSV is not in this list because we manually ensure
// it is always added (even though it is not technically a cipher suite).
//
// This map, like any map, is NOT ORDERED. Do not range over this map.
var SupportedCiphersMap = map[string]uint16{
"ECDHE-ECDSA-AES256-GCM-SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
"ECDHE-RSA-AES256-GCM-SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
"ECDHE-ECDSA-AES128-GCM-SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
"ECDHE-RSA-AES128-GCM-SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
"ECDHE-ECDSA-WITH-CHACHA20-POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
"ECDHE-RSA-WITH-CHACHA20-POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
"ECDHE-RSA-AES256-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
"ECDHE-RSA-AES128-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
"ECDHE-ECDSA-AES256-CBC-SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
"ECDHE-ECDSA-AES128-CBC-SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
"RSA-AES256-CBC-SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
"RSA-AES128-CBC-SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
"ECDHE-RSA-3DES-EDE-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
"RSA-3DES-EDE-CBC-SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
}
// GetSupportedCipherName returns the cipher name
func GetSupportedCipherName(cipher uint16) (string, error) {
for k, v := range SupportedCiphersMap {
if v == cipher {
return k, nil
}
}
return "", fmt.Errorf("name: unsupported cipher")
}
// List of all the ciphers we want to use by default
var defaultCiphers = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
}
// List of ciphers we should prefer if native AESNI support is missing
var defaultCiphersNonAESNI = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
// getPreferredDefaultCiphers returns an appropriate cipher suite to use, depending on
// the hardware support available for AES-NI.
//
// See https://github.com/mholt/caddy/issues/1674
func getPreferredDefaultCiphers() []uint16 {
if cpuid.CPU.AesNi() {
return defaultCiphers
}
// Return a cipher suite that prefers ChaCha20
return defaultCiphersNonAESNI
}
// Map of supported curves
// https://golang.org/pkg/crypto/tls/#CurveID
var supportedCurvesMap = map[string]tls.CurveID{
"X25519": tls.X25519,
"P256": tls.CurveP256,
"P384": tls.CurveP384,
"P521": tls.CurveP521,
}
// List of all the curves we want to use by default.
//
// This list should only include curves which are fast by design (e.g. X25519)
// and those for which an optimized assembly implementation exists (e.g. P256).
// The latter ones can be found here: https://github.com/golang/go/tree/master/src/crypto/elliptic
var defaultCurves = []tls.CurveID{
tls.X25519,
tls.CurveP256,
}
var clusterPluginSetup int32 // access atomically
// CertCacheInstStorageKey is the name of the key for
// accessing the certificate storage on the *caddy.Instance.
const CertCacheInstStorageKey = "tls_cert_cache"
| 1 | 13,317 | This line is getting a little long; let's move the conditions to at least two lines (maybe three). | caddyserver-caddy | go |
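A sketch of the added compatibility check wrapped the way the style comment above asks, with one condition per line (`StringSliceEqual` is the helper the patch introduces and is assumed to live in the same package):

```go
if c1.ClientAuth != tls.NoClientCert &&
	c2.ClientAuth != tls.NoClientCert &&
	!StringSliceEqual(cfg1.ClientCerts, cfg2.ClientCerts) {
	// same ambiguity error as before
	return fmt.Errorf("multiple hosts requiring client authentication ambiguously configured")
}
```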
@@ -37,6 +37,10 @@
#include <fastdds/dds/log/Log.hpp>
+#include "../../../../fastdds/core/policy/ParameterList.hpp"
+
+using ParameterList = eprosima::fastdds::dds::ParameterList;
+
namespace eprosima {
namespace fastrtps {
namespace rtps { | 1 | // Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file PDPListener.cpp
*
*/
#include <fastdds/rtps/reader/RTPSReader.h>
#include <fastdds/rtps/history/ReaderHistory.h>
#include <fastrtps/utils/TimeConversion.h>
#include <fastdds/rtps/builtin/discovery/participant/PDP.h>
#include <fastdds/rtps/builtin/discovery/endpoint/EDP.h>
#include <fastdds/rtps/builtin/discovery/participant/PDPListener.h>
#include <fastdds/rtps/resources/TimedEvent.h>
#include <fastdds/rtps/participant/ParticipantDiscoveryInfo.h>
#include <fastdds/rtps/participant/RTPSParticipantListener.h>
#include <rtps/participant/RTPSParticipantImpl.h>
#include <mutex>
#include <fastdds/dds/log/Log.hpp>
namespace eprosima {
namespace fastrtps {
namespace rtps {
PDPListener::PDPListener(
PDP* parent)
: parent_pdp_(parent)
, temp_participant_data_(parent->getRTPSParticipant()->getRTPSParticipantAttributes().allocation)
{
}
void PDPListener::onNewCacheChangeAdded(
RTPSReader* reader,
const CacheChange_t* const change_in)
{
CacheChange_t* change = const_cast<CacheChange_t*>(change_in);
GUID_t writer_guid = change->writerGUID;
logInfo(RTPS_PDP, "SPDP Message received");
// Make sure we have an instance handle (i.e GUID)
if (change->instanceHandle == c_InstanceHandle_Unknown)
{
if (!this->get_key(change))
{
logWarning(RTPS_PDP, "Problem getting the key of the change, removing");
parent_pdp_->mp_PDPReaderHistory->remove_change(change);
return;
}
}
// Take GUID from instance handle
GUID_t guid;
iHandle2GUID(guid, change->instanceHandle);
if (change->kind == ALIVE)
{
// Ignore announcement from own RTPSParticipant
if (guid == parent_pdp_->getRTPSParticipant()->getGuid())
{
logInfo(RTPS_PDP, "Message from own RTPSParticipant, removing");
parent_pdp_->mp_PDPReaderHistory->remove_change(change);
return;
}
// Release reader lock to avoid ABBA lock. PDP mutex should always be first.
// Keep change information on local variables to check consistency later
SequenceNumber_t seq_num = change->sequenceNumber;
reader->getMutex().unlock();
std::unique_lock<std::recursive_mutex> lock(*parent_pdp_->getMutex());
reader->getMutex().lock();
// If change is not consistent, it will be processed on the thread that has overriten it
if ((ALIVE != change->kind) || (seq_num != change->sequenceNumber) || (writer_guid != change->writerGUID))
{
return;
}
// Access to temp_participant_data_ is protected by reader lock
// Load information on temp_participant_data_
CDRMessage_t msg(change->serializedPayload);
temp_participant_data_.clear();
if (temp_participant_data_.readFromCDRMessage(&msg, true, parent_pdp_->getRTPSParticipant()->network_factory(),
parent_pdp_->getRTPSParticipant()->has_shm_transport()))
{
// After correctly reading it
change->instanceHandle = temp_participant_data_.m_key;
guid = temp_participant_data_.m_guid;
// Check if participant already exists (updated info)
ParticipantProxyData* pdata = nullptr;
for (ParticipantProxyData* it : parent_pdp_->participant_proxies_)
{
if (guid == it->m_guid)
{
pdata = it;
break;
}
}
auto status = (pdata == nullptr) ? ParticipantDiscoveryInfo::DISCOVERED_PARTICIPANT :
ParticipantDiscoveryInfo::CHANGED_QOS_PARTICIPANT;
if (pdata == nullptr)
{
// Create a new one when not found
pdata = parent_pdp_->createParticipantProxyData(temp_participant_data_, writer_guid);
if (pdata != nullptr)
{
reader->getMutex().unlock();
lock.unlock();
logInfo(RTPS_PDP_DISCOVERY, "New participant " << pdata->m_guid << " at " << "MTTLoc: "
<< pdata->metatraffic_locators << " DefLoc:" << pdata->default_locators);
// Assigning remote endpoints implies sending a DATA(p) to all matched and fixed readers, since
// StatelessWriter::matched_reader_add marks the entire history as unsent if the added reader's
// durability is bigger or equal to TRANSIENT_LOCAL_DURABILITY_QOS (TRANSIENT_LOCAL or TRANSIENT),
// which is the case of ENTITYID_BUILTIN_SDP_PARTICIPANT_READER (TRANSIENT_LOCAL). If a remote
// participant is discovered before creating the first DATA(p) change (which happens at the end of
// BuiltinProtocols::initBuiltinProtocols), then StatelessWriter::matched_reader_add ends up marking
// no changes as unsent (since the history is empty), which is OK because this can only happen if a
// participant is discovered in the middle of BuiltinProtocols::initBuiltinProtocols, which will
// create the first DATA(p) upon finishing, thus triggering the sent to all fixed and matched
// readers anyways.
parent_pdp_->assignRemoteEndpoints(pdata);
}
}
else
{
pdata->updateData(temp_participant_data_);
pdata->isAlive = true;
reader->getMutex().unlock();
lock.unlock();
logInfo(RTPS_PDP_DISCOVERY, "Update participant " << pdata->m_guid << " at " << "MTTLoc: "
<< pdata->metatraffic_locators << " DefLoc:" << pdata->default_locators);
if (parent_pdp_->updateInfoMatchesEDP())
{
parent_pdp_->mp_EDP->assignRemoteEndpoints(*pdata);
}
}
if (pdata != nullptr)
{
RTPSParticipantListener* listener = parent_pdp_->getRTPSParticipant()->getListener();
if (listener != nullptr)
{
std::lock_guard<std::mutex> cb_lock(parent_pdp_->callback_mtx_);
ParticipantDiscoveryInfo info(*pdata);
info.status = status;
listener->onParticipantDiscovery(
parent_pdp_->getRTPSParticipant()->getUserRTPSParticipant(),
std::move(info));
}
}
// Take again the reader lock
reader->getMutex().lock();
}
}
else
{
reader->getMutex().unlock();
if (parent_pdp_->remove_remote_participant(guid, ParticipantDiscoveryInfo::REMOVED_PARTICIPANT))
{
reader->getMutex().lock();
// All changes related with this participant have been removed from history by remove_remote_participant
return;
}
reader->getMutex().lock();
}
//Remove change form history.
parent_pdp_->mp_PDPReaderHistory->remove_change(change);
}
bool PDPListener::get_key(
CacheChange_t* change)
{
return ParameterList::readInstanceHandleFromCDRMsg(change, fastdds::dds::PID_PARTICIPANT_GUID);
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 18,473 | Don't use relative paths. src directory is already on the include search path | eProsima-Fast-DDS | cpp |
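The objection above is to the `../../../../` climb in the patch's include. Since the comment says the `src` directory is already on the include search path, the same header can be named from that root — a sketch, with the root-relative path inferred from the relative one in the patch:

```cpp
// Relative form from the patch (discouraged by the review):
//   #include "../../../../fastdds/core/policy/ParameterList.hpp"
// Root-relative form, relying on the src directory being on the include path:
#include <fastdds/core/policy/ParameterList.hpp>

using ParameterList = eprosima::fastdds::dds::ParameterList;
```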
@@ -2,7 +2,7 @@ class Api::V1::CompletionsController < ApiController
before_action :doorkeeper_authorize!, if: lambda { !signed_in? }
def index
- respond_with current_resource_owner.completions.only_trail_object_ids
+ respond_with completions: current_resource_owner.completions.only_trail_object_ids
end
def create | 1 | class Api::V1::CompletionsController < ApiController
before_action :doorkeeper_authorize!, if: lambda { !signed_in? }
def index
respond_with current_resource_owner.completions.only_trail_object_ids
end
def create
completion = current_resource_owner.completions.create(
trail_object_id: params[:trail_object_id],
trail_name: params[:trail_name]
)
respond_with :api, :v1, completion
end
def show
completion = lookup_completion(params[:id])
respond_with completion
end
def destroy
completion = lookup_completion(params[:id])
completion.destroy
respond_with :api, :v1, completion
end
private
def lookup_completion(trail_object_id)
current_resource_owner.completions.find_by_trail_object_id!(trail_object_id)
end
def current_resource_owner
current_user || resource_owner
end
end
| 1 | 13,472 | Line is too long. [86/80] | thoughtbot-upcase | rb |
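The `[86/80]` above is RuboCop's line-length metric on the new `respond_with` call. One way to get under the limit without changing behavior is to extract the value first — a small sketch:

```ruby
def index
  completions = current_resource_owner.completions.only_trail_object_ids
  respond_with completions: completions
end
```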
@@ -4,6 +4,11 @@ describe 'TwitterDigits' do
let(:service_provider_url) { Faker::Internet.url }
let(:credentials) { "oauth_consumer_key=#{ Faker::Internet.password }" }
+ before do
+ original_twitter_digits_path = File.expand_path('../../../../app/lib/twitter_digits.rb', __FILE__)
+ load original_twitter_digits_path
+ end
+
it 'must return the id_str when response is 200' do
id_str = Faker::Internet.password
response = stub(code: '200', body: { id_str: id_str }.to_json) | 1 | require 'test_helper'
describe 'TwitterDigits' do
let(:service_provider_url) { Faker::Internet.url }
let(:credentials) { "oauth_consumer_key=#{ Faker::Internet.password }" }
it 'must return the id_str when response is 200' do
id_str = Faker::Internet.password
response = stub(code: '200', body: { id_str: id_str }.to_json)
Net::HTTP.any_instance.stubs(:get2).returns(response)
TwitterDigits.get_twitter_id(service_provider_url, credentials).must_equal id_str
end
it 'must return nil when response is not 200' do
response = stub(code: '404')
Net::HTTP.any_instance.stubs(:get2).returns(response)
TwitterDigits.get_twitter_id(service_provider_url, credentials).must_be_nil
end
end
| 1 | 8,090 | We have to navigate up four directories to come down three? I see it, but it's kinda icky. Is something like `load Rails.root + 'app/lib/twitter_digits.rb'` out of fashion nowadays? | blackducksoftware-ohloh-ui | rb |
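A sketch of the `Rails.root`-based variant the reviewer above floats, which avoids counting `../` segments by hand (assuming the spec runs with Rails loaded, as the reviewer's own suggestion implies; `.to_s` is added because `load` expects a string path):

```ruby
before do
  # Reload the original class from the app without walking up four directories.
  load Rails.root.join('app', 'lib', 'twitter_digits.rb').to_s
end
```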
@@ -208,6 +208,10 @@ int main(int argc, char *argv[]) {
res = fpgaDmaOpen(afc_h, &dma_h);
ON_ERR_GOTO(res, out_dma_close, "fpgaDmaOpen");
+ if(!dma_h) {
+ res = FPGA_EXCEPTION;
+ ON_ERR_GOTO(res, out_dma_close, "Invaid DMA Handle");
+ }
if(use_ase)
count = ASE_TEST_BUF_SIZE; | 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <string.h>
#include <uuid/uuid.h>
#include <opae/fpga.h>
#include <time.h>
#include "fpga_dma.h"
/**
* \fpga_dma_test.c
* \brief User-mode DMA test
*/
#include <stdlib.h>
#include <assert.h>
#define HELLO_AFU_ID "331DB30C-9885-41EA-9081-F88B8F655CAA"
#define TEST_BUF_SIZE (10*1024*1024)
#define ASE_TEST_BUF_SIZE (4*1024)
static int err_cnt = 0;
/*
* macro for checking return codes
*/
#define ON_ERR_GOTO(res, label, desc)\
do {\
if ((res) != FPGA_OK) {\
err_cnt++;\
fprintf(stderr, "Error %s: %s\n", (desc), fpgaErrStr(res));\
goto label;\
}\
} while (0)
void fill_buffer(char *buf, size_t size) {
size_t i=0;
// use a deterministic seed to generate pseudo-random numbers
srand(99);
for(i=0; i<size; i++) {
*buf = rand()%256;
buf++;
}
}
fpga_result verify_buffer(char *buf, size_t size) {
size_t i, rnum=0;
srand(99);
for(i=0; i<size; i++) {
rnum = rand()%256;
if((*buf&0xFF) != rnum) {
printf("Invalid data at %zx Expected = %zx Actual = %x\n",i,rnum,(*buf&0xFF));
return FPGA_INVALID_PARAM;
}
buf++;
}
printf("Buffer Verification Success!\n");
return FPGA_OK;
}
void clear_buffer(char *buf, size_t size) {
memset(buf, 0, size);
}
void report_bandwidth(size_t size, double seconds) {
double throughput = (double)size/((double)seconds*1000*1000);
printf("\rMeasured bandwidth = %lf Megabytes/sec\n", throughput);
}
fpga_result ddr_sweep(fpga_dma_handle dma_h) {
int res;
ssize_t total_mem_size = (uint64_t)(4*1024)*(uint64_t)(1024*1024);
uint64_t *dma_buf_ptr = malloc(total_mem_size);
if(dma_buf_ptr == NULL) {
printf("Unable to allocate %ld bytes of memory", total_mem_size);
return FPGA_NO_MEMORY;
}
printf("Allocated test buffer\n");
printf("Fill test buffer\n");
fill_buffer((char*)dma_buf_ptr, total_mem_size);
uint64_t src = (uint64_t)dma_buf_ptr;
uint64_t dst = 0x0;
printf("DDR Sweep Host to FPGA\n");
clock_t start, end;
start = clock();
res = fpgaDmaTransferSync(dma_h, dst, src, total_mem_size, HOST_TO_FPGA_MM);
if(res != FPGA_OK) {
printf(" fpgaDmaTransferSync Host to FPGA failed with error %s", fpgaErrStr(res));
free(dma_buf_ptr);
return FPGA_EXCEPTION;
}
end = clock();
double seconds = ((double) (end - start)) / CLOCKS_PER_SEC;
report_bandwidth(total_mem_size, seconds);
printf("\rClear buffer\n");
clear_buffer((char*)dma_buf_ptr, total_mem_size);
src = 0x0;
dst = (uint64_t)dma_buf_ptr;
start = clock();
printf("DDR Sweep FPGA to Host\n");
res = fpgaDmaTransferSync(dma_h, dst, src, total_mem_size, FPGA_TO_HOST_MM);
if(res != FPGA_OK) {
printf(" fpgaDmaTransferSync FPGA to Host failed with error %s", fpgaErrStr(res));
free(dma_buf_ptr);
return FPGA_EXCEPTION;
}
end = clock();
seconds = ((double) (end - start)) / CLOCKS_PER_SEC;
report_bandwidth(total_mem_size, seconds);
printf("Verifying buffer..\n");
verify_buffer((char*)dma_buf_ptr, total_mem_size);
free(dma_buf_ptr);
return FPGA_OK;
}
int main(int argc, char *argv[]) {
fpga_result res = FPGA_OK;
fpga_dma_handle dma_h;
uint64_t count;
fpga_properties filter = NULL;
fpga_token afc_token;
fpga_handle afc_h;
fpga_guid guid;
uint32_t num_matches;
volatile uint64_t *mmio_ptr = NULL;
uint64_t *dma_buf_ptr = NULL;
uint32_t use_ase;
if(argc < 2) {
printf("Usage: fpga_dma_test <use_ase = 1 (simulation only), 0 (hardware)>");
return 1;
}
use_ase = atoi(argv[1]);
if(use_ase) {
printf("Running test in ASE mode\n");
} else {
printf("Running test in HW mode\n");
}
// enumerate the afc
if(uuid_parse(HELLO_AFU_ID, guid) < 0) {
return 1;
}
res = fpgaGetProperties(NULL, &filter);
ON_ERR_GOTO(res, out, "fpgaGetProperties");
res = fpgaPropertiesSetObjectType(filter, FPGA_ACCELERATOR);
ON_ERR_GOTO(res, out_destroy_prop, "fpgaPropertiesSetObjectType");
res = fpgaPropertiesSetGUID(filter, guid);
ON_ERR_GOTO(res, out_destroy_prop, "fpgaPropertiesSetGUID");
res = fpgaEnumerate(&filter, 1, &afc_token, 1, &num_matches);
ON_ERR_GOTO(res, out_destroy_prop, "fpgaEnumerate");
if(num_matches < 1) {
printf("Error: Number of matches < 1");
ON_ERR_GOTO(FPGA_INVALID_PARAM, out_destroy_prop, "num_matches<1");
}
// open the AFC
res = fpgaOpen(afc_token, &afc_h, 0);
ON_ERR_GOTO(res, out_destroy_tok, "fpgaOpen");
if(!use_ase) {
res = fpgaMapMMIO(afc_h, 0, (uint64_t**)&mmio_ptr);
ON_ERR_GOTO(res, out_close, "fpgaMapMMIO");
}
// reset AFC
res = fpgaReset(afc_h);
ON_ERR_GOTO(res, out_unmap, "fpgaReset");
res = fpgaDmaOpen(afc_h, &dma_h);
ON_ERR_GOTO(res, out_dma_close, "fpgaDmaOpen");
if(use_ase)
count = ASE_TEST_BUF_SIZE;
else
count = TEST_BUF_SIZE;
dma_buf_ptr = (uint64_t*)malloc(count);
if(dma_h == NULL || dma_buf_ptr == NULL) {
res = FPGA_NO_MEMORY;
ON_ERR_GOTO(res, out_dma_close, "Malloc failed: no memory");
}
fill_buffer((char*)dma_buf_ptr, count);
// Test procedure
// - Fill host buffer with pseudo-random data
// - Copy from host buffer to FPGA buffer at address 0x0
// - Clear host buffer
// - Copy from FPGA buffer to host buffer
// - Verify host buffer data
// - Clear host buffer
// - Copy FPGA buffer at address 0x0 to FPGA buffer at addr "count"
// - Copy data from FPGA buffer at addr "count" to host buffer
// - Verify host buffer data
// copy from host to fpga
res = fpgaDmaTransferSync(dma_h, 0x0 /*dst*/, (uint64_t)dma_buf_ptr /*src*/, count, HOST_TO_FPGA_MM);
ON_ERR_GOTO(res, out_dma_close, "fpgaDmaTransferSync HOST_TO_FPGA_MM");
clear_buffer((char*)dma_buf_ptr, count);
// copy from fpga to host
res = fpgaDmaTransferSync(dma_h, (uint64_t)dma_buf_ptr /*dst*/, 0x0 /*src*/, count, FPGA_TO_HOST_MM);
ON_ERR_GOTO(res, out_dma_close, "fpgaDmaTransferSync FPGA_TO_HOST_MM");
res = verify_buffer((char*)dma_buf_ptr, count);
ON_ERR_GOTO(res, out_dma_close, "verify_buffer");
clear_buffer((char*)dma_buf_ptr, count);
// copy from fpga to fpga
res = fpgaDmaTransferSync(dma_h, count /*dst*/, 0x0 /*src*/, count, FPGA_TO_FPGA_MM);
ON_ERR_GOTO(res, out_dma_close, "fpgaDmaTransferSync FPGA_TO_FPGA_MM");
// copy from fpga to host
res = fpgaDmaTransferSync(dma_h, (uint64_t)dma_buf_ptr /*dst*/, count /*src*/, count, FPGA_TO_HOST_MM);
ON_ERR_GOTO(res, out_dma_close, "fpgaDmaTransferSync FPGA_TO_HOST_MM");
res = verify_buffer((char*)dma_buf_ptr, count);
ON_ERR_GOTO(res, out_dma_close, "verify_buffer");
if(!use_ase) {
printf("Running DDR sweep test\n");
res = ddr_sweep(dma_h);
ON_ERR_GOTO(res, out_dma_close, "ddr_sweep");
}
out_dma_close:
free(dma_buf_ptr);
if(dma_h)
res = fpgaDmaClose(dma_h);
ON_ERR_GOTO(res, out_unmap, "fpgaDmaClose");
out_unmap:
if(!use_ase) {
res = fpgaUnmapMMIO(afc_h, 0);
ON_ERR_GOTO(res, out_close, "fpgaUnmapMMIO");
}
out_close:
res = fpgaClose(afc_h);
ON_ERR_GOTO(res, out_destroy_tok, "fpgaClose");
out_destroy_tok:
res = fpgaDestroyToken(&afc_token);
ON_ERR_GOTO(res, out_destroy_prop, "fpgaDestroyToken");
out_destroy_prop:
res = fpgaDestroyProperties(&filter);
ON_ERR_GOTO(res, out, "fpgaDestroyProperties");
out:
return err_cnt;
}
| 1 | 14,773 | Empty space at end. | OPAE-opae-sdk | c |
@@ -215,6 +215,7 @@ module Bolt
def with_bolt_executor(executor, inventory, pdb_client = nil, applicator = nil, &block)
setup
opts = {
+ bolt_project: @project,
bolt_executor: executor,
bolt_inventory: inventory,
bolt_pdb_client: pdb_client, | 1 | # frozen_string_literal: true
require 'bolt/applicator'
require 'bolt/executor'
require 'bolt/error'
require 'bolt/plan_result'
require 'bolt/util'
require 'bolt/config/modulepath'
require 'etc'
module Bolt
class PAL
# PALError is used to convert errors from executing puppet code into
# Bolt::Errors
class PALError < Bolt::Error
def self.from_preformatted_error(err)
error = if err.cause.is_a? Bolt::Error
err.cause
else
from_error(err)
end
# Provide the location of an error if it came from a plan
details = {}
details[:file] = err.file if defined?(err.file)
details[:line] = err.line if defined?(err.line)
details[:column] = err.pos if defined?(err.pos)
error.add_filelineno(details.compact)
error
end
      # Generate a Bolt::PAL::PALError for non-Bolt errors
def self.from_error(err)
# Use the original error message if available
message = err.cause ? err.cause.message : err.message
e = new(message)
e.set_backtrace(err.backtrace)
e
end
def initialize(msg, details = {})
super(msg, 'bolt/pal-error', details)
end
end
def initialize(modulepath, hiera_config, resource_types, max_compiles = Etc.nprocessors,
trusted_external = nil, apply_settings = {}, project = nil)
unless modulepath.is_a?(Bolt::Config::Modulepath)
msg = "Type error in PAL: modulepath must be a Bolt::Config::Modulepath"
raise Bolt::Error.new(msg, "bolt/execution-error")
end
      # Nothing works without initializing this global state. Reinitializing
      # is safe and in practice only happens in tests.
self.class.load_puppet
@modulepath = modulepath
@hiera_config = hiera_config
@trusted_external = trusted_external
@apply_settings = apply_settings
@max_compiles = max_compiles
@resource_types = resource_types
@project = project
@logger = Bolt::Logger.logger(self)
unless user_modulepath.empty?
@logger.debug("Loading modules from #{full_modulepath.join(File::PATH_SEPARATOR)}")
end
@loaded = false
end
def full_modulepath
@modulepath.full_modulepath
end
def user_modulepath
@modulepath.user_modulepath
end
# Puppet logging is global so this is class method to avoid confusion
def self.configure_logging
Puppet::Util::Log.destinations.clear
Puppet::Util::Log.newdestination(Bolt::Logger.logger('Puppet'))
# Defer all log level decisions to the Logging library by telling Puppet
# to log everything
Puppet.settings[:log_level] = 'debug'
end
def self.load_puppet
if Bolt::Util.windows?
# Windows 'fix' for openssl behaving strangely. Prevents very slow operation
# of random_bytes later when establishing winrm connections from a Windows host.
# See https://github.com/rails/rails/issues/25805 for background.
require 'openssl'
OpenSSL::Random.random_bytes(1)
end
begin
require 'puppet_pal'
rescue LoadError
raise Bolt::Error.new("Puppet must be installed to execute tasks", "bolt/puppet-missing")
end
require 'bolt/pal/logging'
require 'bolt/pal/issues'
require 'bolt/pal/yaml_plan/loader'
require 'bolt/pal/yaml_plan/transpiler'
# Now that puppet is loaded we can include puppet mixins in data types
Bolt::ResultSet.include_iterable
end
def setup
unless @loaded
# This is slow so don't do it until we have to
Bolt::PAL.load_puppet
# Make sure we don't create the puppet directories
with_puppet_settings { |_| nil }
@loaded = true
end
end
    # Create top-level aliases for TargetSpec and PlanResult so that users don't have to
    # namespace them with Boltlib, which is just an implementation detail. This
    # allows them to feel like built-in types in Bolt, rather than
    # something that has been, no pun intended, "bolted on".
def alias_types(compiler)
compiler.evaluate_string('type TargetSpec = Boltlib::TargetSpec')
compiler.evaluate_string('type PlanResult = Boltlib::PlanResult')
end
# Register all resource types defined in $Project/.resource_types as well as
    # the built-in types registered with the runtime_3_init method.
def register_resource_types(loaders)
static_loader = loaders.static_loader
static_loader.runtime_3_init
if File.directory?(@resource_types)
Dir.children(@resource_types).each do |resource_pp|
type_name_from_file = File.basename(resource_pp, '.pp').capitalize
typed_name = Puppet::Pops::Loader::TypedName.new(:type, type_name_from_file)
resource_type = Puppet::Pops::Types::TypeFactory.resource(type_name_from_file)
loaders.static_loader.set_entry(typed_name, resource_type)
end
end
end
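    # Warn if a project loaded as a module shares its name with a module already
    # on the environment's modulepath.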
def detect_project_conflict(project, environment)
return unless project && project.load_as_module?
# The environment modulepath has stripped out non-existent directories,
# so we don't need to check for them
modules = environment.modulepath.flat_map do |path|
Dir.children(path).select { |name| Puppet::Module.is_module_directory?(name, path) }
end
if modules.include?(project.name)
Bolt::Logger.warn_once(
"project_shadows_module",
"The project '#{project.name}' shadows an existing module of the same name"
)
end
end
# Runs a block in a PAL script compiler configured for Bolt. Catches
# exceptions thrown by the block and re-raises them ensuring they are
# Bolt::Errors since the script compiler block will squash all exceptions.
def in_bolt_compiler
# TODO: If we always call this inside a bolt_executor we can remove this here
setup
r = Puppet::Pal.in_tmp_environment('bolt', modulepath: full_modulepath, facts: {}) do |pal|
# Only load the project if it a) exists, b) has a name it can be loaded with
Puppet.override(bolt_project: @project,
yaml_plan_instantiator: Bolt::PAL::YamlPlan::Loader) do
# Because this has the side effect of loading and caching the list
# of modules, it must happen *after* we have overridden
# bolt_project or the project will be ignored
detect_project_conflict(@project, Puppet.lookup(:environments).get('bolt'))
pal.with_script_compiler(set_local_facts: false) do |compiler|
alias_types(compiler)
register_resource_types(Puppet.lookup(:loaders)) if @resource_types
begin
yield compiler
rescue Bolt::Error => e
e
rescue Puppet::DataBinding::LookupError => e
if e.issue_code == :HIERA_UNDEFINED_VARIABLE
message = "Interpolations are not supported in lookups outside of an apply block: #{e.message}"
PALError.new(message)
else
PALError.from_preformatted_error(e)
end
rescue Puppet::PreformattedError => e
if e.issue_code == :UNKNOWN_VARIABLE &&
%w[facts trusted server_facts settings].include?(e.arguments[:name])
message = "Evaluation Error: Variable '#{e.arguments[:name]}' is not available in the current scope "\
"unless explicitly defined."
details = { file: e.file, line: e.line, column: e.pos }
PALError.new(message, details)
else
PALError.from_preformatted_error(e)
end
rescue StandardError => e
PALError.from_preformatted_error(e)
end
end
end
end
# Plans may return PuppetError but nothing should be throwing them
if r.is_a?(StandardError) && !r.is_a?(Bolt::PuppetError)
raise r
end
r
end
def with_bolt_executor(executor, inventory, pdb_client = nil, applicator = nil, &block)
setup
opts = {
bolt_executor: executor,
bolt_inventory: inventory,
bolt_pdb_client: pdb_client,
apply_executor: applicator || Applicator.new(
inventory,
executor,
full_modulepath,
# Skip syncing built-in plugins, since we vendor some Puppet 6
# versions of "core" types, which are already present on the agent,
# but may cause issues on Puppet 5 agents.
user_modulepath,
@project,
pdb_client,
@hiera_config,
@max_compiles,
@apply_settings
)
}
Puppet.override(opts, &block)
end
def in_plan_compiler(executor, inventory, pdb_client, applicator = nil)
with_bolt_executor(executor, inventory, pdb_client, applicator) do
# TODO: remove this call and see if anything breaks when
# settings dirs don't actually exist. Plans shouldn't
# actually be using them.
with_puppet_settings do
in_bolt_compiler do |compiler|
yield compiler
end
end
end
end
def in_task_compiler(executor, inventory)
with_bolt_executor(executor, inventory) do
in_bolt_compiler do |compiler|
yield compiler
end
end
end
# TODO: PUP-8553 should replace this
def with_puppet_settings
dir = Dir.mktmpdir('bolt')
cli = []
Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting|
cli << "--#{setting}" << dir
end
Puppet.settings.send(:clear_everything_for_tests)
Puppet.initialize_settings(cli)
Puppet::GettextConfig.create_default_text_domain
Puppet[:trusted_external_command] = @trusted_external
Puppet.settings[:hiera_config] = @hiera_config
self.class.configure_logging
yield
ensure
# Delete the tmpdir if it still exists. This check is needed to
# prevent Bolt from erroring if the tmpdir is somehow deleted
# before reaching this point.
FileUtils.remove_entry_secure(dir) if File.exist?(dir)
end
# Parses a snippet of Puppet manifest code and returns the AST represented
# in JSON.
def parse_manifest(code, filename)
setup
Puppet::Pops::Parser::EvaluatingParser.new.parse_string(code, filename)
rescue Puppet::Error => e
raise Bolt::PAL::PALError, "Failed to parse manifest: #{e}"
end
# Filters content by a list of names and glob patterns specified in project
# configuration.
def filter_content(content, patterns)
return content unless content && patterns
content.select do |name,|
patterns.any? { |pattern| File.fnmatch?(pattern, name, File::FNM_EXTGLOB) }
end
end
def list_tasks(filter_content: false)
in_bolt_compiler do |compiler|
tasks = compiler.list_tasks.map(&:name).sort.each_with_object([]) do |task_name, data|
task_sig = compiler.task_signature(task_name)
unless task_sig.task_hash['metadata']['private']
data << [task_name, task_sig.task_hash['metadata']['description']]
end
end
filter_content ? filter_content(tasks, @project&.tasks) : tasks
end
end
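    # Coerce string parameter values against the task or plan signature, JSON-decoding
    # each value when the decoded result matches the expected type.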
def parse_params(type, object_name, params)
in_bolt_compiler do |compiler|
case type
when 'task'
param_spec = compiler.task_signature(object_name)&.task_hash&.dig('parameters')
when 'plan'
plan = compiler.plan_signature(object_name)
param_spec = plan.params_type.elements&.each_with_object({}) { |t, h| h[t.name] = t.value_type } if plan
end
param_spec ||= {}
params.each_with_object({}) do |(name, str), acc|
type = param_spec[name]
begin
parsed = JSON.parse(str, quirks_mode: true)
# The type may not exist if the module is remote on orch or if a task
# defines no parameters. Since we treat no parameters as Any we
# should parse everything in this case
acc[name] = if type && !type.instance?(parsed)
str
else
parsed
end
rescue JSON::ParserError
# This value may not be assignable in which case run_* will error
acc[name] = str
end
acc
end
end
end
def task_signature(task_name)
in_bolt_compiler do |compiler|
compiler.task_signature(task_name)
end
end
def get_task(task_name)
task = task_signature(task_name)
if task.nil?
raise Bolt::Error.unknown_task(task_name)
end
Bolt::Task.from_task_signature(task)
end
def list_plans_with_cache(filter_content: false)
# Don't filter content yet, so that if users update their plan filters
# we don't need to refresh the cache
plan_names = list_plans(filter_content: false).map(&:first)
plan_cache = if @project
Bolt::Util.read_optional_json_file(@project.plan_cache_file, 'Plan cache file')
else
{}
end
updated = false
plan_list = plan_names.each_with_object([]) do |plan_name, list|
info = plan_cache[plan_name] || get_plan_info(plan_name, with_mtime: true)
# If the plan is a 'local' plan (in the project itself, or the
# modules/ directory) then verify it hasn't been updated since we
# cached it. If it has been updated, refresh the cache and use the
# new data.
if info['file'] &&
(File.mtime(info.dig('file', 'path')) <=> info.dig('file', 'mtime')) != 0
info = get_plan_info(plan_name, with_mtime: true)
updated = true
plan_cache[plan_name] = info
end
list << [plan_name] unless info['private']
end
File.write(@project.plan_cache_file, plan_cache.to_json) if updated
filter_content ? filter_content(plan_list, @project&.plans) : plan_list
end
def list_plans(filter_content: false)
in_bolt_compiler do |compiler|
errors = []
plans = compiler.list_plans(nil, errors).map { |plan| [plan.name] }.sort
errors.each do |error|
Bolt::Logger.warn("plan_load_error", error.details['original_error'])
end
filter_content ? filter_content(plans, @project&.plans) : plans
end
end
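    # Build a plan's metadata (description, parameters, module), using puppet-strings
    # for Puppet language plans and the parsed plan definition for YAML plans.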
def get_plan_info(plan_name, with_mtime: false)
plan_sig = in_bolt_compiler do |compiler|
compiler.plan_signature(plan_name)
end
if plan_sig.nil?
raise Bolt::Error.unknown_plan(plan_name)
end
# path may be a Pathname object, so make sure to stringify it
mod = plan_sig.instance_variable_get(:@plan_func).loader.parent.path.to_s
# If it's a Puppet language plan, use strings to extract data. The only
# way to tell is to check which filename exists in the module.
plan_subpath = File.join(plan_name.split('::').drop(1))
plan_subpath = 'init' if plan_subpath.empty?
pp_path = File.join(mod, 'plans', "#{plan_subpath}.pp")
if File.exist?(pp_path)
require 'puppet-strings'
require 'puppet-strings/yard'
PuppetStrings::Yard.setup!
YARD::Logger.instance.level = :error
YARD.parse(pp_path)
plan = YARD::Registry.at("puppet_plans::#{plan_name}")
description = if plan.tag(:summary)
plan.tag(:summary).text
elsif !plan.docstring.empty?
plan.docstring
end
defaults = plan.parameters.to_h.compact
signature_params = Set.new(plan.parameters.map(&:first))
parameters = plan.tags(:param).each_with_object({}) do |param, params|
name = param.name
if signature_params.include?(name)
params[name] = { 'type' => param.types.first }
params[name]['sensitive'] = param.types.first =~ /\ASensitive(\[.*\])?\z/ ? true : false
params[name]['default_value'] = defaults[name] if defaults.key?(name)
params[name]['description'] = param.text unless param.text.empty?
else
Bolt::Logger.warn(
"missing_plan_parameter",
"The documented parameter '#{name}' does not exist in signature for plan '#{plan.name}'"
)
end
end
privie = plan.tag(:private)&.text
unless privie.nil? || %w[true false].include?(privie.downcase)
msg = "Plan #{plan_name} key 'private' must be a boolean, received: #{privie}"
raise Bolt::Error.new(msg, 'bolt/invalid-plan')
end
pp_info = {
'name' => plan_name,
'description' => description,
'parameters' => parameters,
'module' => mod
}
pp_info.merge!({ 'private' => privie&.downcase == 'true' }) unless privie.nil?
pp_info.merge!(get_plan_mtime(plan.file)) if with_mtime
pp_info
# If it's a YAML plan, fall back to limited data
else
yaml_path = File.join(mod, 'plans', "#{plan_subpath}.yaml")
plan_content = File.read(yaml_path)
plan = Bolt::PAL::YamlPlan::Loader.from_string(plan_name, plan_content, yaml_path)
parameters = plan.parameters.each_with_object({}) do |param, params|
name = param.name
type_str = case param.type_expr
when Puppet::Pops::Types::PTypeReferenceType
param.type_expr.type_string
when nil
'Any'
else
param.type_expr
end
params[name] = { 'type' => type_str }
params[name]['sensitive'] = param.type_expr.instance_of?(Puppet::Pops::Types::PSensitiveType)
params[name]['default_value'] = param.value unless param.value.nil?
params[name]['description'] = param.description if param.description
end
yaml_info = {
'name' => plan_name,
'description' => plan.description,
'parameters' => parameters,
'module' => mod
}
yaml_info.merge!({ 'private' => plan.private }) unless plan.private.nil?
yaml_info.merge!(get_plan_mtime(yaml_path)) if with_mtime
yaml_info
end
end
def get_plan_mtime(path)
# If the plan is from the project modules/ directory, or is in the
# project itself, include the last mtime of the file so we can compare
# if the plan has been updated since it was cached.
if @project &&
File.exist?(path) &&
(path.include?(File.join(@project.path, 'modules')) ||
path.include?(@project.plans_path.to_s))
{ 'file' => { 'mtime' => File.mtime(path),
'path' => path } }
else
{}
end
end
def convert_plan(plan_path)
Puppet[:tasks] = true
transpiler = YamlPlan::Transpiler.new
transpiler.transpile(plan_path)
end
# Returns a mapping of all modules available to the Bolt compiler
#
# @return [Hash{String => Array<Hash{Symbol => String,nil}>}]
# A hash that associates each directory on the modulepath with an array
# containing a hash of information for each module in that directory.
# The information hash provides the name, version, and a string
# indicating whether the module belongs to an internal module group.
def list_modules
internal_module_groups = { Bolt::Config::Modulepath::BOLTLIB_PATH => 'Plan Language Modules',
Bolt::Config::Modulepath::MODULES_PATH => 'Packaged Modules',
@project.managed_moduledir.to_s => 'Project Dependencies' }
in_bolt_compiler do
# NOTE: Can replace map+to_h with transform_values when Ruby 2.4
# is the minimum supported version.
Puppet.lookup(:current_environment).modules_by_path.map do |path, modules|
module_group = internal_module_groups[path]
values = modules.map do |mod|
mod_info = { name: (mod.forge_name || mod.name),
version: mod.version }
mod_info[:internal_module_group] = module_group unless module_group.nil?
mod_info
end
[path, values]
end.to_h
end
end
def generate_types(cache: false)
require 'puppet/face/generate'
in_bolt_compiler do
generator = Puppet::Generate::Type
inputs = generator.find_inputs(:pcore)
FileUtils.mkdir_p(@resource_types)
cache_plan_info if @project && cache
generator.generate(inputs, @resource_types, true)
end
end
def cache_plan_info
# plan_name is an array here
plans_info = list_plans(filter_content: false).map do |plan_name,|
data = get_plan_info(plan_name, with_mtime: true)
{ plan_name => data }
end.reduce({}, :merge)
FileUtils.touch(@project.plan_cache_file)
File.write(@project.plan_cache_file, plans_info.to_json)
end
def run_task(task_name, targets, params, executor, inventory, description = nil)
in_task_compiler(executor, inventory) do |compiler|
params = params.merge('_bolt_api_call' => true, '_catch_errors' => true)
compiler.call_function('run_task', task_name, targets, description, params)
end
end
def run_plan(plan_name, params, executor = nil, inventory = nil, pdb_client = nil, applicator = nil)
in_plan_compiler(executor, inventory, pdb_client, applicator) do |compiler|
r = compiler.call_function('run_plan', plan_name, params.merge('_bolt_api_call' => true))
Bolt::PlanResult.from_pcore(r, 'success')
end
rescue Bolt::Error => e
Bolt::PlanResult.new(e, 'failure')
end
end
end
| 1 | 17,894 | I think similar to line 176 here we'll want to call `detect_project_conflict` after overriding this. | puppetlabs-bolt | rb |
@@ -1,7 +1,8 @@
const fs = require('fs')
const path = require('path')
const chalk = require('chalk')
-const { exec } = require('child_process')
+const { spawn } = require('child_process')
+const readline = require('readline')
const YAML = require('js-yaml')
const { promisify } = require('util')
const gzipSize = require('gzip-size') | 1 | const fs = require('fs')
const path = require('path')
const chalk = require('chalk')
const { exec } = require('child_process')
const YAML = require('js-yaml')
const { promisify } = require('util')
const gzipSize = require('gzip-size')
const prettierBytes = require('@transloadit/prettier-bytes')
const browserify = require('browserify')
const touch = require('touch')
const glob = require('glob')
const webRoot = __dirname
const uppyRoot = path.join(__dirname, '../packages/uppy')
const robodogRoot = path.join(__dirname, '../packages/@uppy/robodog')
const localesRoot = path.join(__dirname, '../packages/@uppy/locales')
const configPath = path.join(webRoot, '/themes/uppy/_config.yml')
const { version } = require(path.join(uppyRoot, '/package.json'))
const regionalDisplayNames = new Intl.DisplayNames('en-US', { type: 'region' })
const languageDisplayNames = new Intl.DisplayNames('en-US', { type: 'language' })
const defaultConfig = {
comment: 'Auto updated by inject.js',
uppy_version_anchor: '001',
uppy_version: '0.0.1',
uppy_bundle_kb_sizes: {},
config: {},
}
// Keeping a whitelist so utils etc are excluded
// It may be easier to maintain a blacklist instead
const packages = [
// Bundles
'uppy',
'@uppy/robodog',
// Integrations
'@uppy/react',
// Core
'@uppy/core',
// Plugins -- please keep these sorted alphabetically
'@uppy/aws-s3',
'@uppy/aws-s3-multipart',
'@uppy/dashboard',
'@uppy/drag-drop',
'@uppy/dropbox',
'@uppy/file-input',
'@uppy/form',
'@uppy/golden-retriever',
'@uppy/google-drive',
'@uppy/informer',
'@uppy/instagram',
'@uppy/image-editor',
'@uppy/progress-bar',
'@uppy/screen-capture',
'@uppy/status-bar',
'@uppy/thumbnail-generator',
'@uppy/transloadit',
'@uppy/tus',
'@uppy/url',
'@uppy/webcam',
'@uppy/xhr-upload',
'@uppy/drop-target',
// Stores
'@uppy/store-default',
'@uppy/store-redux',
]
const excludes = {
'@uppy/react': ['react'],
}
inject().catch((err) => {
console.error(err)
process.exit(1)
})
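// Bundle a package with browserify + tinyify and report its minified and gzipped sizes.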
async function getMinifiedSize (pkg, name) {
const b = browserify(pkg)
const packageJSON = fs.readFileSync(path.join(pkg, 'package.json'))
const version = JSON.parse(packageJSON).version
if (name !== '@uppy/core' && name !== 'uppy') {
b.exclude('@uppy/core')
// Already unconditionally included through @uppy/core
b.exclude('preact')
}
if (excludes[name]) {
b.exclude(excludes[name])
}
b.plugin('tinyify')
const bundle = await promisify(b.bundle).call(b)
const gzipped = await gzipSize(bundle)
return {
minified: bundle.length,
gzipped,
version,
}
}
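// Compute bundle sizes for every listed package and store them on the config object.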
async function injectSizes (config) {
console.info(chalk.grey('Generating bundle sizes…'))
const padTarget = packages.reduce((max, cur) => Math.max(max, cur.length), 0) + 2
const sizesPromise = Promise.all(
packages.map(async (pkg) => {
const result = await getMinifiedSize(path.join(__dirname, '../packages', pkg), pkg)
console.info(chalk.green(
// ✓ @uppy/pkgname: 10.0 kB min / 2.0 kB gz
` ✓ ${pkg}: ${' '.repeat(padTarget - pkg.length)}${
`${prettierBytes(result.minified)} min`.padEnd(10)
} / ${prettierBytes(result.gzipped)} gz`
))
return Object.assign(result, {
prettyMinified: prettierBytes(result.minified),
prettyGzipped: prettierBytes(result.gzipped),
})
})
).then((list) => {
const map = {}
list.forEach((size, i) => {
map[packages[i]] = size
})
return map
})
config.uppy_bundle_kb_sizes = await sizesPromise
}
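// Copy the built Uppy, Robodog, and locale bundles into the website theme directory.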
async function injectBundles () {
const cmds = [
`mkdir -p ${path.join(webRoot, '/themes/uppy/source/uppy')}`,
`mkdir -p ${path.join(webRoot, '/themes/uppy/source/uppy/locales')}`,
`cp -vfR ${path.join(uppyRoot, '/dist/*')} ${path.join(webRoot, '/themes/uppy/source/uppy/')}`,
`cp -vfR ${path.join(robodogRoot, '/dist/*')} ${path.join(webRoot, '/themes/uppy/source/uppy/')}`,
`cp -vfR ${path.join(localesRoot, '/dist/*')} ${path.join(webRoot, '/themes/uppy/source/uppy/locales')}`,
].join(' && ')
const { stdout } = await promisify(exec)(cmds)
stdout.trim().split('\n').forEach((line) => {
console.info(chalk.green('✓ injected: '), chalk.grey(line))
})
}
// re-enable after rate limiter issue is fixed
//
async function injectGhStars () {
const opts = {}
if ('GITHUB_TOKEN' in process.env) {
opts.auth = process.env.GITHUB_TOKEN
}
const { Octokit } = require('@octokit/rest')
const octokit = new Octokit(opts)
const { headers, data } = await octokit.repos.get({
owner: 'transloadit',
repo: 'uppy',
})
console.log(`${headers['x-ratelimit-remaining']} requests remaining until we hit GitHub ratelimiter`)
const dstpath = path.join(webRoot, 'themes', 'uppy', 'layout', 'partials', 'generated_stargazers.ejs')
fs.writeFileSync(dstpath, String(data.stargazers_count), 'utf-8')
console.log(`${data.stargazers_count} stargazers written to '${dstpath}'`)
}
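// Copy selected repository markdown files into the site templates, stripping YAML front matter.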
async function injectMarkdown () {
const sources = {
'.github/ISSUE_TEMPLATE/integration_help.md': 'src/_template/integration_help.md',
'.github/CONTRIBUTING.md': 'src/_template/contributing.md',
}
for (const src in sources) {
const dst = sources[src]
// strip yaml frontmatter:
const srcpath = path.join(uppyRoot, `/../../${src}`)
const dstpath = path.join(webRoot, dst)
const parts = fs.readFileSync(srcpath, 'utf-8').split(/---\s*\n/)
if (parts.length >= 3) {
parts.shift()
parts.shift()
}
let content = `<!-- WARNING! This file was injected. Please edit in "${src}" instead and run "${path.basename(__filename)}" -->\n\n`
content += parts.join('---\n')
fs.writeFileSync(dstpath, content, 'utf-8')
console.info(chalk.green('✓ injected: '), chalk.grey(srcpath))
}
touch(path.join(webRoot, '/src/support.md'))
}
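// Generate the locale overview: a markdown table for the docs and a JSON list for the examples page.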
function injectLocaleList () {
const mdTable = [
`<!-- WARNING! This file was automatically injected. Please run "${path.basename(__filename)}" to re-generate -->\n\n`,
'| %count% Locales | NPM | CDN | Source on GitHub |',
'| --------------- | ------------------ | ------------------- | ---------------- |',
]
const mdRows = []
const localeList = {}
const localePackagePath = path.join(localesRoot, 'src', '*.js')
const localePackageVersion = require(path.join(localesRoot, 'package.json')).version
glob.sync(localePackagePath).forEach((localePath) => {
const localeName = path.basename(localePath, '.js')
const [languageCode, regionCode, variant] = localeName.split(/[-_]/)
const languageName = languageDisplayNames.of(languageCode)
const regionName = regionalDisplayNames.of(regionCode)
const npmPath = `<code class="raw"><a href="https://www.npmjs.com/package/@uppy/locales">@uppy/locales</a>/lib/${localeName}</code>`
const cdnPath = `[\`${localeName}.min.js\`](https://releases.transloadit.com/uppy/locales/v${localePackageVersion}/${localeName}.min.js)`
const githubSource = `[\`${localeName}.js\`](https://github.com/transloadit/uppy/blob/master/packages/%40uppy/locales/src/${localeName}.js)`
const mdTableRow = `| ${languageName}<br/> <small>${regionName}</small>${variant ? `<br /><small>(${variant})</small>` : ''} | ${npmPath} | ${cdnPath} | ✏️ ${githubSource} |`
mdRows.push(mdTableRow)
localeList[localeName] = `${languageName} (${regionName}${variant ? `, ${variant}` : ''})`
})
const resultingMdTable = mdTable.concat(mdRows.sort()).join('\n').replace('%count%', mdRows.length)
const dstpath = path.join(webRoot, 'src', '_template', 'list_of_locale_packs.md')
const localeListDstPath = path.join(webRoot, 'src', 'examples', 'locale_list.json')
fs.writeFileSync(dstpath, resultingMdTable, 'utf-8')
console.info(chalk.green('✓ injected: '), chalk.grey(dstpath))
fs.writeFileSync(localeListDstPath, JSON.stringify(localeList), 'utf-8')
console.info(chalk.green('✓ injected: '), chalk.grey(localeListDstPath))
}
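// Read the existing theme config, falling back to an empty object if it cannot be read.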
async function readConfig () {
try {
const buf = await promisify(fs.readFile)(configPath, 'utf8')
return YAML.safeLoad(buf)
} catch (err) {
return {}
}
}
async function inject () {
const config = await readConfig()
await injectGhStars()
await injectMarkdown()
injectLocaleList()
config.uppy_version = version
config.uppy_version_anchor = version.replace(/[^\d]+/g, '')
await injectSizes(config)
const saveConfig = { ...defaultConfig, ...config }
await promisify(fs.writeFile)(configPath, YAML.safeDump(saveConfig), 'utf-8')
console.info(chalk.green('✓ rewritten: '), chalk.grey(configPath))
try {
await injectBundles()
} catch (error) {
console.error(
chalk.red('x failed to inject: '),
chalk.grey(`uppy bundle into site, because: ${error}`)
)
process.exit(1)
}
}
| 1 | 14,306 | How about `const { promises: fs } = require('fs')` and then replacing `fs.promises.` with `fs.`? | transloadit-uppy | js |
@@ -129,13 +129,9 @@ func getGithubData(ctx context.Context, url string) ([]byte, error) {
return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status)
}
- buf, err := ioutil.ReadAll(res.Body)
- if err != nil {
- _ = res.Body.Close()
- return nil, err
- }
+ defer res.Body.Close()
- err = res.Body.Close()
+ buf, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
} | 1 | package selfupdate
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context/ctxhttp"
)
// Release collects data about a single release on GitHub.
type Release struct {
Name string `json:"name"`
TagName string `json:"tag_name"`
Draft bool `json:"draft"`
PreRelease bool `json:"prerelease"`
PublishedAt time.Time `json:"published_at"`
Assets []Asset `json:"assets"`
Version string `json:"-"` // set manually in the code
}
// Asset is a file uploaded and attached to a release.
type Asset struct {
ID int `json:"id"`
Name string `json:"name"`
URL string `json:"url"`
}
func (r Release) String() string {
return fmt.Sprintf("%v %v, %d assets",
r.TagName,
r.PublishedAt.Local().Format("2006-01-02 15:04:05"),
len(r.Assets))
}
const githubAPITimeout = 30 * time.Second
// githubError is returned by the GitHub API, e.g. for rate-limiting.
type githubError struct {
Message string
}
// GitHubLatestRelease uses the GitHub API to get information about the latest
// release of a repository.
func GitHubLatestRelease(ctx context.Context, owner, repo string) (Release, error) {
ctx, cancel := context.WithTimeout(ctx, githubAPITimeout)
defer cancel()
url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return Release{}, err
}
// pin API version 3
req.Header.Set("Accept", "application/vnd.github.v3+json")
res, err := ctxhttp.Do(ctx, http.DefaultClient, req)
if err != nil {
return Release{}, err
}
if res.StatusCode != http.StatusOK {
content := res.Header.Get("Content-Type")
if strings.Contains(content, "application/json") {
// try to decode error message
var msg githubError
jerr := json.NewDecoder(res.Body).Decode(&msg)
if jerr == nil {
return Release{}, fmt.Errorf("unexpected status %v (%v) returned, message:\n %v", res.StatusCode, res.Status, msg.Message)
}
}
_ = res.Body.Close()
return Release{}, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status)
}
buf, err := ioutil.ReadAll(res.Body)
if err != nil {
_ = res.Body.Close()
return Release{}, err
}
err = res.Body.Close()
if err != nil {
return Release{}, err
}
var release Release
err = json.Unmarshal(buf, &release)
if err != nil {
return Release{}, err
}
if release.TagName == "" {
return Release{}, errors.New("tag name for latest release is empty")
}
if !strings.HasPrefix(release.TagName, "v") {
return Release{}, errors.Errorf("tag name %q is invalid, does not start with 'v'", release.TagName)
}
release.Version = release.TagName[1:]
return release, nil
}
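// getGithubData downloads the GitHub asset at url as raw binary data.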
func getGithubData(ctx context.Context, url string) ([]byte, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
// request binary data
req.Header.Set("Accept", "application/octet-stream")
res, err := ctxhttp.Do(ctx, http.DefaultClient, req)
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status)
}
buf, err := ioutil.ReadAll(res.Body)
if err != nil {
_ = res.Body.Close()
return nil, err
}
err = res.Body.Close()
if err != nil {
return nil, err
}
return buf, nil
}
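// getGithubDataFile finds the release asset whose name ends in suffix and downloads it.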
func getGithubDataFile(ctx context.Context, assets []Asset, suffix string, printf func(string, ...interface{})) (filename string, data []byte, err error) {
var url string
for _, a := range assets {
if strings.HasSuffix(a.Name, suffix) {
url = a.URL
filename = a.Name
break
}
}
if url == "" {
return "", nil, fmt.Errorf("unable to find file with suffix %v", suffix)
}
printf("download %v\n", filename)
data, err = getGithubData(ctx, url)
if err != nil {
return "", nil, err
}
return filename, data, nil
}
| 1 | 11,586 | This ignores errors closing the body. Not likely to happen, but no reason to take the risk either. | restic-restic | go |
@@ -73,7 +73,7 @@ public abstract class FlinkTestBase extends AbstractTestBase {
}
protected static TableResult exec(TableEnvironment env, String query, Object... args) {
- return env.executeSql(String.format(query, args));
+ return env.executeSql(args.length > 0 ? String.format(query, args) : query);
}
protected TableResult exec(String query, Object... args) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.stream.IntStream;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.test.util.AbstractTestBase;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.iceberg.hive.HiveCatalog;
import org.apache.iceberg.hive.TestHiveMetastore;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public abstract class FlinkTestBase extends AbstractTestBase {
private static TestHiveMetastore metastore = null;
protected static HiveConf hiveConf = null;
protected static HiveCatalog catalog = null;
private volatile TableEnvironment tEnv = null;
@BeforeClass
public static void startMetastore() {
FlinkTestBase.metastore = new TestHiveMetastore();
metastore.start();
FlinkTestBase.hiveConf = metastore.hiveConf();
FlinkTestBase.catalog = new HiveCatalog(metastore.hiveConf());
}
@AfterClass
public static void stopMetastore() {
metastore.stop();
catalog.close();
FlinkTestBase.catalog = null;
}
protected TableEnvironment getTableEnv() {
if (tEnv == null) {
synchronized (this) {
if (tEnv == null) {
this.tEnv = TableEnvironment.create(EnvironmentSettings
.newInstance()
.useBlinkPlanner()
.inBatchMode().build());
}
}
}
return tEnv;
}
protected static TableResult exec(TableEnvironment env, String query, Object... args) {
return env.executeSql(String.format(query, args));
}
protected TableResult exec(String query, Object... args) {
return exec(getTableEnv(), query, args);
}
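  // Runs the query, waits for any asynchronous Flink job to finish, and collects the result rows.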
protected List<Object[]> sql(String query, Object... args) {
TableResult tableResult = exec(String.format(query, args));
tableResult.getJobClient().ifPresent(c -> {
try {
c.getJobExecutionResult(Thread.currentThread().getContextClassLoader()).get();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
});
List<Object[]> results = Lists.newArrayList();
try (CloseableIterator<Row> iter = tableResult.collect()) {
while (iter.hasNext()) {
Row row = iter.next();
results.add(IntStream.range(0, row.getArity()).mapToObj(row::getField).toArray(Object[]::new));
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return results;
}
}
| 1 | 30,041 | Why was this change needed? | apache-iceberg | java |
@@ -259,6 +259,8 @@ namespace pwiz.Skyline
(c, p) => c.LockmassNegative = p.ValueDouble);
public static readonly Argument ARG_IMPORT_LOCKMASS_TOLERANCE = new DocArgument(@"import-lockmass-tolerance", NUM_VALUE,
(c, p) => c.LockmassTolerance = p.ValueDouble);
+ public static readonly Argument ARG_IMPORT_PEAK_BOUNDARIES = new DocArgument(@"import-peak-boundaries", PATH_TO_FILE,
+ (c, p) => c.ImportPeakBoundaries = p.ValueFullPath);
private static readonly ArgumentGroup GROUP_IMPORT = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_IMPORT_Importing_results_replicates, false,
ARG_IMPORT_FILE, ARG_IMPORT_REPLICATE_NAME, ARG_IMPORT_OPTIMIZING, ARG_IMPORT_APPEND, ARG_IMPORT_ALL, | 1 | /*
* Original author: John Chilton <jchilton .at. u.washington.edu>,
* Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2011-2019 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using System.Web;
using System.Xml.Serialization;
using pwiz.Common.DataBinding.Documentation;
using pwiz.Common.SystemUtil;
using pwiz.ProteowizardWrapper;
using pwiz.Skyline.Controls.Graphs;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.GroupComparison;
using pwiz.Skyline.Model.Irt;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Model.Results.Scoring;
using pwiz.Skyline.Model.Tools;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
namespace pwiz.Skyline
{
public class CommandArgs
{
// Argument value descriptions
private static readonly Func<string> PATH_TO_FILE = () => CommandArgUsage.CommandArgs_PATH_TO_FILE_path_to_file;
private static string GetPathToFile(string ext)
{
return PATH_TO_FILE() + ext;
}
private static readonly Func<string> PATH_TO_DOCUMENT = () => GetPathToFile(SrmDocument.EXT);
private static readonly Func<string> PATH_TO_FOLDER = () => CommandArgUsage.CommandArgs_PATH_TO_FOLDER;
private static readonly Func<string> DATE_VALUE = () => CommandArgUsage.CommandArgs_DATE_VALUE;
private static readonly Func<string> INT_VALUE = () => CommandArgUsage.CommandArgs_INT_VALUE;
private static readonly Func<string> NUM_VALUE = () => CommandArgUsage.CommandArgs_NUM_VALUE;
private static readonly Func<string> NUM_LIST_VALUE = () => CommandArgUsage.CommandArgs_NUM_LIST_VALUE;
private static readonly Func<string> NAME_VALUE = () => CommandArgUsage.CommandArgs_NAME_VALUE;
private static readonly Func<string> FEATURE_NAME_VALUE = () => CommandArgUsage.CommandArgs_FEATURE_NAME_VALUE;
private static readonly Func<string> REPORT_NAME_VALUE = () => CommandArgUsage.CommandArgs_REPORT_NAME_VALUE;
private static readonly Func<string> PIPE_NAME_VALUE = () => CommandArgUsage.CommandArgs_PIPE_NAME_VALUE;
private static readonly Func<string> REGEX_VALUE = () => CommandArgUsage.CommandArgs_REGEX_VALUE;
private static readonly Func<string> RP_VALUE = () => CommandArgUsage.CommandArgs_RP_VALUE;
private static readonly Func<string> MZ_VALUE = () => CommandArgUsage.CommandArgs_MZ_VALUE;
private static readonly Func<string> MINUTES_VALUE = () => CommandArgUsage.CommandArgs_MINUTES_VALUE;
private static readonly Func<string> MILLIS_VALE = () => CommandArgUsage.CommandArgs_MILLIS_VALE;
private static readonly Func<string> SERVER_URL_VALUE = () => CommandArgUsage.CommandArgs_SERVER_URL_VALUE;
private static readonly Func<string> USERNAME_VALUE = () => CommandArgUsage.CommandArgs_USERNAME_VALUE;
private static readonly Func<string> PASSWORD_VALUE = () => CommandArgUsage.CommandArgs_PASSWORD_VALUE;
private static readonly Func<string> COMMAND_VALUE = () => CommandArgUsage.CommandArgs_COMMAND_VALUE;
private static readonly Func<string> COMMAND_ARGUMENTS_VALUE = () => CommandArgUsage.CommandArgs_COMMAND_ARGUMENTS_VALUE;
private static readonly Func<string> PROGRAM_MACRO_VALUE = () => CommandArgUsage.CommandArgs_PROGRAM_MACRO_VALUE;
private static readonly Func<string> LABEL_VALUE = () => CommandArgUsage.CommandArgs_LABEL_VALUE;
// ReSharper disable LocalizableElement
private static readonly Func<string> INT_LIST_VALUE = () => "\"1, 2, 3...\""; // Not L10N
private static readonly Func<string> ION_TYPE_LIST_VALUE = () => "\"a, b, c, x, y, z, p\""; // Not L10N
// ReSharper restore LocalizableElement
// Internal use arguments
public static readonly Argument ARG_INTERNAL_SCREEN_WIDTH = new Argument(@"sw", INT_VALUE,
(c, p) => c._usageWidth = p.ValueInt) {InternalUse = true};
public static readonly Argument ARG_INTERNAL_CULTURE = new Argument(@"culture", () => @"en|fr|ja|zh-CHS...",
(c, p) => SetCulture(p.Value)) { InternalUse = true };
private static void SetCulture(string cultureName)
{
LocalizationHelper.CurrentCulture = LocalizationHelper.CurrentUICulture = new CultureInfo(cultureName);
LocalizationHelper.InitThread(Thread.CurrentThread);
}
// Multi process import
public static readonly Argument ARG_INTERNAL_IMPORT_FILE_CACHE = new DocArgument(@"import-file-cache", PATH_TO_FILE,
(c, p) => Program.ReplicateCachePath = p.Value) {InternalUse = true};
public static readonly Argument ARG_INTERNAL_IMPORT_PROGRESS_PIPE = new DocArgument(@"import-progress-pipe", PIPE_NAME_VALUE,
(c, p) => Program.ImportProgressPipe = p.Value) {InternalUse = true};
public static readonly Argument ARG_TEST_UI = new Argument(@"ui",
(c, p) => /* Handled by Program */ true) {InternalUse = true};
public static readonly Argument ARG_TEST_HIDEACG = new Argument(@"hideacg",
(c, p) => c.HideAllChromatogramsGraph = true) {InternalUse = true};
public static readonly Argument ARG_TEST_NOACG = new Argument(@"noacg",
(c, p) => c.NoAllChromatogramsGraph = true) {InternalUse = true};
private static readonly ArgumentGroup GROUP_INTERNAL = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_INTERNAL, false,
ARG_INTERNAL_SCREEN_WIDTH, ARG_INTERNAL_CULTURE, ARG_INTERNAL_IMPORT_FILE_CACHE, ARG_INTERNAL_IMPORT_PROGRESS_PIPE,
ARG_TEST_UI, ARG_TEST_HIDEACG, ARG_TEST_NOACG);
public bool HideAllChromatogramsGraph { get; private set; }
public bool NoAllChromatogramsGraph { get; private set; }
// Conflict resolution values
public const string ARG_VALUE_OVERWRITE = "overwrite";
public const string ARG_VALUE_SKIP = "skip";
public const string ARG_VALUE_PARALLEL = "parallel";
public static readonly Argument ARG_IN = new DocArgument(@"in", PATH_TO_DOCUMENT,
(c, p) => c.SkylineFile = p.ValueFullPath);
public static readonly Argument ARG_SAVE = new DocArgument(@"save", (c, p) => { c.Saving = true; });
public static readonly Argument ARG_SAVE_SETTINGS = new DocArgument(@"save-settings", (c, p) => c.SaveSettings = true);
public static readonly Argument ARG_OUT = new DocArgument(@"out", PATH_TO_DOCUMENT,
(c, p) => { c.SaveFile = p.ValueFullPath; });
public static readonly Argument ARG_SHARE_ZIP = new DocArgument(@"share-zip", () => GetPathToFile(SrmDocumentSharing.EXT_SKY_ZIP),
(c, p) =>
{
c.SharingZipFile = true;
if (!string.IsNullOrEmpty(p.Value))
c.SharedFile = p.Value;
})
{ OptionalValue = true};
public static readonly Argument ARG_SHARE_TYPE = new Argument(@"share-type",
new[] {ARG_VALUE_SHARE_TYPE_MINIMAL, ARG_VALUE_SHARE_TYPE_COMPLETE},
(c, p) => c.SharedFileType = p.IsValue(ARG_VALUE_SHARE_TYPE_MINIMAL) ? ShareType.MINIMAL : ShareType.COMPLETE);
public const string ARG_VALUE_SHARE_TYPE_MINIMAL = "minimal";
public const string ARG_VALUE_SHARE_TYPE_COMPLETE = "complete";
public static readonly Argument ARG_BATCH = new Argument(@"batch-commands", PATH_TO_FILE, // Run each line of a text file like a command
(c, p) =>
{
c.BatchCommandsPath = p.ValueFullPath;
c.RunningBatchCommands = true;
});
public static readonly Argument ARG_DIR = new Argument(@"dir", PATH_TO_FOLDER,
(c, p) =>
{
if (!Directory.Exists(p.Value))
{
c.WriteLine(Resources.CommandArgs_ParseArgsInternal_Error__The_specified_working_directory__0__does_not_exist_, p.Value);
return false;
}
Directory.SetCurrentDirectory(p.Value);
return true;
});
public static readonly Argument ARG_TIMESTAMP = new Argument(@"timestamp", (c, p) => c._out.IsTimeStamped = true);
public static readonly Argument ARG_MEMSTAMP = new Argument(@"memstamp", (c, p) => c._out.IsMemStamped = true);
public static readonly Argument ARG_LOG_FILE = new Argument(@"log-file", PATH_TO_FILE, (c, p) => c.LogFile = p.Value);
public static readonly Argument ARG_HELP = new Argument(@"help",
new[] { ARG_VALUE_ASCII, ARG_VALUE_NO_BORDERS },
(c, p) => c.Usage(p.Value)) {OptionalValue = true};
public const string ARG_VALUE_ASCII = "ascii";
public const string ARG_VALUE_NO_BORDERS = "no-borders";
public static readonly Argument ARG_VERSION = new Argument(@"version", (c, p) => c.Version());
private static readonly ArgumentGroup GROUP_GENERAL_IO = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_GENERAL_IO_General_input_output, true,
ARG_IN, ARG_SAVE, ARG_SAVE_SETTINGS, ARG_OUT, ARG_SHARE_ZIP, ARG_SHARE_TYPE, ARG_BATCH, ARG_DIR, ARG_TIMESTAMP, ARG_MEMSTAMP,
ARG_LOG_FILE, ARG_HELP, ARG_VERSION)
{
Validate = c => c.ValidateGeneralArgs()
};
private void Version()
{
UsageShown = true; // Keep from showing the full usage table
_out.WriteLine(Install.ProgramNameAndVersion);
VersionPwiz();
}
private void VersionPwiz()
{
UsageShown = true; // Keep from showing the full usage table
_out.WriteLine(@" ProteoWizard MSData {0}", MsDataFileImpl.InstalledVersion);
}
private bool ValidateGeneralArgs()
{
// If SkylineFile isn't set and one of the commands that requires --in is called, complain.
if (string.IsNullOrEmpty(SkylineFile) && RequiresSkylineDocument && !_isDocumentLoaded)
{
WriteLine(Resources.CommandArgs_ParseArgsInternal_Error__Use___in_to_specify_a_Skyline_document_to_open_);
return false;
}
// Use the original file as the output file, if not told otherwise.
if (Saving && string.IsNullOrEmpty(SaveFile))
{
SaveFile = SkylineFile;
}
return true;
}
public string LogFile { get; private set; }
public string SkylineFile { get; private set; }
public string SaveFile { get; private set; }
private bool _saving;
public bool Saving
{
get { return !String.IsNullOrEmpty(SaveFile) || _saving; }
set { _saving = value; }
}
public bool SaveSettings { get; private set; }
// For sharing zip file
public bool SharingZipFile { get; private set; }
public string SharedFile { get; private set; }
public ShareType SharedFileType { get; private set; }
public static readonly Argument ARG_IMPORT_FILE = new DocArgument(@"import-file", PATH_TO_FILE,
(c, p) => c.ParseImportFile(p));
public static readonly Argument ARG_IMPORT_REPLICATE_NAME = new DocArgument(@"import-replicate-name", NAME_VALUE,
(c, p) => c.ReplicateName = p.Value);
public static readonly Argument ARG_IMPORT_OPTIMIZING = new DocArgument(@"import-optimizing", new[] {OPT_CE, OPT_DP},
(c, p) => c.ImportOptimizeType = p.Value);
public static readonly Argument ARG_IMPORT_APPEND = new DocArgument(@"import-append", (c, p) => c.ImportAppend = true);
public static readonly Argument ARG_IMPORT_ALL = new DocArgument(@"import-all", PATH_TO_FOLDER,
(c, p) =>
{
c.ImportSourceDirectory = p.ValueFullPath;
c.ImportRecursive = true;
});
public static readonly Argument ARG_IMPORT_ALL_FILES = new DocArgument(@"import-all-files", PATH_TO_FOLDER,
(c, p) => c.ImportSourceDirectory = p.ValueFullPath);
public static readonly Argument ARG_IMPORT_NAMING_PATTERN = new DocArgument(@"import-naming-pattern", REGEX_VALUE,
(c, p) => c.ParseImportNamingPattern(p));
public static readonly Argument ARG_IMPORT_FILENAME_PATTERN = new DocArgument(@"import-filename-pattern", REGEX_VALUE,
(c, p) => c.ParseImportFileNamePattern(p));
public static readonly Argument ARG_IMPORT_SAMPLENAME_PATTERN = new DocArgument(@"import-samplename-pattern", REGEX_VALUE,
(c, p) => c.ParseImportSampleNamePattern(p));
public static readonly Argument ARG_IMPORT_BEFORE = new DocArgument(@"import-before", DATE_VALUE,
(c, p) => c.ImportBeforeDate = p.ValueDate);
public static readonly Argument ARG_IMPORT_ON_OR_AFTER = new DocArgument(@"import-on-or-after", DATE_VALUE,
(c, p) => c.ImportOnOrAfterDate = p.ValueDate);
public static readonly Argument ARG_IMPORT_WARN_ON_FAILURE = new DocArgument(@"import-warn-on-failure",
(c, p) => c.ImportWarnOnFailure = true);
public static readonly Argument ARG_IMPORT_NO_JOIN = new DocArgument(@"import-no-join",
(c, p) => c.ImportDisableJoining = true);
public static readonly Argument ARG_IMPORT_PROCESS_COUNT = new Argument(@"import-process-count", INT_VALUE,
(c, p) =>
{
c.ImportThreads = p.ValueInt;
if (c.ImportThreads > 0)
Program.MultiProcImport = true;
});
public static readonly Argument ARG_IMPORT_THREADS = new Argument(@"import-threads", INT_VALUE,
(c, p) => c.ImportThreads = p.ValueInt);
public static readonly Argument ARG_IMPORT_LOCKMASS_POSITIVE = new DocArgument(@"import-lockmass-positive", NUM_VALUE,
(c, p) => c.LockmassPositive = p.ValueDouble);
public static readonly Argument ARG_IMPORT_LOCKMASS_NEGATIVE = new DocArgument(@"import-lockmass-negative", NUM_VALUE,
(c, p) => c.LockmassNegative = p.ValueDouble);
public static readonly Argument ARG_IMPORT_LOCKMASS_TOLERANCE = new DocArgument(@"import-lockmass-tolerance", NUM_VALUE,
(c, p) => c.LockmassTolerance = p.ValueDouble);
private static readonly ArgumentGroup GROUP_IMPORT = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_IMPORT_Importing_results_replicates, false,
ARG_IMPORT_FILE, ARG_IMPORT_REPLICATE_NAME, ARG_IMPORT_OPTIMIZING, ARG_IMPORT_APPEND, ARG_IMPORT_ALL,
ARG_IMPORT_ALL_FILES, ARG_IMPORT_NAMING_PATTERN, ARG_IMPORT_FILENAME_PATTERN, ARG_IMPORT_SAMPLENAME_PATTERN,
ARG_IMPORT_BEFORE, ARG_IMPORT_ON_OR_AFTER, ARG_IMPORT_NO_JOIN, ARG_IMPORT_PROCESS_COUNT, ARG_IMPORT_THREADS,
ARG_IMPORT_WARN_ON_FAILURE, ARG_IMPORT_LOCKMASS_POSITIVE, ARG_IMPORT_LOCKMASS_NEGATIVE, ARG_IMPORT_LOCKMASS_TOLERANCE);
public static readonly Argument ARG_REMOVE_BEFORE = new DocArgument(@"remove-before", DATE_VALUE,
(c, p) => c.SetRemoveBefore(p.ValueDate));
public static readonly Argument ARG_REMOVE_ALL = new DocArgument(@"remove-all",
(c, p) => c.SetRemoveBefore(null));
private void SetRemoveBefore(DateTime? date)
{
RemovingResults = true;
RemoveBeforeDate = date;
}
private static readonly ArgumentGroup GROUP_REMOVE = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_REMOVE_Removing_results_replicates, false,
ARG_REMOVE_BEFORE, ARG_REMOVE_ALL)
{
Validate = c => c.ValidateImportResultsArgs()
};
private bool ValidateImportResultsArgs()
{
// CONSIDER: Add declarative Exclusive arguments? So far only these two
if (ImportingReplicateFile && ImportingSourceDirectory)
{
ErrorArgsExclusive(ARG_IMPORT_FILE, ARG_IMPORT_ALL);
return false;
}
if (ImportingReplicateFile && ImportNamingPattern != null)
{
ErrorArgsExclusive(ARG_IMPORT_NAMING_PATTERN, ARG_IMPORT_FILE);
return false;
}
return true;
}
public static readonly Argument ARG_CHROMATOGRAMS_LIMIT_NOISE = new DocArgument(@"chromatograms-limit-noise", NUM_VALUE,
(c, p) => c.LimitNoise = p.ValueDouble);
public static readonly Argument ARG_CHROMATOGRAMS_DISCARD_UNUSED = new DocArgument(@"chromatograms-discard-unused",
(c, p) => c.ChromatogramsDiscard = true );
private static readonly ArgumentGroup GROUP_MINIMIZE_RESULTS = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_MINIMIZE_RESULTS_Minimizing_results_file_size, false,
ARG_CHROMATOGRAMS_LIMIT_NOISE, ARG_CHROMATOGRAMS_DISCARD_UNUSED)
{
Validate = c => c.ValidateMinimizeResultsArgs()
};
private bool ValidateMinimizeResultsArgs()
{
if (Minimizing)
{
if (!_seenArguments.Contains(ARG_SAVE) && !_seenArguments.Contains(ARG_OUT))
{
// Has minimize argument(s), but no --save or --out command
if (ChromatogramsDiscard)
{
WarnArgRequirement(ARG_CHROMATOGRAMS_DISCARD_UNUSED, ARG_SAVE, ARG_OUT);
}
if (LimitNoise.HasValue)
{
WarnArgRequirement(ARG_CHROMATOGRAMS_LIMIT_NOISE, ARG_SAVE, ARG_OUT);
}
return false;
}
}
return true;
}
public List<MsDataFileUri> ReplicateFile { get; private set; }
public string ReplicateName { get; private set; }
public int ImportThreads { get; private set; }
public bool ImportAppend { get; private set; }
public bool ImportDisableJoining { get; private set; }
public bool ImportRecursive { get; private set; }
public string ImportSourceDirectory { get; private set; }
public Regex ImportNamingPattern { get; private set; }
public Regex ImportFileNamePattern { get; private set; }
public Regex ImportSampleNamePattern { get; private set; }
public bool ImportWarnOnFailure { get; private set; }
public bool RemovingResults { get; private set; }
public DateTime? RemoveBeforeDate { get; private set; }
public bool ChromatogramsDiscard{ get; private set; }
public double? LimitNoise { get; private set; }
public DateTime? ImportBeforeDate { get; private set; }
public DateTime? ImportOnOrAfterDate { get; private set; }
// Waters lockmass correction
public double? LockmassPositive { get; private set; }
public double? LockmassNegative { get; private set; }
public double? LockmassTolerance { get; private set; }
public LockMassParameters LockMassParameters { get { return new LockMassParameters(LockmassPositive, LockmassNegative, LockmassTolerance); } }
private void ParseImportFile(NameValuePair pair)
{
ReplicateFile.Add(new MsDataFilePath(pair.ValueFullPath));
}
private bool ParseImportNamingPattern(NameValuePair pair)
{
var importNamingPatternVal = pair.Value;
try
{
ImportNamingPattern = new Regex(importNamingPatternVal);
}
catch (Exception e)
{
WriteLine(Resources.CommandArgs_ParseArgsInternal_Error__Regular_expression__0__cannot_be_parsed_,
importNamingPatternVal);
WriteLine(e.Message);
return false;
}
// ReSharper disable LocalizableElement
Match match = Regex.Match(importNamingPatternVal, @".*\(.+\).*");
// ReSharper restore LocalizableElement
if (!match.Success)
{
WriteLine(Resources.CommandArgs_ParseArgsInternal_Error__Regular_expression___0___does_not_have_any_groups___String,
importNamingPatternVal);
return false;
}
return true;
}
private bool ParseImportFileNamePattern(NameValuePair pair)
{
return ParseRegexArgument(pair, r => ImportFileNamePattern = r);
}
private bool ParseImportSampleNamePattern(NameValuePair pair)
{
return ParseRegexArgument(pair, r => ImportSampleNamePattern = r);
}
private bool ParseRegexArgument(NameValuePair pair, Action<Regex> assign)
{
var regexText = pair.Value;
try
{
assign(new Regex(regexText));
}
catch (Exception e)
{
WriteLine(Resources.CommandArgs_ParseRegexArgument_Error__Regular_expression___0___for__1__cannot_be_parsed_, regexText, pair.Match.ArgumentText);
WriteLine(e.Message);
return false;
}
return true;
}
// Document import
public static readonly Argument ARG_IMPORT_DOCUMENT = new DocArgument(@"import-document", PATH_TO_DOCUMENT,
(c, p) =>
{
c.DocImportPaths.Add(p.ValueFullPath);
c.DocImportResultsMerge = c.DocImportResultsMerge ?? MeasuredResults.MergeAction.remove;
});
public static readonly Argument ARG_IMPORT_DOCUMENT_RESULTS = new DocArgument(@"import-document-results",
Helpers.GetEnumValues<MeasuredResults.MergeAction>().Select(p => p.ToString()).ToArray(),
(c, p) => c.DocImportResultsMerge = (MeasuredResults.MergeAction)Enum.Parse(typeof(MeasuredResults.MergeAction), p.Value, true))
{ WrapValue = true};
public static readonly Argument ARG_IMPORT_DOCUMENT_MERGE_PEPTIDES = new DocArgument(@"import-document-merge-peptides",
(c, p) => c.DocImportMergePeptides = true);
private static readonly ArgumentGroup GROUP_IMPORT_DOC = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_IMPORT_DOC_Importing_other_Skyline_documents, false,
ARG_IMPORT_DOCUMENT, ARG_IMPORT_DOCUMENT_RESULTS, ARG_IMPORT_DOCUMENT_MERGE_PEPTIDES)
{
LeftColumnWidth = 36,
Dependencies =
{
{ ARG_IMPORT_DOCUMENT_RESULTS, ARG_IMPORT_DOCUMENT },
{ ARG_IMPORT_DOCUMENT_MERGE_PEPTIDES, ARG_IMPORT_DOCUMENT }
}
};
public bool ImportingDocuments { get { return DocImportPaths.Any(); } }
public List<string> DocImportPaths { get; private set; }
public MeasuredResults.MergeAction? DocImportResultsMerge { get; private set; }
public bool DocImportMergePeptides { get; private set; }
// Importing FASTA
public static readonly Argument ARG_IMPORT_FASTA = new DocArgument(@"import-fasta", PATH_TO_FILE,
(c, p) => c.FastaPath = p.ValueFullPath);
public static readonly Argument ARG_KEEP_EMPTY_PROTEINS = new DocArgument(@"keep-empty-proteins",
(c, p) => c.KeepEmptyProteins = true);
private static readonly ArgumentGroup GROUP_FASTA = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_FASTA_Importing_FASTA_files, false,
ARG_IMPORT_FASTA, ARG_KEEP_EMPTY_PROTEINS);
public string FastaPath { get; private set; }
public bool KeepEmptyProteins { get; private set; }
// Transition list and assay library import
public static readonly Argument ARG_IMPORT_TRANSITION_LIST = new DocArgument(@"import-transition-list", PATH_TO_FILE,
(c, p) => c.ParseListPath(p, false));
public static readonly Argument ARG_IMPORT_ASSAY_LIBRARY = new DocArgument(@"import-assay-library", PATH_TO_FILE,
(c, p) => c.ParseListPath(p, true));
public static readonly Argument ARG_IGNORE_TRANSITION_ERRORS = new DocArgument(@"ignore-transition-errors",
(c, p) => c.IsIgnoreTransitionErrors = true);
public static readonly Argument ARG_IRT_STANDARDS_GROUP_NAME = new DocArgument(@"irt-standards-group-name", NAME_VALUE,
(c, p) => c.IrtGroupName = p.Value);
public static readonly Argument ARG_IRT_STANDARDS_FILE = new DocArgument(@"irt-standards-file", PATH_TO_FILE,
(c, p) => c.IrtStandardsPath = p.ValueFullPath);
public static readonly Argument ARG_IRT_DATABASE_PATH = new DocArgument(@"irt-database-path", () => GetPathToFile(IrtDb.EXT),
(c, p) => c.IrtDatabasePath = p.ValueFullPath);
public static readonly Argument ARG_IRT_CALC_NAME = new DocArgument(@"irt-calc-name", NAME_VALUE,
(c, p) => c.IrtCalcName = p.Value);
private static readonly ArgumentGroup GROUP_IMPORT_LIST = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_IMPORT_LIST_Importing_transition_lists_and_assay_libraries, false,
ARG_IMPORT_TRANSITION_LIST, ARG_IMPORT_ASSAY_LIBRARY, ARG_IGNORE_TRANSITION_ERRORS, ARG_IRT_STANDARDS_GROUP_NAME,
ARG_IRT_STANDARDS_FILE, ARG_IRT_DATABASE_PATH, ARG_IRT_CALC_NAME)
{
Dependencies =
{
{ ARG_IRT_STANDARDS_GROUP_NAME, ARG_IMPORT_ASSAY_LIBRARY },
{ ARG_IRT_STANDARDS_FILE, ARG_IMPORT_ASSAY_LIBRARY },
},
Validate = (c) =>
{
if (!c.ImportingTransitionList) // Either --import-transition-list or --import-assay-library
{
if (c.IsIgnoreTransitionErrors)
c.WarnArgRequirement(ARG_IGNORE_TRANSITION_ERRORS, ARG_IMPORT_TRANSITION_LIST);
}
return true;
}
};
public string TransitionListPath { get; private set; }
public bool IsTransitionListAssayLibrary { get; private set; }
public bool IsIgnoreTransitionErrors { get; private set; }
public string IrtGroupName { get; private set; }
public string IrtStandardsPath { get; private set; }
public string IrtDatabasePath { get; private set; }
public string IrtCalcName { get; private set; }
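// Records the transition list path and whether it was supplied as an assay library (--import-assay-library).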
private void ParseListPath(NameValuePair pair, bool isAssayLib)
{
TransitionListPath = pair.ValueFullPath;
IsTransitionListAssayLibrary = isAssayLib;
}
// Add a library
public static readonly Argument ARG_ADD_LIBRARY_NAME = new DocArgument(@"add-library-name", NAME_VALUE,
(c, p) => c.LibraryName = p.Value);
public static readonly Argument ARG_ADD_LIBRARY_PATH = new DocArgument(@"add-library-path", PATH_TO_FILE,
(c, p) => c.LibraryPath = p.ValueFullPath);
private static readonly ArgumentGroup GROUP_ADD_LIBRARY = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_ADD_LIBRARY_Adding_spectral_libraries, false,
ARG_ADD_LIBRARY_PATH, ARG_ADD_LIBRARY_NAME);
public string LibraryName { get; private set; }
public string LibraryPath { get; private set; }
// Decoys
public static readonly Argument ARG_DECOYS_ADD = new DocArgument(@"decoys-add",
new[] {ARG_VALUE_DECOYS_ADD_REVERSE, ARG_VALUE_DECOYS_ADD_SHUFFLE},
(c, p) => c.AddDecoysType = p.IsNameOnly || p.IsValue(ARG_VALUE_DECOYS_ADD_REVERSE)
? DecoyGeneration.REVERSE_SEQUENCE
: DecoyGeneration.SHUFFLE_SEQUENCE)
{ OptionalValue = true};
public const string ARG_VALUE_DECOYS_ADD_SHUFFLE = "shuffle";
public const string ARG_VALUE_DECOYS_ADD_REVERSE = "reverse";
public static readonly Argument ARG_DECOYS_ADD_COUNT = new DocArgument(@"decoys-add-count", INT_VALUE,
(c, p) => c.AddDecoysCount = p.ValueInt);
public static readonly Argument ARG_DECOYS_DISCARD = new DocArgument(@"decoys-discard",
(c, p) => c.DiscardDecoys = true);
private static readonly ArgumentGroup GROUP_DECOYS = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_DECOYS, false,
ARG_DECOYS_ADD, ARG_DECOYS_ADD_COUNT, ARG_DECOYS_DISCARD)
{
Dependencies =
{
{ ARG_DECOYS_ADD_COUNT, ARG_DECOYS_ADD },
}
};
public string AddDecoysType { get; private set; }
public int? AddDecoysCount { get; private set; }
public bool DiscardDecoys { get; private set; }
public bool AddDecoys
{
get { return !string.IsNullOrEmpty(AddDecoysType); }
}
public bool ImportingResults
{
get { return ImportingReplicateFile || ImportingSourceDirectory; }
}
public bool ImportingReplicateFile
{
get { return ReplicateFile.Count > 0; }
}
public bool ImportingSourceDirectory
{
get { return !string.IsNullOrEmpty(ImportSourceDirectory); }
}
public bool ImportingFasta
{
get { return !string.IsNullOrWhiteSpace(FastaPath); }
}
public bool ImportingTransitionList
{
get { return !string.IsNullOrWhiteSpace(TransitionListPath); }
}
public bool SettingLibraryPath
{
get { return !string.IsNullOrWhiteSpace(LibraryName) || !string.IsNullOrWhiteSpace(LibraryPath); }
}
// Annotations
private static readonly Argument ARG_IMPORT_ANNOTATIONS = new DocArgument(@"import-annotations", () => GetPathToFile(TextUtil.EXT_CSV),
(c, p) => c.ImportAnnotations = p.ValueFullPath);
private static readonly ArgumentGroup GROUP_ANNOTATIONS = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_ANNOTATIONS_Importing_annotations, false,
ARG_IMPORT_ANNOTATIONS);
public string ImportAnnotations { get; private set; }
// For reintegration
private static readonly Argument ARG_REINTEGRATE_MODEL_NAME = new DocArgument(@"reintegrate-model-name", NAME_VALUE,
(c, p) => c.ReintegrateModelName = p.Value);
private static readonly Argument ARG_REINTEGRATE_CREATE_MODEL = new DocArgument(@"reintegrate-create-model",
(c, p) =>
{
c.IsCreateScoringModel = true;
if (!c.IsSecondBestModel)
c.IsDecoyModel = true;
});
private static readonly Argument ARG_REINTEGRATE_MODEL_TYPE = new DocArgument(@"reintegrate-model-type",
Helpers.GetEnumValues<ScoringModelType>().Select(p => p.ToString()).ToArray(),
(c, p) => c.ReintegrateModelType = (ScoringModelType)Enum.Parse(typeof(ScoringModelType), p.Value, true)) {WrapValue = true};
private static readonly Argument ARG_REINTEGRATE_MODEL_CUTOFFS = new DocArgument(@"reintegrate-model-cutoffs", NUM_LIST_VALUE,
(c, p) => c.ReintegrateModelCutoffs = c.ParseNumberList(p))
{ InternalUse = true };
private static readonly Argument ARG_REINTEGRATE_MODEL_ITERATION_COUNT = new DocArgument(@"reintegrate-model-iteration-count", INT_VALUE,
(c, p) => c.ReintegrateModelIterationCount = p.ValueInt) {InternalUse = true};
private static readonly Argument ARG_REINTEGRATE_MODEL_SECOND_BEST = new DocArgument(@"reintegrate-model-second-best",
(c, p) =>
{
c.IsSecondBestModel = true;
c.IsDecoyModel = false;
});
private static readonly Argument ARG_REINTEGRATE_MODEL_BOTH = new DocArgument(@"reintegrate-model-both",
(c, p) => c.IsDecoyModel = c.IsSecondBestModel = true);
private static readonly Argument ARG_REINTEGRATE_OVERWRITE_PEAKS = new DocArgument(@"reintegrate-overwrite-peaks",
(c, p) => c.IsOverwritePeaks = true);
private static readonly Argument ARG_REINTEGRATE_LOG_TRAINING = new DocArgument(@"reintegrate-log-training",
(c, p) => c.IsLogTraining = true) {InternalUse = true};
private static readonly Argument ARG_REINTEGRATE_EXCLUDE_FEATURE = new DocArgument(@"reintegrate-exclude-feature", FEATURE_NAME_VALUE,
(c, p) => c.ParseReintegrateExcludeFeature(p))
{ WrapValue = true };
private static readonly ArgumentGroup GROUP_REINTEGRATE = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_REINTEGRATE_Reintegrate_with_advanced_peak_picking_models, false,
ARG_REINTEGRATE_MODEL_NAME, ARG_REINTEGRATE_CREATE_MODEL, ARG_REINTEGRATE_MODEL_TYPE, ARG_REINTEGRATE_MODEL_ITERATION_COUNT,
ARG_REINTEGRATE_MODEL_CUTOFFS, ARG_REINTEGRATE_MODEL_SECOND_BEST, ARG_REINTEGRATE_MODEL_BOTH, ARG_REINTEGRATE_OVERWRITE_PEAKS,
ARG_REINTEGRATE_LOG_TRAINING, ARG_REINTEGRATE_EXCLUDE_FEATURE)
{
Dependencies =
{
{ ARG_REINTEGRATE_CREATE_MODEL , ARG_REINTEGRATE_MODEL_NAME },
{ ARG_REINTEGRATE_MODEL_TYPE , ARG_REINTEGRATE_CREATE_MODEL },
{ ARG_REINTEGRATE_MODEL_CUTOFFS , ARG_REINTEGRATE_CREATE_MODEL },
{ ARG_REINTEGRATE_OVERWRITE_PEAKS , ARG_REINTEGRATE_MODEL_NAME },
{ ARG_REINTEGRATE_MODEL_SECOND_BEST, ARG_REINTEGRATE_CREATE_MODEL},
{ ARG_REINTEGRATE_MODEL_BOTH, ARG_REINTEGRATE_CREATE_MODEL},
{ ARG_REINTEGRATE_EXCLUDE_FEATURE, ARG_REINTEGRATE_CREATE_MODEL },
},
Validate = c => c.ValidateReintegrateArgs()
};
private bool ValidateReintegrateArgs()
{
if (ReintegrateModelType != ScoringModelType.mProphet && ExcludeFeatures.Count > 0)
{
WriteLine(Resources.CommandLine_CreateUntrainedScoringModel_Error__Excluding_feature_scores_is_not_permitted_with_the_default_Skyline_model_);
return false;
}
if (ReintegrateModelCutoffs != null)
{
if (ReintegrateModelType == ScoringModelType.Skyline)
{
WriteLine(Resources.CommandArgs_ValidateReintegrateArgs_Error__Model_cutoffs_cannot_be_applied_in_calibrating_the_Skyline_default_model_);
return false;
}
double maxCutoff = MProphetPeakScoringModel.DEFAULT_CUTOFFS[0];
if (!MonotonicallyDecreasing(ReintegrateModelCutoffs, maxCutoff))
{
WriteLine(Resources.CommandArgs_ValidateReintegrateArgs_Error__Model_cutoffs___0___must_be_in_decreasing_order_greater_than_zero_and_less_than__1__, string.Join(
CultureInfo.CurrentCulture.TextInfo.ListSeparator, ReintegrateModelCutoffs.Select(c => c.ToString(CultureInfo.CurrentCulture))), maxCutoff);
return false;
}
}
return true;
}
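// Returns true only when every value is no greater than maxValue and the sequence is strictly decreasing.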
private bool MonotonicallyDecreasing(List<double> values, double maxValue)
{
double? lastValue = null;
foreach (var value in values)
{
if (value > maxValue || (lastValue.HasValue && value >= lastValue.Value))
return false;
lastValue = value;
}
return true;
}
public enum ScoringModelType
{
mProphet, // Full mProphet model (default)
Skyline, // Skyline default model with coefficients scaled to estimate unit normal distribution from decoys
SkylineML // Skyline Machine Learning - essentially mProphet model with default set of features
}
public string ReintegrateModelName { get; private set; }
public List<double> ReintegrateModelCutoffs { get; private set; }
public int? ReintegrateModelIterationCount { get; private set; }
public bool IsOverwritePeaks { get; private set; }
public bool IsCreateScoringModel { get; private set; }
public bool IsSecondBestModel { get; private set; }
public bool IsDecoyModel { get; private set; }
public bool IsLogTraining { get; private set; }
public ScoringModelType ReintegrateModelType { get; private set; }
public List<IPeakFeatureCalculator> ExcludeFeatures { get; private set; }
public bool Reintegrating { get { return !string.IsNullOrEmpty(ReintegrateModelName); } }
public bool Minimizing { get { return ChromatogramsDiscard || LimitNoise.HasValue; } }
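// Splits a numeric list on the current culture's list separator and parses each entry as a double;
// any failure is reported as a ValueInvalidNumberListException for the offending argument.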
private List<double> ParseNumberList(NameValuePair pair)
{
try
{
return pair.Value.Split(new[] {CultureInfo.CurrentCulture.TextInfo.ListSeparator},
StringSplitOptions.RemoveEmptyEntries)
.Select(double.Parse).ToList();
}
catch (Exception)
{
throw new ValueInvalidNumberListException(pair.Match, pair.Value);
}
}
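// Looks up a peak feature calculator by header name or display name; when no match is found,
// lists the valid names and returns false rather than throwing.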
private bool ParseReintegrateExcludeFeature(NameValuePair pair)
{
string featureName = pair.Value;
var calc = PeakFeatureCalculator.Calculators.FirstOrDefault(c =>
Equals(featureName, c.HeaderName) || Equals(featureName, c.Name));
if (calc == null)
{
WriteLine(
Resources
.CommandArgs_ParseArgsInternal_Error__Attempting_to_exclude_an_unknown_feature_name___0____Try_one_of_the_following_,
featureName);
foreach (var featureCalculator in PeakFeatureCalculator.Calculators)
{
if (Equals(featureCalculator.HeaderName, featureCalculator.Name))
WriteLine(@" {0}", featureCalculator.HeaderName);
else
WriteLine(Resources.CommandArgs_ParseArgsInternal______0__or___1__, featureCalculator.HeaderName,
featureCalculator.Name);
}
return false;
}
ExcludeFeatures.Add(calc);
return true;
}
// Refinement
public static readonly Argument ARG_REFINE_MIN_PEPTIDES = new RefineArgument(@"refine-min-peptides", INT_VALUE,
(c, p) => c.Refinement.MinPeptidesPerProtein = p.ValueInt);
public static readonly Argument ARG_REFINE_REMOVE_REPEATS = new RefineArgument(@"refine-remove-repeats",
(c, p) => c.Refinement.RemoveRepeatedPeptides = true);
public static readonly Argument ARG_REFINE_REMOVE_DUPLICATES = new RefineArgument(@"refine-remove-duplicates",
(c, p) => c.Refinement.RemoveDuplicatePeptides = true);
public static readonly Argument ARG_REFINE_MISSING_LIBRARY = new RefineArgument(@"refine-missing-library",
(c, p) => c.Refinement.RemoveMissingLibrary = true);
public static readonly Argument ARG_REFINE_MIN_TRANSITIONS = new RefineArgument(@"refine-min-transitions", INT_VALUE,
(c, p) => c.Refinement.MinTransitionsPepPrecursor = p.ValueInt);
public static readonly Argument ARG_REFINE_LABEL_TYPE = new RefineArgument(@"refine-label-type", LABEL_VALUE,
(c, p) => c.RefinementLabelTypeName = p.Value);
public static readonly Argument ARG_REFINE_ADD_LABEL_TYPE = new RefineArgument(@"refine-add-label-type",
(c, p) => c.Refinement.AddLabelType = true);
public static readonly Argument ARG_REFINE_AUTOSEL_PEPTIDES = new RefineArgument(@"refine-auto-select-peptides",
(c, p) => c.Refinement.AutoPickChildrenAll = c.Refinement.AutoPickChildrenAll | PickLevel.peptides);
public static readonly Argument ARG_REFINE_AUTOSEL_PRECURSORS = new RefineArgument(@"refine-auto-select-precursors",
(c, p) => c.Refinement.AutoPickChildrenAll = c.Refinement.AutoPickChildrenAll | PickLevel.precursors);
public static readonly Argument ARG_REFINE_AUTOSEL_TRANSITIONS = new RefineArgument(@"refine-auto-select-transitions",
(c, p) => c.Refinement.AutoPickChildrenAll = c.Refinement.AutoPickChildrenAll | PickLevel.transitions);
// Refinement requiring imported results
public static readonly Argument ARG_REFINE_MIN_PEAK_FOUND_RATIO = new RefineArgument(@"refine-min-peak-found-ratio", NUM_VALUE,
(c, p) => c.Refinement.MinPeakFoundRatio = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_REFINE_MAX_PEAK_FOUND_RATIO = new RefineArgument(@"refine-max-peak-found-ratio", NUM_VALUE,
(c, p) => c.Refinement.MaxPeakFoundRatio = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_REFINE_MAX_PEPTIDE_PEAK_RANK = new RefineArgument(@"refine-max-peptide-peak-rank", INT_VALUE,
(c, p) => c.Refinement.MaxPepPeakRank = p.ValueInt) { WrapValue = true };
public static readonly Argument ARG_REFINE_MAX_PEAK_RANK = new RefineArgument(@"refine-max-transition-peak-rank", INT_VALUE,
(c, p) => c.Refinement.MaxPeakRank = p.ValueInt) { WrapValue = true };
public static readonly Argument ARG_REFINE_MAX_PRECURSOR_PEAK_ONLY = new RefineArgument(@"refine-max-precursor-only",
(c, p) => c.Refinement.MaxPrecursorPeakOnly = true);
public static readonly Argument ARG_REFINE_PREFER_LARGER_PRODUCTS = new RefineArgument(@"refine-prefer-larger-products",
(c, p) => c.Refinement.PreferLargeIons = true);
public static readonly Argument ARG_REFINE_MISSING_RESULTS = new RefineArgument(@"refine-missing-results",
(c, p) => c.Refinement.RemoveMissingResults = true);
public static readonly Argument ARG_REFINE_MIN_TIME_CORRELATION = new RefineArgument(@"refine-min-time-correlation", NUM_VALUE,
(c, p) => c.Refinement.RTRegressionThreshold = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_REFINE_MIN_DOTP = new RefineArgument(@"refine-min-dotp", NUM_VALUE,
(c, p) => c.Refinement.DotProductThreshold = p.ValueDouble);
public static readonly Argument ARG_REFINE_MIN_IDOTP = new RefineArgument(@"refine-min-idotp", NUM_VALUE,
(c, p) => c.Refinement.IdotProductThreshold = p.ValueDouble);
public static readonly Argument ARG_REFINE_USE_BEST_RESULT = new RefineArgument(@"refine-use-best-result",
(c, p) => c.Refinement.UseBestResult = true);
// Refinement consistency tab
public static readonly Argument ARG_REFINE_CV_REMOVE_ABOVE_CUTOFF = new RefineArgument(@"refine-cv-remove-above-cutoff", NUM_VALUE,
(c,p) => c.Refinement.CVCutoff = p.ValueDouble >= 1 ? p.ValueDouble : p.ValueDouble * 100); // If a value like 0.2, interpret as 20%
public static readonly Argument ARG_REFINE_CV_GLOBAL_NORMALIZE = new RefineArgument(@"refine-cv-global-normalize",
new[] { NormalizationMethod.GLOBAL_STANDARDS.Name, NormalizationMethod.EQUALIZE_MEDIANS.Name, NormalizationMethod.TIC.Name },
(c, p) =>
{
if (p.Value == NormalizationMethod.GLOBAL_STANDARDS.Name)
{
c.Refinement.NormalizationMethod = NormalizeOption.FromNormalizationMethod(NormalizationMethod.GLOBAL_STANDARDS);
}
else if (p.Value == NormalizationMethod.TIC.Name)
{
c.Refinement.NormalizationMethod = NormalizeOption.FromNormalizationMethod(NormalizationMethod.TIC);
}
else
{
c.Refinement.NormalizationMethod = NormalizeOption.FromNormalizationMethod(NormalizationMethod.EQUALIZE_MEDIANS);
}
}) { WrapValue = true };
public static readonly Argument ARG_REFINE_CV_REFERENCE_NORMALIZE = new RefineArgument(@"refine-cv-reference-normalize", LABEL_VALUE,
(c, p) =>
{
c.Refinement.NormalizationMethod = NormalizeOption.FromNormalizationMethod(NormalizationMethod.FromIsotopeLabelTypeName(p.Value));
}) { WrapValue = true };
public static readonly Argument ARG_REFINE_CV_TRANSITIONS = new RefineArgument(@"refine-cv-transitions",
new[] { AreaCVTransitions.all.ToString(), AreaCVTransitions.best.ToString() },
(c, p) =>
{
c.Refinement.Transitions = (AreaCVTransitions)Enum.Parse(typeof(AreaCVTransitions), p.Value, false);
c.Refinement.CountTransitions = -1;
});
public static readonly Argument ARG_REFINE_CV_TRANSITIONS_COUNT = new RefineArgument(@"refine-cv-transitions-count", INT_VALUE,
(c, p) =>
{
c.Refinement.Transitions = AreaCVTransitions.count;
c.Refinement.CountTransitions = p.ValueInt;
});
public static readonly Argument ARG_REFINE_CV_MS_LEVEL = new RefineArgument(@"refine-cv-ms-level",
Helpers.GetEnumValues<AreaCVMsLevel>().Select(e => e.ToString()).ToArray(),
(c, p) => c.Refinement.MSLevel = (AreaCVMsLevel) Enum.Parse(typeof(AreaCVMsLevel), p.Value, true));
public static readonly Argument ARG_REFINE_QVALUE_CUTOFF = new RefineArgument(@"refine-qvalue-cutoff", NUM_VALUE,
(c, p) => c.Refinement.QValueCutoff = p.ValueDouble);
public static readonly Argument ARG_REFINE_MINIMUM_DETECTIONS = new RefineArgument(@"refine-minimum-detections", INT_VALUE,
(c, p) => c.Refinement.MinimumDetections = p.ValueInt);
// Refinement Group Comparison Tab
public static readonly Argument ARG_REFINE_GC_P_VALUE_CUTOFF = new RefineArgument(
@"refine-gc-p-value-cutoff", NUM_VALUE,
(c, p) => c.Refinement.AdjustedPValueCutoff = p.ValueDouble);
public static readonly Argument ARG_REFINE_GC_FOLD_CHANGE_CUTOFF = new RefineArgument(@"refine-gc-fold-change-cutoff",
NUM_VALUE,
(c, p) => c.Refinement.FoldChangeCutoff = Math.Log(p.ValueDouble, 2));
public static readonly Argument ARG_REFINE_GC_MS_LEVEL = new RefineArgument(@"refine-gc-ms-level", NUM_VALUE,
(c, p) => c.Refinement.MSLevelGroupComparison = p.ValueInt);
public static readonly Argument ARG_REFINE_GROUP_NAME = new RefineArgument(@"refine-gc-name", LABEL_VALUE,
(c, p) => c.Refinement.GroupComparisonNames.Add(p.Value));
private static readonly ArgumentGroup GROUP_REFINEMENT = new ArgumentGroup(
() => CommandArgUsage.CommandArgs_GROUP_REFINEMENT, false,
ARG_REFINE_MIN_PEPTIDES, ARG_REFINE_REMOVE_REPEATS, ARG_REFINE_REMOVE_DUPLICATES,
ARG_REFINE_MISSING_LIBRARY, ARG_REFINE_MIN_TRANSITIONS, ARG_REFINE_LABEL_TYPE,
ARG_REFINE_ADD_LABEL_TYPE, ARG_REFINE_AUTOSEL_PEPTIDES, ARG_REFINE_AUTOSEL_PRECURSORS,
ARG_REFINE_AUTOSEL_TRANSITIONS);
private static readonly ArgumentGroup GROUP_REFINEMENT_W_RESULTS = new ArgumentGroup(
() => CommandArgUsage.CommandArgs_GROUP_REFINEMENT_W_RESULTS, false,
ARG_REFINE_MIN_PEAK_FOUND_RATIO, ARG_REFINE_MAX_PEAK_FOUND_RATIO, ARG_REFINE_MAX_PEPTIDE_PEAK_RANK,
ARG_REFINE_MAX_PEAK_RANK, ARG_REFINE_MAX_PRECURSOR_PEAK_ONLY,
ARG_REFINE_PREFER_LARGER_PRODUCTS, ARG_REFINE_MISSING_RESULTS,
ARG_REFINE_MIN_TIME_CORRELATION, ARG_REFINE_MIN_DOTP, ARG_REFINE_MIN_IDOTP,
ARG_REFINE_USE_BEST_RESULT,
ARG_REFINE_CV_REMOVE_ABOVE_CUTOFF, ARG_REFINE_CV_GLOBAL_NORMALIZE, ARG_REFINE_CV_REFERENCE_NORMALIZE,
ARG_REFINE_CV_TRANSITIONS, ARG_REFINE_CV_TRANSITIONS_COUNT, ARG_REFINE_CV_MS_LEVEL,
ARG_REFINE_QVALUE_CUTOFF, ARG_REFINE_MINIMUM_DETECTIONS,
ARG_REFINE_GC_P_VALUE_CUTOFF, ARG_REFINE_GC_FOLD_CHANGE_CUTOFF, ARG_REFINE_GC_MS_LEVEL, ARG_REFINE_GROUP_NAME);
public RefinementSettings Refinement { get; private set; }
public string RefinementLabelTypeName { get; private set; } // Must store as string until document is instantiated
public string RefinementCvLabelTypeName { get; private set; } // Must store as string until document is instantiated
// For exporting reports
// Adding reports does not require a document
public static readonly Argument ARG_REPORT_NAME = new Argument(@"report-name", NAME_VALUE,
(c, p) => c.ReportName = p.Value);
public static readonly Argument ARG_REPORT_ADD = new Argument(@"report-add", () => GetPathToFile(ReportSpecList.EXT_REPORTS),
(c, p) =>
{
c.ImportingSkyr = true;
c.SkyrPath = p.ValueFullPath;
});
public static readonly Argument ARG_REPORT_CONFLICT_RESOLUTION = new Argument(@"report-conflict-resolution",
new []{ARG_VALUE_OVERWRITE, ARG_VALUE_SKIP},
(c, p) => c.ResolveSkyrConflictsBySkipping = p.IsValue(ARG_VALUE_SKIP)) { WrapValue = true };
// Exporting reports does require a document
public static readonly Argument ARG_REPORT_FILE = new DocArgument(@"report-file", () => GetPathToFile(TextUtil.EXT_CSV),
(c, p) => c.ReportFile = p.ValueFullPath);
public static readonly Argument ARG_REPORT_FORMAT = new DocArgument(@"report-format",
new []{ARG_VALUE_CSV, ARG_VALUE_TSV},
(c, p) => c.ReportColumnSeparator = p.IsValue(ARG_VALUE_TSV)
? TextUtil.SEPARATOR_TSV
: TextUtil.CsvSeparator);
public const string ARG_VALUE_CSV = "csv";
public const string ARG_VALUE_TSV = "tsv";
public static readonly Argument ARG_REPORT_INVARIANT = new DocArgument(@"report-invariant",
(c, p) => c.IsReportInvariant = true);
private static readonly ArgumentGroup GROUP_REPORT = new ArgumentGroup(
() => CommandArgUsage.CommandArgs_GROUP_REPORT_Exporting_reports, false,
ARG_REPORT_NAME, ARG_REPORT_FILE, ARG_REPORT_ADD, ARG_REPORT_CONFLICT_RESOLUTION, ARG_REPORT_FORMAT,
ARG_REPORT_INVARIANT);
public string ReportName { get; private set; }
public char ReportColumnSeparator { get; private set; }
public string ReportFile { get; private set; }
public bool IsReportInvariant { get; private set; }
public bool ExportingReport
{
get { return !string.IsNullOrEmpty(ReportName); }
}
// For adding a skyr file to user.config
public string SkyrPath { get; private set; }
private bool _importingSkyr;
public bool ImportingSkyr
{
get { return !string.IsNullOrEmpty(SkyrPath) || _importingSkyr; }
set { _importingSkyr = value; }
}
public bool? ResolveSkyrConflictsBySkipping { get; private set; }
// For exporting chromatograms
private static readonly Argument ARG_CHROMATOGRAM_FILE = new DocArgument(@"chromatogram-file", () => GetPathToFile(TextUtil.EXT_TSV),
(c, p) => c.ChromatogramsFile = p.ValueFullPath);
private static readonly Argument ARG_CHROMATOGRAM_PRECURSORS = new DocArgument(@"chromatogram-precursors",
(c, p) => c.ChromatogramsPrecursors = true);
private static readonly Argument ARG_CHROMATOGRAM_PRODUCTS = new DocArgument(@"chromatogram-products",
(c, p) => c.ChromatogramsProducts = true);
private static readonly Argument ARG_CHROMATOGRAM_BASE_PEAKS = new DocArgument(@"chromatogram-base-peaks",
(c, p) => c.ChromatogramsBasePeaks = true);
private static readonly Argument ARG_CHROMATOGRAM_TICS = new DocArgument(@"chromatogram-tics",
(c, p) => c.ChromatogramsTics = true);
private static readonly ArgumentGroup GROUP_CHROMATOGRAM = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_CHROMATOGRAM_Exporting_chromatograms, false,
ARG_CHROMATOGRAM_FILE, ARG_CHROMATOGRAM_PRECURSORS, ARG_CHROMATOGRAM_PRODUCTS, ARG_CHROMATOGRAM_BASE_PEAKS,
ARG_CHROMATOGRAM_TICS)
{
Dependencies =
{
{ ARG_CHROMATOGRAM_PRECURSORS, ARG_CHROMATOGRAM_FILE },
{ ARG_CHROMATOGRAM_PRODUCTS, ARG_CHROMATOGRAM_FILE },
{ ARG_CHROMATOGRAM_BASE_PEAKS, ARG_CHROMATOGRAM_FILE },
{ ARG_CHROMATOGRAM_TICS, ARG_CHROMATOGRAM_FILE },
},
Validate = c => c.ValidateChromatogramArgs()
};
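// When a chromatogram file is requested but no chromatogram types were specified,
// default to exporting precursor and product chromatograms.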
private bool ValidateChromatogramArgs()
{
if (ExportingChromatograms)
{
if (!ChromatogramsPrecursors && !ChromatogramsProducts && !ChromatogramsBasePeaks && !ChromatogramsTics)
ChromatogramsPrecursors = ChromatogramsProducts = true;
}
return true;
}
public string ChromatogramsFile { get; private set; }
public bool ChromatogramsPrecursors { get; private set; }
public bool ChromatogramsProducts { get; private set; }
public bool ChromatogramsBasePeaks { get; private set; }
public bool ChromatogramsTics { get; private set; }
public bool ExportingChromatograms { get { return !string.IsNullOrEmpty(ChromatogramsFile); } }
// For publishing the document to Panorama
private static readonly Argument ARG_PANORAMA_SERVER = new DocArgument(@"panorama-server", SERVER_URL_VALUE,
(c, p) => c.PanoramaServerUri = p.Value);
private static readonly Argument ARG_PANORAMA_USERNAME = new DocArgument(@"panorama-username", USERNAME_VALUE,
(c, p) => c.PanoramaUserName = p.Value);
private static readonly Argument ARG_PANORAMA_PASSWORD = new DocArgument(@"panorama-password", PASSWORD_VALUE,
(c, p) => c.PanoramaPassword = p.Value);
private static readonly Argument ARG_PANORAMA_FOLDER = new DocArgument(@"panorama-folder", PATH_TO_FOLDER,
(c, p) => c.PanoramaFolder = p.Value);
private static readonly ArgumentGroup GROUP_PANORAMA = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_PANORAMA_Publishing_to_Panorama, false,
ARG_PANORAMA_SERVER, ARG_PANORAMA_USERNAME, ARG_PANORAMA_PASSWORD, ARG_PANORAMA_FOLDER
)
{
Validate = c => c.ValidatePanoramaArgs(),
Postamble = () => CommandArgUsage.CommandArgs_GROUP_PANORAMA_postamble
};
private string PanoramaServerUri { get; set; }
private string PanoramaUserName { get; set; }
private string PanoramaPassword { get; set; }
public string PanoramaFolder { get; private set; }
public bool PublishingToPanorama { get; private set; }
public Server PanoramaServer { get; private set; }
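// Validates the Panorama arguments as a set: all four are required together, the server name must
// resolve to a URI, and both the credentials and the target folder are verified before publishing is enabled.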
private bool ValidatePanoramaArgs()
{
if (!string.IsNullOrEmpty(PanoramaServerUri) || !string.IsNullOrEmpty(PanoramaFolder))
{
if (!PanoramaArgsComplete())
return false;
var serverUri = PanoramaUtil.ServerNameToUri(PanoramaServerUri);
if (serverUri == null)
{
WriteLine(Resources.CommandLine_GeneralException_Error___0_,
string.Format(Resources.EditServerDlg_OkDialog_The_text__0__is_not_a_valid_server_name_, PanoramaServerUri));
return false;
}
var panoramaClient = new WebPanoramaClient(serverUri);
var panoramaHelper = new PanoramaHelper(_out); // Helper writes messages for failures below
PanoramaServer = panoramaHelper.ValidateServer(panoramaClient, PanoramaUserName, PanoramaPassword);
if (PanoramaServer == null)
return false;
if (!panoramaHelper.ValidateFolder(panoramaClient, PanoramaServer, PanoramaFolder))
return false;
PublishingToPanorama = true;
}
return true;
}
private bool PanoramaArgsComplete()
{
var missingArgs = new List<string>();
if (string.IsNullOrWhiteSpace(PanoramaServerUri))
{
missingArgs.Add(ARG_PANORAMA_SERVER.ArgumentText);
}
if (string.IsNullOrWhiteSpace(PanoramaUserName))
{
missingArgs.Add(ARG_PANORAMA_USERNAME.ArgumentText);
}
if (string.IsNullOrWhiteSpace(PanoramaPassword))
{
missingArgs.Add(ARG_PANORAMA_PASSWORD.ArgumentText);
}
if (string.IsNullOrWhiteSpace(PanoramaFolder))
{
missingArgs.Add(ARG_PANORAMA_FOLDER.ArgumentText);
}
if (missingArgs.Count > 0)
{
WriteLine(missingArgs.Count > 1
? Resources.CommandArgs_PanoramaArgsComplete_plural_
: Resources.CommandArgs_PanoramaArgsComplete_,
TextUtil.LineSeparate(missingArgs));
return false;
}
return true;
}
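// Wraps Panorama server and folder validation, writing any failure messages to the supplied
// status writer and returning null/false instead of throwing.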
public class PanoramaHelper
{
private readonly TextWriter _statusWriter;
public PanoramaHelper(TextWriter statusWriter)
{
_statusWriter = statusWriter;
}
public Server ValidateServer(IPanoramaClient panoramaClient, string panoramaUsername, string panoramaPassword)
{
try
{
PanoramaUtil.VerifyServerInformation(panoramaClient, panoramaUsername, panoramaPassword);
return new Server(panoramaClient.ServerUri, panoramaUsername, panoramaPassword);
}
catch (PanoramaServerException x)
{
_statusWriter.WriteLine(Resources.PanoramaHelper_ValidateServer_PanoramaServerException_, x.Message);
}
catch (Exception x)
{
_statusWriter.WriteLine(Resources.PanoramaHelper_ValidateServer_Exception_, x.Message);
}
return null;
}
public bool ValidateFolder(IPanoramaClient panoramaClient, Server server, string panoramaFolder)
{
try
{
PanoramaUtil.VerifyFolder(panoramaClient, server, panoramaFolder);
return true;
}
catch (PanoramaServerException x)
{
_statusWriter.WriteLine(Resources.PanoramaHelper_ValidateFolder_PanoramaServerException_, x.Message);
}
catch (Exception x)
{
_statusWriter.WriteLine(
Resources.PanoramaHelper_ValidateFolder_Exception_,
panoramaFolder, panoramaClient.ServerUri,
x.Message);
}
return false;
}
}
// For importing a tool.
public string ToolName { get; private set; }
public string ToolCommand { get; private set; }
public string ToolArguments { get; private set; }
public string ToolInitialDirectory { get; private set; }
public string ToolReportTitle { get; private set; }
public bool ToolOutputToImmediateWindow { get; private set; }
private bool _importingTool;
public bool ImportingTool
{
get { return !string.IsNullOrEmpty(ToolName) || _importingTool; }
set { _importingTool = value; }
}
public bool? ResolveToolConflictsBySkipping { get; private set; }
// For importing a peptide search
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_FILE = new DocArgument(@"import-search-file", PATH_TO_FILE,
(c, p) =>
{
c.SearchResultsFiles.Add(p.ValueFullPath);
c.CutoffScore = c.CutoffScore ?? Settings.Default.LibraryResultCutOff;
c.IrtStandardName = null;
c.NumCirts = null;
c.RecalibrateIrts = false;
});
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_CUTOFF = new Argument(@"import-search-cutoff-score", NUM_VALUE,
(c, p) => c.CutoffScore = p.GetValueDouble(0, 1));
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_IRTS = new Argument(@"import-search-irts", NAME_VALUE,
(c, p) => c.IrtStandardName = p.Value);
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_NUM_CIRTS = new Argument(@"import-search-num-cirts", INT_VALUE,
(c, p) => c.NumCirts = p.ValueInt);
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_RECALIBRATE_IRTS = new Argument(@"import-search-recalibrate-irts",
(c, p) => c.RecalibrateIrts = true);
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_MODS = new Argument(@"import-search-add-mods",
(c, p) => c.AcceptAllModifications = true);
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_AMBIGUOUS = new Argument(@"import-search-include-ambiguous",
(c, p) => c.IncludeAmbiguousMatches = true);
public static readonly Argument ARG_IMPORT_PEPTIDE_SEARCH_PREFER_EMBEDDED = new Argument(@"import-search-prefer-embedded-spectra",
(c, p) => c.PreferEmbeddedSpectra = true);
private static readonly ArgumentGroup GROUP_IMPORT_SEARCH = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_IMPORT_SEARCH_Importing_peptide_searches, false,
ARG_IMPORT_PEPTIDE_SEARCH_FILE, ARG_IMPORT_PEPTIDE_SEARCH_CUTOFF, ARG_IMPORT_PEPTIDE_SEARCH_IRTS, ARG_IMPORT_PEPTIDE_SEARCH_NUM_CIRTS,
ARG_IMPORT_PEPTIDE_SEARCH_RECALIBRATE_IRTS, ARG_IMPORT_PEPTIDE_SEARCH_MODS, ARG_IMPORT_PEPTIDE_SEARCH_AMBIGUOUS, ARG_IMPORT_PEPTIDE_SEARCH_PREFER_EMBEDDED)
{
Dependencies =
{
{ ARG_IMPORT_PEPTIDE_SEARCH_CUTOFF, ARG_IMPORT_PEPTIDE_SEARCH_FILE },
{ ARG_IMPORT_PEPTIDE_SEARCH_IRTS, ARG_IMPORT_PEPTIDE_SEARCH_FILE },
{ ARG_IMPORT_PEPTIDE_SEARCH_NUM_CIRTS, ARG_IMPORT_PEPTIDE_SEARCH_IRTS },
{ ARG_IMPORT_PEPTIDE_SEARCH_RECALIBRATE_IRTS, ARG_IMPORT_PEPTIDE_SEARCH_IRTS },
{ ARG_IMPORT_PEPTIDE_SEARCH_MODS, ARG_IMPORT_PEPTIDE_SEARCH_FILE },
{ ARG_IMPORT_PEPTIDE_SEARCH_AMBIGUOUS, ARG_IMPORT_PEPTIDE_SEARCH_FILE },
{ ARG_IMPORT_PEPTIDE_SEARCH_PREFER_EMBEDDED, ARG_IMPORT_PEPTIDE_SEARCH_FILE },
}
};
public List<string> SearchResultsFiles { get; private set; }
public double? CutoffScore { get; private set; }
public string IrtStandardName { get; private set; }
public int? NumCirts { get; private set; }
public bool RecalibrateIrts { get; private set; }
public bool AcceptAllModifications { get; private set; }
public bool IncludeAmbiguousMatches { get; private set; }
public bool? PreferEmbeddedSpectra { get; private set; }
public bool ImportingSearch
{
get { return SearchResultsFiles.Count > 0; }
}
// For adjusting transition filter and full-scan settings
public static readonly Argument ARG_TRAN_PRECURSOR_ION_CHARGES = new DocArgument(@"tran-precursor-ion-charges", INT_LIST_VALUE,
(c, p) => c.FilterPrecursorCharges = ParseIonCharges(p, TransitionGroup.MIN_PRECURSOR_CHARGE, TransitionGroup.MAX_PRECURSOR_CHARGE))
{ WrapValue = true };
public static readonly Argument ARG_TRAN_FRAGMENT_ION_CHARGES = new DocArgument(@"tran-product-ion-charges", INT_LIST_VALUE,
(c, p) => c.FilterProductCharges = ParseIonCharges(p, Transition.MIN_PRODUCT_CHARGE, Transition.MAX_PRODUCT_CHARGE))
{ WrapValue = true };
public static readonly Argument ARG_TRAN_FRAGMENT_ION_TYPES = new DocArgument(@"tran-product-ion-types", ION_TYPE_LIST_VALUE,
(c, p) => c.FilterProductTypes = ParseIonTypes(p)) { WrapValue = true };
public static readonly Argument ARG_TRAN_PREDICT_CE = new DocArgument(@"tran-predict-ce", () => GetDisplayNames(Settings.Default.CollisionEnergyList),
(c, p) => c.PredictCEName = p.Value) { WrapValue = true };
public static readonly Argument ARG_TRAN_PREDICT_DP = new DocArgument(@"tran-predict-dp", () => GetDisplayNames(Settings.Default.DeclusterPotentialList),
(c, p) => c.PredictDPName = p.Value) { WrapValue = true };
public static readonly Argument ARG_TRAN_PREDICT_COV = new DocArgument(@"tran-predict-cov", () => GetDisplayNames(Settings.Default.CompensationVoltageList),
(c, p) => c.PredictCoVName = p.Value) { WrapValue = true };
public static readonly Argument ARG_TRAN_PREDICT_OPTDB = new DocArgument(@"tran-predict-optdb", () => GetDisplayNames(Settings.Default.OptimizationLibraryList),
(c, p) => c.PredictOpimizationLibraryName = p.Value) { WrapValue = true };
public static readonly Argument ARG_FULL_SCAN_PRECURSOR_RES = new DocArgument(@"full-scan-precursor-res", RP_VALUE,
(c, p) => c.FullScanPrecursorRes = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_FULL_SCAN_PRECURSOR_RES_MZ = new DocArgument(@"full-scan-precursor-res-mz", MZ_VALUE,
(c, p) => c.FullScanPrecursorResMz = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_FULL_SCAN_PRODUCT_RES = new DocArgument(@"full-scan-product-res", RP_VALUE,
(c, p) => c.FullScanProductRes = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_FULL_SCAN_PRODUCT_RES_MZ = new DocArgument(@"full-scan-product-res-mz", MZ_VALUE,
(c, p) => c.FullScanProductResMz = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_FULL_SCAN_RT_FILTER_TOLERANCE = new DocArgument(@"full-scan-rt-filter-tolerance", MINUTES_VALUE,
(c, p) => c.FullScanRetentionTimeFilterLength = p.ValueDouble) { WrapValue = true };
public static readonly Argument ARG_IMS_LIBRARY_RES = new DocArgument(@"ims-library-res", RP_VALUE,
(c, p) => c.IonMobilityLibraryRes = p.ValueDouble)
{ WrapValue = true };
private static readonly ArgumentGroup GROUP_SETTINGS = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_SETTINGS_Document_Settings, false,
ARG_TRAN_PRECURSOR_ION_CHARGES, ARG_TRAN_FRAGMENT_ION_CHARGES, ARG_TRAN_FRAGMENT_ION_TYPES,
ARG_TRAN_PREDICT_CE, ARG_TRAN_PREDICT_DP, ARG_TRAN_PREDICT_COV, ARG_TRAN_PREDICT_OPTDB,
ARG_FULL_SCAN_PRECURSOR_RES, ARG_FULL_SCAN_PRECURSOR_RES_MZ,
ARG_FULL_SCAN_PRODUCT_RES, ARG_FULL_SCAN_PRODUCT_RES_MZ,
ARG_FULL_SCAN_RT_FILTER_TOLERANCE, ARG_IMS_LIBRARY_RES)
{
LeftColumnWidth = 34,
Dependencies =
{
{ARG_FULL_SCAN_PRECURSOR_RES_MZ, ARG_FULL_SCAN_PRECURSOR_RES},
{ARG_FULL_SCAN_PRODUCT_RES_MZ, ARG_FULL_SCAN_PRODUCT_RES},
}
};
public static string[] GetDisplayNames<TItem>(SettingsListBase<TItem> list) where TItem : IKeyContainer<string>, IXmlSerializable
{
return list.Select(list.GetDisplayName).ToArray();
}
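// Parses the charge list as protonated adducts and rejects any charge outside the allowed [min, max] range.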
private static Adduct[] ParseIonCharges(NameValuePair p, int min, int max)
{
Assume.IsNotNull(p.Match); // Must be matched before accessing this
var charges = ArrayUtil.Parse(p.Value, Adduct.FromStringAssumeProtonated, TextUtil.SEPARATOR_CSV, null);
if (charges == null)
throw new ValueInvalidChargeListException(p.Match, p.Value);
foreach (var charge in charges)
{
if (min > charge.AdductCharge || charge.AdductCharge > max)
throw new ValueOutOfRangeIntException(p.Match, charge.AdductCharge, min, max);
}
return charges;
}
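// Parses the fragment ion type list, throwing ValueInvalidIonTypeListException when it cannot be parsed.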
private static IonType[] ParseIonTypes(NameValuePair p)
{
Assume.IsNotNull(p.Match); // Must be matched before accessing this
var types = TransitionFilter.ParseTypes(p.Value, null);
if (types == null)
throw new ValueInvalidIonTypeListException(p.Match, p.Value);
return types;
}
public Adduct[] FilterPrecursorCharges { get; private set; }
public Adduct[] FilterProductCharges { get; private set; }
public IonType[] FilterProductTypes { get; private set; }
public bool FilterSettings
{
get
{
return (FilterPrecursorCharges != null ||
FilterProductCharges != null ||
FilterProductTypes != null);
}
}
public string PredictCEName { get; private set; }
public string PredictDPName { get; private set; }
public string PredictCoVName { get; private set; }
public string PredictOpimizationLibraryName { get; private set; }
public bool PredictTranSettings
{
get
{
return (PredictCEName != null ||
PredictDPName != null ||
PredictCoVName != null ||
PredictOpimizationLibraryName != null);
}
}
public double? FullScanPrecursorRes { get; private set; }
public double? FullScanPrecursorResMz { get; private set; }
public double? FullScanProductRes { get; private set; }
public double? FullScanProductResMz { get; private set; }
public double? FullScanRetentionTimeFilterLength { get; private set; }
public bool FullScanSettings
{
get
{
return (FullScanPrecursorRes
?? FullScanPrecursorResMz
?? FullScanProductRes
?? FullScanProductResMz
?? FullScanRetentionTimeFilterLength).HasValue;
}
}
public double? IonMobilityLibraryRes { get; private set; }
public bool ImsSettings
{
get { return IonMobilityLibraryRes.HasValue; }
}
// For importing a tool from a zip file.
public static readonly Argument ARG_TOOL_ADD = new ToolArgument(@"tool-add", NAME_VALUE,
(c, p) => c.ToolName = p.Value);
public static readonly Argument ARG_TOOL_COMMAND = new ToolArgument(@"tool-command", COMMAND_VALUE,
(c, p) => c.ToolCommand = p.Value);
public static readonly Argument ARG_TOOL_ARGUMENTS = new ToolArgument(@"tool-arguments", COMMAND_ARGUMENTS_VALUE,
(c, p) => c.ToolArguments = p.Value);
public static readonly Argument ARG_TOOL_INITIAL_DIR = new ToolArgument(@"tool-initial-dir", PATH_TO_FOLDER,
(c, p) => c.ToolInitialDirectory = p.Value);
public static readonly Argument ARG_TOOL_CONFLICT_RESOLUTION = new Argument(@"tool-conflict-resolution",
new[] {ARG_VALUE_OVERWRITE, ARG_VALUE_SKIP},
(c, p) => c.ResolveToolConflictsBySkipping = p.IsValue(ARG_VALUE_SKIP)) {WrapValue = true};
public static readonly Argument ARG_TOOL_REPORT = new ToolArgument(@"tool-report", REPORT_NAME_VALUE,
(c, p) => c.ToolReportTitle = p.Value);
public static readonly Argument ARG_TOOL_OUTPUT_TO_IMMEDIATE_WINDOW = new ToolArgument(@"tool-output-to-immediate-window",
(c, p) => c.ToolOutputToImmediateWindow = true);
public static readonly Argument ARG_TOOL_ADD_ZIP = new Argument(@"tool-add-zip", () => GetPathToFile(ToolDescription.EXT_INSTALL),
(c, p) =>
{
c.InstallingToolsFromZip = true;
c.ZippedToolsPath = p.Value;
});
public static readonly Argument ARG_TOOL_ZIP_CONFLICT_RESOLUTION = new Argument(@"tool-zip-conflict-resolution",
new [] {ARG_VALUE_OVERWRITE, ARG_VALUE_PARALLEL}, (c, p) =>
{
c.ResolveZipToolConflictsBySkipping = p.IsValue(ARG_VALUE_OVERWRITE)
? CommandLine.ResolveZipToolConflicts.overwrite
: CommandLine.ResolveZipToolConflicts.in_parallel;
}) {WrapValue = true};
public static readonly Argument ARG_TOOL_ZIP_OVERWRITE_ANNOTATIONS = new Argument(@"tool-zip-overwrite-annotations",
new[] {ARG_VALUE_TRUE, ARG_VALUE_FALSE}, (c, p) => c.ResolveZipToolAnotationConflictsBySkipping = p.IsValue(ARG_VALUE_TRUE))
{ WrapValue = true};
public const string ARG_VALUE_TRUE = "true";
public const string ARG_VALUE_FALSE = "false";
public static readonly Argument ARG_TOOL_PROGRAM_MACRO = new Argument(@"tool-program-macro",
PROGRAM_MACRO_VALUE, (c, p) => c.ParseToolProgramMacro(p)) {WrapValue = true};
public static readonly Argument ARG_TOOL_PROGRAM_PATH = new Argument(@"tool-program-path", PATH_TO_FILE,
(c, p) => c.ZippedToolsProgramPathValue = p.Value);
public static readonly Argument ARG_TOOL_IGNORE_REQUIRED_PACKAGES = new Argument(@"tool-ignore-required-packages",
(c, p) => c.ZippedToolsPackagesHandled = true);
public static readonly Argument ARG_TOOL_LIST_EXPORT = new Argument(@"tool-list-export", PATH_TO_FILE,
(c, p) => ExportToolList(p)) {InternalUse = true};
private void ParseToolProgramMacro(NameValuePair pair)
{
// example --tool-program-macro=R,2.15.2
var spliced = pair.Value.Split(',');
if (spliced.Length > 2)
{
WriteLine(Resources.CommandArgs_ParseArgsInternal_Warning__Incorrect_Usage_of_the___tool_program_macro_command_);
}
else
{
string programName = spliced[0];
string programVersion = null;
if (spliced.Length > 1)
{
// Extract the version if specified.
programVersion = spliced[1];
}
ZippedToolsProgramPathContainer = new ProgramPathContainer(programName, programVersion);
}
}
private static void ExportToolList(NameValuePair pair)
{
// A command that exports all the tools to a text file in a SkylineRunner form for --batch-commands
// Not advertised.
string pathToOutputFile = pair.ValueFullPath;
using (StreamWriter sw = new StreamWriter(pathToOutputFile))
{
foreach (var tool in Settings.Default.ToolList)
{
// ReSharper disable LocalizableElement
string command = "--tool-add=" + "\"" + tool.Title + "\"" +
" --tool-command=" + "\"" + tool.Command + "\"" +
" --tool-arguments=" + "\"" + tool.Arguments + "\"" +
" --tool-initial-dir=" + "\"" + tool.InitialDirectory + "\"" +
" --tool-conflict-resolution=skip" +
" --tool-report=" + "\"" + tool.ReportTitle + "\"";
if (tool.OutputToImmediateWindow)
command += " --tool-output-to-immediate-window";
sw.WriteLine(command);
// ReSharper restore LocalizableElement
}
}
}
private static readonly ArgumentGroup GROUP_TOOLS = new ArgumentGroup(() => Resources.CommandArgs_GROUP_TOOLS_Tools_Installation, false,
ARG_TOOL_ADD, ARG_TOOL_COMMAND, ARG_TOOL_ARGUMENTS, ARG_TOOL_INITIAL_DIR, ARG_TOOL_CONFLICT_RESOLUTION,
ARG_TOOL_REPORT, ARG_TOOL_OUTPUT_TO_IMMEDIATE_WINDOW, ARG_TOOL_ADD_ZIP, ARG_TOOL_ZIP_CONFLICT_RESOLUTION,
ARG_TOOL_ZIP_OVERWRITE_ANNOTATIONS, ARG_TOOL_PROGRAM_MACRO, ARG_TOOL_PROGRAM_PATH,
ARG_TOOL_IGNORE_REQUIRED_PACKAGES, ARG_TOOL_LIST_EXPORT)
{
Preamble = () => Resources.CommandArgs_GROUP_TOOLS_The_arguments_below_can_be_used_to_install_tools_onto_the_Tools_menu_and_do_not_rely_on_the____in__argument_because_they_independent_of_a_specific_Skyline_document_,
};
public bool InstallingToolsFromZip { get; private set; }
public string ZippedToolsPath { get; private set; }
public CommandLine.ResolveZipToolConflicts? ResolveZipToolConflictsBySkipping { get; private set; }
public bool? ResolveZipToolAnotationConflictsBySkipping { get; private set; }
public ProgramPathContainer ZippedToolsProgramPathContainer { get; private set; }
public string ZippedToolsProgramPathValue { get; private set; }
public bool ZippedToolsPackagesHandled { get; set; }
// For keeping track of when an in command is required.
public bool RequiresSkylineDocument { get; private set; }
// For --batch-commands parameter
public string BatchCommandsPath { get; private set; }
private bool _runningBatchCommands;
public bool RunningBatchCommands
{
get { return !string.IsNullOrEmpty(BatchCommandsPath) || _runningBatchCommands; }
set { _runningBatchCommands = value; }
}
// Export isolation / transition list
public static readonly Argument ARG_EXP_ISOLATION_LIST_INSTRUMENT = new DocArgument(@"exp-isolationlist-instrument",
ExportInstrumentType.ISOLATION_LIST_TYPES, (c, p) => c.ParseExpIsolationListInstrumentType(p)) {WrapValue = true};
public static readonly Argument ARG_EXP_TRANSITION_LIST_INSTRUMENT = new DocArgument(@"exp-translist-instrument",
ExportInstrumentType.TRANSITION_LIST_TYPES, (c, p) => c.ParseExpTransitionListInstrumentType(p)) {WrapValue = true};
private static readonly ArgumentGroup GROUP_LISTS = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_LISTS_Exporting_isolation_transition_lists, false,
ARG_EXP_ISOLATION_LIST_INSTRUMENT, ARG_EXP_TRANSITION_LIST_INSTRUMENT) {LeftColumnWidth = 34};
private string _isolationListInstrumentType;
public string IsolationListInstrumentType
{
get { return _isolationListInstrumentType; }
set
{
if (ExportInstrumentType.ISOLATION_LIST_TYPES.Any(inst => inst.Equals(value)))
{
_isolationListInstrumentType = value;
}
else
{
throw new ArgumentException(string.Format(Resources.CommandArgs_IsolationListInstrumentType_The_instrument_type__0__is_not_valid_for_isolation_list_export, value));
}
}
}
public bool ExportingIsolationList
{
get { return !string.IsNullOrEmpty(IsolationListInstrumentType); }
}
private string _transListInstrumentType;
public string TransListInstrumentType
{
get { return _transListInstrumentType; }
set
{
if (ExportInstrumentType.TRANSITION_LIST_TYPES.Any(inst => inst.Equals(value)))
{
_transListInstrumentType = value;
}
else
{
throw new ArgumentException(string.Format(Resources.CommandArgs_TransListInstrumentType_The_instrument_type__0__is_not_valid_for_transition_list_export, value));
}
}
}
public bool ExportingTransitionList
{
get { return !string.IsNullOrEmpty(TransListInstrumentType); }
}
private void ParseExpIsolationListInstrumentType(NameValuePair pair)
{
try
{
IsolationListInstrumentType = pair.Value;
}
catch (ArgumentException)
{
WriteInstrumentValueError(pair, ExportInstrumentType.ISOLATION_LIST_TYPES);
WriteLine(Resources.CommandArgs_ParseArgsInternal_No_isolation_list_will_be_exported_);
}
}
private void ParseExpTransitionListInstrumentType(NameValuePair pair)
{
try
{
TransListInstrumentType = pair.Value;
}
catch (ArgumentException)
{
WriteInstrumentValueError(pair, ExportInstrumentType.TRANSITION_LIST_TYPES);
WriteLine(Resources.CommandArgs_ParseArgsInternal_No_transition_list_will_be_exported_);
}
}
private void WriteInstrumentValueError(NameValuePair pair, string[] listInstrumentTypes)
{
WriteLine(Resources.CommandArgs_ParseArgsInternal_Warning__The_instrument_type__0__is_not_valid__Please_choose_from_,
pair.Value);
foreach (string str in listInstrumentTypes)
WriteLine(str);
}
// Export method
public static readonly Argument ARG_EXP_METHOD_INSTRUMENT = new DocArgument(@"exp-method-instrument",
ExportInstrumentType.METHOD_TYPES, (c, p) => c.ParseExpMethodInstrumentType(p)) { WrapValue = true };
public static readonly Argument ARG_EXP_TEMPLATE = new DocArgument(@"exp-template", PATH_TO_FILE,
(c, p) => c.TemplateFile = p.ValueFullPath);
private static readonly ArgumentGroup GROUP_METHOD = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_METHOD_Exporting_native_instrument_methods, false,
ARG_EXP_METHOD_INSTRUMENT, ARG_EXP_TEMPLATE) {LeftColumnWidth = 34};
private string _methodInstrumentType;
public string MethodInstrumentType
{
get { return _methodInstrumentType; }
set
{
if (ExportInstrumentType.METHOD_TYPES.Any(inst => inst.Equals(value)))
{
_methodInstrumentType = value;
}
else
{
throw new ArgumentException(string.Format(Resources.CommandArgs_MethodInstrumentType_The_instrument_type__0__is_not_valid_for_method_export, value));
}
}
}
public bool ExportingMethod
{
get { return !string.IsNullOrEmpty(MethodInstrumentType); }
}
public string TemplateFile { get; private set; }
private void ParseExpMethodInstrumentType(NameValuePair pair)
{
try
{
MethodInstrumentType = pair.Value;
}
catch (ArgumentException)
{
WriteInstrumentValueError(pair, ExportInstrumentType.METHOD_TYPES);
WriteLine(Resources.CommandArgs_ParseArgsInternal_No_method_will_be_exported_);
}
}
// Export list/method arguments
public static readonly Argument ARG_EXP_FILE = new DocArgument(@"exp-file", PATH_TO_FILE,
(c, p) => c.ExportPath = p.ValueFullPath);
public static readonly Argument ARG_EXP_STRATEGY = new DocArgument(@"exp-strategy",
Helpers.GetEnumValues<ExportStrategy>().Select(p => p.ToString()).ToArray(),
(c, p) =>
{
c.ExportStrategySet = true;
c.ExportStrategy = (ExportStrategy)Enum.Parse(typeof(ExportStrategy), p.Value, true);
})
{ WrapValue = true};
public static readonly Argument ARG_EXP_METHOD_TYPE = new DocArgument(@"exp-method-type",
Helpers.GetEnumValues<ExportMethodType>().Select(p => p.ToString()).ToArray(),
(c, p) => c.ExportMethodType = (ExportMethodType)Enum.Parse(typeof(ExportMethodType), p.Value, true))
{ WrapValue = true};
public static readonly Argument ARG_EXP_MAX_TRANS = new DocArgument(@"exp-max-trans", NAME_VALUE,
(c, p) => c.MaxTransitionsPerInjection = p.ValueInt);
public static readonly Argument ARG_EXP_OPTIMIZING = new DocArgument(@"exp-optimizing", new[] { OPT_CE, OPT_DP},
(c, p) => c.ExportOptimizeType = p.IsValue(OPT_CE) ? OPT_CE : OPT_DP);
public static readonly Argument ARG_EXP_SCHEDULING_REPLICATE = new DocArgument(@"exp-scheduling-replicate", NAME_VALUE,
(c, p) => c.SchedulingReplicate = p.Value);
public static readonly Argument ARG_EXP_ORDER_BY_MZ = new DocArgument(@"exp-order-by-mz",
(c, p) => c.SortByMz = true);
public static readonly Argument ARG_EXP_IGNORE_PROTEINS = new DocArgument(@"exp-ignore-proteins",
(c, p) => c.IgnoreProteins = true);
public static readonly Argument ARG_EXP_PRIMARY_COUNT = new DocArgument(@"exp-primary-count", INT_VALUE,
(c, p) => c.PrimaryTransitionCount = p.GetValueInt(AbstractMassListExporter.PRIMARY_COUNT_MIN, AbstractMassListExporter.PRIMARY_COUNT_MAX));
public static readonly Argument ARG_EXP_POLARITY = new Argument(@"exp-polarity",
Helpers.GetEnumValues<ExportPolarity>().Select(p => p.ToString()).ToArray(),
(c, p) => c.ExportPolarityFilter = (ExportPolarity)Enum.Parse(typeof(ExportPolarity), p.Value, true))
{ WrapValue = true};
private static readonly ArgumentGroup GROUP_EXP_GENERAL = new ArgumentGroup(
() => CommandArgUsage.CommandArgs_GROUP_EXP_GENERAL_Method_and_transition_list_options, false,
ARG_EXP_FILE, ARG_EXP_STRATEGY, ARG_EXP_METHOD_TYPE, ARG_EXP_MAX_TRANS,
ARG_EXP_OPTIMIZING, ARG_EXP_SCHEDULING_REPLICATE, ARG_EXP_ORDER_BY_MZ, ARG_EXP_IGNORE_PROTEINS,
ARG_EXP_PRIMARY_COUNT, ARG_EXP_POLARITY); // {LeftColumnWidth = 34};
// Instrument specific arguments
public static readonly Argument ARG_EXP_DWELL_TIME = new DocArgument(@"exp-dwell-time", MILLIS_VALE,
(c, p) => c.DwellTime = p.GetValueInt(AbstractMassListExporter.DWELL_TIME_MIN, AbstractMassListExporter.DWELL_TIME_MAX))
{ AppliesTo = CommandArgUsage.CommandArgs_ARG_EXP_DWELL_TIME_AppliesTo};
public static readonly Argument ARG_EXP_ADD_ENERGY_RAMP = new DocArgument(@"exp-add-energy-ramp",
(c, p) => c.AddEnergyRamp = true)
{ AppliesTo = CommandArgUsage.CommandArgs_ARG_EXP_Thermo};
public static readonly Argument ARG_EXP_USE_S_LENS = new DocArgument(@"exp-use-s-lens",
(c, p) => c.UseSlens = true)
{ AppliesTo = CommandArgUsage.CommandArgs_ARG_EXP_Thermo};
public static readonly Argument ARG_EXP_RUN_LENGTH = new DocArgument(@"exp-run-length", MINUTES_VALUE,
(c, p) => c.RunLength = p.GetValueDouble(AbstractMassListExporter.RUN_LENGTH_MIN, AbstractMassListExporter.RUN_LENGTH_MAX))
{ AppliesTo = CommandArgUsage.CommandArgs_ARG_EXP_RUN_LENGTH_AppliesTo};
private static readonly ArgumentGroup GROUP_EXP_INSTRUMENT = new ArgumentGroup(() => CommandArgUsage.CommandArgs_GROUP_EXP_INSTRUMENT_Vendor_specific_method_and_transition_list_options, false,
ARG_EXP_DWELL_TIME, ARG_EXP_ADD_ENERGY_RAMP, ARG_EXP_USE_S_LENS, ARG_EXP_RUN_LENGTH);
public string ExportPath { get; private set; }
public bool ExportStrategySet { get; private set; }
public ExportStrategy ExportStrategy { get; set; }
// The min value for this field comes from either MassListExporter.MAX_TRANS_PER_INJ_MIN
// or MethodExporter.MAX_TRANS_PER_INJ_MIN_TLTQ depending on the instrument. The max value
// comes from the document. Consequently, the value cannot be range-checked in the accessor.
public int MaxTransitionsPerInjection { get; set; }
private string _importOptimizeType;
public string ImportOptimizeType
{
get { return _importOptimizeType; }
set { _importOptimizeType = ToOptimizeString(value); }
}
private string _exportOptimizeType;
public string ExportOptimizeType
{
get { return _exportOptimizeType; }
set { _exportOptimizeType = ToOptimizeString(value); }
}
public const string OPT_NONE = "NONE";
public const string OPT_CE = "CE";
public const string OPT_DP = "DP";
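// Maps the command-line optimization keyword (NONE/CE/DP, case-insensitive) to the internal
// ExportOptimize constant, or null for no optimization; any other value is rejected.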
private static string ToOptimizeString(string value)
{
if (value == null)
return null;
switch (value.ToUpperInvariant())
{
case OPT_NONE:
return null;
case OPT_CE:
return ExportOptimize.CE;
case OPT_DP:
return ExportOptimize.DP;
default:
throw new ArgumentException(string.Format(Resources.CommandArgs_ToOptimizeString_The_instrument_parameter__0__is_not_valid_for_optimization_, value));
}
}
public ExportMethodType ExportMethodType { get; private set; }
public ExportSchedulingAlgorithm ExportSchedulingAlgorithm
{
get
{
return String.IsNullOrEmpty(SchedulingReplicate)
? ExportSchedulingAlgorithm.Average
: ExportSchedulingAlgorithm.Single;
}
}
public string SchedulingReplicate { get; private set; }
public bool SortByMz { get; private set; }
public bool IgnoreProteins { get; private set; }
private int _primaryTransitionCount;
public int PrimaryTransitionCount
{
get { return _primaryTransitionCount; }
set
{
if (value < AbstractMassListExporter.PRIMARY_COUNT_MIN || value > AbstractMassListExporter.PRIMARY_COUNT_MAX)
{
throw new ArgumentException(string.Format(Resources.CommandArgs_PrimaryTransitionCount_The_primary_transition_count__0__must_be_between__1__and__2__, value, AbstractMassListExporter.PRIMARY_COUNT_MIN, AbstractMassListExporter.PRIMARY_COUNT_MAX));
}
_primaryTransitionCount = value;
}
}
private int _dwellTime;
public int DwellTime
{
get { return _dwellTime; }
set
{
if (value < AbstractMassListExporter.DWELL_TIME_MIN || value > AbstractMassListExporter.DWELL_TIME_MAX)
{
throw new ArgumentException(string.Format(Resources.CommandArgs_DwellTime_The_dwell_time__0__must_be_between__1__and__2__, value, AbstractMassListExporter.DWELL_TIME_MIN, AbstractMassListExporter.DWELL_TIME_MAX));
}
_dwellTime = value;
}
}
public bool AddEnergyRamp { get; private set; }
public bool UseSlens { get; private set; }
private double _runLength;
public double RunLength
{
get { return _runLength; }
set
{
// Exclusive of the minimum, because the minimum was changed to zero
if (value <= AbstractMassListExporter.RUN_LENGTH_MIN || value > AbstractMassListExporter.RUN_LENGTH_MAX)
{
throw new ArgumentException(string.Format(Resources.CommandArgs_RunLength_The_run_length__0__must_be_between__1__and__2__, value, AbstractMassListExporter.RUN_LENGTH_MIN, AbstractMassListExporter.RUN_LENGTH_MAX));
}
_runLength = value;
}
}
public ExportPolarity ExportPolarityFilter { get; private set; }
public ExportCommandProperties ExportCommandProperties
{
get
{
return new ExportCommandProperties(_out)
{
AddEnergyRamp = AddEnergyRamp,
UseSlens = UseSlens,
DwellTime = DwellTime,
ExportStrategy = ExportStrategy,
SortByMz = SortByMz,
IgnoreProteins = IgnoreProteins,
MaxTransitions = MaxTransitionsPerInjection,
MethodType = ExportMethodType,
OptimizeType = ExportOptimizeType,
PolarityFilter = ExportPolarityFilter,
RunLength = RunLength,
SchedulingAlgorithm = ExportSchedulingAlgorithm
};
}
}
private class ParaUsageBlock : IUsageBlock
{
public ParaUsageBlock(string text)
{
Text = text;
}
public string Text { get; private set; }
public string ToString(int width, string formatType)
{
return ConsoleTable.ParaToString(width, Text, true);
}
public string ToHtmlString()
{
return @"<p>" + Text + @"</p>";
}
}
public static IEnumerable<IUsageBlock> UsageBlocks
{
get
{
return new IUsageBlock[]
{
new ParaUsageBlock(CommandArgUsage.CommandArgs_Usage_To_access_the_command_line_interface_for_Skyline_you_can_use_either_SkylineRunner_exe_or_SkylineCmd_exe_),
new ParaUsageBlock(CommandArgUsage.CommandArgs_Usage_para2),
new ParaUsageBlock(CommandArgUsage.CommandArgs_Usage_para3),
new ParaUsageBlock(CommandArgUsage.CommandArgs_Usage_para4),
GROUP_GENERAL_IO,
GROUP_INTERNAL, // No output
new ParaUsageBlock(CommandArgUsage.CommandArgs_Usage_Until_the_section_titled_Settings_Customization_all_other_command_line_arguments_rely_on_the____in__argument_because_they_all_rely_on_having_a_Skyline_document_open_),
GROUP_IMPORT,
GROUP_REINTEGRATE,
GROUP_REMOVE,
GROUP_MINIMIZE_RESULTS,
GROUP_IMPORT_DOC,
GROUP_ANNOTATIONS,
GROUP_FASTA,
GROUP_IMPORT_SEARCH,
GROUP_IMPORT_LIST,
GROUP_ADD_LIBRARY,
GROUP_DECOYS,
GROUP_REFINEMENT,
GROUP_REFINEMENT_W_RESULTS,
GROUP_REPORT,
GROUP_CHROMATOGRAM,
GROUP_LISTS,
GROUP_METHOD,
GROUP_EXP_GENERAL,
GROUP_EXP_INSTRUMENT,
GROUP_PANORAMA,
GROUP_SETTINGS,
GROUP_TOOLS
};
}
}
public static IEnumerable<Argument> UsageArguments
{
get
{
return AllArguments.Where(a => !a.InternalUse);
}
}
public static IEnumerable<Argument> AllArguments
{
get
{
return UsageBlocks.Where(b => b is ArgumentGroup).Cast<ArgumentGroup>()
.SelectMany(g => g.Args);
}
}
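// Builds a single HTML page of the usage documentation from the ordered usage blocks.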
public static string GenerateUsageHtml()
{
var sb = new StringBuilder(@"<html><head>");
sb.AppendLine(DocumentationGenerator.GetStyleSheetHtml());
sb.AppendLine(@"</head><body>");
foreach (var block in UsageBlocks)
sb.Append(block.ToHtmlString());
sb.Append(@"</body></html>");
return sb.ToString();
}
public bool UsageShown { get; private set; }
public bool Usage(string formatType = null)
{
if (!UsageShown) // Avoid showing again
{
if (formatType == ARG_VALUE_ASCII)
CultureInfo.CurrentCulture = CultureInfo.InvariantCulture; // Use invariant culture for ascii output
UsageShown = true;
foreach (var block in UsageBlocks)
_out.Write(block.ToString(_usageWidth, formatType));
}
return false; // End argument processing
}
private int _usageWidth = 78;
private readonly CommandStatusWriter _out;
private readonly bool _isDocumentLoaded;
private readonly IList<Argument> _seenArguments = new List<Argument>();
public CommandArgs(CommandStatusWriter output, bool isDocumentLoaded)
{
ResolveToolConflictsBySkipping = null;
ResolveSkyrConflictsBySkipping = null;
_out = output;
_isDocumentLoaded = isDocumentLoaded;
ReportColumnSeparator = TextUtil.CsvSeparator;
MaxTransitionsPerInjection = AbstractMassListExporter.MAX_TRANS_PER_INJ_DEFAULT;
ImportOptimizeType = OPT_NONE;
ExportOptimizeType = OPT_NONE;
ExportStrategy = ExportStrategy.Single;
ExportMethodType = ExportMethodType.Standard;
PrimaryTransitionCount = AbstractMassListExporter.PRIMARY_COUNT_DEFAULT;
DwellTime = AbstractMassListExporter.DWELL_TIME_DEFAULT;
RunLength = AbstractMassListExporter.RUN_LENGTH_DEFAULT;
DocImportPaths = new List<string>();
ReplicateFile = new List<MsDataFileUri>();
SearchResultsFiles = new List<string>();
ExcludeFeatures = new List<IPeakFeatureCalculator>();
SharedFileType = ShareType.DEFAULT;
ImportBeforeDate = null;
ImportOnOrAfterDate = null;
}
private void WriteLine(string value, params object[] obs)
{
_out.WriteLine(value, obs);
}
public bool ParseArgs(string[] args)
{
try
{
return ParseArgsInternal(args);
}
catch (UsageException x)
{
WriteLine(Resources.CommandLine_GeneralException_Error___0_, x.Message);
return false;
}
catch (Exception x)
{
// Unexpected behavior, but better to output the error than appear to crash, and
// have Windows write it to the application event log.
WriteLine(Resources.CommandLine_GeneralException_Error___0_, x.Message);
WriteLine(x.StackTrace);
return false;
}
}
private bool ParseArgsInternal(IEnumerable<string> args)
{
_seenArguments.Clear();
foreach (string s in args)
{
var pair = Argument.Parse(s);
if (!ProcessArgument(pair))
return false;
}
return ValidateArgs();
}
private bool ProcessArgument(NameValuePair pair)
{
// Only name-value pairs get processed here
if (pair == null || pair.IsEmpty)
return true;
foreach (var definedArgument in AllArguments)
{
if (pair.IsMatch(definedArgument))
{
Assume.IsNotNull(definedArgument.ProcessValue); // Must define some way to process the value
_seenArguments.Add(definedArgument);
return definedArgument.ProcessValue(this, pair);
}
}
// Unmatched argument
WriteLine(Resources.CommandArgs_ParseArgsInternal_Error__Unexpected_argument____0_, pair.Name);
return false;
}
private bool ValidateArgs()
{
// Check argument dependencies
var allDependencies = new Dictionary<Argument, Argument>();
foreach (ArgumentGroup group in UsageBlocks.Where(b => b is ArgumentGroup))
{
if (group.Validate != null && !group.Validate(this))
return false;
foreach (var pair in group.Dependencies)
allDependencies.Add(pair.Key, pair.Value);
}
var seenSet = new HashSet<Argument>(_seenArguments);
var warningSet = new HashSet<Argument>(); // Warn only once
foreach (var seenArgument in _seenArguments)
{
Argument dependency;
if (allDependencies.TryGetValue(seenArgument, out dependency) &&
!seenSet.Contains(dependency) && !warningSet.Contains(seenArgument))
{
WarnArgRequirement(seenArgument, dependency);
warningSet.Add(seenArgument);
}
}
return true;
}
public static string WarnArgRequirementText(Argument usedArg, params Argument[] requiredArgs)
{
if (requiredArgs.Length == 1)
return string.Format(Resources.CommandArgs_WarnArgRequirment_Warning__Use_of_the_argument__0__requires_the_argument__1_,
usedArg.ArgumentText, requiredArgs[0].ArgumentText);
var requiredArgsText = new List<string>()
{
string.Format(
Resources
.CommandArgs_WarnArgRequirementText_Use_of_the_argument__0__requires_one_of_the_following_arguments_,
usedArg.ArgumentText)
};
requiredArgsText.AddRange(requiredArgs.Select(i => i.ArgumentText).ToList());
return TextUtil.LineSeparate(requiredArgsText);
}
private void WarnArgRequirement(Argument usedArg, params Argument[] requiredArgs)
{
WriteLine(WarnArgRequirementText(usedArg, requiredArgs));
}
public static string ErrorArgsExclusiveText(Argument arg1, Argument arg2)
{
return string.Format(Resources.CommandArgs_ErrorArgsExclusiveText_Error__The_arguments__0__and__1__options_cannot_be_used_together_,
arg1.ArgumentText, arg2.ArgumentText);
}
private void ErrorArgsExclusive(Argument arg1, Argument arg2)
{
WriteLine(ErrorArgsExclusiveText(arg1, arg2));
}
public class Argument
{
private const string ARG_PREFIX = "--";
public Argument(string name, Func<CommandArgs, NameValuePair, bool> processValue)
{
Name = name;
ProcessValue = processValue;
}
public Argument(string name, Action<CommandArgs, NameValuePair> processValue)
: this(name, (c, p) =>
{
processValue(c, p);
return true;
})
{
}
public Argument(string name, Func<string> valueExample, Func<CommandArgs, NameValuePair, bool> processValue)
: this(name, processValue)
{
ValueExample = valueExample;
}
public Argument(string name, Func<string> valueExample, Action<CommandArgs, NameValuePair> processValue)
: this(name, valueExample, (c, p) =>
{
processValue(c, p);
return true;
})
{
}
public Argument(string name, string[] values, Func<CommandArgs, NameValuePair, bool> processValue)
: this(name, () => ValuesToExample(values), processValue)
{
_fixedValues = values;
}
public Argument(string name, string[] values, Action<CommandArgs, NameValuePair> processValue)
: this(name, values, (c, p) =>
{
processValue(c, p);
return true;
})
{
}
public Argument(string name, Func<string[]> values, Func<CommandArgs, NameValuePair, bool> processValue)
: this(name, () => ValuesToExample(values()), processValue)
{
_dynamicValues = values;
}
public Argument(string name, Func<string[]> values, Action<CommandArgs, NameValuePair> processValue)
: this(name, values, (c, p) =>
{
processValue(c, p);
return true;
})
{
}
private string[] _fixedValues;
private Func<string[]> _dynamicValues;
public Func<CommandArgs, NameValuePair, bool> ProcessValue;
public string Name { get; private set; }
public string AppliesTo { get; set; }
public string Description
{
get { return CommandArgUsage.ResourceManager.GetString("_" + Name.Replace('-', '_')); }
}
public Func<string> ValueExample { get; private set; }
public string[] Values
{
get
{
return _dynamicValues?.Invoke() ?? _fixedValues;
}
}
public bool WrapValue { get; set; }
public bool OptionalValue { get; set; }
public bool InternalUse { get; set; }
public string ArgumentText
{
get { return ARG_PREFIX + Name; }
}
public string GetArgumentTextWithValue(string value)
{
if (ValueExample == null)
throw new ArgumentException(string.Format(@"The argument {0} is valueless.", ArgumentText));
else if (Values != null && !Values.Any(v => v.Equals(value, StringComparison.CurrentCultureIgnoreCase)))
throw new ValueInvalidException(this, value, Values);
return ArgumentText + '=' + value;
}
public string ArgumentDescription
{
get
{
var retValue = ArgumentText;
if (ValueExample != null)
{
var valueText = '=' + (WrapValue ? Environment.NewLine : string.Empty) + ValueExample();
if (OptionalValue)
valueText = '[' + valueText + ']';
retValue += valueText;
}
return retValue;
}
}
public override string ToString()
{
return ArgumentDescription;
}
public static string ValuesToExample(params string[] options)
{
var sb = new StringBuilder();
sb.Append('<');
foreach (var o in options)
{
if (sb.Length > 1)
sb.Append(@" | ");
sb.Append(o);
}
sb.Append('>');
return sb.ToString();
}
public static NameValuePair Parse(string arg)
{
if (!arg.StartsWith(ARG_PREFIX))
return NameValuePair.EMPTY;
string name, value = null;
arg = arg.Substring(2);
int indexEqualsSign = arg.IndexOf('=');
if (indexEqualsSign >= 0)
{
name = arg.Substring(0, indexEqualsSign);
value = arg.Substring(indexEqualsSign + 1);
}
else
{
name = arg;
}
return new NameValuePair(name, value);
}
}
public class DocArgument : Argument
{
public DocArgument(string name, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public DocArgument(string name, Action<CommandArgs, NameValuePair> processValue)
: base(name, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public DocArgument(string name, Func<string> valueExample, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, valueExample, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public DocArgument(string name, Func<string> valueExample, Action<CommandArgs, NameValuePair> processValue)
: base(name, valueExample, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public DocArgument(string name, string[] values, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public DocArgument(string name, string[] values, Action<CommandArgs, NameValuePair> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public DocArgument(string name, Func<string[]> values, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public DocArgument(string name, Func<string[]> values, Action<CommandArgs, NameValuePair> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
private static bool ProcessValueOverride(CommandArgs c, NameValuePair p, Func<CommandArgs, NameValuePair, bool> processValue)
{
c.RequiresSkylineDocument = true;
return processValue(c, p);
}
private static void ProcessValueOverride(CommandArgs c, NameValuePair p, Action<CommandArgs, NameValuePair> processValue)
{
c.RequiresSkylineDocument = true;
processValue(c, p);
}
}
public class RefineArgument : DocArgument
{
public RefineArgument(string name, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public RefineArgument(string name, Action<CommandArgs, NameValuePair> processValue)
: base(name, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public RefineArgument(string name, Func<string> valueExample, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, valueExample, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public RefineArgument(string name, Func<string> valueExample, Action<CommandArgs, NameValuePair> processValue)
: base(name, valueExample, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public RefineArgument(string name, string[] values, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public RefineArgument(string name, string[] values, Action<CommandArgs, NameValuePair> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
private static bool ProcessValueOverride(CommandArgs c, NameValuePair p, Func<CommandArgs, NameValuePair, bool> processValue)
{
if (c.Refinement == null)
c.Refinement = new RefinementSettings();
return processValue(c, p);
}
private static void ProcessValueOverride(CommandArgs c, NameValuePair p, Action<CommandArgs, NameValuePair> processValue)
{
if (c.Refinement == null)
c.Refinement = new RefinementSettings();
processValue(c, p);
}
}
public class ToolArgument : Argument
{
public ToolArgument(string name, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public ToolArgument(string name, Action<CommandArgs, NameValuePair> processValue)
: base(name, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public ToolArgument(string name, Func<string> valueExample, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, valueExample, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public ToolArgument(string name, Func<string> valueExample, Action<CommandArgs, NameValuePair> processValue)
: base(name, valueExample, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public ToolArgument(string name, string[] values, Func<CommandArgs, NameValuePair, bool> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
public ToolArgument(string name, string[] values, Action<CommandArgs, NameValuePair> processValue)
: base(name, values, (c, p) => ProcessValueOverride(c, p, processValue))
{
}
private static bool ProcessValueOverride(CommandArgs c, NameValuePair p, Func<CommandArgs, NameValuePair, bool> processValue)
{
c.ImportingTool = true;
return processValue(c, p);
}
private static void ProcessValueOverride(CommandArgs c, NameValuePair p, Action<CommandArgs, NameValuePair> processValue)
{
c.ImportingTool = true;
processValue(c, p);
}
}
public class NameValuePair
{
public static NameValuePair EMPTY = new NameValuePair(null, null);
public NameValuePair(string name, string value)
{
Name = name;
Value = value;
}
public string Name { get; private set; }
public string Value { get; private set; }
public Argument Match { get; private set; }
public int ValueInt
{
get
{
Assume.IsNotNull(Match); // Must be matched before accessing this
try
{
return int.Parse(Value);
}
catch (FormatException)
{
throw new ValueInvalidIntException(Match, Value);
}
}
}
public int GetValueInt(int minVal, int maxVal)
{
int v = ValueInt;
if (minVal > v || v > maxVal)
throw new ValueOutOfRangeIntException(Match, v, minVal, maxVal);
return v;
}
public double ValueDouble
{
get
{
Assume.IsNotNull(Match); // Must be matched before accessing this
double valueDouble;
// Try both local and invariant formats to make batch files more portable
if (!double.TryParse(Value, out valueDouble) && !double.TryParse(Value, NumberStyles.Float, CultureInfo.InvariantCulture, out valueDouble))
throw new ValueInvalidDoubleException(Match, Value);
return valueDouble;
}
}
public double GetValueDouble(double minVal, double maxVal)
{
double v = ValueDouble;
if (minVal > v || v > maxVal)
throw new ValueOutOfRangeDoubleException(Match, v, minVal, maxVal);
return v;
}
public DateTime ValueDate
{
get
{
Assume.IsNotNull(Match); // Must be matched before accessing this
try
{
// Try local format
return Convert.ToDateTime(Value);
}
catch (Exception)
{
try
{
// Try invariant format to make command-line batch files more portable
return Convert.ToDateTime(Value, CultureInfo.InvariantCulture);
}
catch (Exception)
{
throw new ValueInvalidDateException(Match, Value);
}
}
}
}
public string ValueFullPath
{
get
{
try
{
return Path.GetFullPath(Value);
}
catch (Exception)
{
throw new ValueInvalidPathException(Match, Value);
}
}
}
public bool IsEmpty { get { return string.IsNullOrEmpty(Name); } }
public bool IsNameOnly { get { return string.IsNullOrEmpty(Value); } }
public bool IsMatch(Argument arg)
{
if (!Name.Equals(arg.Name))
return false;
if (arg.ValueExample == null && !IsNameOnly)
throw new ValueUnexpectedException(arg);
if (arg.ValueExample != null)
{
if (IsNameOnly)
{
if (!arg.OptionalValue)
throw new ValueMissingException(arg);
}
else
{
var val = Value;
if (arg.Values != null && !arg.Values.Any(v => v.Equals(val, StringComparison.CurrentCultureIgnoreCase)))
throw new ValueInvalidException(arg, Value, arg.Values);
}
}
Match = arg;
return true;
}
public bool IsValue(string value)
{
return value.Equals(Value, StringComparison.CurrentCultureIgnoreCase);
}
}
public class ArgumentGroup : IUsageBlock
{
private readonly Func<string> _getTitle;
public ArgumentGroup(Func<string> getTitle, bool showHeaders, params Argument[] args)
{
_getTitle = getTitle;
Args = args;
ShowHeaders = showHeaders;
Dependencies = new Dictionary<Argument, Argument>();
}
public string Title { get { return _getTitle(); } }
public Func<string> Preamble { get; set; }
public Func<string> Postamble { get; set; }
public IList<Argument> Args { get; private set; }
public bool ShowHeaders { get; private set; }
public IDictionary<Argument, Argument> Dependencies { get; set; }
public Func<CommandArgs, bool> Validate { get; set; }
public int? LeftColumnWidth { get; set; }
public bool IncludeInUsage
{
get { return !Args.All(a => a.InternalUse); }
}
public override string ToString()
{
return ToString(78, null, true);
}
public string ToString(int width, string formatType)
{
return ToString(width, formatType, false);
}
private string ToString(int width, string formatType, bool forDebugging)
{
if (!IncludeInUsage && !forDebugging)
return string.Empty;
var ct = new ConsoleTable
{
Title = Title,
Borders = formatType != ARG_VALUE_NO_BORDERS,
Ascii = formatType == ARG_VALUE_ASCII
};
if (Preamble != null)
ct.Preamble = Preamble();
if (Postamble != null)
ct.Postamble = Postamble();
if (LeftColumnWidth.HasValue)
ct.Widths = new[] { LeftColumnWidth.Value, width - LeftColumnWidth.Value - 3 }; // 3 borders
else
ct.Width = width;
bool hasAppliesTo = Args.Any(a => a.AppliesTo != null);
if (ShowHeaders)
{
if (hasAppliesTo)
ct.SetHeaders(CommandArgUsage.CommandArgGroup_ToString_Applies_To,
CommandArgUsage.CommandArgGroup_ToString_Argument,
CommandArgUsage.CommandArgGroup_ToString_Description);
else
ct.SetHeaders(CommandArgUsage.CommandArgGroup_ToString_Argument,
CommandArgUsage.CommandArgGroup_ToString_Description);
}
foreach (var commandArg in Args.Where(a => !a.InternalUse))
{
if (hasAppliesTo)
ct.AddRow(commandArg.AppliesTo ?? string.Empty, commandArg.ArgumentDescription, commandArg.Description);
else
ct.AddRow(commandArg.ArgumentDescription, commandArg.Description);
}
return ct.ToString();
}
public string ToHtmlString()
{
if (!IncludeInUsage)
return string.Empty;
// ReSharper disable LocalizableElement
var sb = new StringBuilder();
sb.AppendLine("<div class=\"RowType\">" + HtmlEncode(Title) + "</div>");
if (Preamble != null)
sb.AppendLine("<p>" + Preamble() + "</p>");
sb.AppendLine("<table>");
bool hasAppliesTo = Args.Any(a => a.AppliesTo != null);
if (ShowHeaders)
{
sb.Append("<tr>");
if (hasAppliesTo)
sb.Append("<th>").Append(CommandArgUsage.CommandArgGroup_ToString_Applies_To).Append("</th>");
sb.Append("<th>").Append(CommandArgUsage.CommandArgGroup_ToString_Argument).Append("</th>");
sb.Append("<th>").Append(CommandArgUsage.CommandArgGroup_ToString_Description).Append("</th>");
sb.AppendLine("</tr>");
}
foreach (var commandArg in Args.Where(a => !a.InternalUse))
{
sb.Append("<tr>");
if (hasAppliesTo)
sb.Append("<td>").Append(commandArg.AppliesTo != null ? HtmlEncode(commandArg.AppliesTo) : " ").Append("</td>");
string argDescription = HtmlEncode(commandArg.ArgumentDescription);
if (!argDescription.Contains('|'))
argDescription = argDescription.Replace(" ", " ");
argDescription = argDescription.Replace(Environment.NewLine, "<br/>");
sb.Append("<td>").Append(argDescription).Append("</td>");
sb.Append("<td>").Append(HtmlEncode(commandArg.Description)).Append("</td>");
sb.AppendLine("</tr>");
}
sb.AppendLine("</table>");
if (Postamble != null)
sb.AppendLine("<p>" + Postamble() + "</p>");
return sb.ToString();
// ReSharper restore LocalizableElement
}
private static string HtmlEncode(string str)
{
string encodedText = HttpUtility.HtmlEncode(str ?? string.Empty);
return encodedText.Replace(@"-", @"‑"); // Use non-breaking hyphens
}
}
public interface IUsageBlock
{
string ToString(int width, string formatType);
string ToHtmlString();
}
public class ValueMissingException : UsageException
{
public ValueMissingException(Argument arg)
: base(string.Format(Resources.ValueMissingException_ValueMissingException_, arg.ArgumentText))
{
}
}
public class ValueUnexpectedException : UsageException
{
public ValueUnexpectedException(Argument arg)
: base(string.Format(Resources.ValueUnexpectedException_ValueUnexpectedException_The_argument__0__should_not_have_a_value_specified, arg.ArgumentText))
{
}
}
public class ValueInvalidException : UsageException
{
public ValueInvalidException(Argument arg, string value, string[] argValues)
: base(string.Format(Resources.ValueInvalidException_ValueInvalidException_The_value___0___is_not_valid_for_the_argument__1___Use_one_of__2_, value, arg.ArgumentText, string.Join(@", ", argValues)))
{
}
}
public class ValueInvalidDoubleException : UsageException
{
public ValueInvalidDoubleException(Argument arg, string value)
: base(string.Format(Resources.ValueInvalidDoubleException_ValueInvalidDoubleException_The_value___0___is_not_valid_for_the_argument__1__which_requires_a_decimal_number_, value, arg.ArgumentText))
{
}
}
public class ValueOutOfRangeDoubleException : UsageException
{
public ValueOutOfRangeDoubleException(Argument arg, double value, double minVal, double maxVal)
: base(string.Format(Resources.ValueOutOfRangeDoubleException_ValueOutOfRangeException_The_value___0___for_the_argument__1__must_be_between__2__and__3__, value, arg.ArgumentText, minVal, maxVal))
{
}
}
public class ValueInvalidIntException : UsageException
{
public ValueInvalidIntException(Argument arg, string value)
: base(string.Format(Resources.ValueInvalidIntException_ValueInvalidIntException_The_value___0___is_not_valid_for_the_argument__1__which_requires_an_integer_, value, arg.ArgumentText))
{
}
}
public class ValueInvalidNumberListException : UsageException
{
public ValueInvalidNumberListException(Argument arg, string value)
: base(string.Format(Resources.ValueInvalidNumberListException_ValueInvalidNumberListException_The_value__0__is_not_valid_for_the_argument__1__which_requires_a_list_of_decimal_numbers_, value, arg.ArgumentText))
{
}
}
public class ValueInvalidChargeListException : UsageException
{
public ValueInvalidChargeListException(Argument arg, string value)
: base(string.Format(Resources.ValueInvalidChargeListException_ValueInvalidChargeListException_The_value___0___is_not_valid_for_the_argument__1__which_requires_an_comma_separated_list_of_integers_, value, arg.ArgumentText))
{
}
}
public class ValueInvalidIonTypeListException : UsageException
{
public ValueInvalidIonTypeListException(Argument arg, string value)
: base(string.Format(Resources.ValueInvalidIonTypeListException_ValueInvalidIonTypeListException_The_value___0___is_not_valid_for_the_argument__1__which_requires_an_comma_separated_list_of_fragment_ion_types__a__b__c__x__y__z__p__, value, arg.ArgumentText))
{
}
}
public class ValueOutOfRangeIntException : UsageException
{
public ValueOutOfRangeIntException(Argument arg, int value, int minVal, int maxVal)
: base(string.Format(Resources.ValueOutOfRangeDoubleException_ValueOutOfRangeException_The_value___0___for_the_argument__1__must_be_between__2__and__3__, value, arg.ArgumentText, minVal, maxVal))
{
}
}
public class ValueInvalidDateException : UsageException
{
public ValueInvalidDateException(Argument arg, string value)
: base(string.Format(Resources.ValueInvalidDateException_ValueInvalidDateException_The_value___0___is_not_valid_for_the_argument__1__which_requires_a_date_time_value_, value, arg.ArgumentText))
{
}
}
public class ValueInvalidPathException : UsageException
{
public ValueInvalidPathException(Argument arg, string value)
: base(string.Format(Resources.ValueInvalidPathException_ValueInvalidPathException_The_value___0___is_not_valid_for_the_argument__1__failed_attempting_to_convert_it_to_a_full_file_path_, value, arg.ArgumentText))
{
}
}
public class UsageException : ArgumentException
{
protected UsageException(string message) : base(message)
{
}
}
}
}
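
The review for this record asks for a matching description entry in the usage resource file. Descriptions are resolved through CommandArgUsage.ResourceManager.GetString("_" + Name.Replace('-', '_')) (see the Description property above), so a newly added argument needs a resource whose key is an underscore followed by the argument name with dashes converted to underscores. A minimal sketch of such an entry follows; the argument name my-new-argument and the description text are placeholders for illustration, not the actual addition from this change:

<!-- Hypothetical resx entry; the key follows the "_" + Name.Replace('-', '_')
     convention used by Argument.Description above. -->
<data name="_my_new_argument" xml:space="preserve">
  <value>Placeholder description shown in command-line usage output</value>
</data>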
| 1 | 14,467 | Needs a description added to CommandArgsUsage.resx | ProteoWizard-pwiz | .cs |
@@ -153,7 +153,7 @@ public class DownloadServiceNotification {
iconId = R.drawable.ic_notification_sync_error;
intent = ClientConfig.downloadServiceCallbacks.getReportNotificationContentIntent(context);
id = R.id.notification_download_report;
- content = String.format(context.getString(R.string.download_report_content), successfulDownloads, failedDownloads);
+ content = context.getResources().getQuantityString(R.plurals.download_report_content, successfulDownloads, successfulDownloads, failedDownloads);
}
NotificationCompat.Builder builder = new NotificationCompat.Builder(context, channelId); | 1 | package de.danoeh.antennapod.core.service.download;
import android.app.Notification;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.content.Context;
import android.os.Build;
import android.util.Log;
import androidx.core.app.NotificationCompat;
import de.danoeh.antennapod.core.ClientConfig;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.util.gui.NotificationUtils;
import java.util.List;
public class DownloadServiceNotification {
private static final String TAG = "DownloadSvcNotification";
private final Context context;
private NotificationCompat.Builder notificationCompatBuilder;
public DownloadServiceNotification(Context context) {
this.context = context;
setupNotificationBuilders();
}
private void setupNotificationBuilders() {
notificationCompatBuilder = new NotificationCompat.Builder(context, NotificationUtils.CHANNEL_ID_DOWNLOADING)
.setOngoing(true)
.setContentIntent(ClientConfig.downloadServiceCallbacks.getNotificationContentIntent(context))
.setSmallIcon(R.drawable.ic_notification_sync);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
notificationCompatBuilder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
}
Log.d(TAG, "Notification set up");
}
/**
* Updates the contents of the service's notifications. Should be called
* after setupNotificationBuilders.
*/
public Notification updateNotifications(int numDownloads, List<Downloader> downloads) {
if (notificationCompatBuilder == null) {
return null;
}
String contentTitle = context.getString(R.string.download_notification_title);
String downloadsLeft = (numDownloads > 0)
? context.getResources().getQuantityString(R.plurals.downloads_left, numDownloads, numDownloads)
: context.getString(R.string.downloads_processing);
String bigText = compileNotificationString(downloads);
notificationCompatBuilder.setContentTitle(contentTitle);
notificationCompatBuilder.setContentText(downloadsLeft);
notificationCompatBuilder.setStyle(new NotificationCompat.BigTextStyle().bigText(bigText));
return notificationCompatBuilder.build();
}
private static String compileNotificationString(List<Downloader> downloads) {
StringBuilder stringBuilder = new StringBuilder();
for (int i = 0; i < downloads.size(); i++) {
Downloader downloader = downloads.get(i);
if (downloader.cancelled) {
continue;
}
stringBuilder.append("• ");
DownloadRequest request = downloader.getDownloadRequest();
switch (request.getFeedfileType()) {
case Feed.FEEDFILETYPE_FEED:
if (request.getTitle() != null) {
stringBuilder.append(request.getTitle());
}
break;
case FeedMedia.FEEDFILETYPE_FEEDMEDIA:
if (request.getTitle() != null) {
stringBuilder.append(request.getTitle())
.append(" (")
.append(request.getProgressPercent())
.append("%)");
}
break;
default:
stringBuilder.append("Unknown: ").append(request.getFeedfileType());
}
if (i != downloads.size() - 1) {
stringBuilder.append("\n");
}
}
return stringBuilder.toString();
}
private static String createAutoDownloadNotificationContent(List<DownloadStatus> statuses) {
int length = statuses.size();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < length; i++) {
sb.append("• ").append(statuses.get(i).getTitle());
if (i != length - 1) {
sb.append("\n");
}
}
return sb.toString();
}
/**
* Creates a notification at the end of the service lifecycle to notify the
* user about the number of completed downloads. A report will only be
* created if there is at least one failed download excluding images
*/
public void updateReport(List<DownloadStatus> reportQueue, boolean showAutoDownloadReport) {
// check if report should be created
boolean createReport = false;
int successfulDownloads = 0;
int failedDownloads = 0;
// a download report is created if at least one download has failed
// (excluding failed image downloads)
for (DownloadStatus status : reportQueue) {
if (status.isSuccessful()) {
successfulDownloads++;
createReport |= showAutoDownloadReport && !status.isInitiatedByUser() && status.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA;
} else if (!status.isCancelled()) {
failedDownloads++;
createReport = true;
}
}
if (createReport) {
Log.d(TAG, "Creating report");
// create notification object
String channelId;
int titleId;
int iconId;
int id;
String content;
PendingIntent intent;
if (failedDownloads == 0) {
// We are generating an auto-download report
channelId = NotificationUtils.CHANNEL_ID_AUTO_DOWNLOAD;
titleId = R.string.auto_download_report_title;
iconId = R.drawable.ic_notification_auto_download_complete;
intent = ClientConfig.downloadServiceCallbacks.getAutoDownloadReportNotificationContentIntent(context);
id = R.id.notification_auto_download_report;
content = createAutoDownloadNotificationContent(reportQueue);
} else {
channelId = NotificationUtils.CHANNEL_ID_ERROR;
titleId = R.string.download_report_title;
iconId = R.drawable.ic_notification_sync_error;
intent = ClientConfig.downloadServiceCallbacks.getReportNotificationContentIntent(context);
id = R.id.notification_download_report;
content = String.format(context.getString(R.string.download_report_content), successfulDownloads, failedDownloads);
}
NotificationCompat.Builder builder = new NotificationCompat.Builder(context, channelId);
builder.setTicker(context.getString(titleId))
.setContentTitle(context.getString(titleId))
.setContentText(content)
.setStyle(new NotificationCompat.BigTextStyle().bigText(content))
.setSmallIcon(iconId)
.setContentIntent(intent)
.setAutoCancel(true);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
builder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
}
NotificationManager nm = (NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
nm.notify(id, builder.build());
} else {
Log.d(TAG, "No report is created");
}
}
public void postAuthenticationNotification(final DownloadRequest downloadRequest) {
final String resourceTitle = (downloadRequest.getTitle() != null) ?
downloadRequest.getTitle() : downloadRequest.getSource();
NotificationCompat.Builder builder = new NotificationCompat.Builder(context, NotificationUtils.CHANNEL_ID_USER_ACTION);
builder.setTicker(context.getText(R.string.authentication_notification_title))
.setContentTitle(context.getText(R.string.authentication_notification_title))
.setContentText(context.getText(R.string.authentication_notification_msg))
.setStyle(new NotificationCompat.BigTextStyle().bigText(context.getText(R.string.authentication_notification_msg)
+ ": " + resourceTitle))
.setSmallIcon(R.drawable.ic_key_white)
.setAutoCancel(true)
.setContentIntent(ClientConfig.downloadServiceCallbacks.getAuthentificationNotificationContentIntent(context, downloadRequest));
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
builder.setVisibility(NotificationCompat.VISIBILITY_PUBLIC);
}
NotificationManager nm = (NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
nm.notify(downloadRequest.getSource().hashCode(), builder.build());
}
}
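
The review for this record asks for the long getQuantityString call from the diff at the top of the record to be wrapped. A sketch of one way to split it across two lines without changing behavior:

content = context.getResources().getQuantityString(R.plurals.download_report_content,
        successfulDownloads, successfulDownloads, failedDownloads);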
| 1 | 17,181 | The line is a bit too long. That's why the test currently fails. Please break it into two lines. | AntennaPod-AntennaPod | java |
@@ -59,6 +59,13 @@ public class NodeStatus {
}
}
+ public boolean hasCapability(Capabilities caps) {
+ long count = slots.stream()
+ .filter(slot -> slot.isSupporting(caps))
+ .count();
+ return count > 0;
+ }
+
public boolean hasCapacity() {
return slots.stream().anyMatch(slot -> !slot.getSession().isPresent());
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.data;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.json.JsonInput;
import org.openqa.selenium.json.TypeToken;
import java.net.URI;
import java.time.Instant;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
public class NodeStatus {
private final NodeId nodeId;
private final URI externalUri;
private final int maxSessionCount;
private final Set<Slot> slots;
private final Availability availability;
public NodeStatus(
NodeId nodeId,
URI externalUri,
int maxSessionCount,
Set<Slot> slots,
Availability availability) {
this.nodeId = Require.nonNull("Node id", nodeId);
this.externalUri = Require.nonNull("URI", externalUri);
this.maxSessionCount = Require.positive("Max session count",
maxSessionCount,
"Make sure that a driver is available on $PATH");
this.slots = ImmutableSet.copyOf(Require.nonNull("Slots", slots));
this.availability = Require.nonNull("Availability", availability);
ImmutableSet.Builder<Session> sessions = ImmutableSet.builder();
for (Slot slot : slots) {
slot.getSession().ifPresent(sessions::add);
}
}
public boolean hasCapacity() {
return slots.stream().anyMatch(slot -> !slot.getSession().isPresent());
}
public boolean hasCapacity(Capabilities caps) {
long count = slots.stream()
.filter(slot -> !slot.getSession().isPresent())
.filter(slot -> slot.isSupporting(caps))
.count();
return count > 0;
}
public NodeId getId() {
return nodeId;
}
public URI getUri() {
return externalUri;
}
public int getMaxSessionCount() {
return maxSessionCount;
}
public Set<Slot> getSlots() {
return slots;
}
public Availability getAvailability() {
return availability;
}
public float getLoad() {
float inUse = slots.parallelStream()
.filter(slot -> slot.getSession().isPresent())
.count();
return (inUse / (float) maxSessionCount) * 100f;
}
public long getLastSessionCreated() {
return slots.parallelStream()
.map(Slot::getLastStarted)
.mapToLong(Instant::toEpochMilli)
.max()
.orElse(0);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof NodeStatus)) {
return false;
}
NodeStatus that = (NodeStatus) o;
return Objects.equals(this.nodeId, that.nodeId) &&
Objects.equals(this.externalUri, that.externalUri) &&
this.maxSessionCount == that.maxSessionCount &&
Objects.equals(this.slots, that.slots) &&
Objects.equals(this.availability, that.availability);
}
@Override
public int hashCode() {
return Objects.hash(nodeId, externalUri, maxSessionCount, slots);
}
private Map<String, Object> toJson() {
return new ImmutableMap.Builder<String, Object>()
.put("id", nodeId)
.put("uri", externalUri)
.put("maxSessions", maxSessionCount)
.put("slots", slots)
.put("availability", availability)
.build();
}
public static NodeStatus fromJson(JsonInput input) {
NodeId nodeId = null;
URI uri = null;
int maxSessions = 0;
Set<Slot> slots = null;
Availability availability = null;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "availability":
availability = input.read(Availability.class);
break;
case "id":
nodeId = input.read(NodeId.class);
break;
case "maxSessions":
maxSessions = input.read(Integer.class);
break;
case "slots":
slots = input.read(new TypeToken<Set<Slot>>(){}.getType());
break;
case "uri":
uri = input.read(URI.class);
break;
default:
input.skipValue();
break;
}
}
input.endObject();
return new NodeStatus(
nodeId,
uri,
maxSessions,
slots,
availability);
}
}
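
The review for this record suggests Stream.anyMatch for the new hasCapability method. A sketch of that rewrite; it short-circuits on the first supporting slot instead of filtering and counting them all, mirroring the style of the existing capacity checks:

public boolean hasCapability(Capabilities caps) {
  // anyMatch stops at the first slot that supports the requested capabilities.
  return slots.stream().anyMatch(slot -> slot.isSupporting(caps));
}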
| 1 | 18,174 | Prefer `Stream.anyMatch` instead of iterating over all slots. | SeleniumHQ-selenium | rb |
@@ -1,4 +1,6 @@
module Ncr
+ START_OF_NEW_6X_APPROVAL_POLICY = Time.zone.local(2016, 7, 5, 0, 0, 0)
+
class ApprovalManager
def initialize(work_order)
@work_order = work_order | 1 | module Ncr
class ApprovalManager
def initialize(work_order)
@work_order = work_order
end
def system_approvers
if %w(BA60 BA61).include?(work_order.expense_type)
ba_6x_approvers
else
[ba_80_approver]
end
end
def setup_approvals_and_observers
if work_order.requires_approval?
set_up_as_approvers
else
set_up_as_observers
end
end
private
attr_reader :work_order
delegate :proposal, to: :work_order
def set_up_as_approvers
original_step_users = proposal.reload.individual_steps.non_pending.map(&:user)
force_approvers(approvers)
notify_removed_step_users(original_step_users)
end
def set_up_as_observers
approvers.each do |user|
work_order.add_observer(user)
end
# skip state machine
proposal.update(status: "completed")
end
def approvers
system_approvers.unshift(work_order.approving_official)
end
# Generally shouldn't be called directly as it doesn't account for
# emergencies, or notify removed approvers
def force_approvers(users)
new_child_steps = users.map do |user|
proposal.existing_step_for(user) || Steps::Approval.new(user: user)
end
unless proposal.root_step && child_steps_unchanged?(proposal.root_step, new_child_steps)
proposal.root_step = Steps::Serial.new(child_steps: new_child_steps)
end
end
def child_steps_unchanged?(parent_step, new_steps)
old_steps = parent_step.child_steps
old_steps.size == new_steps.size && (old_steps & new_steps).size == old_steps.size
end
def notify_removed_step_users(original_step_users)
current_step_users = proposal.individual_steps.non_pending.map(&:user)
removed_step_users_to_notify = original_step_users - current_step_users
DispatchFinder.run(proposal).on_step_user_removal(removed_step_users_to_notify)
end
def ba_6x_approvers
results = []
if work_order.for_whsc_organization?
# no tier 1
elsif work_order.ba_6x_tier1_team?
results << Ncr::Mailboxes.ba61_tier1_budget_team
else
results << Ncr::Mailboxes.ba61_tier1_budget
end
results << Ncr::Mailboxes.ba61_tier2_budget
results
end
def ba_80_approver
if work_order.for_ool_organization?
Ncr::Mailboxes.ool_ba80_budget
else
Ncr::Mailboxes.ba80_budget
end
end
end
end
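
The review for this record proposes a feature flag instead of the hard-coded START_OF_NEW_6X_APPROVAL_POLICY cut-over date. A minimal sketch of that idea; the flag name and the ENV-based lookup are assumptions for illustration, since the project's actual feature-flag mechanism is not shown here:

module Ncr
  # Hypothetical flag helper: the ENV variable name is illustrative only;
  # the real project may read flags from its own configuration store.
  def self.new_6x_approval_policy_active?
    ENV["NEW_6X_APPROVAL_POLICY"] == "true"
  end
end

# Call sites would then check the flag rather than comparing against a date:
#   if Ncr.new_6x_approval_policy_active?
#     # apply the new BA60/BA61 approval chain
#   end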
| 1 | 17,718 | Can this just be a feature flag to check if it's on? This will give us flexibility on launch date (which could be turned on July 1st) and allow us to easily revert back in case the policy is reverted. | 18F-C2 | rb |
@@ -44,4 +44,5 @@ func (p Pin) Low() {
type ADC struct {
Pin Pin
+ Bus uint8
} | 1 | package machine
import "errors"
var (
ErrInvalidInputPin = errors.New("machine: invalid input pin")
ErrInvalidOutputPin = errors.New("machine: invalid output pin")
ErrInvalidClockPin = errors.New("machine: invalid clock pin")
ErrInvalidDataPin = errors.New("machine: invalid data pin")
ErrNoPinChangeChannel = errors.New("machine: no channel available for pin interrupt")
)
// PinMode sets the direction and pull mode of the pin. For example, PinOutput
// sets the pin as an output and PinInputPullup sets the pin as an input with a
// pull-up.
type PinMode uint8
type PinConfig struct {
Mode PinMode
}
// Pin is a single pin on a chip, which may be connected to other hardware
// devices. It can either be used directly as GPIO pin or it can be used in
// other peripherals like ADC, I2C, etc.
type Pin uint8
// NoPin explicitly indicates "not a pin". Use this pin if you want to leave one
// of the pins in a peripheral unconfigured (if supported by the hardware).
const NoPin = Pin(0xff)
// High sets this GPIO pin to high, assuming it has been configured as an output
// pin. It is hardware dependent (and often undefined) what happens if you set a
// pin to high that is not configured as an output pin.
func (p Pin) High() {
p.Set(true)
}
// Low sets this GPIO pin to low, assuming it has been configured as an output
// pin. It is hardware dependent (and often undefined) what happens if you set a
// pin to low that is not configured as an output pin.
func (p Pin) Low() {
p.Set(false)
}
type ADC struct {
Pin Pin
}
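
The review for this record points out that src/examples/adc has to be updated for the new Bus field. A sketch of the kind of change implied, assuming the example constructed the struct with an unkeyed composite literal (which stops compiling once a struct gains a field); machine.ADC0 is a placeholder pin constant that depends on the target board:

package main

import "machine"

func main() {
	// An unkeyed literal like machine.ADC{machine.ADC0} no longer compiles once
	// the Bus field exists; keyed fields keep the example building.
	sensor := machine.ADC{Pin: machine.ADC0, Bus: 0}
	_ = sensor // placeholder: the real example would configure and read the ADC
}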
| 1 | 12,156 | Due to this change, src/examples/adc needs to be modified | tinygo-org-tinygo | go |
@@ -19,6 +19,12 @@ import (
"unsafe"
)
+// singlePointer wraps an unsafe.Pointer and supports basic
+// load(), store(), clear(), and swapNil() operations.
+type singlePtr struct {
+ ptr unsafe.Pointer
+}
+
func (l *sortedLabels) Len() int {
return len(*l)
} | 1 | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"sync/atomic"
"unsafe"
)
func (l *sortedLabels) Len() int {
return len(*l)
}
func (l *sortedLabels) Swap(i, j int) {
(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
}
func (l *sortedLabels) Less(i, j int) bool {
return (*l)[i].Key < (*l)[j].Key
}
func (m *SDK) addPrimary(rec *record) {
for {
rec.next.primary.store(m.records.primary.load())
if atomic.CompareAndSwapPointer(
&m.records.primary.ptr,
rec.next.primary.ptr,
unsafe.Pointer(rec),
) {
return
}
}
}
func (m *SDK) addReclaim(rec *record) {
for {
rec.next.reclaim.store(m.records.reclaim.load())
if atomic.CompareAndSwapPointer(
&m.records.reclaim.ptr,
rec.next.reclaim.ptr,
unsafe.Pointer(rec),
) {
return
}
}
}
func (s *singlePtr) swapNil() *record {
for {
newValue := unsafe.Pointer(nil)
swapped := atomic.LoadPointer(&s.ptr)
if atomic.CompareAndSwapPointer(&s.ptr, swapped, newValue) {
return (*record)(swapped)
}
}
}
func (s *singlePtr) load() *record {
return (*record)(atomic.LoadPointer(&s.ptr))
}
func (s *singlePtr) store(r *record) {
atomic.StorePointer(&s.ptr, unsafe.Pointer(r))
}
func (s *singlePtr) clear() {
atomic.StorePointer(&s.ptr, unsafe.Pointer(nil))
}
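
The review for this record only asks that the doc comment and the type name agree. A sketch of the diff's declaration with the comment aligned to the singlePtr name:

// singlePtr wraps an unsafe.Pointer and supports basic
// load(), store(), clear(), and swapNil() operations.
type singlePtr struct {
	ptr unsafe.Pointer
}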
| 1 | 11,070 | `s/singlePointer/singlePtr` or please rename the type to `singlePointer`. | open-telemetry-opentelemetry-go | go |
@@ -577,7 +577,8 @@ bool nano::node_config::upgrade_json (unsigned version_a, nano::jsonconfig & jso
}
case 17:
{
- json.put ("vote_generator_delay", vote_generator_delay.count ()); // Update value
+ json.put ("active_elections_size", 10000); // Update value
+ json.put ("vote_generator_delay", 100); // Update value
json.put ("backup_before_upgrade", backup_before_upgrade);
json.put ("work_watcher_period", work_watcher_period.count ());
} | 1 | #include <nano/crypto_lib/random_pool.hpp>
#include <nano/lib/config.hpp>
#include <nano/lib/jsonconfig.hpp>
#include <nano/lib/rocksdbconfig.hpp>
#include <nano/lib/rpcconfig.hpp>
#include <nano/lib/tomlconfig.hpp>
#include <nano/node/nodeconfig.hpp>
// NOTE: to reduce compile times, this include can be replaced by more narrow includes
// once nano::network is factored out of node.{c|h}pp
#include <nano/node/node.hpp>
namespace
{
const char * preconfigured_peers_key = "preconfigured_peers";
const char * signature_checker_threads_key = "signature_checker_threads";
const char * pow_sleep_interval_key = "pow_sleep_interval";
const char * default_beta_peer_network = "peering-beta.nano.org";
const char * default_live_peer_network = "peering.nano.org";
}
nano::node_config::node_config () :
node_config (0, nano::logging ())
{
}
nano::node_config::node_config (uint16_t peering_port_a, nano::logging const & logging_a) :
peering_port (peering_port_a),
logging (logging_a)
{
// The default constructor passes 0 to indicate we should use the default port,
// which is determined at node startup based on active network.
if (peering_port == 0)
{
peering_port = network_params.network.default_node_port;
}
max_work_generate_difficulty = nano::difficulty::from_multiplier (max_work_generate_multiplier, network_params.network.publish_threshold);
switch (network_params.network.network ())
{
case nano::nano_networks::nano_test_network:
enable_voting = true;
preconfigured_representatives.push_back (network_params.ledger.genesis_account);
break;
case nano::nano_networks::nano_beta_network:
preconfigured_peers.push_back (default_beta_peer_network);
preconfigured_representatives.emplace_back ("259A4011E6CAD1069A97C02C3C1F2AAA32BC093C8D82EE1334F937A4BE803071");
preconfigured_representatives.emplace_back ("259A40656144FAA16D2A8516F7BE9C74A63C6CA399960EDB747D144ABB0F7ABD");
break;
case nano::nano_networks::nano_live_network:
preconfigured_peers.push_back (default_live_peer_network);
preconfigured_representatives.emplace_back ("A30E0A32ED41C8607AA9212843392E853FCBCB4E7CB194E35C94F07F91DE59EF");
preconfigured_representatives.emplace_back ("67556D31DDFC2A440BF6147501449B4CB9572278D034EE686A6BEE29851681DF");
preconfigured_representatives.emplace_back ("5C2FBB148E006A8E8BA7A75DD86C9FE00C83F5FFDBFD76EAA09531071436B6AF");
preconfigured_representatives.emplace_back ("AE7AC63990DAAAF2A69BF11C913B928844BF5012355456F2F164166464024B29");
preconfigured_representatives.emplace_back ("BD6267D6ECD8038327D2BCC0850BDF8F56EC0414912207E81BCF90DFAC8A4AAA");
preconfigured_representatives.emplace_back ("2399A083C600AA0572F5E36247D978FCFC840405F8D4B6D33161C0066A55F431");
preconfigured_representatives.emplace_back ("2298FAB7C61058E77EA554CB93EDEEDA0692CBFCC540AB213B2836B29029E23A");
preconfigured_representatives.emplace_back ("3FE80B4BC842E82C1C18ABFEEC47EA989E63953BC82AC411F304D13833D52A56");
break;
default:
assert (false);
break;
}
}
nano::error nano::node_config::serialize_toml (nano::tomlconfig & toml) const
{
toml.put ("peering_port", peering_port, "Node peering port\ntype:uint16");
toml.put ("bootstrap_fraction_numerator", bootstrap_fraction_numerator, "Change bootstrap threshold (online stake / 256 * bootstrap_fraction_numerator)\ntype:uint32");
toml.put ("receive_minimum", receive_minimum.to_string_dec (), "Minimum receive amount\ntype:string,amount,raw");
toml.put ("online_weight_minimum", online_weight_minimum.to_string_dec (), "Online weight minimum required to confirm block\ntype:string,amount,raw");
toml.put ("online_weight_quorum", online_weight_quorum, "Percentage of votes required to rollback blocks\ntype:uint64");
toml.put ("password_fanout", password_fanout, "Password fanout factor\ntype:uint64");
toml.put ("io_threads", io_threads, "Number of threads dedicated to I/O operations\ntype:uint64");
toml.put ("network_threads", network_threads, "Number of threads dedicated to processing network messages\ntype:uint64");
toml.put ("work_threads", work_threads, "Number of threads dedicated to CPU generated work. Defaults to all available CPU threads.\ntype:uint64");
toml.put ("signature_checker_threads", signature_checker_threads, "Number of additional threads dedicated to signature verification\ntype:uint64");
toml.put ("enable_voting", enable_voting, "Enable or disable voting. Enabling voting requires additional system resources.\ntype:bool");
toml.put ("bootstrap_connections", bootstrap_connections, "Number of outbound bootstrap connections. Must be a power of 2.\ntype:uint64");
toml.put ("bootstrap_connections_max", bootstrap_connections_max, "Maximum number of inbound bootstrap connections\ntype:uint64");
toml.put ("lmdb_max_dbs", lmdb_max_dbs, "Maximum open lmdb databases. Increase default if more than 100 wallets is required.\ntype:uint64");
toml.put ("block_processor_batch_max_time", block_processor_batch_max_time.count (), "The maximum time the block processor can process blocks at a time\ntype:milliseconds");
toml.put ("allow_local_peers", allow_local_peers, "Enable or disable local host peering\ntype:bool");
toml.put ("vote_minimum", vote_minimum.to_string_dec (), "Do not vote if delegated weight is under this threshold\ntype:string,amount,raw");
toml.put ("vote_generator_delay", vote_generator_delay.count (), "Delay before votes are sent to allow for better bundling of hashes in votes.\nHigh performance nodes may need slightly higher values to optimize vote bandwidth.\ntype:milliseconds");
toml.put ("vote_generator_threshold", vote_generator_threshold, "Number of bundled hashes required for an additional generator delay\ntype:uint64,[1..11]");
toml.put ("unchecked_cutoff_time", unchecked_cutoff_time.count (), "Number of seconds unchecked entry survives before being cleaned\ntype:seconds");
toml.put ("tcp_io_timeout", tcp_io_timeout.count (), "Timeout for TCP connect-, read- and write operations\ntype:seconds");
toml.put ("pow_sleep_interval", pow_sleep_interval.count (), "The amount to sleep after each batch of POW calculations. Reduces max CPU usage at the expense of a longer generation time.\ntype:nanoseconds");
toml.put ("external_address", external_address.to_string (), "The external address of this node (NAT). If not set, the node will request this information via UPnP.\ntype:string,ip");
toml.put ("external_port", external_port, "The external port number of this node (NAT). If not set, the node will request this information via UPnP.\ntype:uint16");
toml.put ("tcp_incoming_connections_max", tcp_incoming_connections_max, "Maximum number of incoming TCP connections\ntype:uint64");
toml.put ("use_memory_pools", use_memory_pools, "If true, allocate memory from memory pools. Enabling this may improve performance. Memory is never released to the OS.\ntype:bool");
toml.put ("confirmation_history_size", confirmation_history_size, "Maximum confirmation history size\ntype:uint64");
toml.put ("active_elections_size", active_elections_size, "Limits number of active elections before dropping will be considered (other conditions must also be satisfied)\ntype:uint64,[250..]");
toml.put ("bandwidth_limit", bandwidth_limit, "Outbound traffic limit in bytes/sec after which messages will be dropped\ntype:uint64");
toml.put ("conf_height_processor_batch_min_time", conf_height_processor_batch_min_time.count (), "Minimum write batching time when there are blocks pending confirmation height\ntype:milliseconds");
toml.put ("backup_before_upgrade", backup_before_upgrade, "Backup the ledger database before performing upgrades\ntype:bool");
toml.put ("work_watcher_period", work_watcher_period.count (), "Time between checks for confirmation and re-generating higher difficulty work if unconfirmed, for blocks in the work watcher.\ntype:seconds");
toml.put ("max_work_generate_multiplier", max_work_generate_multiplier, "Maximum allowed difficulty multiplier for work generation\ntype:double,[1..]");
toml.put ("frontiers_confirmation", serialize_frontiers_confirmation (frontiers_confirmation), "Mode for force frontiers confirmation\ntype:string");
auto work_peers_l (toml.create_array ("work_peers", "A list of \"address:port\" entries to identify work peers"));
for (auto i (work_peers.begin ()), n (work_peers.end ()); i != n; ++i)
{
work_peers_l->push_back (boost::str (boost::format ("%1%:%2%") % i->first % i->second));
}
auto preconfigured_peers_l (toml.create_array ("preconfigured_peers", "A list of \"address:port\" entries to identify preconfigured peers"));
for (auto i (preconfigured_peers.begin ()), n (preconfigured_peers.end ()); i != n; ++i)
{
preconfigured_peers_l->push_back (*i);
}
auto preconfigured_representatives_l (toml.create_array ("preconfigured_representatives", "A list of representative account addresses"));
for (auto i (preconfigured_representatives.begin ()), n (preconfigured_representatives.end ()); i != n; ++i)
{
preconfigured_representatives_l->push_back (i->to_account ());
}
nano::tomlconfig callback_l;
callback_l.put ("address", callback_address, "Callback address\ntype:string,ip");
callback_l.put ("port", callback_port, "Callback port number\ntype:uint16");
callback_l.put ("target", callback_target, "Callback target path\ntype:string,uri");
toml.put_child ("httpcallback", callback_l);
nano::tomlconfig logging_l;
logging.serialize_toml (logging_l);
toml.put_child ("logging", logging_l);
nano::tomlconfig websocket_l;
websocket_config.serialize_toml (websocket_l);
toml.put_child ("websocket", websocket_l);
nano::tomlconfig ipc_l;
ipc_config.serialize_toml (ipc_l);
toml.put_child ("ipc", ipc_l);
nano::tomlconfig diagnostics_l;
diagnostics_config.serialize_toml (diagnostics_l);
toml.put_child ("diagnostics", diagnostics_l);
nano::tomlconfig stat_l;
stat_config.serialize_toml (stat_l);
toml.put_child ("statistics", stat_l);
nano::tomlconfig rocksdb_l;
rocksdb_config.serialize_toml (rocksdb_l);
toml.put_child ("rocksdb", rocksdb_l);
return toml.get_error ();
}
nano::error nano::node_config::deserialize_toml (nano::tomlconfig & toml)
{
try
{
if (toml.has_key ("httpcallback"))
{
auto callback_l (toml.get_required_child ("httpcallback"));
callback_l.get<std::string> ("address", callback_address);
callback_l.get<uint16_t> ("port", callback_port);
callback_l.get<std::string> ("target", callback_target);
}
if (toml.has_key ("logging"))
{
auto logging_l (toml.get_required_child ("logging"));
logging.deserialize_toml (logging_l);
}
if (toml.has_key ("websocket"))
{
auto websocket_config_l (toml.get_required_child ("websocket"));
websocket_config.deserialize_toml (websocket_config_l);
}
if (toml.has_key ("ipc"))
{
auto ipc_config_l (toml.get_required_child ("ipc"));
ipc_config.deserialize_toml (ipc_config_l);
}
if (toml.has_key ("diagnostics"))
{
auto diagnostics_config_l (toml.get_required_child ("diagnostics"));
diagnostics_config.deserialize_toml (diagnostics_config_l);
}
if (toml.has_key ("statistics"))
{
auto stat_config_l (toml.get_required_child ("statistics"));
stat_config.deserialize_toml (stat_config_l);
}
if (toml.has_key ("rocksdb"))
{
auto rocksdb_config_l (toml.get_required_child ("rocksdb"));
rocksdb_config.deserialize_toml (rocksdb_config_l);
}
if (toml.has_key ("work_peers"))
{
work_peers.clear ();
toml.array_entries_required<std::string> ("work_peers", [this](std::string const & entry) {
auto port_position (entry.rfind (':'));
bool result = port_position == -1;
if (!result)
{
auto port_str (entry.substr (port_position + 1));
uint16_t port;
result |= parse_port (port_str, port);
if (!result)
{
auto address (entry.substr (0, port_position));
this->work_peers.emplace_back (address, port);
}
}
});
}
if (toml.has_key (preconfigured_peers_key))
{
preconfigured_peers.clear ();
toml.array_entries_required<std::string> (preconfigured_peers_key, [this](std::string entry) {
preconfigured_peers.push_back (entry);
});
}
if (toml.has_key ("preconfigured_representatives"))
{
preconfigured_representatives.clear ();
toml.array_entries_required<std::string> ("preconfigured_representatives", [this, &toml](std::string entry) {
nano::account representative (0);
if (representative.decode_account (entry))
{
toml.get_error ().set ("Invalid representative account: " + entry);
}
preconfigured_representatives.push_back (representative);
});
}
if (preconfigured_representatives.empty ())
{
toml.get_error ().set ("At least one representative account must be set");
}
auto receive_minimum_l (receive_minimum.to_string_dec ());
if (toml.has_key ("receive_minimum"))
{
receive_minimum_l = toml.get<std::string> ("receive_minimum");
}
if (receive_minimum.decode_dec (receive_minimum_l))
{
toml.get_error ().set ("receive_minimum contains an invalid decimal amount");
}
auto online_weight_minimum_l (online_weight_minimum.to_string_dec ());
if (toml.has_key ("online_weight_minimum"))
{
online_weight_minimum_l = toml.get<std::string> ("online_weight_minimum");
}
if (online_weight_minimum.decode_dec (online_weight_minimum_l))
{
toml.get_error ().set ("online_weight_minimum contains an invalid decimal amount");
}
auto vote_minimum_l (vote_minimum.to_string_dec ());
if (toml.has_key ("vote_minimum"))
{
vote_minimum_l = toml.get<std::string> ("vote_minimum");
}
if (vote_minimum.decode_dec (vote_minimum_l))
{
toml.get_error ().set ("vote_minimum contains an invalid decimal amount");
}
auto delay_l = vote_generator_delay.count ();
toml.get ("vote_generator_delay", delay_l);
vote_generator_delay = std::chrono::milliseconds (delay_l);
toml.get<unsigned> ("vote_generator_threshold", vote_generator_threshold);
auto block_processor_batch_max_time_l = block_processor_batch_max_time.count ();
toml.get ("block_processor_batch_max_time", block_processor_batch_max_time_l);
block_processor_batch_max_time = std::chrono::milliseconds (block_processor_batch_max_time_l);
auto unchecked_cutoff_time_l = static_cast<unsigned long> (unchecked_cutoff_time.count ());
toml.get ("unchecked_cutoff_time", unchecked_cutoff_time_l);
unchecked_cutoff_time = std::chrono::seconds (unchecked_cutoff_time_l);
auto tcp_io_timeout_l = static_cast<unsigned long> (tcp_io_timeout.count ());
toml.get ("tcp_io_timeout", tcp_io_timeout_l);
tcp_io_timeout = std::chrono::seconds (tcp_io_timeout_l);
toml.get<uint16_t> ("peering_port", peering_port);
toml.get<unsigned> ("bootstrap_fraction_numerator", bootstrap_fraction_numerator);
toml.get<unsigned> ("online_weight_quorum", online_weight_quorum);
toml.get<unsigned> ("password_fanout", password_fanout);
toml.get<unsigned> ("io_threads", io_threads);
toml.get<unsigned> ("work_threads", work_threads);
toml.get<unsigned> ("network_threads", network_threads);
toml.get<unsigned> ("bootstrap_connections", bootstrap_connections);
toml.get<unsigned> ("bootstrap_connections_max", bootstrap_connections_max);
toml.get<int> ("lmdb_max_dbs", lmdb_max_dbs);
toml.get<bool> ("enable_voting", enable_voting);
toml.get<bool> ("allow_local_peers", allow_local_peers);
toml.get<unsigned> (signature_checker_threads_key, signature_checker_threads);
toml.get<boost::asio::ip::address_v6> ("external_address", external_address);
toml.get<uint16_t> ("external_port", external_port);
toml.get<unsigned> ("tcp_incoming_connections_max", tcp_incoming_connections_max);
auto pow_sleep_interval_l (pow_sleep_interval.count ());
toml.get (pow_sleep_interval_key, pow_sleep_interval_l);
pow_sleep_interval = std::chrono::nanoseconds (pow_sleep_interval_l);
toml.get<bool> ("use_memory_pools", use_memory_pools);
toml.get<size_t> ("confirmation_history_size", confirmation_history_size);
toml.get<size_t> ("active_elections_size", active_elections_size);
toml.get<size_t> ("bandwidth_limit", bandwidth_limit);
toml.get<bool> ("backup_before_upgrade", backup_before_upgrade);
auto work_watcher_period_l = work_watcher_period.count ();
toml.get ("work_watcher_period", work_watcher_period_l);
work_watcher_period = std::chrono::seconds (work_watcher_period_l);
auto conf_height_processor_batch_min_time_l (conf_height_processor_batch_min_time.count ());
toml.get ("conf_height_processor_batch_min_time", conf_height_processor_batch_min_time_l);
conf_height_processor_batch_min_time = std::chrono::milliseconds (conf_height_processor_batch_min_time_l);
nano::network_constants network;
toml.get<double> ("max_work_generate_multiplier", max_work_generate_multiplier);
max_work_generate_difficulty = nano::difficulty::from_multiplier (max_work_generate_multiplier, network.publish_threshold);
if (toml.has_key ("frontiers_confirmation"))
{
auto frontiers_confirmation_l (toml.get<std::string> ("frontiers_confirmation"));
frontiers_confirmation = deserialize_frontiers_confirmation (frontiers_confirmation_l);
}
// Validate ranges
if (online_weight_quorum > 100)
{
toml.get_error ().set ("online_weight_quorum must be less than 100");
}
if (password_fanout < 16 || password_fanout > 1024 * 1024)
{
toml.get_error ().set ("password_fanout must be a number between 16 and 1048576");
}
if (io_threads == 0)
{
toml.get_error ().set ("io_threads must be non-zero");
}
if (active_elections_size <= 250 && !network.is_test_network ())
{
toml.get_error ().set ("active_elections_size must be greater than 250");
}
if (bandwidth_limit > std::numeric_limits<size_t>::max ())
{
toml.get_error ().set ("bandwidth_limit unbounded = 0, default = 5242880, max = 18446744073709551615");
}
if (vote_generator_threshold < 1 || vote_generator_threshold > 11)
{
toml.get_error ().set ("vote_generator_threshold must be a number between 1 and 11");
}
if (work_watcher_period < std::chrono::seconds (1))
{
toml.get_error ().set ("work_watcher_period must be equal or larger than 1");
}
if (max_work_generate_multiplier < 1)
{
toml.get_error ().set ("max_work_generate_multiplier must be greater than or equal to 1");
}
if (frontiers_confirmation == nano::frontiers_confirmation_mode::invalid)
{
toml.get_error ().set ("frontiers_confirmation value is invalid (available: always, auto, disabled)");
}
}
catch (std::runtime_error const & ex)
{
toml.get_error ().set (ex.what ());
}
return toml.get_error ();
}
nano::error nano::node_config::serialize_json (nano::jsonconfig & json) const
{
json.put ("version", json_version ());
json.put ("peering_port", peering_port);
json.put ("bootstrap_fraction_numerator", bootstrap_fraction_numerator);
json.put ("receive_minimum", receive_minimum.to_string_dec ());
nano::jsonconfig logging_l;
logging.serialize_json (logging_l);
json.put_child ("logging", logging_l);
nano::jsonconfig work_peers_l;
for (auto i (work_peers.begin ()), n (work_peers.end ()); i != n; ++i)
{
work_peers_l.push (boost::str (boost::format ("%1%:%2%") % i->first % i->second));
}
json.put_child ("work_peers", work_peers_l);
nano::jsonconfig preconfigured_peers_l;
for (auto i (preconfigured_peers.begin ()), n (preconfigured_peers.end ()); i != n; ++i)
{
preconfigured_peers_l.push (*i);
}
json.put_child (preconfigured_peers_key, preconfigured_peers_l);
nano::jsonconfig preconfigured_representatives_l;
for (auto i (preconfigured_representatives.begin ()), n (preconfigured_representatives.end ()); i != n; ++i)
{
preconfigured_representatives_l.push (i->to_account ());
}
json.put_child ("preconfigured_representatives", preconfigured_representatives_l);
json.put ("online_weight_minimum", online_weight_minimum.to_string_dec ());
json.put ("online_weight_quorum", online_weight_quorum);
json.put ("password_fanout", password_fanout);
json.put ("io_threads", io_threads);
json.put ("network_threads", network_threads);
json.put ("work_threads", work_threads);
json.put (signature_checker_threads_key, signature_checker_threads);
json.put ("enable_voting", enable_voting);
json.put ("bootstrap_connections", bootstrap_connections);
json.put ("bootstrap_connections_max", bootstrap_connections_max);
json.put ("callback_address", callback_address);
json.put ("callback_port", callback_port);
json.put ("callback_target", callback_target);
json.put ("lmdb_max_dbs", lmdb_max_dbs);
json.put ("block_processor_batch_max_time", block_processor_batch_max_time.count ());
json.put ("allow_local_peers", allow_local_peers);
json.put ("vote_minimum", vote_minimum.to_string_dec ());
json.put ("vote_generator_delay", vote_generator_delay.count ());
json.put ("vote_generator_threshold", vote_generator_threshold);
json.put ("unchecked_cutoff_time", unchecked_cutoff_time.count ());
json.put ("tcp_io_timeout", tcp_io_timeout.count ());
json.put ("pow_sleep_interval", pow_sleep_interval.count ());
json.put ("external_address", external_address.to_string ());
json.put ("external_port", external_port);
json.put ("tcp_incoming_connections_max", tcp_incoming_connections_max);
json.put ("use_memory_pools", use_memory_pools);
nano::jsonconfig websocket_l;
websocket_config.serialize_json (websocket_l);
json.put_child ("websocket", websocket_l);
nano::jsonconfig ipc_l;
ipc_config.serialize_json (ipc_l);
json.put_child ("ipc", ipc_l);
nano::jsonconfig diagnostics_l;
diagnostics_config.serialize_json (diagnostics_l);
json.put_child ("diagnostics", diagnostics_l);
json.put ("confirmation_history_size", confirmation_history_size);
json.put ("active_elections_size", active_elections_size);
json.put ("bandwidth_limit", bandwidth_limit);
json.put ("backup_before_upgrade", backup_before_upgrade);
json.put ("work_watcher_period", work_watcher_period.count ());
return json.get_error ();
}
bool nano::node_config::upgrade_json (unsigned version_a, nano::jsonconfig & json)
{
json.put ("version", json_version ());
switch (version_a)
{
case 1:
{
auto reps_l (json.get_required_child ("preconfigured_representatives"));
nano::jsonconfig reps;
reps_l.array_entries<std::string> ([&reps](std::string entry) {
nano::account account;
account.decode_account (entry);
reps.push (account.to_account ());
});
json.replace_child ("preconfigured_representatives", reps);
}
case 2:
{
json.put ("inactive_supply", nano::uint128_union (0).to_string_dec ());
json.put ("password_fanout", std::to_string (1024));
json.put ("io_threads", std::to_string (io_threads));
json.put ("work_threads", std::to_string (work_threads));
}
case 3:
json.erase ("receive_minimum");
json.put ("receive_minimum", nano::xrb_ratio.convert_to<std::string> ());
case 4:
json.erase ("receive_minimum");
json.put ("receive_minimum", nano::xrb_ratio.convert_to<std::string> ());
case 5:
json.put ("enable_voting", enable_voting);
json.erase ("packet_delay_microseconds");
json.erase ("rebroadcast_delay");
json.erase ("creation_rebroadcast");
case 6:
json.put ("bootstrap_connections", 16);
json.put ("callback_address", "");
json.put ("callback_port", 0);
json.put ("callback_target", "");
case 7:
json.put ("lmdb_max_dbs", 128);
case 8:
json.put ("bootstrap_connections_max", "64");
case 9:
json.put ("state_block_parse_canary", nano::block_hash (0).to_string ());
json.put ("state_block_generate_canary", nano::block_hash (0).to_string ());
case 10:
json.put ("online_weight_minimum", online_weight_minimum.to_string_dec ());
json.put ("online_weight_quorom", std::to_string (online_weight_quorum));
json.erase ("inactive_supply");
case 11:
{
// Rename
std::string online_weight_quorum_l;
json.get<std::string> ("online_weight_quorom", online_weight_quorum_l);
json.erase ("online_weight_quorom");
json.put ("online_weight_quorum", online_weight_quorum_l);
}
case 12:
json.erase ("state_block_parse_canary");
json.erase ("state_block_generate_canary");
case 13:
json.put ("generate_hash_votes_at", 0);
case 14:
json.put ("network_threads", std::to_string (network_threads));
json.erase ("generate_hash_votes_at");
json.put ("block_processor_batch_max_time", block_processor_batch_max_time.count ());
case 15:
{
json.put ("allow_local_peers", allow_local_peers);
// Update to the new preconfigured_peers url for rebrand if it is found (rai -> nano)
auto peers_l (json.get_required_child (preconfigured_peers_key));
nano::jsonconfig peers;
peers_l.array_entries<std::string> ([&peers](std::string entry) {
if (entry == "rai-beta.raiblocks.net")
{
entry = default_beta_peer_network;
}
else if (entry == "rai.raiblocks.net")
{
entry = default_live_peer_network;
}
peers.push (std::move (entry));
});
json.replace_child (preconfigured_peers_key, peers);
json.put ("vote_minimum", vote_minimum.to_string_dec ());
nano::jsonconfig ipc_l;
ipc_config.serialize_json (ipc_l);
json.put_child ("ipc", ipc_l);
json.put (signature_checker_threads_key, signature_checker_threads);
json.put ("unchecked_cutoff_time", unchecked_cutoff_time.count ());
}
case 16:
{
nano::jsonconfig websocket_l;
websocket_config.serialize_json (websocket_l);
json.put_child ("websocket", websocket_l);
nano::jsonconfig diagnostics_l;
diagnostics_config.serialize_json (diagnostics_l);
json.put_child ("diagnostics", diagnostics_l);
json.put ("tcp_io_timeout", tcp_io_timeout.count ());
json.put (pow_sleep_interval_key, pow_sleep_interval.count ());
json.put ("external_address", external_address.to_string ());
json.put ("external_port", external_port);
json.put ("tcp_incoming_connections_max", tcp_incoming_connections_max);
json.put ("vote_generator_delay", vote_generator_delay.count ());
json.put ("vote_generator_threshold", vote_generator_threshold);
json.put ("use_memory_pools", use_memory_pools);
json.put ("confirmation_history_size", confirmation_history_size);
json.put ("active_elections_size", active_elections_size);
json.put ("bandwidth_limit", bandwidth_limit);
json.put ("conf_height_processor_batch_min_time", conf_height_processor_batch_min_time.count ());
}
case 17:
{
json.put ("vote_generator_delay", vote_generator_delay.count ()); // Update value
json.put ("backup_before_upgrade", backup_before_upgrade);
json.put ("work_watcher_period", work_watcher_period.count ());
}
case 18:
break;
default:
throw std::runtime_error ("Unknown node_config version");
}
return version_a < json_version ();
}
nano::error nano::node_config::deserialize_json (bool & upgraded_a, nano::jsonconfig & json)
{
try
{
auto version_l (json.get_optional<unsigned> ("version"));
if (!version_l)
{
version_l = 1;
json.put ("version", version_l);
auto work_peers_l (json.get_optional_child ("work_peers"));
if (!work_peers_l)
{
nano::jsonconfig empty;
json.put_child ("work_peers", empty);
}
upgraded_a = true;
}
upgraded_a |= upgrade_json (version_l.get (), json);
auto logging_l (json.get_required_child ("logging"));
logging.deserialize_json (upgraded_a, logging_l);
work_peers.clear ();
auto work_peers_l (json.get_required_child ("work_peers"));
work_peers_l.array_entries<std::string> ([this](std::string entry) {
auto port_position (entry.rfind (':'));
bool result = port_position == -1;
if (!result)
{
auto port_str (entry.substr (port_position + 1));
uint16_t port;
result |= parse_port (port_str, port);
if (!result)
{
auto address (entry.substr (0, port_position));
this->work_peers.push_back (std::make_pair (address, port));
}
}
});
auto preconfigured_peers_l (json.get_required_child (preconfigured_peers_key));
preconfigured_peers.clear ();
preconfigured_peers_l.array_entries<std::string> ([this](std::string entry) {
preconfigured_peers.push_back (entry);
});
auto preconfigured_representatives_l (json.get_required_child ("preconfigured_representatives"));
preconfigured_representatives.clear ();
preconfigured_representatives_l.array_entries<std::string> ([this, &json](std::string entry) {
nano::account representative (0);
if (representative.decode_account (entry))
{
json.get_error ().set ("Invalid representative account: " + entry);
}
preconfigured_representatives.push_back (representative);
});
if (preconfigured_representatives.empty ())
{
json.get_error ().set ("At least one representative account must be set");
}
auto stat_config_l (json.get_optional_child ("statistics"));
if (stat_config_l)
{
stat_config.deserialize_json (stat_config_l.get ());
}
auto receive_minimum_l (json.get<std::string> ("receive_minimum"));
if (receive_minimum.decode_dec (receive_minimum_l))
{
json.get_error ().set ("receive_minimum contains an invalid decimal amount");
}
auto online_weight_minimum_l (json.get<std::string> ("online_weight_minimum"));
if (online_weight_minimum.decode_dec (online_weight_minimum_l))
{
json.get_error ().set ("online_weight_minimum contains an invalid decimal amount");
}
auto vote_minimum_l (json.get<std::string> ("vote_minimum"));
if (vote_minimum.decode_dec (vote_minimum_l))
{
json.get_error ().set ("vote_minimum contains an invalid decimal amount");
}
auto delay_l = vote_generator_delay.count ();
json.get ("vote_generator_delay", delay_l);
vote_generator_delay = std::chrono::milliseconds (delay_l);
json.get<unsigned> ("vote_generator_threshold", vote_generator_threshold);
auto block_processor_batch_max_time_l (json.get<unsigned long> ("block_processor_batch_max_time"));
block_processor_batch_max_time = std::chrono::milliseconds (block_processor_batch_max_time_l);
auto unchecked_cutoff_time_l = static_cast<unsigned long> (unchecked_cutoff_time.count ());
json.get ("unchecked_cutoff_time", unchecked_cutoff_time_l);
unchecked_cutoff_time = std::chrono::seconds (unchecked_cutoff_time_l);
auto tcp_io_timeout_l = static_cast<unsigned long> (tcp_io_timeout.count ());
json.get ("tcp_io_timeout", tcp_io_timeout_l);
tcp_io_timeout = std::chrono::seconds (tcp_io_timeout_l);
auto ipc_config_l (json.get_optional_child ("ipc"));
if (ipc_config_l)
{
ipc_config.deserialize_json (upgraded_a, ipc_config_l.get ());
}
auto websocket_config_l (json.get_optional_child ("websocket"));
if (websocket_config_l)
{
websocket_config.deserialize_json (websocket_config_l.get ());
}
auto diagnostics_config_l (json.get_optional_child ("diagnostics"));
if (diagnostics_config_l)
{
diagnostics_config.deserialize_json (diagnostics_config_l.get ());
}
json.get<uint16_t> ("peering_port", peering_port);
json.get<unsigned> ("bootstrap_fraction_numerator", bootstrap_fraction_numerator);
json.get<unsigned> ("online_weight_quorum", online_weight_quorum);
json.get<unsigned> ("password_fanout", password_fanout);
json.get<unsigned> ("io_threads", io_threads);
json.get<unsigned> ("work_threads", work_threads);
json.get<unsigned> ("network_threads", network_threads);
json.get<unsigned> ("bootstrap_connections", bootstrap_connections);
json.get<unsigned> ("bootstrap_connections_max", bootstrap_connections_max);
json.get<std::string> ("callback_address", callback_address);
json.get<uint16_t> ("callback_port", callback_port);
json.get<std::string> ("callback_target", callback_target);
json.get<int> ("lmdb_max_dbs", lmdb_max_dbs);
json.get<bool> ("enable_voting", enable_voting);
json.get<bool> ("allow_local_peers", allow_local_peers);
json.get<unsigned> (signature_checker_threads_key, signature_checker_threads);
json.get<boost::asio::ip::address_v6> ("external_address", external_address);
json.get<uint16_t> ("external_port", external_port);
json.get<unsigned> ("tcp_incoming_connections_max", tcp_incoming_connections_max);
auto pow_sleep_interval_l (pow_sleep_interval.count ());
json.get (pow_sleep_interval_key, pow_sleep_interval_l);
pow_sleep_interval = std::chrono::nanoseconds (pow_sleep_interval_l);
json.get<bool> ("use_memory_pools", use_memory_pools);
json.get<size_t> ("confirmation_history_size", confirmation_history_size);
json.get<size_t> ("active_elections_size", active_elections_size);
json.get<size_t> ("bandwidth_limit", bandwidth_limit);
json.get<bool> ("backup_before_upgrade", backup_before_upgrade);
auto work_watcher_period_l = work_watcher_period.count ();
json.get ("work_watcher_period", work_watcher_period_l);
work_watcher_period = std::chrono::seconds (work_watcher_period_l);
auto conf_height_processor_batch_min_time_l (conf_height_processor_batch_min_time.count ());
json.get ("conf_height_processor_batch_min_time", conf_height_processor_batch_min_time_l);
conf_height_processor_batch_min_time = std::chrono::milliseconds (conf_height_processor_batch_min_time_l);
nano::network_constants network;
// Validate ranges
if (online_weight_quorum > 100)
{
json.get_error ().set ("online_weight_quorum must be less than 100");
}
if (password_fanout < 16 || password_fanout > 1024 * 1024)
{
json.get_error ().set ("password_fanout must be a number between 16 and 1048576");
}
if (io_threads == 0)
{
json.get_error ().set ("io_threads must be non-zero");
}
if (active_elections_size <= 250 && !network.is_test_network ())
{
json.get_error ().set ("active_elections_size must be greater than 250");
}
if (bandwidth_limit > std::numeric_limits<size_t>::max ())
{
json.get_error ().set ("bandwidth_limit unbounded = 0, default = 5242880, max = 18446744073709551615");
}
if (vote_generator_threshold < 1 || vote_generator_threshold > 11)
{
json.get_error ().set ("vote_generator_threshold must be a number between 1 and 11");
}
if (work_watcher_period < std::chrono::seconds (1))
{
json.get_error ().set ("work_watcher_period must be equal or larger than 1");
}
}
catch (std::runtime_error const & ex)
{
json.get_error ().set (ex.what ());
}
return json.get_error ();
}
std::string nano::node_config::serialize_frontiers_confirmation (nano::frontiers_confirmation_mode mode_a) const
{
switch (mode_a)
{
case nano::frontiers_confirmation_mode::always:
return "always";
case nano::frontiers_confirmation_mode::automatic:
return "auto";
case nano::frontiers_confirmation_mode::disabled:
return "disabled";
default:
return "auto";
}
}
nano::frontiers_confirmation_mode nano::node_config::deserialize_frontiers_confirmation (std::string const & string_a)
{
if (string_a == "always")
{
return nano::frontiers_confirmation_mode::always;
}
else if (string_a == "auto")
{
return nano::frontiers_confirmation_mode::automatic;
}
else if (string_a == "disabled")
{
return nano::frontiers_confirmation_mode::disabled;
}
else
{
return nano::frontiers_confirmation_mode::invalid;
}
}
nano::account nano::node_config::random_representative () const
{
assert (!preconfigured_representatives.empty ());
size_t index (nano::random_pool::generate_word32 (0, static_cast<CryptoPP::word32> (preconfigured_representatives.size () - 1)));
auto result (preconfigured_representatives[index]);
return result;
}
| 1 | 15,925 | Do we need it? If right now it's toml | nanocurrency-nano-node | cpp |
@@ -22,6 +22,16 @@ options._catchError = function(error, newVNode, oldVNode) {
oldCatchError(error, newVNode, oldVNode);
};
+const oldUnmount = options.unmount;
+options.unmount = function(vnode) {
+ /** @type {import('./internal').Component} */
+ const component = vnode._component;
+ if (component && component._onResolve) {
+ component._onResolve();
+ }
+ if (oldUnmount) oldUnmount(vnode);
+};
+
function detachedClone(vnode, detachedParent, parentDom) {
if (vnode) {
if (vnode._component && vnode._component.__hooks) { | 1 | import { Component, createElement, options, Fragment } from 'preact';
import { assign } from './util';
const oldCatchError = options._catchError;
options._catchError = function(error, newVNode, oldVNode) {
if (error.then) {
/** @type {import('./internal').Component} */
let component;
let vnode = newVNode;
for (; (vnode = vnode._parent); ) {
if ((component = vnode._component) && component._childDidSuspend) {
if (newVNode._dom == null) {
newVNode._dom = oldVNode._dom;
newVNode._children = oldVNode._children;
}
// Don't call oldCatchError if we found a Suspense
return component._childDidSuspend(error, newVNode);
}
}
}
oldCatchError(error, newVNode, oldVNode);
};
function detachedClone(vnode, detachedParent, parentDom) {
if (vnode) {
if (vnode._component && vnode._component.__hooks) {
vnode._component.__hooks._list.forEach(effect => {
if (typeof effect._cleanup == 'function') effect._cleanup();
});
vnode._component.__hooks = null;
}
vnode = assign({}, vnode);
if (vnode._component != null) {
if (vnode._component._parentDom === parentDom) {
vnode._component._parentDom = detachedParent;
}
vnode._component = null;
}
vnode._children =
vnode._children &&
vnode._children.map(child =>
detachedClone(child, detachedParent, parentDom)
);
}
return vnode;
}
function removeOriginal(vnode, detachedParent, originalParent) {
if (vnode) {
vnode._original = null;
vnode._children =
vnode._children &&
vnode._children.map(child =>
removeOriginal(child, detachedParent, originalParent)
);
if (vnode._component) {
if (vnode._component._parentDom === detachedParent) {
if (vnode._dom) {
originalParent.insertBefore(vnode._dom, vnode._nextDom);
}
vnode._component._force = true;
vnode._component._parentDom = originalParent;
}
}
}
return vnode;
}
// having custom inheritance instead of a class here saves a lot of bytes
export function Suspense() {
// we do not call super here to golf some bytes...
this._pendingSuspensionCount = 0;
this._suspenders = null;
this._detachOnNextRender = null;
}
// Things we do here to save some bytes but are not proper JS inheritance:
// - call `new Component()` as the prototype
// - do not set `Suspense.prototype.constructor` to `Suspense`
Suspense.prototype = new Component();
/**
* @this {import('./internal').SuspenseComponent}
* @param {Promise} promise The thrown promise
* @param {import('./internal').VNode<any, any>} suspendingVNode The suspending component
*/
Suspense.prototype._childDidSuspend = function(promise, suspendingVNode) {
const suspendingComponent = suspendingVNode._component;
/** @type {import('./internal').SuspenseComponent} */
const c = this;
if (c._suspenders == null) {
c._suspenders = [];
}
c._suspenders.push(suspendingComponent);
const resolve = suspended(c._vnode);
let resolved = false;
const onResolved = () => {
if (resolved) return;
resolved = true;
suspendingComponent.componentWillUnmount =
suspendingComponent._suspendedComponentWillUnmount;
if (resolve) {
resolve(onSuspensionComplete);
} else {
onSuspensionComplete();
}
};
suspendingComponent._suspendedComponentWillUnmount =
suspendingComponent.componentWillUnmount;
suspendingComponent.componentWillUnmount = () => {
onResolved();
if (suspendingComponent._suspendedComponentWillUnmount) {
suspendingComponent._suspendedComponentWillUnmount();
}
};
const onSuspensionComplete = () => {
if (!--c._pendingSuspensionCount) {
// If the suspension was during hydration we don't need to restore the
// suspended children into the _children array
if (c.state._suspended) {
const suspendedVNode = c.state._suspended;
c._vnode._children[0] = removeOriginal(
suspendedVNode,
suspendedVNode._component._parentDom,
suspendedVNode._component._originalParentDom
);
}
c.setState({ _suspended: (c._detachOnNextRender = null) });
let suspended;
while ((suspended = c._suspenders.pop())) {
suspended.forceUpdate();
}
}
};
/**
* We do not set `suspended: true` during hydration because we want the actual markup
* to remain on screen and hydrate it when the suspense actually gets resolved.
	 * In non-hydration cases, the usual fallback -> component flow occurs.
*/
const wasHydrating = suspendingVNode._hydrating === true;
if (!c._pendingSuspensionCount++ && !wasHydrating) {
c.setState({ _suspended: (c._detachOnNextRender = c._vnode._children[0]) });
}
promise.then(onResolved, onResolved);
};
Suspense.prototype.componentWillUnmount = function() {
this._suspenders = [];
};
/**
* @this {import('./internal').SuspenseComponent}
* @param {import('./internal').SuspenseComponent["props"]} props
* @param {import('./internal').SuspenseState} state
*/
Suspense.prototype.render = function(props, state) {
if (this._detachOnNextRender) {
// When the Suspense's _vnode was created by a call to createVNode
// (i.e. due to a setState further up in the tree)
		// its _children prop is null; in this case we "forget" about the parked vnodes to detach
if (this._vnode._children) {
const detachedParent = document.createElement('div');
const detachedComponent = this._vnode._children[0]._component;
this._vnode._children[0] = detachedClone(
this._detachOnNextRender,
detachedParent,
(detachedComponent._originalParentDom = detachedComponent._parentDom)
);
}
this._detachOnNextRender = null;
}
// Wrap fallback tree in a VNode that prevents itself from being marked as aborting mid-hydration:
/** @type {import('./internal').VNode} */
const fallback =
state._suspended && createElement(Fragment, null, props.fallback);
if (fallback) fallback._hydrating = null;
return [
createElement(Fragment, null, state._suspended ? null : props.children),
fallback
];
};
/**
* Checks and calls the parent component's _suspended method, passing in the
* suspended vnode. This is a way for a parent (e.g. SuspenseList) to get notified
* that one of its children/descendants suspended.
*
* The parent MAY return a callback. The callback will get called when the
* suspension resolves, notifying the parent of the fact.
 * Moreover, the callback receives a function, `unsuspend`, as a parameter. The resolved
* child descendant will not actually get unsuspended until `unsuspend` gets called.
* This is a way for the parent to delay unsuspending.
*
* If the parent does not return a callback then the resolved vnode
* gets unsuspended immediately when it resolves.
*
* @param {import('./internal').VNode} vnode
* @returns {((unsuspend: () => void) => void)?}
*/
export function suspended(vnode) {
/** @type {import('./internal').Component} */
let component = vnode._parent._component;
return component && component._suspended && component._suspended(vnode);
}
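// A minimal, hypothetical sketch (not part of this module) of the protocol
// described above: a parent such as SuspenseList can implement `_suspended`
// and return a callback that parks `unsuspend`, so resolved children are only
// revealed when the parent decides. `MyList` and `_reveals` are illustrative
// names, not identifiers from this codebase.
//
// MyList.prototype._suspended = function(childVNode) {
// 	this._reveals = this._reveals || [];
// 	return unsuspend => {
// 		// calling unsuspend() is what actually lets the resolved child render
// 		this._reveals.push(unsuspend);
// 	};
// };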
export function lazy(loader) {
let prom;
let component;
let error;
function Lazy(props) {
if (!prom) {
prom = loader();
prom.then(
exports => {
component = exports.default || exports;
},
e => {
error = e;
}
);
}
if (error) {
throw error;
}
if (!component) {
throw prom;
}
return createElement(component, props);
}
Lazy.displayName = 'Lazy';
Lazy._forwarded = true;
return Lazy;
}
| 1 | 16,536 | You are never calling oldUnmount, this could lead to a plugin chain failing. | preactjs-preact | js |
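Preact plugins cooperate by chaining `options` hooks: each plugin saves the previous hook and calls it from its own override, so overwriting a hook without invoking the old one silently disables every plugin installed before it. A minimal, hypothetical sketch of that convention (identifier names are illustrative, not taken from the patch):

const prevUnmount = options.unmount;
options.unmount = vnode => {
	// plugin-specific cleanup goes here ...
	if (prevUnmount) prevUnmount(vnode); // keep the rest of the chain running
};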
@@ -119,7 +119,9 @@ public class MetaUtils {
name = "Table" + tid;
}
return new TiTableInfo(
- tid, CIStr.newCIStr(name), "", "", pkHandle, columns, indices, "", 0, 0, 0, 0);
+ tid, CIStr.newCIStr(name), "", "", pkHandle, columns,
+ indices, "", 0, 0, 0
+ , 0, null);
}
}
| 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.meta;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;
import com.pingcap.tikv.GrpcUtils;
import com.pingcap.tikv.KVMockServer;
import com.pingcap.tikv.PDMockServer;
import com.pingcap.tikv.codec.Codec.BytesCodec;
import com.pingcap.tikv.codec.Codec.IntegerCodec;
import com.pingcap.tikv.codec.CodecDataOutput;
import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.kvproto.Metapb;
import com.pingcap.tikv.types.DataType;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
public class MetaUtils {
public static class TableBuilder {
static long autoId = 1;
private static long newId() {
return autoId++;
}
public static TableBuilder newBuilder() {
return new TableBuilder();
}
private boolean pkHandle;
private String name;
private List<TiColumnInfo> columns = new ArrayList<>();
private List<TiIndexInfo> indices = new ArrayList<>();
private Long tid = null;
public TableBuilder() {}
public TableBuilder name(String name) {
this.name = name;
return this;
}
public TableBuilder tableId(long id) {
this.tid = id;
return this;
}
public TableBuilder addColumn(String name, DataType type) {
return addColumn(name, type, false);
}
public TableBuilder addColumn(String name, DataType type, boolean pk) {
for (TiColumnInfo c : columns) {
if (c.matchName(name)) {
throw new TiClientInternalException("duplicated name: " + name);
}
}
TiColumnInfo col = new TiColumnInfo(newId(), name, columns.size(), type, pk);
columns.add(col);
return this;
}
public TableBuilder appendIndex(long iid, String indexName, List<String> colNames, boolean isPk) {
List<TiIndexColumn> indexCols =
colNames
.stream()
.map(name -> columns.stream().filter(c -> c.matchName(name)).findFirst())
.flatMap(col -> col.isPresent() ? Stream.of(col.get()) : Stream.empty())
.map(TiColumnInfo::toIndexColumn)
.collect(Collectors.toList());
TiIndexInfo index =
new TiIndexInfo(
iid,
CIStr.newCIStr(indexName),
CIStr.newCIStr(name),
ImmutableList.copyOf(indexCols),
false,
isPk,
SchemaState.StatePublic.getStateCode(),
"",
IndexType.IndexTypeBtree.getTypeCode(),
false);
indices.add(index);
return this;
}
public TableBuilder appendIndex(String indexName, List<String> colNames, boolean isPk) {
return appendIndex(newId(), indexName, colNames, isPk);
}
public TableBuilder setPkHandle(boolean pkHandle) {
this.pkHandle = pkHandle;
return this;
}
public TiTableInfo build() {
if (tid == null) {
tid = newId();
}
if (name == null) {
name = "Table" + tid;
}
return new TiTableInfo(
tid, CIStr.newCIStr(name), "", "", pkHandle, columns, indices, "", 0, 0, 0, 0);
}
}
public static class MetaMockHelper {
public static final String LOCAL_ADDR = "127.0.0.1";
public static int MEMBER_ID = 1;
public static int STORE_ID = 1;
public static Metapb.Region region =
Metapb.Region.newBuilder()
.setRegionEpoch(Metapb.RegionEpoch.newBuilder().setConfVer(1).setVersion(1))
.setId(1)
.setStartKey(ByteString.EMPTY)
.setEndKey(ByteString.EMPTY)
.addPeers(Metapb.Peer.newBuilder().setId(1).setStoreId(1))
.build();
public MetaMockHelper(PDMockServer pdServer, KVMockServer kvServer) {
this.kvServer = kvServer;
this.pdServer = pdServer;
}
private KVMockServer kvServer;
private PDMockServer pdServer;
public void preparePDForRegionRead() {
pdServer.addGetMemberResp(
GrpcUtils.makeGetMembersResponse(
pdServer.getClusterId(),
GrpcUtils.makeMember(MEMBER_ID, "http://" + LOCAL_ADDR + ":" + pdServer.port)));
pdServer.addGetStoreResp(
GrpcUtils.makeGetStoreResponse(
pdServer.getClusterId(),
GrpcUtils.makeStore(
STORE_ID,
LOCAL_ADDR + ":" + kvServer.getPort(),
Metapb.StoreState.Up)));
pdServer.addGetRegionResp(
GrpcUtils.makeGetRegionResponse(
pdServer.getClusterId(),
region));
}
private ByteString getDBKey(long id) {
CodecDataOutput cdo = new CodecDataOutput();
cdo.write(new byte[] {'m'});
BytesCodec.writeBytes(cdo, "DBs".getBytes());
IntegerCodec.writeULong(cdo, 'h');
BytesCodec.writeBytes(cdo, String.format("DB:%d", id).getBytes());
return cdo.toByteString();
}
public void addDatabase(long id, String name) {
String dbJson = String.format("{\n"
+ " \"id\":%d,\n"
+ " \"db_name\":{\"O\":\"%s\",\"L\":\"%s\"},\n"
+ " \"charset\":\"utf8\",\"collate\":\"utf8_bin\",\"state\":5\n"
+ "}", id, name, name.toLowerCase());
kvServer.put(getDBKey(id), ByteString.copyFromUtf8(dbJson));
}
public void dropDatabase(long id) {
kvServer.remove(getDBKey(id));
}
private ByteString getKeyForTable(long dbId, long tableId) {
ByteString dbKey = ByteString.copyFrom(String.format("%s:%d", "DB", dbId).getBytes());
ByteString tableKey = ByteString.copyFrom(String.format("%s:%d", "Table", tableId).getBytes());
CodecDataOutput cdo = new CodecDataOutput();
cdo.write(new byte[] {'m'});
BytesCodec.writeBytes(cdo, dbKey.toByteArray());
IntegerCodec.writeULong(cdo, 'h');
BytesCodec.writeBytes(cdo, tableKey.toByteArray());
return cdo.toByteString();
}
private ByteString getSchemaVersionKey() {
CodecDataOutput cdo = new CodecDataOutput();
cdo.write(new byte[] {'m'});
BytesCodec.writeBytes(cdo, "SchemaVersionKey".getBytes());
IntegerCodec.writeULong(cdo, 's');
return cdo.toByteString();
}
public void setSchemaVersion(long version) {
CodecDataOutput cdo = new CodecDataOutput();
cdo.write(new byte[] {'m'});
BytesCodec.writeBytes(cdo, "SchemaVersionKey".getBytes());
IntegerCodec.writeULong(cdo, 's');
kvServer.put(getSchemaVersionKey(), ByteString.copyFromUtf8(String.format("%d", version)));
}
public void addTable(int dbId, int tableId, String tableName) {
String tableJson = String.format(
"\n"
+ "{\n"
+ " \"id\": %d,\n"
+ " \"name\": {\n"
+ " \"O\": \"%s\",\n"
+ " \"L\": \"%s\"\n"
+ " },\n"
+ " \"charset\": \"\",\n"
+ " \"collate\": \"\",\n"
+ " \"cols\": [\n"
+ " {\n"
+ " \"id\": 1,\n"
+ " \"name\": {\n"
+ " \"O\": \"c1\",\n"
+ " \"L\": \"c1\"\n"
+ " },\n"
+ " \"offset\": 0,\n"
+ " \"origin_default\": null,\n"
+ " \"default\": null,\n"
+ " \"type\": {\n"
+ " \"Tp\": 3,\n"
+ " \"Flag\": 139,\n"
+ " \"Flen\": 11,\n"
+ " \"Decimal\": -1,\n"
+ " \"Charset\": \"binary\",\n"
+ " \"Collate\": \"binary\",\n"
+ " \"Elems\": null\n"
+ " },\n"
+ " \"state\": 5,\n"
+ " \"comment\": \"\"\n"
+ " }\n"
+ " ],\n"
+ " \"index_info\": [],\n"
+ " \"fk_info\": null,\n"
+ " \"state\": 5,\n"
+ " \"pk_is_handle\": true,\n"
+ " \"comment\": \"\",\n"
+ " \"auto_inc_id\": 0,\n"
+ " \"max_col_id\": 4,\n"
+ " \"max_idx_id\": 1\n"
+ "}", tableId, tableName, tableName.toLowerCase());
kvServer.put(getKeyForTable(dbId, tableId),
ByteString.copyFromUtf8(tableJson));
}
public void dropTable(long dbId, long tableId) {
kvServer.remove(getKeyForTable(dbId, tableId));
}
}
}
| 1 | 8,804 | What does setting partitionInfo to null mean exactly? | pingcap-tispark | java |
@@ -225,6 +225,18 @@ class BokehPlot(DimensionedPlot):
source.data.update(converted_data)
else:
source.stream(data, stream.length)
+ return
+
+ # Determine if the CDS.data requires a full update or simply needs
+ # to be updated, this is done by determining whether newly added
+ # or not updated columns have the same length
+ new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))]
+ length = [len(v) for v in source.data.values() if isinstance(v, (list, np.ndarray))]
+ not_updated = [k for k in source.data if k not in data]
+ new = [k for k in data if k not in source.data]
+ if ((not_updated and new_length and any(len(source.data[n]) != new_length[0] for n in not_updated))
+ or (new and length and any(len(data[n]) != length[0] for n in new))):
+ source.data = data
else:
source.data.update(data)
| 1 | import json
from itertools import groupby
import numpy as np
import param
from bokeh.models import (ColumnDataSource, Column, Row, Div)
from bokeh.models.widgets import Panel, Tabs
from ...core import (OrderedDict, Store, AdjointLayout, NdLayout, Layout,
Empty, GridSpace, HoloMap, Element, DynamicMap)
from ...core.options import SkipRendering
from ...core.util import basestring, wrap_tuple, unique_iterator, get_method_owner
from ...streams import Stream
from ..plot import (DimensionedPlot, GenericCompositePlot, GenericLayoutPlot,
GenericElementPlot, GenericOverlayPlot)
from ..util import attach_streams, displayable, collate
from .callbacks import Callback
from .util import (layout_padding, pad_plots, filter_toolboxes, make_axis,
update_shared_sources, empty_plot, decode_bytes,
bokeh_version)
from bokeh.layouts import gridplot
from bokeh.plotting.helpers import _known_tools as known_tools
from bokeh.util.serialization import convert_datetime_array
TOOLS = {name: tool if isinstance(tool, basestring) else type(tool())
for name, tool in known_tools.items()}
class BokehPlot(DimensionedPlot):
"""
Plotting baseclass for the Bokeh backends, implementing the basic
plotting interface for Bokeh based plots.
"""
width = param.Integer(default=300, doc="""
Width of the plot in pixels""")
height = param.Integer(default=300, doc="""
Height of the plot in pixels""")
sizing_mode = param.ObjectSelector(default='fixed',
objects=['fixed', 'stretch_both', 'scale_width', 'scale_height',
'scale_both'], doc="""
How the item being displayed should size itself.
"stretch_both" plots will resize to occupy all available
space, even if this changes the aspect ratio of the element.
"fixed" plots are not responsive and will retain their
original width and height regardless of any subsequent browser
window resize events.
"scale_width" elements will responsively resize to fit to the
width available, while maintaining the original aspect ratio.
"scale_height" elements will responsively resize to fit to the
height available, while maintaining the original aspect ratio.
"scale_both" elements will responsively resize to for both the
width and height available, while maintaining the original
aspect ratio.""")
shared_datasource = param.Boolean(default=True, doc="""
Whether Elements drawing the data from the same object should
share their Bokeh data source allowing for linked brushing
and other linked behaviors.""")
title_format = param.String(default="{label} {group} {dimensions}", doc="""
The formatting string for the title of this plot, allows defining
a label group separator and dimension labels.""")
toolbar = param.ObjectSelector(default='above',
objects=["above", "below",
"left", "right", None],
doc="""
The toolbar location, must be one of 'above', 'below',
'left', 'right', None.""")
_merged_tools = ['pan', 'box_zoom', 'box_select', 'lasso_select',
'poly_select', 'ypan', 'xpan']
backend = 'bokeh'
@property
def document(self):
return self._document
@property
def id(self):
return self.state.ref['id']
@document.setter
def document(self, doc):
self._document = doc
if self.subplots:
for plot in self.subplots.values():
if plot is not None:
plot.document = doc
def __init__(self, *args, **params):
super(BokehPlot, self).__init__(*args, **params)
self._document = None
self.root = None
def get_data(self, element, ranges, style):
"""
Returns the data from an element in the appropriate format for
initializing or updating a ColumnDataSource and a dictionary
which maps the expected keywords arguments of a glyph to
the column in the datasource.
"""
raise NotImplementedError
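    # A hedged, illustrative sketch (not actual HoloViews code) of what a
    # concrete subclass might return from get_data: a dict of columns for the
    # ColumnDataSource plus a mapping from glyph keyword arguments to column
    # names. The exact signature and mapping keys vary per element and glyph.
    #
    #     def get_data(self, element, ranges, style):
    #         data = {'x': element.dimension_values(0),
    #                 'y': element.dimension_values(1)}
    #         mapping = {'x': 'x', 'y': 'y'}
    #         return data, mapping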
def _construct_callbacks(self):
"""
Initializes any callbacks for streams which have defined
the plotted object as a source.
"""
if isinstance(self, GenericOverlayPlot):
zorders = []
elif self.batched:
zorders = list(range(self.zorder, self.zorder+len(self.hmap.last)))
else:
zorders = [self.zorder]
if isinstance(self, GenericOverlayPlot) and not self.batched:
sources = []
elif not self.static or isinstance(self.hmap, DynamicMap):
sources = [(i, o) for i, inputs in self.stream_sources.items()
for o in inputs if i in zorders]
else:
sources = [(self.zorder, self.hmap.last)]
cb_classes = set()
for _, source in sources:
streams = Stream.registry.get(id(source), [])
registry = Stream._callbacks['bokeh']
cb_classes |= {(registry[type(stream)], stream) for stream in streams
if type(stream) in registry and stream.linked}
cbs = []
sorted_cbs = sorted(cb_classes, key=lambda x: id(x[0]))
for cb, group in groupby(sorted_cbs, lambda x: x[0]):
cb_streams = [s for _, s in group]
cbs.append(cb(self, cb_streams, source))
return cbs
def push(self):
"""
Pushes updated plot data via the Comm.
"""
if self.renderer.mode == 'server':
return
if self.comm is None:
raise Exception('Renderer does not have a comm.')
msg = self.renderer.diff(self, binary=True)
if msg is None:
return
self.comm.send(msg.header_json)
self.comm.send(msg.metadata_json)
self.comm.send(msg.content_json)
for header, payload in msg.buffers:
self.comm.send(json.dumps(header))
self.comm.send(buffers=[payload])
def set_root(self, root):
"""
Sets the current document on all subplots.
"""
for plot in self.traverse(lambda x: x):
plot.root = root
def _init_datasource(self, data):
"""
Initializes a data source to be passed into the bokeh glyph.
"""
data = {k: decode_bytes(vs) for k, vs in data.items()}
return ColumnDataSource(data=data)
def _update_datasource(self, source, data):
"""
Update datasource with data for a new frame.
"""
data = {k: decode_bytes(vs) for k, vs in data.items()}
empty = all(len(v) == 0 for v in data.values())
if (self.streaming and self.streaming[0].data is self.current_frame.data
and self._stream_data and not empty):
stream = self.streaming[0]
if stream._triggering:
data = {k: v[-stream._chunk_length:] for k, v in data.items()}
# NOTE: Workaround for bug in bokeh 0.12.14, data conversion
# should be removed once fixed in bokeh (https://github.com/bokeh/bokeh/issues/7587)
converted_data = {}
for k, vals in data.items():
cdata = source.data[k]
odata = data[k]
if (bokeh_version in ['0.12.14', '0.12.15dev1'] and
isinstance(cdata, np.ndarray) and cdata.dtype.kind == 'M'
and isinstance(vals, np.ndarray) and vals.dtype.kind == 'M'):
cdata = convert_datetime_array(cdata)
odata = convert_datetime_array(odata)
if len(odata):
cdata = np.concatenate([odata, cdata])
converted_data[k] = cdata
if converted_data:
for k, vals in data.items():
cdata = source.data[k]
odata = data[k]
if k not in converted_data:
if len(odata):
cdata = np.concatenate([odata, cdata])
converted_data[k] = cdata
source.data.update(converted_data)
else:
source.stream(data, stream.length)
else:
source.data.update(data)
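    # For reference, a hedged sketch of the ways a bokeh ColumnDataSource can
    # be brought up to date (column names here are purely illustrative):
    #
    #     source.stream({'x': [1], 'y': [2]}, rollover=1000)  # append rows
    #     source.data.update({'x': xs, 'y': ys})              # patch existing columns
    #     source.data = {'x': xs, 'y': ys, 'z': zs}           # full replacement, needed when
    #                                                         # the set or length of columns changes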
def _update_callbacks(self, plot):
"""
Iterates over all subplots and updates existing CustomJS
callbacks with models that were replaced when compositing
subplots into a CompositePlot and sets the plot id to match
the root level bokeh model.
"""
subplots = self.traverse(lambda x: x, [GenericElementPlot])
merged_tools = {t: list(plot.select({'type': TOOLS[t]}))
for t in self._merged_tools}
for subplot in subplots:
for cb in subplot.callbacks:
for c in cb.callbacks:
for tool, objs in merged_tools.items():
if tool in c.args and objs:
c.args[tool] = objs[0]
if self.top_level:
c.code = c.code.replace('PLACEHOLDER_PLOT_ID', self.id)
@property
def state(self):
"""
The plotting state that gets updated via the update method and
used by the renderer to generate output.
"""
return self.handles['plot']
@property
def current_handles(self):
"""
Should return a list of plot objects that have changed and
should be updated.
"""
return []
def cleanup(self):
"""
Cleans up references to the plot after the plot has been
deleted. Traverses through all plots cleaning up Callbacks and
Stream subscribers.
"""
plots = self.traverse(lambda x: x, [BokehPlot])
for plot in plots:
if not isinstance(plot, (GenericCompositePlot, GenericElementPlot, GenericOverlayPlot)):
continue
streams = list(plot.streams)
for stream in set(streams):
stream._subscribers = [
(p, subscriber) for p, subscriber in stream._subscribers
if get_method_owner(subscriber) not in plots
]
if not isinstance(plot, GenericElementPlot):
continue
for callback in plot.callbacks:
streams += callback.streams
callbacks = {k: cb for k, cb in callback._callbacks.items()
if cb is not callback}
Callback._callbacks = callbacks
def _fontsize(self, key, label='fontsize', common=True):
"""
Converts integer fontsizes to a string specifying
fontsize in pt.
"""
size = super(BokehPlot, self)._fontsize(key, label, common)
return {k: v if isinstance(v, basestring) else '%spt' % v
for k, v in size.items()}
def sync_sources(self):
"""
Syncs data sources between Elements, which draw data
from the same object.
"""
get_sources = lambda x: (id(x.current_frame.data), x)
filter_fn = lambda x: (x.shared_datasource and x.current_frame is not None and
not isinstance(x.current_frame.data, np.ndarray)
and 'source' in x.handles)
data_sources = self.traverse(get_sources, [filter_fn])
grouped_sources = groupby(sorted(data_sources, key=lambda x: x[0]), lambda x: x[0])
shared_sources = []
source_cols = {}
plots = []
for _, group in grouped_sources:
group = list(group)
if len(group) > 1:
source_data = {}
for _, plot in group:
source_data.update(plot.handles['source'].data)
new_source = ColumnDataSource(source_data)
for _, plot in group:
renderer = plot.handles.get('glyph_renderer')
for callback in plot.callbacks:
callback.reset()
if renderer is None:
continue
elif 'data_source' in renderer.properties():
renderer.update(data_source=new_source)
else:
renderer.update(source=new_source)
if hasattr(renderer, 'view'):
renderer.view.update(source=new_source)
plot.handles['source'] = new_source
plots.append(plot)
shared_sources.append(new_source)
source_cols[id(new_source)] = [c for c in new_source.data]
for plot in plots:
for callback in plot.callbacks:
callback.initialize()
self.handles['shared_sources'] = shared_sources
self.handles['source_cols'] = source_cols
class CompositePlot(BokehPlot):
"""
    CompositePlot is an abstract baseclass for plot types that
render multiple axes. It implements methods to add an overall title
to such a plot.
"""
fontsize = param.Parameter(default={'title': '16pt'}, allow_None=True, doc="""
Specifies various fontsizes of the displayed text.
Finer control is available by supplying a dictionary where any
unmentioned keys reverts to the default sizes, e.g:
{'title': '15pt'}""")
_title_template = "<span style='font-size: {fontsize}'><b>{title}</b></font>"
def _get_title(self, key):
title_div = None
title = self._format_title(key) if self.show_title else ''
if title:
fontsize = self._fontsize('title')
title_tags = self._title_template.format(title=title,
**fontsize)
if 'title' in self.handles:
title_div = self.handles['title']
else:
title_div = Div()
title_div.text = title_tags
return title_div
@property
def current_handles(self):
"""
Should return a list of plot objects that have changed and
should be updated.
"""
return [self.handles['title']] if 'title' in self.handles else []
class GridPlot(CompositePlot, GenericCompositePlot):
"""
Plot a group of elements in a grid layout based on a GridSpace element
object.
"""
axis_offset = param.Integer(default=50, doc="""
Number of pixels to adjust row and column widths and height by
to compensate for shared axes.""")
fontsize = param.Parameter(default={'title': '16pt'},
allow_None=True, doc="""
Specifies various fontsizes of the displayed text.
Finer control is available by supplying a dictionary where any
unmentioned keys reverts to the default sizes, e.g:
{'title': '15pt'}""")
merge_tools = param.Boolean(default=True, doc="""
Whether to merge all the tools into a single toolbar""")
shared_xaxis = param.Boolean(default=False, doc="""
If enabled the x-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
shared_yaxis = param.Boolean(default=False, doc="""
        If enabled the y-axes of the GridSpace will be drawn from the
objects inside the Grid rather than the GridSpace dimensions.""")
xaxis = param.ObjectSelector(default=True,
objects=['bottom', 'top', None, True, False], doc="""
Whether and where to display the xaxis, supported options are
'bottom', 'top' and None.""")
yaxis = param.ObjectSelector(default=True,
objects=['left', 'right', None, True, False], doc="""
Whether and where to display the yaxis, supported options are
'left', 'right' and None.""")
xrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the xticks.""")
yrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the yticks.""")
plot_size = param.Integer(default=120, doc="""
Defines the width and height of each plot in the grid""")
def __init__(self, layout, ranges=None, layout_num=1, keys=None, **params):
if not isinstance(layout, GridSpace):
raise Exception("GridPlot only accepts GridSpace.")
super(GridPlot, self).__init__(layout=layout, layout_num=layout_num,
ranges=ranges, keys=keys, **params)
self.cols, self.rows = layout.shape
self.subplots, self.layout = self._create_subplots(layout, ranges)
if self.top_level:
self.comm = self.init_comm()
self.traverse(lambda x: setattr(x, 'comm', self.comm))
self.traverse(lambda x: attach_streams(self, x.hmap, 2),
[GenericElementPlot])
def _create_subplots(self, layout, ranges):
subplots = OrderedDict()
frame_ranges = self.compute_ranges(layout, None, ranges)
keys = self.keys[:1] if self.dynamic else self.keys
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in keys])
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
for i, coord in enumerate(layout.keys(full_grid=True)):
r = i % self.rows
c = i // self.rows
if not isinstance(coord, tuple): coord = (coord,)
view = layout.data.get(coord, None)
# Create subplot
if view is not None:
vtype = view.type if isinstance(view, HoloMap) else view.__class__
opts = self.lookup_options(view, 'plot').options
else:
vtype = None
if type(view) in (Layout, NdLayout):
raise SkipRendering("Cannot plot nested Layouts.")
if not displayable(view):
view = collate(view)
# Create axes
offset = self.axis_offset
kwargs = {}
if c == 0 and r != 0:
kwargs['xaxis'] = None
kwargs['width'] = self.plot_size+offset
if c != 0 and r == 0:
kwargs['yaxis'] = None
kwargs['height'] = self.plot_size+offset
if c == 0 and r == 0:
kwargs['width'] = self.plot_size+offset
kwargs['height'] = self.plot_size+offset
if r != 0 and c != 0:
kwargs['xaxis'] = None
kwargs['yaxis'] = None
if 'width' not in kwargs or not self.shared_yaxis:
kwargs['width'] = self.plot_size
if 'height' not in kwargs or not self.shared_xaxis:
kwargs['height'] = self.plot_size
if 'border' not in kwargs:
kwargs['border'] = 3
kwargs['show_legend'] = False
if not self.shared_xaxis:
kwargs['xaxis'] = None
if not self.shared_yaxis:
kwargs['yaxis'] = None
# Create subplot
plotting_class = Store.registry[self.renderer.backend].get(vtype, None)
if plotting_class is None:
if view is not None:
self.warning("Bokeh plotting class for %s type not found, "
"object will not be rendered." % vtype.__name__)
else:
subplot = plotting_class(view, dimensions=self.dimensions,
show_title=False, subplot=True,
renderer=self.renderer,
ranges=frame_ranges, uniform=self.uniform,
keys=self.keys, **dict(opts, **kwargs))
collapsed_layout[coord] = (subplot.layout
if isinstance(subplot, GenericCompositePlot)
else subplot.hmap)
subplots[coord] = subplot
return subplots, collapsed_layout
def initialize_plot(self, ranges=None, plots=[]):
ranges = self.compute_ranges(self.layout, self.keys[-1], None)
passed_plots = list(plots)
plots = [[None for c in range(self.cols)] for r in range(self.rows)]
for i, coord in enumerate(self.layout.keys(full_grid=True)):
r = i % self.rows
c = i // self.rows
subplot = self.subplots.get(wrap_tuple(coord), None)
if subplot is not None:
plot = subplot.initialize_plot(ranges=ranges, plots=passed_plots)
plots[r][c] = plot
passed_plots.append(plot)
else:
passed_plots.append(None)
plot = gridplot(plots[::-1], toolbar_location=self.toolbar,
merge_tools=self.merge_tools)
plot = self._make_axes(plot)
title = self._get_title(self.keys[-1])
if title:
plot = Column(title, plot)
self.handles['title'] = title
self.handles['plot'] = plot
self.handles['plots'] = plots
self._update_callbacks(plot)
if self.shared_datasource:
self.sync_sources()
self.drawn = True
return self.handles['plot']
def _make_axes(self, plot):
width, height = self.renderer.get_size(plot)
x_axis, y_axis = None, None
kwargs = dict(sizing_mode=self.sizing_mode)
keys = self.layout.keys(full_grid=True)
if self.xaxis:
flip = self.shared_xaxis
rotation = self.xrotation
lsize = self._fontsize('xlabel').get('fontsize')
tsize = self._fontsize('xticks', common=False).get('fontsize')
xfactors = list(unique_iterator([wrap_tuple(k)[0] for k in keys]))
x_axis = make_axis('x', width, xfactors, self.layout.kdims[0],
flip=flip, rotation=rotation, label_size=lsize,
tick_size=tsize)
if self.yaxis and self.layout.ndims > 1:
flip = self.shared_yaxis
rotation = self.yrotation
lsize = self._fontsize('ylabel').get('fontsize')
tsize = self._fontsize('yticks', common=False).get('fontsize')
yfactors = list(unique_iterator([k[1] for k in keys]))
y_axis = make_axis('y', height, yfactors, self.layout.kdims[1],
flip=flip, rotation=rotation, label_size=lsize,
tick_size=tsize)
if x_axis and y_axis:
plot = filter_toolboxes(plot)
r1, r2 = ([y_axis, plot], [None, x_axis])
if self.shared_xaxis:
r1, r2 = r2, r1
if self.shared_yaxis:
r1, r2 = r1[::-1], r2[::-1]
models = layout_padding([r1, r2], self.renderer)
plot = gridplot(models, **kwargs)
elif y_axis:
models = [y_axis, plot]
if self.shared_yaxis: models = models[::-1]
plot = Row(*models, **kwargs)
elif x_axis:
models = [plot, x_axis]
if self.shared_xaxis: models = models[::-1]
plot = Column(*models, **kwargs)
return plot
@update_shared_sources
def update_frame(self, key, ranges=None):
"""
Update the internal state of the Plot to represent the given
key tuple (where integers represent frames). Returns this
state.
"""
ranges = self.compute_ranges(self.layout, key, ranges)
for coord in self.layout.keys(full_grid=True):
subplot = self.subplots.get(wrap_tuple(coord), None)
if subplot is not None:
subplot.update_frame(key, ranges)
title = self._get_title(key)
if title:
            self.handles['title'] = title
class LayoutPlot(CompositePlot, GenericLayoutPlot):
shared_axes = param.Boolean(default=True, doc="""
Whether axes should be shared across plots""")
shared_datasource = param.Boolean(default=False, doc="""
Whether Elements drawing the data from the same object should
share their Bokeh data source allowing for linked brushing
and other linked behaviors.""")
merge_tools = param.Boolean(default=True, doc="""
Whether to merge all the tools into a single toolbar""")
tabs = param.Boolean(default=False, doc="""
Whether to display overlaid plots in separate panes""")
def __init__(self, layout, keys=None, **params):
super(LayoutPlot, self).__init__(layout, keys=keys, **params)
self.layout, self.subplots, self.paths = self._init_layout(layout)
if self.top_level:
self.comm = self.init_comm()
self.traverse(lambda x: setattr(x, 'comm', self.comm))
self.traverse(lambda x: attach_streams(self, x.hmap, 2),
[GenericElementPlot])
def _init_layout(self, layout):
# Situate all the Layouts in the grid and compute the gridspec
# indices for all the axes required by each LayoutPlot.
layout_count = 0
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
frame_ranges = self.compute_ranges(layout, None, None)
keys = self.keys[:1] if self.dynamic else self.keys
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in keys])
layout_items = layout.grid_items()
layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
layout_subplots, layouts, paths = {}, {}, {}
for r, c in self.coords:
# Get view at layout position and wrap in AdjointLayout
key, view = layout_items.get((c, r) if self.transpose else (r, c), (None, None))
view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
layouts[(r, c)] = view
paths[r, c] = key
# Compute the layout type from shape
layout_lens = {1:'Single', 2:'Dual', 3: 'Triple'}
layout_type = layout_lens.get(len(view), 'Single')
# Get the AdjoinLayout at the specified coordinate
positions = AdjointLayoutPlot.layout_dict[layout_type]['positions']
# Create temporary subplots to get projections types
# to create the correct subaxes for all plots in the layout
layout_key, _ = layout_items.get((r, c), (None, None))
if isinstance(layout, NdLayout) and layout_key:
layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
# Generate the axes and create the subplots with the appropriate
# axis objects, handling any Empty objects.
empty = isinstance(view.main, Empty)
if empty or view.main is None:
continue
elif not view.traverse(lambda x: x, [Element]):
self.warning('%s is empty, skipping subplot.' % view.main)
continue
else:
layout_count += 1
subplot_data = self._create_subplots(view, positions,
layout_dimensions, frame_ranges,
num=0 if empty else layout_count)
subplots, adjoint_layout = subplot_data
# Generate the AdjointLayoutsPlot which will coordinate
# plotting of AdjointLayouts in the larger grid
plotopts = self.lookup_options(view, 'plot').options
layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subplots, **plotopts)
layout_subplots[(r, c)] = layout_plot
if layout_key:
collapsed_layout[layout_key] = adjoint_layout
return collapsed_layout, layout_subplots, paths
def _create_subplots(self, layout, positions, layout_dimensions, ranges, num=0):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
subplots = {}
adjoint_clone = layout.clone(shared_data=False, id=layout.id)
main_plot = None
for pos in positions:
# Pos will be one of 'main', 'top' or 'right' or None
element = layout.get(pos, None)
if element is None or not element.traverse(lambda x: x, [Element, Empty]):
continue
if not displayable(element):
element = collate(element)
subplot_opts = dict(adjoined=main_plot)
# Options common for any subplot
vtype = element.type if isinstance(element, HoloMap) else element.__class__
plot_type = Store.registry[self.renderer.backend].get(vtype, None)
plotopts = self.lookup_options(element, 'plot').options
side_opts = {}
if pos != 'main':
plot_type = AdjointLayoutPlot.registry.get(vtype, plot_type)
if pos == 'right':
yaxis = 'right-bare' if plot_type and 'bare' in plot_type.yaxis else 'right'
width = plot_type.width if plot_type else 0
side_opts = dict(height=main_plot.height, yaxis=yaxis,
width=width, invert_axes=True,
labelled=['y'], xticks=1, xaxis=main_plot.xaxis)
else:
xaxis = 'top-bare' if plot_type and 'bare' in plot_type.xaxis else 'top'
height = plot_type.height if plot_type else 0
side_opts = dict(width=main_plot.width, xaxis=xaxis,
height=height, labelled=['x'],
yticks=1, yaxis=main_plot.yaxis)
# Override the plotopts as required
# Customize plotopts depending on position.
plotopts = dict(side_opts, **plotopts)
plotopts.update(subplot_opts)
if vtype is Empty:
subplots[pos] = None
continue
elif plot_type is None:
self.warning("Bokeh plotting class for %s type not found, object will "
"not be rendered." % vtype.__name__)
continue
num = num if len(self.coords) > 1 else 0
subplot = plot_type(element, keys=self.keys,
dimensions=self.dimensions,
layout_dimensions=layout_dimensions,
ranges=ranges, subplot=True,
uniform=self.uniform, layout_num=num,
renderer=self.renderer,
**dict({'shared_axes': self.shared_axes},
**plotopts))
subplots[pos] = subplot
if isinstance(plot_type, type) and issubclass(plot_type, GenericCompositePlot):
adjoint_clone[pos] = subplots[pos].layout
else:
adjoint_clone[pos] = subplots[pos].hmap
if pos == 'main':
main_plot = subplot
return subplots, adjoint_clone
def initialize_plot(self, plots=None, ranges=None):
ranges = self.compute_ranges(self.layout, self.keys[-1], None)
passed_plots = [] if plots is None else plots
plots = [[] for _ in range(self.rows)]
tab_titles = {}
insert_rows, insert_cols = [], []
offset = 0
for r, c in self.coords:
subplot = self.subplots.get((r, c), None)
if subplot is not None:
shared_plots = passed_plots if self.shared_axes else None
subplots = subplot.initialize_plot(ranges=ranges, plots=shared_plots)
# Computes plotting offsets depending on
# number of adjoined plots
offset = sum(r >= ir for ir in insert_rows)
if len(subplots) > 2:
# Add pad column in this position
insert_cols.append(c)
if r not in insert_rows:
# Insert and pad marginal row if none exists
plots.insert(r+offset, [None for _ in range(len(plots[r]))])
# Pad previous rows
for ir in range(r):
plots[ir].insert(c+1, None)
# Add to row offset
insert_rows.append(r)
offset += 1
# Add top marginal
plots[r+offset-1] += [subplots.pop(-1), None]
elif len(subplots) > 1:
# Add pad column in this position
insert_cols.append(c)
# Pad previous rows
for ir in range(r):
plots[r].insert(c+1, None)
# Pad top marginal if one exists
if r in insert_rows:
plots[r+offset-1] += 2*[None]
else:
# Pad top marginal if one exists
if r in insert_rows:
plots[r+offset-1] += [None] * (1+(c in insert_cols))
plots[r+offset] += subplots
if len(subplots) == 1 and c in insert_cols:
plots[r+offset].append(None)
passed_plots.append(subplots[0])
if self.tabs:
title = subplot.subplots['main']._format_title(self.keys[-1],
dimensions=False)
if not title:
title = ' '.join(self.paths[r,c])
tab_titles[r, c] = title
else:
plots[r+offset] += [empty_plot(0, 0)]
# Replace None types with empty plots
# to avoid bokeh bug
plots = layout_padding(plots, self.renderer)
# Wrap in appropriate layout model
kwargs = dict(sizing_mode=self.sizing_mode)
if self.tabs:
panels = [Panel(child=child, title=str(tab_titles.get((r, c))))
for r, row in enumerate(plots)
for c, child in enumerate(row)
if child is not None]
layout_plot = Tabs(tabs=panels)
else:
plots = filter_toolboxes(plots)
plots, width = pad_plots(plots)
layout_plot = gridplot(children=plots, width=width,
toolbar_location=self.toolbar,
merge_tools=self.merge_tools, **kwargs)
title = self._get_title(self.keys[-1])
if title:
self.handles['title'] = title
layout_plot = Column(title, layout_plot, **kwargs)
self.handles['plot'] = layout_plot
self.handles['plots'] = plots
self._update_callbacks(layout_plot)
if self.shared_datasource:
self.sync_sources()
self.drawn = True
return self.handles['plot']
@update_shared_sources
def update_frame(self, key, ranges=None):
"""
Update the internal state of the Plot to represent the given
key tuple (where integers represent frames). Returns this
state.
"""
ranges = self.compute_ranges(self.layout, key, ranges)
for r, c in self.coords:
subplot = self.subplots.get((r, c), None)
if subplot is not None:
subplot.update_frame(key, ranges)
title = self._get_title(key)
if title:
self.handles['title'] = title
class AdjointLayoutPlot(BokehPlot):
layout_dict = {'Single': {'positions': ['main']},
'Dual': {'positions': ['main', 'right']},
'Triple': {'positions': ['main', 'right', 'top']}}
registry = {}
def __init__(self, layout, layout_type, subplots, **params):
# The AdjointLayout ViewableElement object
self.layout = layout
# Type may be set to 'Embedded Dual' by a call to grid_situate
self.layout_type = layout_type
self.view_positions = self.layout_dict[self.layout_type]['positions']
# The supplied (axes, view) objects as indexed by position
super(AdjointLayoutPlot, self).__init__(subplots=subplots, **params)
def initialize_plot(self, ranges=None, plots=[]):
"""
Plot all the views contained in the AdjointLayout Object using axes
appropriate to the layout configuration. All the axes are
supplied by LayoutPlot - the purpose of the call is to
invoke subplots with correct options and styles and hide any
empty axes as necessary.
"""
if plots is None: plots = []
adjoined_plots = []
for pos in self.view_positions:
# Pos will be one of 'main', 'top' or 'right' or None
subplot = self.subplots.get(pos, None)
# If no view object or empty position, disable the axis
if subplot:
passed_plots = plots + adjoined_plots
adjoined_plots.append(subplot.initialize_plot(ranges=ranges, plots=passed_plots))
else:
adjoined_plots.append(empty_plot(0, 0))
self.drawn = True
if not adjoined_plots: adjoined_plots = [None]
return adjoined_plots
def update_frame(self, key, ranges=None):
plot = None
for pos in ['main', 'right', 'top']:
subplot = self.subplots.get(pos)
if subplot is not None:
plot = subplot.update_frame(key, ranges)
return plot
| 1 | 20,966 | Not sure you need the initial ``not_updated`` as ``any([])`` evaluates to false. | holoviz-holoviews | py |
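An editorial illustration of the reviewer's remark above: in Python, any() over an empty iterable returns False, so pre-seeding a flag such as ``not_updated`` before an any()-style check is redundant. A minimal sketch (the subplot/updated names are hypothetical, not taken from the patch):
assert any([]) is False  # any() of an empty iterable is already False
def any_subplot_updated(subplots):
    # Hypothetical helper: with no subplots, any() simply returns False,
    # so no explicit not_updated accumulator is needed beforehand.
    return any(getattr(sp, "updated", False) for sp in subplots)
assert any_subplot_updated([]) is False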
@@ -0,0 +1,13 @@
+class AddPublishedAtToVideos < ActiveRecord::Migration
+ def up
+ add_column :videos, :published_at, :datetime
+ execute <<-SQL
+ UPDATE videos
+ SET published_at = created_at
+ SQL
+ end
+
+ def down
+ remove_column :videos, :published_at
+ end
+end | 1 | 1 | 9,351 | Since this migration hasn't been merged to master yet, what do you think about just squashing these two into the migration you really want? | thoughtbot-upcase | rb |
|
@@ -0,0 +1 @@
+BetterErrors.editor = :subl if defined? BetterErrors | 1 | 1 | 8,596 | This doesn't apply to all developers | blackducksoftware-ohloh-ui | rb |
|
@@ -52,13 +52,16 @@ class SonataMediaExtension extends Extension
$loader->load('gaufrette.xml');
$loader->load('validators.xml');
$loader->load('serializer.xml');
- $loader->load('api_form.xml');
-
+
$bundles = $container->getParameter('kernel.bundles');
if ('doctrine_orm' == $config['db_driver'] && isset($bundles['FOSRestBundle']) && isset($bundles['NelmioApiDocBundle'])) {
$loader->load('api_controllers.xml');
}
+
+ if (isset($bundles['JMSSerializerBundle'])) {
+ $loader->load('api_form.xml');
+ }
if (isset($bundles['SonataNotificationBundle'])) {
$loader->load('consumer.xml'); | 1 | <?php
/*
* This file is part of the Sonata project.
*
* (c) Thomas Rabaix <thomas.rabaix@sonata-project.org>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\DependencyInjection;
use Symfony\Component\DependencyInjection\Loader\XmlFileLoader;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\DependencyInjection\Definition;
use Symfony\Component\HttpKernel\DependencyInjection\Extension;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\Config\Definition\Processor;
use Sonata\EasyExtendsBundle\Mapper\DoctrineCollector;
/**
* MediaExtension
*
*
* @author Thomas Rabaix <thomas.rabaix@sonata-project.org>
*/
class SonataMediaExtension extends Extension
{
/**
* Loads the media bundle configuration.
*
* @param array $configs An array of configuration settings
* @param ContainerBuilder $container A ContainerBuilder instance
*/
public function load(array $configs, ContainerBuilder $container)
{
$processor = new Processor();
$configuration = new Configuration();
$config = $processor->processConfiguration($configuration, $configs);
$loader = new XmlFileLoader($container, new FileLocator(__DIR__ . '/../Resources/config'));
$loader->load('provider.xml');
$loader->load('media.xml');
$loader->load('twig.xml');
$loader->load('block.xml');
$loader->load('security.xml');
$loader->load('extra.xml');
$loader->load('form.xml');
$loader->load('gaufrette.xml');
$loader->load('validators.xml');
$loader->load('serializer.xml');
$loader->load('api_form.xml');
$bundles = $container->getParameter('kernel.bundles');
if ('doctrine_orm' == $config['db_driver'] && isset($bundles['FOSRestBundle']) && isset($bundles['NelmioApiDocBundle'])) {
$loader->load('api_controllers.xml');
}
if (isset($bundles['SonataNotificationBundle'])) {
$loader->load('consumer.xml');
}
if (isset($bundles['SonataFormatterBundle'])) {
$loader->load('formatter.xml');
}
if (isset($bundles['SonataSeoBundle'])) {
$loader->load('seo_block.xml');
}
if (!isset($bundles['LiipImagineBundle'])) {
$container->removeDefinition('sonata.media.thumbnail.liip_imagine');
}
if (!in_array(strtolower($config['db_driver']), array('doctrine_orm', 'doctrine_mongodb', 'doctrine_phpcr'))) {
throw new \InvalidArgumentException(sprintf('SonataMediaBundle - Invalid db driver "%s".', $config['db_driver']));
}
if (!array_key_exists($config['default_context'], $config['contexts'])) {
throw new \InvalidArgumentException(sprintf('SonataMediaBundle - Invalid default context : %s, available : %s', $config['default_context'], json_encode(array_keys($config['contexts']))));
}
$loader->load(sprintf('%s.xml', $config['db_driver']));
if (isset($bundles['SonataAdminBundle'])) {
$loader->load(sprintf('%s_admin.xml', $config['db_driver']));
}
$this->configureFilesystemAdapter($container, $config);
$this->configureCdnAdapter($container, $config);
$pool = $container->getDefinition('sonata.media.pool');
$pool->replaceArgument(0, $config['default_context']);
// this shameless hack is done in order to have one clean configuration
// for adding formats ....
$pool->addMethodCall('__hack__', $config);
$strategies = array();
foreach ($config['contexts'] as $name => $settings) {
$formats = array();
foreach ($settings['formats'] as $format => $value) {
$formats[$name.'_'.$format] = $value;
}
$strategies[] = $settings['download']['strategy'];
$pool->addMethodCall('addContext', array($name, $settings['providers'], $formats, $settings['download']));
}
$strategies = array_unique($strategies);
foreach ($strategies as $strategyId) {
$pool->addMethodCall('addDownloadSecurity', array($strategyId, new Reference($strategyId)));
}
if ('doctrine_orm' == $config['db_driver']) {
$this->registerDoctrineMapping($config);
}
$container->setParameter('sonata.media.resizer.simple.adapter.mode', $config['resizer']['simple']['mode']);
$container->setParameter('sonata.media.resizer.square.adapter.mode', $config['resizer']['square']['mode']);
$this->configureParameterClass($container, $config);
$this->configureExtra($container, $config);
$this->configureBuzz($container, $config);
$this->configureProviders($container, $config);
$this->configureClassesToCompile();
}
/**
* @param \Symfony\Component\DependencyInjection\ContainerBuilder $container
* @param array $config
*/
public function configureProviders(ContainerBuilder $container, $config)
{
$container->getDefinition('sonata.media.provider.image')
->replaceArgument(5, array_map('strtolower', $config['providers']['image']['allowed_extensions']))
->replaceArgument(6, $config['providers']['image']['allowed_mime_types'])
->replaceArgument(7, new Reference($config['providers']['image']['adapter']))
;
$container->getDefinition('sonata.media.provider.file')
->replaceArgument(5, $config['providers']['file']['allowed_extensions'])
->replaceArgument(6, $config['providers']['file']['allowed_mime_types'])
;
$container->getDefinition('sonata.media.provider.youtube')->replaceArgument(7, $config['providers']['youtube']['html5']);
}
/**
* @param \Symfony\Component\DependencyInjection\ContainerBuilder $container
* @param array $config
*/
public function configureBuzz(ContainerBuilder $container, array $config)
{
$container->getDefinition('sonata.media.buzz.browser')
->replaceArgument(0, new Reference($config['buzz']['connector']));
foreach (array(
'sonata.media.buzz.connector.curl',
'sonata.media.buzz.connector.file_get_contents'
) as $connector) {
$container->getDefinition($connector)
->addMethodCall('setIgnoreErrors', array($config['buzz']['client']['ignore_errors']))
->addMethodCall('setMaxRedirects', array($config['buzz']['client']['max_redirects']))
->addMethodCall('setTimeout', array($config['buzz']['client']['timeout']))
->addMethodCall('setVerifyPeer', array($config['buzz']['client']['verify_peer']))
->addMethodCall('setProxy', array($config['buzz']['client']['proxy']));
}
}
/**
* @param \Symfony\Component\DependencyInjection\ContainerBuilder $container
* @param array $config
*/
public function configureParameterClass(ContainerBuilder $container, array $config)
{
$container->setParameter('sonata.media.admin.media.entity', $config['class']['media']);
$container->setParameter('sonata.media.admin.gallery.entity', $config['class']['gallery']);
$container->setParameter('sonata.media.admin.gallery_has_media.entity', $config['class']['gallery_has_media']);
$container->setParameter('sonata.media.media.class', $config['class']['media']);
$container->setParameter('sonata.media.gallery.class', $config['class']['gallery']);
$container->getDefinition('sonata.media.form.type.media')->replaceArgument(1, $config['class']['media']);
}
/**
* @param array $config
*
* @return void
*/
public function registerDoctrineMapping(array $config)
{
$collector = DoctrineCollector::getInstance();
$collector->addAssociation($config['class']['media'], 'mapOneToMany', array(
'fieldName' => 'galleryHasMedias',
'targetEntity' => $config['class']['gallery_has_media'],
'cascade' => array(
'persist',
),
'mappedBy' => 'media',
'orphanRemoval' => false,
));
$collector->addAssociation($config['class']['gallery_has_media'], 'mapManyToOne', array(
'fieldName' => 'gallery',
'targetEntity' => $config['class']['gallery'],
'cascade' => array(
'persist',
),
'mappedBy' => NULL,
'inversedBy' => 'galleryHasMedias',
'joinColumns' => array(
array(
'name' => 'gallery_id',
'referencedColumnName' => 'id',
),
),
'orphanRemoval' => false,
));
$collector->addAssociation($config['class']['gallery_has_media'], 'mapManyToOne', array(
'fieldName' => 'media',
'targetEntity' => $config['class']['media'],
'cascade' => array(
'persist',
),
'mappedBy' => NULL,
'inversedBy' => 'galleryHasMedias',
'joinColumns' => array(
array(
'name' => 'media_id',
'referencedColumnName' => 'id',
),
),
'orphanRemoval' => false,
));
$collector->addAssociation($config['class']['gallery'], 'mapOneToMany', array(
'fieldName' => 'galleryHasMedias',
'targetEntity' => $config['class']['gallery_has_media'],
'cascade' => array(
'persist',
),
'mappedBy' => 'gallery',
'orphanRemoval' => true,
'orderBy' => array(
'position' => 'ASC',
),
));
}
/**
* Inject CDN dependency to default provider
*
* @param \Symfony\Component\DependencyInjection\ContainerBuilder $container
* @param array $config
*
* @return void
*/
public function configureCdnAdapter(ContainerBuilder $container, array $config)
{
// add the default configuration for the server cdn
if ($container->hasDefinition('sonata.media.cdn.server') && isset($config['cdn']['server'])) {
$container->getDefinition('sonata.media.cdn.server')
->replaceArgument(0, $config['cdn']['server']['path'])
;
} else {
$container->removeDefinition('sonata.media.cdn.server');
}
if ($container->hasDefinition('sonata.media.cdn.panther') && isset($config['cdn']['panther'])) {
$container->getDefinition('sonata.media.cdn.panther')
->replaceArgument(0, $config['cdn']['panther']['path'])
->replaceArgument(1, $config['cdn']['panther']['username'])
->replaceArgument(2, $config['cdn']['panther']['password'])
->replaceArgument(3, $config['cdn']['panther']['site_id'])
;
} else {
$container->removeDefinition('sonata.media.cdn.panther');
}
if ($container->hasDefinition('sonata.media.cdn.fallback') && isset($config['cdn']['fallback'])) {
$container->getDefinition('sonata.media.cdn.fallback')
->replaceArgument(0, new Reference($config['cdn']['fallback']['master']))
->replaceArgument(1, new Reference($config['cdn']['fallback']['fallback']))
;
} else {
$container->removeDefinition('sonata.media.cdn.fallback');
}
}
/**
* Inject filesystem dependency to default provider
*
* @param \Symfony\Component\DependencyInjection\ContainerBuilder $container
* @param array $config
*
* @return void
*/
public function configureFilesystemAdapter(ContainerBuilder $container, array $config)
{
// add the default configuration for the local filesystem
if ($container->hasDefinition('sonata.media.adapter.filesystem.local') && isset($config['filesystem']['local'])) {
$container->getDefinition('sonata.media.adapter.filesystem.local')
->addArgument($config['filesystem']['local']['directory'])
->addArgument($config['filesystem']['local']['create'])
;
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.local');
}
// add the default configuration for the FTP filesystem
if ($container->hasDefinition('sonata.media.adapter.filesystem.ftp') && isset($config['filesystem']['ftp'])) {
$container->getDefinition('sonata.media.adapter.filesystem.ftp')
->addArgument($config['filesystem']['ftp']['directory'])
->addArgument($config['filesystem']['ftp']['host'])
->addArgument(array(
'port' => $config['filesystem']['ftp']['port'],
'username' => $config['filesystem']['ftp']['username'],
'password' => $config['filesystem']['ftp']['password'],
'passive' => $config['filesystem']['ftp']['passive'],
'create' => $config['filesystem']['ftp']['create'],
'mode' => $config['filesystem']['ftp']['mode']
))
;
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.ftp');
$container->removeDefinition('sonata.media.filesystem.ftp');
}
// add the default configuration for the S3 filesystem
if ($container->hasDefinition('sonata.media.adapter.filesystem.s3') && isset($config['filesystem']['s3'])) {
$container->getDefinition('sonata.media.adapter.filesystem.s3')
->replaceArgument(0, new Reference('sonata.media.adapter.service.s3'))
->replaceArgument(1, $config['filesystem']['s3']['bucket'])
->replaceArgument(2, array('create' => $config['filesystem']['s3']['create'], 'region' => $config['filesystem']['s3']['region']))
->addMethodCall('setDirectory', array($config['filesystem']['s3']['directory']));
;
$container->getDefinition('sonata.media.metadata.amazon')
->addArgument(array(
'acl' => $config['filesystem']['s3']['acl'],
'storage' => $config['filesystem']['s3']['storage'],
'encryption' => $config['filesystem']['s3']['encryption'],
'meta' => $config['filesystem']['s3']['meta'],
'cache_control' => $config['filesystem']['s3']['cache_control']
))
;
$container->getDefinition('sonata.media.adapter.service.s3')
->replaceArgument(0, array(
'secret' => $config['filesystem']['s3']['secretKey'],
'key' => $config['filesystem']['s3']['accessKey'],
))
;
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.s3');
$container->removeDefinition('sonata.media.filesystem.s3');
}
if ($container->hasDefinition('sonata.media.adapter.filesystem.replicate') && isset($config['filesystem']['replicate'])) {
$container->getDefinition('sonata.media.adapter.filesystem.replicate')
->replaceArgument(0, new Reference($config['filesystem']['replicate']['master']))
->replaceArgument(1, new Reference($config['filesystem']['replicate']['slave']))
;
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.replicate');
$container->removeDefinition('sonata.media.filesystem.replicate');
}
if ($container->hasDefinition('sonata.media.adapter.filesystem.mogilefs') && isset($config['filesystem']['mogilefs'])) {
$container->getDefinition('sonata.media.adapter.filesystem.mogilefs')
->replaceArgument(0, $config['filesystem']['mogilefs']['domain'])
->replaceArgument(1, $config['filesystem']['mogilefs']['hosts'])
;
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.mogilefs');
$container->removeDefinition('sonata.media.filesystem.mogilefs');
}
if ($container->hasDefinition('sonata.media.adapter.filesystem.opencloud') &&
(isset($config['filesystem']['openstack']) || isset($config['filesystem']['rackspace']))) {
if (isset($config['filesystem']['openstack'])) {
$container->setParameter('sonata.media.adapter.filesystem.opencloud.class', 'OpenCloud\OpenStack');
$settings = 'openstack';
} else {
$container->setParameter('sonata.media.adapter.filesystem.opencloud.class', 'OpenCloud\Rackspace');
$settings = 'rackspace';
}
$container->getDefinition('sonata.media.adapter.filesystem.opencloud.connection')
->replaceArgument(0, $config['filesystem'][$settings]['url'])
->replaceArgument(1, $config['filesystem'][$settings]['secret'])
;
$container->getDefinition('sonata.media.adapter.filesystem.opencloud')
->replaceArgument(1, $config['filesystem'][$settings]['containerName'])
->replaceArgument(2, $config['filesystem'][$settings]['create_container']);
$container->getDefinition('sonata.media.adapter.filesystem.opencloud.objectstore')
->replaceArgument(1, $config['filesystem'][$settings]['region']);
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.opencloud');
$container->removeDefinition('sonata.media.adapter.filesystem.opencloud.connection');
$container->removeDefinition('sonata.media.adapter.filesystem.opencloud.objectstore');
$container->removeDefinition('sonata.media.filesystem.opencloud');
}
}
/**
* @param \Symfony\Component\DependencyInjection\ContainerBuilder $container
* @param array $config
*/
public function configureExtra(ContainerBuilder $container, array $config)
{
if ($config['pixlr']['enabled']) {
$container->getDefinition('sonata.media.extra.pixlr')
->replaceArgument(0, $config['pixlr']['referrer'])
->replaceArgument(1, $config['pixlr']['secret'])
;
} else {
$container->removeDefinition('sonata.media.extra.pixlr');
}
}
/**
* Add class to compile
*/
public function configureClassesToCompile()
{
$this->addClassesToCompile(array(
"Sonata\\MediaBundle\\CDN\\CDNInterface",
"Sonata\\MediaBundle\\CDN\\Fallback",
"Sonata\\MediaBundle\\CDN\\PantherPortal",
"Sonata\\MediaBundle\\CDN\\Server",
"Sonata\\MediaBundle\\Extra\\Pixlr",
"Sonata\\MediaBundle\\Filesystem\\Local",
"Sonata\\MediaBundle\\Filesystem\\Replicate",
"Sonata\\MediaBundle\\Generator\\DefaultGenerator",
"Sonata\\MediaBundle\\Generator\\GeneratorInterface",
"Sonata\\MediaBundle\\Generator\\ODMGenerator",
"Sonata\\MediaBundle\\Generator\\PHPCRGenerator",
"Sonata\\MediaBundle\\Metadata\\AmazonMetadataBuilder",
"Sonata\\MediaBundle\\Metadata\\MetadataBuilderInterface",
"Sonata\\MediaBundle\\Metadata\\NoopMetadataBuilder",
"Sonata\\MediaBundle\\Metadata\\ProxyMetadataBuilder",
"Sonata\\MediaBundle\\Model\\Gallery",
"Sonata\\MediaBundle\\Model\\GalleryHasMedia",
"Sonata\\MediaBundle\\Model\\GalleryHasMediaInterface",
"Sonata\\MediaBundle\\Model\\GalleryInterface",
"Sonata\\MediaBundle\\Model\\GalleryManager",
"Sonata\\MediaBundle\\Model\\GalleryManagerInterface",
"Sonata\\MediaBundle\\Model\\Media",
"Sonata\\MediaBundle\\Model\\MediaInterface",
"Sonata\\MediaBundle\\Model\\MediaManagerInterface",
"Sonata\\MediaBundle\\Provider\\BaseProvider",
"Sonata\\MediaBundle\\Provider\\BaseVideoProvider",
"Sonata\\MediaBundle\\Provider\\DailyMotionProvider",
"Sonata\\MediaBundle\\Provider\\FileProvider",
"Sonata\\MediaBundle\\Provider\\ImageProvider",
"Sonata\\MediaBundle\\Provider\\MediaProviderInterface",
"Sonata\\MediaBundle\\Provider\\Pool",
"Sonata\\MediaBundle\\Provider\\VimeoProvider",
"Sonata\\MediaBundle\\Provider\\YouTubeProvider",
"Sonata\\MediaBundle\\Resizer\\ResizerInterface",
"Sonata\\MediaBundle\\Resizer\\SimpleResizer",
"Sonata\\MediaBundle\\Resizer\\SquareResizer",
"Sonata\\MediaBundle\\Security\\DownloadStrategyInterface",
"Sonata\\MediaBundle\\Security\\ForbiddenDownloadStrategy",
"Sonata\\MediaBundle\\Security\\PublicDownloadStrategy",
"Sonata\\MediaBundle\\Security\\RolesDownloadStrategy",
"Sonata\\MediaBundle\\Security\\SessionDownloadStrategy",
"Sonata\\MediaBundle\\Templating\\Helper\\MediaHelper",
"Sonata\\MediaBundle\\Thumbnail\\ConsumerThumbnail",
"Sonata\\MediaBundle\\Thumbnail\\FormatThumbnail",
"Sonata\\MediaBundle\\Thumbnail\\ThumbnailInterface",
"Sonata\\MediaBundle\\Twig\\Extension\\MediaExtension",
"Sonata\\MediaBundle\\Twig\\Node\\MediaNode",
"Sonata\\MediaBundle\\Twig\\Node\\PathNode",
"Sonata\\MediaBundle\\Twig\\Node\\ThumbnailNode",
));
}
}
| 1 | 6,401 | I think `serializer.xml` can be moved into condition below too | sonata-project-SonataMediaBundle | php |
@@ -21,6 +21,9 @@ namespace Benchmarks
{
public static void Main(string[] args)
{
+ // examples to run from command line:
+ // navigate to opentelemetry-dotnet\src\benchmarks directory and run the following
+ // dotnet run --framework netcoreapp3.1 --configuration Release --filter *OpenTelemetrySdkBenchmarksActivity*
BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args);
}
} | 1 | // <copyright file="Program.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using BenchmarkDotNet.Running;
namespace Benchmarks
{
internal static class Program
{
public static void Main(string[] args)
{
BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args);
}
}
}
| 1 | 14,269 | Probably put this in a simple README.md file? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -0,0 +1,18 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+
+namespace AutoRest.Core.Validation
+{
+ [Flags]
+ public enum ValidationCategory
+ {
+ RPCViolation = 1 << 0,
+ OneAPIViolation = 1 << 1,
+ SDKViolation = 1 << 2,
+ }
+} | 1 | 1 | 23,859 | Super Cool bitwise Minor: Most likely you don't need `System.Collections.Generic`, `System.Linq` & `System.Threading.Tasks` | Azure-autorest | java |
|
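An editorial illustration of the bit-shifted [Flags] pattern the reviewer calls out above: each category occupies its own bit, so values can be combined and tested with bitwise operators. A rough cross-language sketch (Python's enum.Flag standing in for the C# enum, purely illustrative; the unused-usings remark needs no example):
from enum import Flag
class ValidationCategory(Flag):
    # Each member owns a distinct bit, mirroring the C# enum in the patch.
    RPC_VIOLATION = 1 << 0
    ONE_API_VIOLATION = 1 << 1
    SDK_VIOLATION = 1 << 2
combined = ValidationCategory.RPC_VIOLATION | ValidationCategory.SDK_VIOLATION
assert ValidationCategory.SDK_VIOLATION in combined  # bitwise membership test
assert ValidationCategory.ONE_API_VIOLATION not in combined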
@@ -20,7 +20,7 @@ public class DefaultMicroserviceClassLoaderFactory implements MicroserviceClassL
public static final MicroserviceClassLoaderFactory INSTANCE = new DefaultMicroserviceClassLoaderFactory();
@Override
- public ClassLoader create(String microserviceName, String version) {
+ public ClassLoader create(String appId, String microserviceName, String version) {
return Thread.currentThread().getContextClassLoader();
}
} | 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.core.definition.classloader;
public class DefaultMicroserviceClassLoaderFactory implements MicroserviceClassLoaderFactory {
public static final MicroserviceClassLoaderFactory INSTANCE = new DefaultMicroserviceClassLoaderFactory();
@Override
public ClassLoader create(String microserviceName, String version) {
return Thread.currentThread().getContextClassLoader();
}
}
| 1 | 7,078 | If we just return the TCC, maybe we need to update the method name for it. | apache-servicecomb-java-chassis | java |
@@ -111,8 +111,7 @@ namespace AutoRest.Extensions
if (methodList.Count == 1)
{
Method method = methodList.Single();
- return string.Format(CultureInfo.InvariantCulture, "Additional parameters for the {0} operation.",
- createOperationDisplayString(method.MethodGroup.Name.ToPascalCase(), method.Name));
+ return string.Format(CultureInfo.InvariantCulture, "Additional parameters for {0} operation.", method.Name);
}
else if (methodList.Count <= 4)
{ | 1 | using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using AutoRest.Core.Model;
using Newtonsoft.Json.Linq;
using static AutoRest.Core.Utilities.DependencyInjection;
using AutoRest.Core.Utilities;
namespace AutoRest.Extensions
{
public static class ParameterGroupExtensionHelper
{
private class ParameterGroup
{
public string Name { get; }
public Dictionary<Property, Parameter> ParameterMapping { get; }
public ParameterGroup(string name, Dictionary<Property, Parameter> parameterMapping)
{
this.Name = name;
this.ParameterMapping = parameterMapping;
}
}
private static Property CreateParameterGroupProperty(Parameter parameter)
{
Property groupProperty = New<Property>(new
{
IsReadOnly = false, //Since these properties are used as parameters they are never read only
Name = parameter.Name,
IsRequired = parameter.IsRequired,
DefaultValue = parameter.DefaultValue,
//Constraints = parameter.Constraints, Omit these since we don't want to perform parameter validation
Documentation = parameter.Documentation,
ModelType = parameter.ModelType,
RealPath = new string[] { },
SerializedName = default(string) //Parameter is never serialized directly
});
// Copy over extensions
foreach (var key in parameter.Extensions.Keys)
{
groupProperty.Extensions[key] = parameter.Extensions[key];
}
return groupProperty;
}
private static ParameterGroup BuildParameterGroup(string parameterGroupName, Method method)
{
Dictionary<Property, Parameter> parameterMapping = method.Parameters.Where(
p => GetParameterGroupName(method.Group, method.Name, p) == parameterGroupName).ToDictionary(
CreateParameterGroupProperty,
p => p);
return new ParameterGroup(parameterGroupName, parameterMapping);
}
private static string GetParameterGroupName(string methodGroupName, string methodName, Parameter parameter)
{
if (parameter.Extensions.ContainsKey(SwaggerExtensions.ParameterGroupExtension))
{
JContainer extensionObject = parameter.Extensions[SwaggerExtensions.ParameterGroupExtension] as JContainer;
if (extensionObject != null)
{
string specifiedGroupName = extensionObject.Value<string>("name");
string parameterGroupName;
if (specifiedGroupName == null)
{
string postfix = extensionObject.Value<string>("postfix") ?? "Parameters";
parameterGroupName = methodGroupName + "-" + methodName + "-" + postfix;
}
else
{
parameterGroupName = specifiedGroupName;
}
return parameterGroupName;
}
}
return null;
}
private static IEnumerable<string> ExtractParameterGroupNames(Method method)
{
return method.Parameters.Select(p => GetParameterGroupName(method.Group, method.Name, p)).Where(name => !string.IsNullOrEmpty(name)).Distinct();
}
private static IEnumerable<ParameterGroup> ExtractParameterGroups(Method method)
{
IEnumerable<string> parameterGroupNames = ExtractParameterGroupNames(method);
return parameterGroupNames.Select(parameterGroupName => BuildParameterGroup(parameterGroupName, method));
}
private static IEnumerable<Method> GetMethodsUsingParameterGroup(IEnumerable<Method> methods, ParameterGroup parameterGroup)
{
return methods.Where(m => ExtractParameterGroupNames(m).Contains(parameterGroup.Name));
}
private static string GenerateParameterGroupModelText(IEnumerable<Method> methodsWhichUseGroup)
{
Func<string, string, string> createOperationDisplayString = (group, name) =>
{
return string.IsNullOrEmpty(group) ? name : string.Format(CultureInfo.InvariantCulture, "{0}_{1}", group, name);
};
List<Method> methodList = methodsWhichUseGroup.ToList();
if (methodList.Count == 1)
{
Method method = methodList.Single();
return string.Format(CultureInfo.InvariantCulture, "Additional parameters for the {0} operation.",
createOperationDisplayString(method.MethodGroup.Name.ToPascalCase(), method.Name));
}
else if (methodList.Count <= 4)
{
string operationsString = string.Join(", ", methodList.Select(
m => string.Format(CultureInfo.InvariantCulture, createOperationDisplayString(m.MethodGroup.Name.ToPascalCase(), m.Name))));
return string.Format(CultureInfo.InvariantCulture, "Additional parameters for a set of operations, such as: {0}.", operationsString);
}
else
{
return "Additional parameters for a set of operations.";
}
}
/// <summary>
/// Adds the parameter groups to operation parameters.
/// </summary>
/// <param name="codeModel"></param>
public static void AddParameterGroups(CodeModel codeModel)
{
if (codeModel == null)
{
throw new ArgumentNullException("codeModel");
}
HashSet<CompositeType> generatedParameterGroups = new HashSet<CompositeType>();
foreach (Method method in codeModel.Methods)
{
//Copy out flattening transformations as they should be the last
List<ParameterTransformation> flatteningTransformations = method.InputParameterTransformation.ToList();
method.InputParameterTransformation.Clear();
//This group name is normalized by each languages code generator later, so it need not happen here.
IEnumerable<ParameterGroup> parameterGroups = ExtractParameterGroups(method);
List<Parameter> parametersToAddToMethod = new List<Parameter>();
List<Parameter> parametersToRemoveFromMethod = new List<Parameter>();
foreach (ParameterGroup parameterGroup in parameterGroups)
{
CompositeType parameterGroupType =
generatedParameterGroups.FirstOrDefault(item => item.Name.RawValue == parameterGroup.Name);
if (parameterGroupType == null)
{
IEnumerable<Method> methodsWhichUseGroup = GetMethodsUsingParameterGroup(codeModel.Methods, parameterGroup);
parameterGroupType = New<CompositeType>(parameterGroup.Name,new
{
Documentation = GenerateParameterGroupModelText(methodsWhichUseGroup)
});
generatedParameterGroups.Add(parameterGroupType);
//Add to the service client
codeModel.Add(parameterGroupType);
}
foreach (Property property in parameterGroup.ParameterMapping.Keys)
{
Property matchingProperty = parameterGroupType.Properties.FirstOrDefault(
item => item.Name.RawValue == property.Name.RawValue &&
item.IsReadOnly == property.IsReadOnly &&
item.DefaultValue.RawValue == property.DefaultValue.RawValue &&
item.SerializedName.RawValue == property.SerializedName.RawValue);
if (matchingProperty == null)
{
parameterGroupType.Add(property);
}
}
bool isGroupParameterRequired = parameterGroupType.Properties.Any(p => p.IsRequired);
//Create the new parameter object based on the parameter group type
Parameter newParameter = New<Parameter>(new
{
Name = parameterGroup.Name,
IsRequired = isGroupParameterRequired,
Location = ParameterLocation.None,
SerializedName = string.Empty,
ModelType = parameterGroupType,
Documentation = "Additional parameters for the operation"
});
parametersToAddToMethod.Add(newParameter);
//Link the grouped parameters to their parent, and remove them from the method parameters
foreach (Property property in parameterGroup.ParameterMapping.Keys)
{
Parameter p = parameterGroup.ParameterMapping[property];
var parameterTransformation = new ParameterTransformation
{
OutputParameter = p
};
parameterTransformation.ParameterMappings.Add(new ParameterMapping
{
InputParameter = newParameter,
InputParameterProperty = property.GetClientName()
});
method.InputParameterTransformation.Add(parameterTransformation);
parametersToRemoveFromMethod.Add(p);
}
}
method.Remove(p => parametersToRemoveFromMethod.Contains(p));
method.AddRange(parametersToAddToMethod);
// Copy back flattening transformations if any
flatteningTransformations.ForEach(t => method.InputParameterTransformation.Add(t));
}
}
}
}
| 1 | 25,353 | For consistency, shouldn't this use `SwaggerModeler.GetMethodNameFromOperationId(method.Name)` as above? I'd just reuse `"Additional parameters for " + SwaggerModeler.GetMethodNameFromOperationId(method.Name) + " operation."` here, `string.Format` with `CultureInfo` is complete nonsense here anyways. | Azure-autorest | java |
@@ -32,7 +32,7 @@ TEST(LivelinessQos, Liveliness_Automatic_Reliable)
// Liveliness lease duration and announcement period
uint32_t liveliness_ms = 200;
Duration_t liveliness_s(liveliness_ms * 1e-3);
- Duration_t announcement_period(liveliness_ms * 1e-3 * 0.5);
+ Duration_t announcement_period(liveliness_ms * 1e-3 * 0.01);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS) | 1 | // Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "BlackboxTests.hpp"
#include "PubSubReader.hpp"
#include "PubSubWriter.hpp"
#include "PubSubParticipant.hpp"
#include "ReqRepAsReliableHelloWorldRequester.hpp"
#include "ReqRepAsReliableHelloWorldReplier.hpp"
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
//! Tests that when kind is automatic liveliness is never lost, even if the writer never sends data
TEST(LivelinessQos, Liveliness_Automatic_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Liveliness lease duration and announcement period
uint32_t liveliness_ms = 200;
Duration_t liveliness_s(liveliness_ms * 1e-3);
Duration_t announcement_period(liveliness_ms * 1e-3 * 0.5);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
std::this_thread::sleep_for(std::chrono::milliseconds(liveliness_ms * 10));
// When using automatic kind, liveliness on both publisher and subscriber should never be lost
// It would only be lost if the publishing application crashed, which can't be reproduced in the tests
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
}
//! Same as above using best-effort reliability
TEST(LivelinessQos, Liveliness_Automatic_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Liveliness lease duration and announcement period
uint32_t liveliness_ms = 200;
Duration_t liveliness_s(liveliness_ms * 1e-3);
Duration_t announcement_period(liveliness_ms * 1e-3 * 0.5);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
std::this_thread::sleep_for(std::chrono::milliseconds(liveliness_ms * 10));
// When using automatic kind, liveliness on both publisher and subscriber should never be lost
// It would only be lost if the publishing application crashed, which can't be reproduced in the tests
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, and MANUAL_BY_PARTICIPANT
//! Reader is reliable, and MANUAL_BY_PARTICIPANT
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ShortLiveliness_ManualByParticipant_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 200;
uint32_t num_samples = 2;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.05 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.05 * 1e-3 * 0.9);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples * 2);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, and MANUAL_BY_PARTICIPANT
//! Reader is best-effort, and MANUAL_BY_PARTICIPANT
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ShortLiveliness_ManualByParticipant_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 200;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.05 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.05 * 1e-3 * 0.9);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
for (count = 0; count<num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples * 2);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, and MANUAL_BY_PARTICIPANT
//! Reader is reliable, and MANUAL_BY_PARTICIPANT
//! Liveliness lease duration is long in comparison to writer write/assert rate
TEST(LivelinessQos, LongLiveliness_ManualByParticipant_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 10;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 100.0 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 100.0 * 1e-3 * 0.1);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
reader.wait_liveliness_recovered();
// Liveliness shouldn't have been lost
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, and MANUAL_BY_PARTICIPANT
//! Reader is best-effort, and MANUAL_BY_PARTICIPANT
//! Liveliness lease duration is long in comparison to writer write/assert rate
TEST(LivelinessQos, LongLiveliness_ManualByParticipant_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 10;
uint32_t writer_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 100.0 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 100.0 * 1e-3 * 0.1);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(writer_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
for (count = 0; count < writer_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
reader.wait_liveliness_recovered();
// Liveliness shouldn't have been lost
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, and MANUAL_BY_TOPIC
//! Reader is reliable, and MANUAL_BY_TOPIC
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ShortLiveliness_ManualByTopic_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 100;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.9);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
// Send data
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples * 2);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, and MANUAL_BY_TOPIC
//! Reader is best-effort, and MANUAL_BY_TOPIC
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ShortLiveliness_ManualByTopic_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 100;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.9);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
// Send data
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
// Note that in MANUAL_BY_TOPIC liveliness, the assert_liveliness() method relies on sending a heartbeat
// However best-effort writers don't send heartbeats, so the reader in this case will never get notified
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, and MANUAL_BY_TOPIC
//! Reader is reliable, and MANUAL_BY_TOPIC
//! Liveliness lease duration is long in comparison to writer write/assert rate
TEST(LivelinessQos, LongLiveliness_ManualByTopic_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 10;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 100 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 100 * 1e-3 * 0.1);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
// Send data
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
for (count=0; count<num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
reader.wait_liveliness_recovered();
// Liveliness shouldn't have been lost
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, and MANUAL_BY_TOPIC
//! Reader is best-effort, and MANUAL_BY_TOPIC
//! Liveliness lease duration is long in comparison to writer write/assert rate
TEST(LivelinessQos, LongLiveliness_ManualByTopic_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 10;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 100 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 100 * 1e-3 * 0.1);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
// Send data
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
    for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
reader.wait_liveliness_recovered();
// Liveliness shouldn't have been lost
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, liveliness is manual by participant
//! Reader is reliable, liveliness is automatic
//! Liveliness lease duration is long in comparison to the writer write/assert rate
TEST(LivelinessQos, LongLiveliness_ManualByParticipant_Automatic_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 10;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 100.0 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 100.0 * 1e-3 * 0.20);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
// Send data
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
reader.wait_liveliness_recovered();
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, liveliness is manual by participant
//! Reader is reliable, liveliness is automatic
//! Liveliness is short in comparison to the writer write/assert rate
TEST(LivelinessQos, ShortLiveliness_ManualByParticipant_Automatic_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 50;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.1);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples * 2);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, liveliness is manual by participant
//! Reader is best-effort, liveliness is automatic
//! Liveliness is long in comparison to the writer write/assert rate
TEST(LivelinessQos, LongLiveliness_ManualByParticipant_Automatic_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 10;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 100.0 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 100.0 * 1e-3 * 0.1);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
reader.wait_liveliness_recovered();
EXPECT_EQ(writer.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_lost(), 0u);
EXPECT_EQ(reader.times_liveliness_recovered(), 1u);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, liveliness is manual by participant
//! Reader is best-effort, liveliness is automatic
//! Liveliness is short in comparison to the writer write/assert rate
TEST(LivelinessQos, ShortLiveliness_ManualByParticipant_Automatic_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 100;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.9);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples * 2);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, and uses manual by topic liveliness kind
//! Reader is reliable, and uses automatic liveliness kind
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ManualByTopic_Automatic_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 100;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.9);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
// Write some samples
size_t count = 0;
for (auto data_sample : data)
{
// Send data
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
// Now use assert_liveliness() method
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples * 2);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, and uses manual by topic liveliness kind
//! Reader is best-effort, and uses automatic liveliness kind
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ManualByTopic_Automatic_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 100;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.9);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(AUTOMATIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
// Write some samples
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
// Now use assert_liveliness() method
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
    // As best-effort readers do not process heartbeats, the expected number of times liveliness was lost
    // and recovered corresponds to the part of the test in which we sent samples (not when we asserted liveliness)
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
}
//! Tests liveliness with the following parameters
//! Writer is reliable, and uses manual by topic liveliness kind
//! Reader is reliable, and uses manual by participant liveliness kind
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ManualByTopic_ManualByParticipant_Reliable)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 100;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.9);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
// Write some samples
size_t count = 0;
for (auto data_sample : data)
{
// Send data
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
// Now use assert_liveliness() method
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_lost(), num_samples * 2);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples * 2);
}
//! Tests liveliness with the following parameters
//! Writer is best-effort, and uses manual by topic liveliness kind
//! Reader is best-effort, and uses manual by participant liveliness kind
//! Liveliness lease duration is short in comparison to writer write/assert rate
TEST(LivelinessQos, ManualByTopic_ManualByParticipant_BestEffort)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Write rate in milliseconds and number of samples to write
uint32_t writer_sleep_ms = 100;
uint32_t num_samples = 3;
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(writer_sleep_ms * 0.1 * 1e-3);
Duration_t announcement_period(writer_sleep_ms * 0.1 * 1e-3 * 0.9);
reader.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(BEST_EFFORT_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
auto data = default_helloworld_data_generator(num_samples);
reader.startReception(data);
// Write some samples
size_t count = 0;
for (auto data_sample : data)
{
writer.send_sample(data_sample);
++count;
reader.block_for_at_least(count);
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
// Now use assert_liveliness() method
for (count = 0; count < num_samples; count++)
{
writer.assert_liveliness();
std::this_thread::sleep_for(std::chrono::milliseconds(writer_sleep_ms));
}
EXPECT_EQ(writer.times_liveliness_lost(), num_samples * 2);
    // Note that, as best-effort readers do not process heartbeats and assert_liveliness() relies on sending a
    // heartbeat to assess liveliness, the expected number of times liveliness was lost and recovered
    // corresponds only to the part of the test in which the writer wrote samples
EXPECT_EQ(reader.times_liveliness_lost(), num_samples);
EXPECT_EQ(reader.times_liveliness_recovered(), num_samples);
}
//! Tests liveliness in the following scenario
//! A participant with two publishers (AUTOMATIC and MANUAL_BY_PARTICIPANT) and a single topic
//! A participant with one subscriber (AUTOMATIC)
TEST(LivelinessQos, TwoWriters_OneReader_ManualByTopic)
{
unsigned int num_pub = 2;
unsigned int num_sub = 1;
unsigned int lease_duration_ms = 500;
unsigned int announcement_period_ms = 250;
// Publishers
PubSubParticipant<HelloWorldType> publishers(num_pub, 0u, 2u, 0u);
ASSERT_TRUE(publishers.init_participant());
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(AUTOMATIC_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(0u));
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(1u));
// Subscribers
PubSubParticipant<HelloWorldType> subscribers(0u, num_sub, 0u, 2u);
ASSERT_TRUE(subscribers.init_participant());
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(AUTOMATIC_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(0u));
publishers.pub_wait_discovery();
subscribers.sub_wait_discovery();
// Just sleep a bit to give the subscriber the chance to detect that writers are alive
std::this_thread::sleep_for(std::chrono::milliseconds(lease_duration_ms));
EXPECT_EQ(publishers.pub_times_liveliness_lost(), 0u);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 2u);
// Note that from the subscriber point of view both writers recovered liveliness, even if the
// MANUAL_BY_PARTICIPANT one didn't assert liveliness explicitly. This is the expected
    // behaviour according to the DDS standard, section 2.2.3.11 LIVELINESS
}
//! Tests liveliness in the following scenario
//! A participant with two publishers and two topics
//! A participant with two subscribers and two topics
//! Manual by participant liveliness
//! Only one publisher asserts liveliness manually
TEST(LivelinessQos, TwoWriters_TwoReaders_ManualByParticipant)
{
unsigned int num_pub = 2;
unsigned int num_sub = 2;
unsigned int lease_duration_ms = 1000;
unsigned int announcement_period_ms = 100;
// Publishers
PubSubParticipant<HelloWorldType> publishers(num_pub, 0u, num_sub, 0u);
ASSERT_TRUE(publishers.init_participant());
publishers.pub_topic_name(TEST_TOPIC_NAME + "1")
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(0u));
publishers.pub_topic_name(TEST_TOPIC_NAME + "2")
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(1u));
// Subscribers
PubSubParticipant<HelloWorldType> subscribers(0u, num_sub, 0u, num_pub);
ASSERT_TRUE(subscribers.init_participant());
subscribers.sub_topic_name(TEST_TOPIC_NAME + "1")
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(0u));
subscribers.sub_topic_name(TEST_TOPIC_NAME + "2")
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(1u));
publishers.pub_wait_discovery();
subscribers.sub_wait_discovery();
unsigned int num_assertions = 4;
unsigned int assert_rate_ms = 50;
for (unsigned int count = 0; count < num_assertions; count++)
{
publishers.assert_liveliness(0u);
std::this_thread::sleep_for(std::chrono::milliseconds(assert_rate_ms));
}
    // Only one publisher asserts liveliness explicitly, but the other one's liveliness should also be
    // asserted automatically, as the liveliness kind is manual by participant
EXPECT_EQ(publishers.pub_times_liveliness_lost(), 0u);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), num_pub);
EXPECT_EQ(subscribers.sub_times_liveliness_lost(), 0u);
subscribers.sub_wait_liveliness_lost(num_pub);
EXPECT_EQ(publishers.pub_times_liveliness_lost(), num_pub);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), num_pub);
EXPECT_EQ(subscribers.sub_times_liveliness_lost(), num_pub);
}
//! Tests liveliness in the same scenario as above but using manual by topic liveliness
//! A participant with two publishers and two topics
//! A participant with two subscribers and two topics
//! Manual by topic liveliness
//! Only one publisher asserts liveliness manually
TEST(LivelinessQos, TwoWriters_TwoReaders_ManualByTopic)
{
unsigned int num_pub = 2;
unsigned int num_sub = 2;
unsigned int lease_duration_ms = 500;
unsigned int announcement_period_ms = 250;
// Publishers
PubSubParticipant<HelloWorldType> publishers(num_pub, 0u, num_sub, 0u);
ASSERT_TRUE(publishers.init_participant());
publishers.pub_topic_name(TEST_TOPIC_NAME + "1")
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(0u));
publishers.pub_topic_name(TEST_TOPIC_NAME + "2")
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(1u));
// Subscribers
PubSubParticipant<HelloWorldType> subscribers(0u, num_sub, 0u, num_pub);
ASSERT_TRUE(subscribers.init_participant());
subscribers.sub_topic_name(TEST_TOPIC_NAME + "1")
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(0u));
subscribers.sub_topic_name(TEST_TOPIC_NAME + "2")
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(1u));
publishers.pub_wait_discovery();
subscribers.sub_wait_discovery();
unsigned int num_assertions = 4;
unsigned int assert_rate_ms = 10;
for (unsigned int count = 0; count < num_assertions; count++)
{
publishers.assert_liveliness(0u);
std::this_thread::sleep_for(std::chrono::milliseconds(assert_rate_ms));
}
EXPECT_EQ(publishers.pub_times_liveliness_lost(), 0u);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 1u);
EXPECT_EQ(subscribers.sub_times_liveliness_lost(), 0u);
std::this_thread::sleep_for(std::chrono::milliseconds(lease_duration_ms * 2));
EXPECT_EQ(publishers.pub_times_liveliness_lost(), 1u);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 1u);
EXPECT_EQ(subscribers.sub_times_liveliness_lost(), 1u);
}
//! Tests liveliness in the following scenario
//! A participant with two publishers with different liveliness kinds
//! A participant with two subscribers with different liveliness kinds
TEST(LivelinessQos, TwoWriters_TwoReaders)
{
unsigned int num_pub = 2;
unsigned int num_sub = 2;
unsigned int lease_duration_ms = 500;
unsigned int announcement_period_ms = 250;
// Publishers
PubSubParticipant<HelloWorldType> publishers(num_pub, 0u, 3u, 0u);
ASSERT_TRUE(publishers.init_participant());
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(AUTOMATIC_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(0u));
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(1u));
// Subscribers
PubSubParticipant<HelloWorldType> subscribers(0u, num_sub, 0u, 3u);
ASSERT_TRUE(subscribers.init_participant());
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(AUTOMATIC_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(0u));
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(1u));
publishers.pub_wait_discovery();
subscribers.sub_wait_discovery();
publishers.assert_liveliness(1u);
std::this_thread::sleep_for(std::chrono::milliseconds(announcement_period_ms * 2));
// All three subscribers are notified that liveliness was recovered
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 3u);
std::this_thread::sleep_for(std::chrono::milliseconds(lease_duration_ms * 2));
EXPECT_EQ(publishers.pub_times_liveliness_lost(), 1u);
EXPECT_EQ(subscribers.sub_times_liveliness_lost(), 1u);
}
//! Tests liveliness in the same scenario as above, adding a manual by topic writer and reader
//! A participant with three publishers with different liveliness kinds
//! A participant with three subscribers with different liveliness kinds
TEST(LivelinessQos, ThreeWriters_ThreeReaders)
{
unsigned int num_pub = 3;
unsigned int num_sub = 3;
unsigned int lease_duration_ms = 100;
unsigned int announcement_period_ms = 10;
// Publishers
PubSubParticipant<HelloWorldType> publishers(num_pub, 0u, 6u, 0u);
ASSERT_TRUE(publishers.init_participant());
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(AUTOMATIC_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(0u));
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(1u));
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(2u));
// Subscribers
PubSubParticipant<HelloWorldType> subscribers(0u, num_sub, 0u, 6u);
ASSERT_TRUE(subscribers.init_participant());
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(AUTOMATIC_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(0u));
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(1u));
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS);
ASSERT_TRUE(subscribers.init_subscriber(2u));
publishers.pub_wait_discovery();
subscribers.sub_wait_discovery();
// From the point of view of the AUTOMATIC reader, the three writers will have recovered liveliness
subscribers.sub_wait_liveliness_recovered(3u);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 3u);
// The manual by participant writer asserts liveliness
// The manual by participant reader will consider that both the manual by participant and manual by topic
// writers have recovered liveliness
publishers.assert_liveliness(1u);
subscribers.sub_wait_liveliness_recovered(5u);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 5u);
// The manual by topic publisher asserts liveliness
// The manual by topic reader will detect that a new writer has recovered liveliness
publishers.assert_liveliness(2u);
subscribers.sub_wait_liveliness_recovered(6u);
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 6u);
// Wait so that the manual by participant and manual by topic writers lose liveliness
// The manual by participant subscriber will detect that two writers lost liveliness
// The manual by topic subscriber will detect that one writer lost liveliness
// This means that the subscribing participant will see that liveliness was lost three times
subscribers.sub_wait_liveliness_lost(3u);
EXPECT_EQ(publishers.pub_times_liveliness_lost(), 2u);
EXPECT_EQ(subscribers.sub_times_liveliness_lost(), 3u);
}
//! Tests the case where a writer matched to two readers changes QoS and stays matched to only one reader
TEST(LivelinessQos, UnmatchedWriter)
{
unsigned int num_pub = 1;
unsigned int num_sub = 2;
unsigned int lease_duration_ms = 500;
unsigned int announcement_period_ms = 250;
// Publishers
PubSubParticipant<HelloWorldType> publishers(num_pub, 0u, 2u, 0u);
ASSERT_TRUE(publishers.init_participant());
publishers.pub_topic_name(TEST_TOPIC_NAME)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.pub_deadline_period(0.15);
ASSERT_TRUE(publishers.init_publisher(0u));
// Subscribers
PubSubParticipant<HelloWorldType> subscribers(0u, num_sub, 0u, 2u);
ASSERT_TRUE(subscribers.init_participant());
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.sub_deadline_period(0.5);
ASSERT_TRUE(subscribers.init_subscriber(0u));
subscribers.sub_topic_name(TEST_TOPIC_NAME)
.sub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.sub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
.sub_deadline_period(1.5);
ASSERT_TRUE(subscribers.init_subscriber(1u));
publishers.pub_wait_discovery();
subscribers.sub_wait_discovery();
// Change deadline period of the first subscriber so that it no longer matches with the publisher
subscribers.sub_update_deadline_period(0.10, 0u);
publishers.assert_liveliness(0u);
std::this_thread::sleep_for(std::chrono::milliseconds(announcement_period_ms * 2));
EXPECT_EQ(subscribers.sub_times_liveliness_recovered(), 1u);
}
//! Tests liveliness structs when a writer changes from being alive to losing liveliness
//! Writer is reliable, and MANUAL_BY_TOPIC
//! Reader is reliable, and MANUAL_BY_TOPIC
TEST(LivelinessQos, LivelinessChangedStatus_Alive_NotAlive)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(100 * 1e-3);
Duration_t announcement_period(100 * 1e-3 * 0.5);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
LivelinessChangedStatus status = reader.liveliness_changed_status();
EXPECT_EQ(status.alive_count, 0);
EXPECT_EQ(status.alive_count_change, 0);
EXPECT_EQ(status.not_alive_count, 0);
EXPECT_EQ(status.not_alive_count_change, 0);
// Assert liveliness
writer.assert_liveliness();
reader.wait_liveliness_recovered();
status = reader.liveliness_changed_status();
EXPECT_EQ(status.alive_count, 1);
EXPECT_EQ(status.alive_count_change, 1);
EXPECT_EQ(status.not_alive_count, 0);
EXPECT_EQ(status.not_alive_count_change, 0);
// Wait until liveliness is lost
reader.wait_liveliness_lost();
status = reader.liveliness_changed_status();
EXPECT_EQ(status.alive_count, 0);
EXPECT_EQ(status.alive_count_change, -1);
EXPECT_EQ(status.not_alive_count, 1);
EXPECT_EQ(status.not_alive_count_change, 1);
}
//! Tests liveliness structs when an alive writer is unmatched
//! Writer is reliable, and MANUAL_BY_TOPIC
//! Reader is reliable, and MANUAL_BY_TOPIC
TEST(LivelinessQos, LivelinessChangedStatus_Alive_Unmatched)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(100 * 1e-3);
Duration_t announcement_period(100 * 1e-3 * 0.5);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.deadline_period(0.15)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.deadline_period(0.15)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
// Assert liveliness
writer.assert_liveliness();
reader.wait_liveliness_recovered();
LivelinessChangedStatus status = reader.liveliness_changed_status();
EXPECT_EQ(status.alive_count, 1);
EXPECT_EQ(status.alive_count_change, 1);
EXPECT_EQ(status.not_alive_count, 0);
EXPECT_EQ(status.not_alive_count_change, 0);
// Now unmatch by changing the deadline period of the reader
reader.update_deadline_period(0.10);
status = reader.liveliness_changed_status();
EXPECT_EQ(status.alive_count, 0);
EXPECT_EQ(status.alive_count_change, -1);
EXPECT_EQ(status.not_alive_count, 0);
EXPECT_EQ(status.not_alive_count_change, 0);
}
//! Tests liveliness structs when a not alive writer is unmatched
//! Writer is reliable, and MANUAL_BY_TOPIC
//! Reader is reliable, and MANUAL_BY_TOPIC
TEST(LivelinessQos, LivelinessChangedStatus_NotAlive_Unmatched)
{
PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME);
PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME);
// Liveliness lease duration and announcement period, in seconds
Duration_t liveliness_s(100 * 1e-3);
Duration_t announcement_period(100 * 1e-3 * 0.5);
reader.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_lease_duration(liveliness_s)
.deadline_period(0.15)
.init();
writer.reliability(RELIABLE_RELIABILITY_QOS)
.liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS)
.liveliness_announcement_period(announcement_period)
.liveliness_lease_duration(liveliness_s)
.deadline_period(0.15)
.init();
ASSERT_TRUE(reader.isInitialized());
ASSERT_TRUE(writer.isInitialized());
// Wait for discovery.
writer.wait_discovery();
reader.wait_discovery();
// Assert liveliness
writer.assert_liveliness();
reader.wait_liveliness_recovered();
LivelinessChangedStatus status = reader.liveliness_changed_status();
EXPECT_EQ(status.alive_count, 1);
EXPECT_EQ(status.alive_count_change, 1);
EXPECT_EQ(status.not_alive_count, 0);
EXPECT_EQ(status.not_alive_count_change, 0);
// Wait for liveliness lost
reader.wait_liveliness_lost();
// Now unmatch by changing the deadline period of the reader
reader.update_deadline_period(0.10);
status = reader.liveliness_changed_status();
EXPECT_EQ(status.alive_count, 0);
EXPECT_EQ(status.alive_count_change, 0);
EXPECT_EQ(status.not_alive_count, 0);
EXPECT_EQ(status.not_alive_count_change, -1);
}
//! Tests the assert_liveliness on the participant
//! A participant with three publishers, two MANUAL_BY_PARTICIPANT liveliness, one MANUAL_BY_TOPIC
TEST(LivelinessQos, AssertLivelinessParticipant)
{
unsigned int num_pub = 3;
unsigned int lease_duration_ms = 100;
unsigned int announcement_period_ms = 10;
// Publishers
PubSubParticipant<HelloWorldType> publishers(num_pub, 0u, 0u, 0u);
ASSERT_TRUE(publishers.init_participant());
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(0u));
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_PARTICIPANT_LIVELINESS_QOS);
ASSERT_TRUE(publishers.init_publisher(1u));
publishers.pub_topic_name(TEST_TOPIC_NAME)
.reliability(RELIABLE_RELIABILITY_QOS)
.pub_liveliness_announcement_period(announcement_period_ms * 1e-3)
.pub_liveliness_lease_duration(lease_duration_ms * 1e-3)
.pub_liveliness_kind(MANUAL_BY_TOPIC_LIVELINESS_QOS);
    ASSERT_TRUE(publishers.init_publisher(2u));
// Assert liveliness
publishers.assert_liveliness_participant();
// Wait for alive publishers (only the two MANUAL_BY_PARTICIPANT publishers should be alive) to lose liveliness
std::this_thread::sleep_for(std::chrono::milliseconds(lease_duration_ms * 4));
// Only the two MANUAL_BY_PARTICIPANT publishers will have lost liveliness, as the
// MANUAL_BY_TOPIC was never asserted
EXPECT_EQ(publishers.pub_times_liveliness_lost(), 2u);
}
| 1 | 16,379 | As this is just a backport, I guess it is fine to leave these timings, although they are not enough to make tests stable. | eProsima-Fast-DDS | cpp |
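The review message above notes that these fixed sleeps are not enough to make the tests stable. Below is a minimal sketch of a condition-based wait that could replace the blind sleeps, polling the counters the tests already expose; the helper name, the 1 ms poll interval and the 5-second timeout are illustrative assumptions, not part of the test suite.

#include <chrono>
#include <thread>

// Illustrative helper (not part of the test suite): poll a counter until it
// reaches an expected value or a timeout expires, instead of sleeping blindly.
template <typename GetCount>
bool wait_for_count(
        GetCount get_count,
        unsigned int expected,
        std::chrono::milliseconds timeout = std::chrono::seconds(5))
{
    auto deadline = std::chrono::steady_clock::now() + timeout;
    while (get_count() < expected)
    {
        if (std::chrono::steady_clock::now() > deadline)
        {
            return false;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    return true;
}

// Usage inside a test body, instead of asserting right after a fixed sleep:
// EXPECT_TRUE(wait_for_count([&reader]() { return reader.times_liveliness_lost(); }, num_samples));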
@@ -70,8 +70,8 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS {
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECS {
- if signingName == "" {
- signingName = ServiceName
+ if len(signingName) == 0 {
+ signingName = "ecs"
}
svc := &ECS{
Client: client.New( | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package ecs
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
// Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast, container
// management service that makes it easy to run, stop, and manage Docker containers
// on a cluster of Amazon EC2 instances. Amazon ECS lets you launch and stop
// container-enabled applications with simple API calls, allows you to get the
// state of your cluster from a centralized service, and gives you access to
// many familiar Amazon EC2 features like security groups, Amazon EBS volumes,
// and IAM roles.
//
// You can use Amazon ECS to schedule the placement of containers across your
// cluster based on your resource needs, isolation policies, and availability
// requirements. Amazon EC2 Container Service eliminates the need for you to
// operate your own cluster management and configuration management systems
// or worry about scaling your management infrastructure.
// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type ECS struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "ecs" // Service endpoint prefix API calls made to.
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)
// New creates a new instance of the ECS client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a ECS client from just a session.
// svc := ecs.New(mySession)
//
// // Create a ECS client with additional configuration
// svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECS {
if signingName == "" {
signingName = ServiceName
}
svc := &ECS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2014-11-13",
JSONVersion: "1.1",
TargetPrefix: "AmazonEC2ContainerServiceV20141113",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a ECS operation and runs any
// custom request initialization.
func (c *ECS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
| 1 | 17,209 | Why this way? | aws-amazon-ecs-agent | go |
@@ -38,10 +38,7 @@ const (
func setupEnvironment(t *testing.T) string {
t.Helper()
// TODO(shahms): ExtractCompilations should take an output path.
- output := os.Getenv("TEST_TMPDIR")
- if output == "" {
- t.Skip("Skipping test due to incompatible environment (missing TEST_TMPDIR)")
- }
+ output := t.TempDir()
if err := os.Setenv("KYTHE_OUTPUT_DIRECTORY", output); err != nil {
t.Fatalf("Error setting KYTHE_OUTPUT_DIRECTORY: %v", err)
} | 1 | /*
* Copyright 2018 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compdb
import (
"context"
"os"
"path/filepath"
"reflect"
"testing"
"kythe.io/kythe/go/platform/kzip"
)
const (
testPath = "kythe/go/extractors/config/runextractor/compdb"
extractorPath = "kythe/cxx/extractor/cxx_extractor"
workspace = "io_kythe"
)
// setupEnvironment establishes the necessary environment variables and
// current working directory for running tests.
// Returns expected working directory.
func setupEnvironment(t *testing.T) string {
t.Helper()
// TODO(shahms): ExtractCompilations should take an output path.
output := os.Getenv("TEST_TMPDIR")
if output == "" {
t.Skip("Skipping test due to incompatible environment (missing TEST_TMPDIR)")
}
if err := os.Setenv("KYTHE_OUTPUT_DIRECTORY", output); err != nil {
t.Fatalf("Error setting KYTHE_OUTPUT_DIRECTORY: %v", err)
}
root, err := os.Getwd()
if err != nil {
t.Fatalf("Unable to get working directory: %v", err)
}
return root
}
func TestExtractCompilationsEndToEnd(t *testing.T) {
root := setupEnvironment(t)
defer os.Chdir(root)
extractor, err := filepath.Abs(extractorPath)
if err != nil {
t.Fatalf("Unable to get absolute path to extractor: %v", err)
}
// Paths in compilation_database.json are relative to the testdata directory, so change there.
if err := os.Chdir(filepath.Join(root, testPath, "testdata")); err != nil {
t.Fatalf("Unable to change working directory: %v", err)
}
if err := ExtractCompilations(context.Background(), extractor, "compilation_database.json", nil); err != nil {
t.Fatalf("Error running ExtractCompilations: %v", err)
}
err = filepath.Walk(os.Getenv("KYTHE_OUTPUT_DIRECTORY"), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
} else if info.IsDir() {
return nil
} else if filepath.Ext(path) != ".kzip" {
t.Logf("Ignoring non-kzip file: %v", path)
return nil
}
reader, err := os.Open(path)
if err != nil {
return err
}
defer reader.Close()
err = kzip.Scan(reader, func(r *kzip.Reader, unit *kzip.Unit) error {
if !reflect.DeepEqual(unit.Proto.SourceFile, []string{"test_file.cc"}) {
t.Fatalf("Invalid source_file: %v", unit.Proto.SourceFile)
}
return nil
})
if err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("Error processing extracted output: %v", err)
}
}
| 1 | 13,072 | Changing this because otherwise the second run of testExtractCompilationsEndToEndWithDatabase will try to overwrite a generated file and fail. Maybe there's a better way? | kythe-kythe | go |
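The review message above gives the motivation for t.TempDir(): with a shared TEST_TMPDIR, a second test run collides with files generated by the first. A minimal, self-contained sketch (not taken from the Kythe tree; the package, test and file names are made up) of how each t.TempDir() call yields a fresh, automatically cleaned directory:

package compdb_test

import (
	"os"
	"path/filepath"
	"testing"
)

// Each test gets its own directory, removed automatically when the test ends,
// so two tests that both produce output files cannot overwrite each other.
func TestOutputsDoNotCollide(t *testing.T) {
	out := t.TempDir() // unique per test invocation
	path := filepath.Join(out, "compilations.kzip")
	if err := os.WriteFile(path, []byte("placeholder"), 0o644); err != nil {
		t.Fatalf("writing %s: %v", path, err)
	}
}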
@@ -90,6 +90,9 @@ public final class HttpUtils {
* @return the encoded path param
*/
public static String encodePathParam(String pathParam) {
+ if (pathParam.indexOf(';') != -1) {
+ pathParam = pathParam.substring(0, pathParam.indexOf(';'));
+ }
return UrlEscapers.urlPathSegmentEscaper().escape(pathParam);
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.foundation.common.http;
import java.io.File;
import java.net.URI;
import java.net.URISyntaxException;
import com.google.common.net.UrlEscapers;
import org.apache.commons.lang3.StringUtils;
public final class HttpUtils {
private HttpUtils() {
}
/**
* paramName is not case sensitive
* @param headerValue example: attachment;filename=a.txt
*
*/
public static String parseParamFromHeaderValue(String headerValue, String paramName) {
if (StringUtils.isEmpty(headerValue)) {
return null;
}
for (String value : headerValue.split(";")) {
int idx = value.indexOf('=');
if (idx == -1) {
continue;
}
if (paramName.equalsIgnoreCase(value.substring(0, idx))) {
return value.substring(idx + 1);
}
}
return null;
}
/**
* <pre>
* foo://example.com:8042/over/there?name=ferret#nose
* \_/ \______________/\_________/ \_________/ \__/
* | | | | |
* scheme authority path query fragment
* | _____________________|__
* / \ / \
* urn:example:animal:ferret:nose
* </pre>
* <p>the URI syntax components above is referred from <a href="https://tools.ietf.org/html/rfc3986#page-16">RFC3986</a>.
* This method is used to encode the entire path part(e.g. /over/there in the example).</p>
* <em>In order to keep the structure of path, slash '/' will not be encoded. If you want to encode '/' into {@code %2F},
* please consider the {@link #encodePathParam(String)}
* </em>
*
* @param path the entire url path
* @return the encoded url path
*/
public static String uriEncodePath(String path) {
try {
URI uri = new URI(null, null, path, null);
return uri.toASCIIString();
} catch (URISyntaxException e) {
throw new IllegalArgumentException(String.format("uriEncode failed, path=\"%s\".", path), e);
}
}
/**
* Encode path params. For example, if the path of an operation is {@code /over/there/{pathParam}/tail}, this method
   * should be used to encode {@code {pathParam}}. In order to keep the path structure, the slash '/' will be encoded
   * into {@code %2F} to avoid path matching problems.
*
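   * <p>For example, a raw value such as {@code "a/b"} is expected to be escaped to {@code "a%2Fb"},
   * so that it still matches a single path segment.</p>
   *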
* @see UrlEscapers#urlPathSegmentEscaper()
*
* @param pathParam the path param to be encoded
* @return the encoded path param
*/
public static String encodePathParam(String pathParam) {
return UrlEscapers.urlPathSegmentEscaper().escape(pathParam);
}
public static String uriDecodePath(String path) {
if (path == null) {
return null;
}
try {
return new URI(path).getPath();
} catch (URISyntaxException e) {
throw new IllegalArgumentException(String.format("uriDecode failed, path=\"%s\".", path), e);
}
}
/**
* only used by SDK to download from serviceComb producer<br>
   * no need to check RFC 6266's "filename*" rule.
*/
public static String parseFileNameFromHeaderValue(String headerValue) {
String fileName = parseParamFromHeaderValue(headerValue, "filename");
fileName = StringUtils.isEmpty(fileName) ? "default" : fileName;
fileName = uriDecodePath(fileName);
return new File(fileName).getName();
}
/**
* Parse the character encoding from the specified content type header.
* If the content type is null, or there is no explicit character encoding,
* <code>null</code> is returned.
*
* @param contentType a content type header
*/
public static String getCharsetFromContentType(String contentType) {
if (contentType == null) {
return null;
}
int start = contentType.indexOf("charset=");
if (start < 0) {
return null;
}
String encoding = contentType.substring(start + 8);
int end = encoding.indexOf(';');
if (end >= 0) {
encoding = encoding.substring(0, end);
}
encoding = encoding.trim();
if ((encoding.length() > 2) && (encoding.startsWith("\""))
&& (encoding.endsWith("\""))) {
encoding = encoding.substring(1, encoding.length() - 1);
}
return encoding.trim();
}
}
| 1 | 12,576 | This modification is not correct. Suggestions: 1. upgrade guava to 30.0-jre, which will fix this issue | apache-servicecomb-java-chassis | java |
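The review above rejects the truncation introduced by this row's patch. A small, hypothetical illustration of the data loss it causes (the class name is made up; the only assumption is the patched HttpUtils on the classpath):

import org.apache.servicecomb.foundation.common.http.HttpUtils;

public class EncodePathParamExample {
    public static void main(String[] args) {
        // A path parameter value that legitimately contains a ';'.
        String original = "red;blue";
        // With the truncating patch, everything from the first ';' onward is dropped
        // before escaping, so this prints "red" instead of an escaped "red;blue".
        System.out.println(HttpUtils.encodePathParam(original));
    }
}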
@@ -17,8 +17,8 @@ type syscalls struct {
var _ specsruntime.Syscalls = (*syscalls)(nil)
// VerifySignature implements Syscalls.
-func (sys syscalls) VerifySignature(signature specscrypto.Signature, signer address.Address, plaintext []byte) bool {
- return crypto.IsValidSignature(plaintext, signer, signature)
+func (sys syscalls) VerifySignature(signature specscrypto.Signature, signer address.Address, plaintext []byte) error {
+ return crypto.ValidateSignature(plaintext, signer, signature)
}
// HashBlake2b implements Syscalls. | 1 | package vmcontext
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-filecoin/internal/pkg/crypto"
"github.com/filecoin-project/specs-actors/actors/abi"
specscrypto "github.com/filecoin-project/specs-actors/actors/crypto"
specsruntime "github.com/filecoin-project/specs-actors/actors/runtime"
"github.com/ipfs/go-cid"
"github.com/minio/blake2b-simd"
)
type syscalls struct {
gasTank *GasTracker
}
var _ specsruntime.Syscalls = (*syscalls)(nil)
// VerifySignature implements Syscalls.
func (sys syscalls) VerifySignature(signature specscrypto.Signature, signer address.Address, plaintext []byte) bool {
return crypto.IsValidSignature(plaintext, signer, signature)
}
// HashBlake2b implements Syscalls.
func (sys syscalls) HashBlake2b(data []byte) [32]byte {
return blake2b.Sum256(data)
}
// ComputeUnsealedSectorCID implements Syscalls.
// Review: why is this returning an error instead of aborting? is this failing recoverable by actors?
func (sys syscalls) ComputeUnsealedSectorCID(sectorSize abi.SectorSize, pieces []abi.PieceInfo) (cid.Cid, error) {
panic("TODO")
}
// VerifySeal implements Syscalls.
func (sys syscalls) VerifySeal(sectorSize abi.SectorSize, info abi.SealVerifyInfo) bool {
panic("TODO")
}
// VerifyPoSt implements Syscalls.
func (sys syscalls) VerifyPoSt(sectorSize abi.SectorSize, info abi.PoStVerifyInfo) bool {
panic("TODO")
}
// VerifyConsensusFault implements Syscalls.
func (sys syscalls) VerifyConsensusFault(h1, h2 []byte) bool {
panic("TODO")
}
| 1 | 23,055 | All changes to signature code stem from here. The syscalls interface expects VerifySignature to return an error. I performed the change here and bubbled it up through the rest of the code - mostly mechanical. | filecoin-project-venus | go |
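The review above describes bubbling the new error return through callers. A hypothetical caller sketch, not taken from the repository, of what that mostly mechanical change looks like; it reuses the imports of the file above plus fmt:

// Before the change a caller branched on a bool; with the new signature it
// wraps and propagates the error instead.
func checkSignature(sys specsruntime.Syscalls, sig specscrypto.Signature, signer address.Address, plaintext []byte) error {
	if err := sys.VerifySignature(sig, signer, plaintext); err != nil {
		return fmt.Errorf("signature from %s rejected: %w", signer, err)
	}
	return nil
}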
@@ -77,15 +77,12 @@ public final class InMemoryStorage extends StorageComponent implements SpanStore
int maxSpanCount = 500000;
List<String> autocompleteKeys = Collections.emptyList();
- /** {@inheritDoc} */
- @Override
- public Builder strictTraceId(boolean strictTraceId) {
+ @Override public Builder strictTraceId(boolean strictTraceId) {
this.strictTraceId = strictTraceId;
return this;
}
- @Override
- public Builder searchEnabled(boolean searchEnabled) {
+ @Override public Builder searchEnabled(boolean searchEnabled) {
this.searchEnabled = searchEnabled;
return this;
} | 1 | /*
* Copyright 2015-2020 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin2.storage;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;
import zipkin2.Call;
import zipkin2.Callback;
import zipkin2.DependencyLink;
import zipkin2.Endpoint;
import zipkin2.Span;
import zipkin2.internal.DependencyLinker;
/**
* Test storage component that keeps all spans in memory, accepting them on the calling thread.
*
* <p>Internally, spans are indexed on 64-bit trace ID
*
* <p>Here's an example of some traces in memory:
*
* <pre>{@code
* spansByTraceIdTimeStamp:
* <aaaa,July 4> --> ( spanA(time:July 4, traceId:aaaa, service:foo, name:GET),
* spanB(time:July 4, traceId:aaaa, service:bar, name:GET) )
* <cccc,July 4> --> ( spanC(time:July 4, traceId:aaaa, service:foo, name:GET) )
* <bbbb,July 5> --> ( spanD(time:July 5, traceId:bbbb, service:biz, name:GET) )
* <bbbb,July 6> --> ( spanE(time:July 6, traceId:bbbb) service:foo, name:POST )
*
* traceIdToTraceIdTimeStamps:
* aaaa --> [ <aaaa,July 4> ]
* bbbb --> [ <bbbb,July 5>, <bbbb,July 6> ]
* cccc --> [ <cccc,July 4> ]
*
* serviceToTraceIds:
* foo --> [ <aaaa>, <cccc>, <bbbb> ]
* bar --> [ <aaaa> ]
* biz --> [ <bbbb> ]
*
* serviceToSpanNames:
* bar --> ( GET )
* biz --> ( GET )
* foo --> ( GET, POST )
* }</pre>
*/
public final class InMemoryStorage extends StorageComponent implements SpanStore, SpanConsumer,
AutocompleteTags, ServiceAndSpanNames, Traces {
public static Builder newBuilder() {
return new Builder();
}
public static final class Builder extends StorageComponent.Builder {
boolean strictTraceId = true, searchEnabled = true;
int maxSpanCount = 500000;
List<String> autocompleteKeys = Collections.emptyList();
/** {@inheritDoc} */
@Override
public Builder strictTraceId(boolean strictTraceId) {
this.strictTraceId = strictTraceId;
return this;
}
@Override
public Builder searchEnabled(boolean searchEnabled) {
this.searchEnabled = searchEnabled;
return this;
}
@Override public Builder autocompleteKeys(List<String> autocompleteKeys) {
if (autocompleteKeys == null) throw new NullPointerException("autocompleteKeys == null");
this.autocompleteKeys = autocompleteKeys;
return this;
}
/** Eldest traces are removed to ensure spans in memory don't exceed this value */
public Builder maxSpanCount(int maxSpanCount) {
if (maxSpanCount <= 0) throw new IllegalArgumentException("maxSpanCount <= 0");
this.maxSpanCount = maxSpanCount;
return this;
}
@Override
public InMemoryStorage build() {
return new InMemoryStorage(this);
}
}
/**
* Primary source of data is this map, which includes spans ordered descending by timestamp. All
* other maps are derived from the span values here. This uses a list for the spans, so that it is
* visible (via /api/v2/trace/{traceId}) when instrumentation report the same spans multiple times.
*/
private final SortedMultimap<TraceIdTimestamp, Span> spansByTraceIdTimeStamp =
new SortedMultimap(TIMESTAMP_DESCENDING) {
@Override
Collection<Span> valueContainer() {
return new LinkedHashSet<>();
}
};
/** This supports span lookup by {@link Span#traceId() lower 64-bits of the trace ID} */
private final SortedMultimap<String, TraceIdTimestamp> traceIdToTraceIdTimeStamps =
new SortedMultimap<String, TraceIdTimestamp>(STRING_COMPARATOR) {
@Override
Collection<TraceIdTimestamp> valueContainer() {
return new LinkedHashSet<>();
}
};
/** This is an index of {@link Span#traceId()} by {@link Endpoint#serviceName() service name} */
private final ServiceNameToTraceIds serviceToTraceIds = new ServiceNameToTraceIds();
/** This is an index of {@link Span#name()} by {@link Endpoint#serviceName() service name} */
private final SortedMultimap<String, String> serviceToSpanNames =
new SortedMultimap<String, String>(STRING_COMPARATOR) {
@Override
Collection<String> valueContainer() {
return new LinkedHashSet<>();
}
};
/**
* This is an index of {@link Span#remoteServiceName()} by {@link Endpoint#serviceName() service
* name}
*/
private final SortedMultimap<String, String> serviceToRemoteServiceNames =
new SortedMultimap<String, String>(STRING_COMPARATOR) {
@Override
Collection<String> valueContainer() {
return new LinkedHashSet<>();
}
};
private final SortedMultimap<String, String> autocompleteTags =
new SortedMultimap<String, String>(STRING_COMPARATOR) {
@Override
Collection<String> valueContainer() {
return new LinkedHashSet<>();
}
};
final boolean strictTraceId, searchEnabled;
final int maxSpanCount;
final Call<List<String>> autocompleteKeysCall;
final Set<String> autocompleteKeys;
final AtomicInteger acceptedSpanCount = new AtomicInteger();
InMemoryStorage(Builder builder) {
this.strictTraceId = builder.strictTraceId;
this.searchEnabled = builder.searchEnabled;
this.maxSpanCount = builder.maxSpanCount;
this.autocompleteKeysCall = Call.create(builder.autocompleteKeys);
this.autocompleteKeys = new LinkedHashSet<>(builder.autocompleteKeys);
}
public int acceptedSpanCount() {
return acceptedSpanCount.get();
}
public synchronized void clear() {
acceptedSpanCount.set(0);
traceIdToTraceIdTimeStamps.clear();
spansByTraceIdTimeStamp.clear();
serviceToTraceIds.clear();
serviceToRemoteServiceNames.clear();
serviceToSpanNames.clear();
autocompleteTags.clear();
}
@Override public Call<Void> accept(List<Span> spans) {
return new StoreSpansCall(spans);
}
synchronized void doAccept(List<Span> spans) {
int delta = spans.size();
acceptedSpanCount.addAndGet(delta);
int spansToRecover = (spansByTraceIdTimeStamp.size() + delta) - maxSpanCount;
evictToRecoverSpans(spansToRecover);
for (Span span : spans) {
long timestamp = span.timestampAsLong();
String lowTraceId = lowTraceId(span.traceId());
TraceIdTimestamp traceIdTimeStamp = new TraceIdTimestamp(lowTraceId, timestamp);
spansByTraceIdTimeStamp.put(traceIdTimeStamp, span);
traceIdToTraceIdTimeStamps.put(lowTraceId, traceIdTimeStamp);
if (!searchEnabled) continue;
String serviceName = span.localServiceName();
if (serviceName != null) {
serviceToTraceIds.put(serviceName, lowTraceId);
String remoteServiceName = span.remoteServiceName();
if (remoteServiceName != null) {
serviceToRemoteServiceNames.put(serviceName, remoteServiceName);
}
String spanName = span.name();
if (spanName != null) {
serviceToSpanNames.put(serviceName, spanName);
}
}
for (Map.Entry<String, String> tag : span.tags().entrySet()) {
if (autocompleteKeys.contains(tag.getKey())) {
autocompleteTags.put(tag.getKey(), tag.getValue());
}
}
}
}
final class StoreSpansCall extends Call.Base<Void> {
final List<Span> spans;
StoreSpansCall(List<Span> spans) {
this.spans = spans;
}
@Override protected Void doExecute() {
doAccept(spans);
return null;
}
@Override protected void doEnqueue(Callback<Void> callback) {
try {
callback.onSuccess(doExecute());
} catch (Throwable t) {
propagateIfFatal(t);
callback.onError(t);
}
}
@Override public Call<Void> clone() {
return new StoreSpansCall(spans);
}
@Override public String toString() {
return "StoreSpansCall{" + spans + "}";
}
}
/** Returns the count of spans evicted. */
int evictToRecoverSpans(int spansToRecover) {
int spansEvicted = 0;
while (spansToRecover > 0) {
int spansInOldestTrace = deleteOldestTrace();
spansToRecover -= spansInOldestTrace;
spansEvicted += spansInOldestTrace;
}
return spansEvicted;
}
/** Returns the count of spans evicted. */
private int deleteOldestTrace() {
int spansEvicted = 0;
String lowTraceId = spansByTraceIdTimeStamp.delegate.lastKey().lowTraceId;
Collection<TraceIdTimestamp> traceIdTimeStamps = traceIdToTraceIdTimeStamps.remove(lowTraceId);
for (Iterator<TraceIdTimestamp> traceIdTimeStampIter = traceIdTimeStamps.iterator();
traceIdTimeStampIter.hasNext(); ) {
TraceIdTimestamp traceIdTimeStamp = traceIdTimeStampIter.next();
Collection<Span> spans = spansByTraceIdTimeStamp.remove(traceIdTimeStamp);
spansEvicted += spans.size();
}
if (searchEnabled) {
for (String orphanedService : serviceToTraceIds.removeServiceIfTraceId(lowTraceId)) {
serviceToRemoteServiceNames.remove(orphanedService);
serviceToSpanNames.remove(orphanedService);
}
}
return spansEvicted;
}
@Override public Call<List<List<Span>>> getTraces(QueryRequest request) {
return getTraces(request, strictTraceId);
}
synchronized Call<List<List<Span>>> getTraces(QueryRequest request, boolean strictTraceId) {
Set<String> traceIdsInTimerange = traceIdsDescendingByTimestamp(request);
if (traceIdsInTimerange.isEmpty()) return Call.emptyList();
List<List<Span>> result = new ArrayList<>();
for (Iterator<String> lowTraceId = traceIdsInTimerange.iterator();
lowTraceId.hasNext() && result.size() < request.limit(); ) {
List<Span> next = spansByTraceId(lowTraceId.next());
if (!request.test(next)) continue;
if (!strictTraceId) {
result.add(next);
continue;
}
// re-run the query as now spans are strictly grouped
for (List<Span> strictTrace : strictByTraceId(next)) {
if (request.test(strictTrace)) result.add(strictTrace);
}
}
return Call.create(result);
}
static Collection<List<Span>> strictByTraceId(List<Span> next) {
Map<String, List<Span>> groupedByTraceId = new LinkedHashMap<>();
for (Span span : next) {
String traceId = span.traceId();
if (!groupedByTraceId.containsKey(traceId)) {
groupedByTraceId.put(traceId, new ArrayList<>());
}
groupedByTraceId.get(traceId).add(span);
}
return groupedByTraceId.values();
}
/** Used for testing. Returns all traces unconditionally. */
public synchronized List<List<Span>> getTraces() {
List<List<Span>> result = new ArrayList<>();
for (String lowTraceId : traceIdToTraceIdTimeStamps.keySet()) {
List<Span> sameTraceId = spansByTraceId(lowTraceId);
if (strictTraceId) {
result.addAll(strictByTraceId(sameTraceId));
} else {
result.add(sameTraceId);
}
}
return result;
}
/** Used for testing. Returns all dependency links unconditionally. */
public List<DependencyLink> getDependencies() {
return LinkDependencies.INSTANCE.map(getTraces());
}
Set<String> traceIdsDescendingByTimestamp(QueryRequest request) {
if (!searchEnabled) return Collections.emptySet();
Collection<TraceIdTimestamp> traceIdTimestamps =
request.serviceName() != null
? traceIdTimestampsByServiceName(request.serviceName())
: spansByTraceIdTimeStamp.keySet();
long endTs = request.endTs() * 1000;
long startTs = endTs - request.lookback() * 1000;
if (traceIdTimestamps == null || traceIdTimestamps.isEmpty()) return Collections.emptySet();
Set<String> result = new LinkedHashSet<>();
for (TraceIdTimestamp traceIdTimestamp : traceIdTimestamps) {
if (traceIdTimestamp.timestamp >= startTs || traceIdTimestamp.timestamp <= endTs) {
result.add(traceIdTimestamp.lowTraceId);
}
}
return Collections.unmodifiableSet(result);
}
@Override public synchronized Call<List<Span>> getTrace(String traceId) {
traceId = Span.normalizeTraceId(traceId);
List<Span> spans = spansByTraceId(lowTraceId(traceId));
if (spans.isEmpty()) return Call.emptyList();
if (!strictTraceId) return Call.create(spans);
List<Span> filtered = new ArrayList<>(spans);
Iterator<Span> iterator = filtered.iterator();
while (iterator.hasNext()) {
if (!iterator.next().traceId().equals(traceId)) {
iterator.remove();
}
}
return Call.create(filtered);
}
@Override public synchronized Call<List<List<Span>>> getTraces(Iterable<String> traceIds) {
Set<String> normalized = new LinkedHashSet<>();
for (String traceId : traceIds) {
normalized.add(Span.normalizeTraceId(traceId));
}
// Our index is by lower-64 bit trace ID, so let's build trace IDs to fetch
Set<String> lower64Bit = new LinkedHashSet<>();
for (String traceId : normalized) {
lower64Bit.add(lowTraceId(traceId));
}
List<List<Span>> result = new ArrayList<>();
for (String lowTraceId : lower64Bit) {
List<Span> sameTraceId = spansByTraceId(lowTraceId);
if (strictTraceId) {
for (List<Span> trace : strictByTraceId(sameTraceId)) {
if (normalized.contains(trace.get(0).traceId())) {
result.add(trace);
}
}
} else {
result.add(sameTraceId);
}
}
return Call.create(result);
}
@Override public synchronized Call<List<String>> getServiceNames() {
if (!searchEnabled) return Call.emptyList();
return Call.create(new ArrayList<>(serviceToTraceIds.keySet()));
}
@Override public synchronized Call<List<String>> getRemoteServiceNames(String service) {
if (service.isEmpty() || !searchEnabled) return Call.emptyList();
service = service.toLowerCase(Locale.ROOT); // service names are always lowercase!
return Call.create(new ArrayList<>(serviceToRemoteServiceNames.get(service)));
}
@Override public synchronized Call<List<String>> getSpanNames(String service) {
if (service.isEmpty() || !searchEnabled) return Call.emptyList();
service = service.toLowerCase(Locale.ROOT); // service names are always lowercase!
return Call.create(new ArrayList<>(serviceToSpanNames.get(service)));
}
@Override public Call<List<DependencyLink>> getDependencies(long endTs, long lookback) {
QueryRequest request =
QueryRequest.newBuilder().endTs(endTs).lookback(lookback).limit(Integer.MAX_VALUE).build();
// We don't have a query parameter for strictTraceId when fetching dependency links, so we
// ignore traceIdHigh. Otherwise, a single trace can appear as two, doubling callCount.
Call<List<List<Span>>> getTracesCall = getTraces(request, false);
return getTracesCall.map(LinkDependencies.INSTANCE);
}
@Override public synchronized Call<List<String>> getKeys() {
if (!searchEnabled) return Call.emptyList();
return autocompleteKeysCall.clone();
}
@Override public synchronized Call<List<String>> getValues(String key) {
if (key == null) throw new NullPointerException("key == null");
if (key.isEmpty()) throw new IllegalArgumentException("key was empty");
if (!searchEnabled) return Call.emptyList();
return Call.create(new ArrayList<>(autocompleteTags.get(key)));
}
enum LinkDependencies implements Call.Mapper<List<List<Span>>, List<DependencyLink>> {
INSTANCE;
@Override
public List<DependencyLink> map(List<List<Span>> traces) {
DependencyLinker linksBuilder = new DependencyLinker();
for (List<Span> trace : traces) linksBuilder.putTrace(trace);
return linksBuilder.link();
}
@Override
public String toString() {
return "LinkDependencies";
}
}
static final Comparator<String> STRING_COMPARATOR =
new Comparator<String>() {
@Override
public int compare(String left, String right) {
if (left == null) return -1;
return left.compareTo(right);
}
@Override
public String toString() {
return "String::compareTo";
}
};
static final Comparator<TraceIdTimestamp> TIMESTAMP_DESCENDING =
new Comparator<TraceIdTimestamp>() {
@Override
public int compare(TraceIdTimestamp left, TraceIdTimestamp right) {
long x = left.timestamp, y = right.timestamp;
int result = (x < y) ? -1 : ((x == y) ? 0 : 1); // Long.compareTo is JRE 7+
if (result != 0) return -result; // use negative as we are descending
return right.lowTraceId.compareTo(left.lowTraceId);
}
@Override
public String toString() {
return "TimestampDescending{}";
}
};
static final class ServiceNameToTraceIds extends SortedMultimap<String, String> {
ServiceNameToTraceIds() {
super(STRING_COMPARATOR);
}
@Override
Set<String> valueContainer() {
return new LinkedHashSet<>();
}
/** Returns service names orphaned by removing the trace ID */
Set<String> removeServiceIfTraceId(String lowTraceId) {
Set<String> result = new LinkedHashSet<>();
for (Map.Entry<String, Collection<String>> entry : delegate.entrySet()) {
Collection<String> lowTraceIds = entry.getValue();
if (lowTraceIds.remove(lowTraceId) && lowTraceIds.isEmpty()) {
result.add(entry.getKey());
}
}
delegate.keySet().removeAll(result);
return result;
}
}
// Not synchronized as every exposed method on the enclosing type is
abstract static class SortedMultimap<K, V> {
final SortedMap<K, Collection<V>> delegate;
int size = 0;
SortedMultimap(Comparator<K> comparator) {
delegate = new TreeMap<>(comparator);
}
abstract Collection<V> valueContainer();
Set<K> keySet() {
return delegate.keySet();
}
int size() {
return size;
}
void put(K key, V value) {
Collection<V> valueContainer = delegate.get(key);
if (valueContainer == null) {
delegate.put(key, valueContainer = valueContainer());
}
if (valueContainer.add(value)) size++;
}
Collection<V> remove(K key) {
Collection<V> value = delegate.remove(key);
if (value != null) size -= value.size();
return value;
}
void clear() {
delegate.clear();
size = 0;
}
Collection<V> get(K key) {
Collection<V> result = delegate.get(key);
return result != null ? result : Collections.emptySet();
}
}
List<Span> spansByTraceId(String lowTraceId) {
List<Span> sameTraceId = new ArrayList<>();
for (TraceIdTimestamp traceIdTimestamp : traceIdToTraceIdTimeStamps.get(lowTraceId)) {
sameTraceId.addAll(spansByTraceIdTimeStamp.get(traceIdTimestamp));
}
return sameTraceId;
}
Collection<TraceIdTimestamp> traceIdTimestampsByServiceName(String serviceName) {
List<TraceIdTimestamp> traceIdTimestamps = new ArrayList<>();
for (String lowTraceId : serviceToTraceIds.get(serviceName)) {
traceIdTimestamps.addAll(traceIdToTraceIdTimeStamps.get(lowTraceId));
}
Collections.sort(traceIdTimestamps, TIMESTAMP_DESCENDING);
return traceIdTimestamps;
}
static String lowTraceId(String traceId) {
return traceId.length() == 32 ? traceId.substring(16) : traceId;
}
@Override public InMemoryStorage traces() {
return this;
}
@Override public InMemoryStorage spanStore() {
return this;
}
@Override public InMemoryStorage autocompleteTags() {
return this;
}
@Override public InMemoryStorage serviceAndSpanNames() {
return this;
}
@Override public SpanConsumer spanConsumer() {
return this;
}
@Override public void close() {
}
static final class TraceIdTimestamp {
final String lowTraceId;
final long timestamp;
TraceIdTimestamp(String lowTraceId, long timestamp) {
this.lowTraceId = lowTraceId;
this.timestamp = timestamp;
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if (!(o instanceof TraceIdTimestamp)) return false;
TraceIdTimestamp that = (TraceIdTimestamp) o;
return lowTraceId.equals(that.lowTraceId) && timestamp == that.timestamp;
}
@Override
public int hashCode() {
int h$ = 1;
h$ *= 1000003;
h$ ^= lowTraceId.hashCode();
h$ *= 1000003;
h$ ^= (int) ((timestamp >>> 32) ^ timestamp);
return h$;
}
}
@Override public String toString() {
return "InMemoryStorage{}";
}
}
| 1 | 17,203 | This strategy seems good. Just wondering, do you think this is a good time to move stuff out of core? For example, I guess storage, since it's for use by the server and not the client, doesn't need to be Java 6? | openzipkin-zipkin | java
@@ -283,7 +283,8 @@ class Reader implements DataSourceReader, SupportsScanColumnarBatch, SupportsPus
return new Stats(0L, 0L);
}
- if (filterExpressions == null || filterExpressions == Expressions.alwaysTrue()) {
+ // estimate stats using snapshot summary only for partitioned tables (metadata tables are unpartitioned)
+ if (table.spec().fields().size() > 0 && filterExpression() == Expressions.alwaysTrue()) {
long totalRecords = PropertyUtil.propertyAsLong(table.currentSnapshot().summary(),
SnapshotSummary.TOTAL_RECORDS_PROP, Long.MAX_VALUE);
return new Stats(SparkSchemaUtil.estimateSize(lazyType(), totalRecords), totalRecords); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;
import org.apache.iceberg.SnapshotSummary;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.TableScan;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.hadoop.Util;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.spark.SparkFilters;
import org.apache.iceberg.spark.SparkSchemaUtil;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.sources.Filter;
import org.apache.spark.sql.sources.v2.DataSourceOptions;
import org.apache.spark.sql.sources.v2.reader.DataSourceReader;
import org.apache.spark.sql.sources.v2.reader.InputPartition;
import org.apache.spark.sql.sources.v2.reader.InputPartitionReader;
import org.apache.spark.sql.sources.v2.reader.Statistics;
import org.apache.spark.sql.sources.v2.reader.SupportsPushDownFilters;
import org.apache.spark.sql.sources.v2.reader.SupportsPushDownRequiredColumns;
import org.apache.spark.sql.sources.v2.reader.SupportsReportStatistics;
import org.apache.spark.sql.sources.v2.reader.SupportsScanColumnarBatch;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.vectorized.ColumnarBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.TableProperties.DEFAULT_NAME_MAPPING;
class Reader implements DataSourceReader, SupportsScanColumnarBatch, SupportsPushDownFilters,
SupportsPushDownRequiredColumns, SupportsReportStatistics {
private static final Logger LOG = LoggerFactory.getLogger(Reader.class);
private static final Filter[] NO_FILTERS = new Filter[0];
private static final ImmutableSet<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs");
private final Table table;
private final Long snapshotId;
private final Long startSnapshotId;
private final Long endSnapshotId;
private final Long asOfTimestamp;
private final Long splitSize;
private final Integer splitLookback;
private final Long splitOpenFileCost;
private final Broadcast<FileIO> io;
private final Broadcast<EncryptionManager> encryptionManager;
private final boolean caseSensitive;
private StructType requestedSchema = null;
private List<Expression> filterExpressions = null;
private Filter[] pushedFilters = NO_FILTERS;
private final boolean localityPreferred;
private final boolean batchReadsEnabled;
private final int batchSize;
// lazy variables
private Schema schema = null;
private StructType type = null; // cached because Spark accesses it multiple times
private List<CombinedScanTask> tasks = null; // lazy cache of tasks
private Boolean readUsingBatch = null;
Reader(Table table, Broadcast<FileIO> io, Broadcast<EncryptionManager> encryptionManager,
boolean caseSensitive, DataSourceOptions options) {
this.table = table;
this.snapshotId = options.get("snapshot-id").map(Long::parseLong).orElse(null);
this.asOfTimestamp = options.get("as-of-timestamp").map(Long::parseLong).orElse(null);
if (snapshotId != null && asOfTimestamp != null) {
throw new IllegalArgumentException(
"Cannot scan using both snapshot-id and as-of-timestamp to select the table snapshot");
}
this.startSnapshotId = options.get("start-snapshot-id").map(Long::parseLong).orElse(null);
this.endSnapshotId = options.get("end-snapshot-id").map(Long::parseLong).orElse(null);
if (snapshotId != null || asOfTimestamp != null) {
if (startSnapshotId != null || endSnapshotId != null) {
throw new IllegalArgumentException(
"Cannot specify start-snapshot-id and end-snapshot-id to do incremental scan when either snapshot-id or " +
"as-of-timestamp is specified");
}
} else {
if (startSnapshotId == null && endSnapshotId != null) {
throw new IllegalArgumentException("Cannot only specify option end-snapshot-id to do incremental scan");
}
}
// look for split behavior overrides in options
this.splitSize = options.get("split-size").map(Long::parseLong).orElse(null);
this.splitLookback = options.get("lookback").map(Integer::parseInt).orElse(null);
this.splitOpenFileCost = options.get("file-open-cost").map(Long::parseLong).orElse(null);
if (io.getValue() instanceof HadoopFileIO) {
String fsscheme = "no_exist";
try {
Configuration conf = SparkSession.active().sessionState().newHadoopConf();
// merge hadoop config set on table
mergeIcebergHadoopConfs(conf, table.properties());
// merge hadoop config passed as options and overwrite the one on table
mergeIcebergHadoopConfs(conf, options.asMap());
FileSystem fs = new Path(table.location()).getFileSystem(conf);
fsscheme = fs.getScheme().toLowerCase(Locale.ENGLISH);
} catch (IOException ioe) {
LOG.warn("Failed to get Hadoop Filesystem", ioe);
}
String scheme = fsscheme; // Makes an effectively final version of scheme
this.localityPreferred = options.get("locality").map(Boolean::parseBoolean)
.orElseGet(() -> LOCALITY_WHITELIST_FS.contains(scheme));
} else {
this.localityPreferred = false;
}
this.schema = table.schema();
this.io = io;
this.encryptionManager = encryptionManager;
this.caseSensitive = caseSensitive;
this.batchReadsEnabled = options.get("vectorization-enabled").map(Boolean::parseBoolean).orElseGet(() ->
PropertyUtil.propertyAsBoolean(table.properties(),
TableProperties.PARQUET_VECTORIZATION_ENABLED, TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT));
this.batchSize = options.get("batch-size").map(Integer::parseInt).orElseGet(() ->
PropertyUtil.propertyAsInt(table.properties(),
TableProperties.PARQUET_BATCH_SIZE, TableProperties.PARQUET_BATCH_SIZE_DEFAULT));
}
private Schema lazySchema() {
if (schema == null) {
if (requestedSchema != null) {
// the projection should include all columns that will be returned, including those only used in filters
this.schema = SparkSchemaUtil.prune(table.schema(), requestedSchema, filterExpression(), caseSensitive);
} else {
this.schema = table.schema();
}
}
return schema;
}
private Expression filterExpression() {
if (filterExpressions != null) {
return filterExpressions.stream().reduce(Expressions.alwaysTrue(), Expressions::and);
}
return Expressions.alwaysTrue();
}
private StructType lazyType() {
if (type == null) {
this.type = SparkSchemaUtil.convert(lazySchema());
}
return type;
}
@Override
public StructType readSchema() {
return lazyType();
}
/**
* This is called in the Spark Driver when data is to be materialized into {@link ColumnarBatch}
*/
@Override
public List<InputPartition<ColumnarBatch>> planBatchInputPartitions() {
Preconditions.checkState(enableBatchRead(), "Batched reads not enabled");
Preconditions.checkState(batchSize > 0, "Invalid batch size");
String tableSchemaString = SchemaParser.toJson(table.schema());
String expectedSchemaString = SchemaParser.toJson(lazySchema());
String nameMappingString = table.properties().get(DEFAULT_NAME_MAPPING);
List<InputPartition<ColumnarBatch>> readTasks = Lists.newArrayList();
for (CombinedScanTask task : tasks()) {
readTasks.add(new ReadTask<>(
task, tableSchemaString, expectedSchemaString, nameMappingString, io, encryptionManager, caseSensitive,
localityPreferred, new BatchReaderFactory(batchSize)));
}
LOG.info("Batching input partitions with {} tasks.", readTasks.size());
return readTasks;
}
/**
* This is called in the Spark Driver when data is to be materialized into {@link InternalRow}
*/
@Override
public List<InputPartition<InternalRow>> planInputPartitions() {
String tableSchemaString = SchemaParser.toJson(table.schema());
String expectedSchemaString = SchemaParser.toJson(lazySchema());
String nameMappingString = table.properties().get(DEFAULT_NAME_MAPPING);
List<InputPartition<InternalRow>> readTasks = Lists.newArrayList();
for (CombinedScanTask task : tasks()) {
readTasks.add(new ReadTask<>(
task, tableSchemaString, expectedSchemaString, nameMappingString, io, encryptionManager, caseSensitive,
localityPreferred, InternalRowReaderFactory.INSTANCE));
}
return readTasks;
}
@Override
public Filter[] pushFilters(Filter[] filters) {
this.tasks = null; // invalidate cached tasks, if present
List<Expression> expressions = Lists.newArrayListWithExpectedSize(filters.length);
List<Filter> pushed = Lists.newArrayListWithExpectedSize(filters.length);
for (Filter filter : filters) {
Expression expr = SparkFilters.convert(filter);
if (expr != null) {
expressions.add(expr);
pushed.add(filter);
}
}
this.filterExpressions = expressions;
this.pushedFilters = pushed.toArray(new Filter[0]);
// invalidate the schema that will be projected
this.schema = null;
this.type = null;
// Spark doesn't support residuals per task, so return all filters
// to get Spark to handle record-level filtering
return filters;
}
@Override
public Filter[] pushedFilters() {
return pushedFilters;
}
@Override
public void pruneColumns(StructType newRequestedSchema) {
this.requestedSchema = newRequestedSchema;
// invalidate the schema that will be projected
this.schema = null;
this.type = null;
}
@Override
public Statistics estimateStatistics() {
// its a fresh table, no data
if (table.currentSnapshot() == null) {
return new Stats(0L, 0L);
}
if (filterExpressions == null || filterExpressions == Expressions.alwaysTrue()) {
long totalRecords = PropertyUtil.propertyAsLong(table.currentSnapshot().summary(),
SnapshotSummary.TOTAL_RECORDS_PROP, Long.MAX_VALUE);
return new Stats(SparkSchemaUtil.estimateSize(lazyType(), totalRecords), totalRecords);
}
long sizeInBytes = 0L;
long numRows = 0L;
for (CombinedScanTask task : tasks()) {
for (FileScanTask file : task.files()) {
sizeInBytes += file.length();
numRows += file.file().recordCount();
}
}
return new Stats(sizeInBytes, numRows);
}
@Override
public boolean enableBatchRead() {
if (readUsingBatch == null) {
boolean allParquetFileScanTasks =
tasks().stream()
.allMatch(combinedScanTask -> !combinedScanTask.isDataTask() && combinedScanTask.files()
.stream()
.allMatch(fileScanTask -> fileScanTask.file().format().equals(
FileFormat.PARQUET)));
boolean allOrcFileScanTasks =
tasks().stream()
.allMatch(combinedScanTask -> !combinedScanTask.isDataTask() && combinedScanTask.files()
.stream()
.allMatch(fileScanTask -> fileScanTask.file().format().equals(
FileFormat.ORC)));
boolean atLeastOneColumn = lazySchema().columns().size() > 0;
boolean onlyPrimitives = lazySchema().columns().stream().allMatch(c -> c.type().isPrimitiveType());
this.readUsingBatch = batchReadsEnabled && (allOrcFileScanTasks ||
(allParquetFileScanTasks && atLeastOneColumn && onlyPrimitives));
}
return readUsingBatch;
}
private static void mergeIcebergHadoopConfs(
Configuration baseConf, Map<String, String> options) {
options.keySet().stream()
.filter(key -> key.startsWith("hadoop."))
.forEach(key -> baseConf.set(key.replaceFirst("hadoop.", ""), options.get(key)));
}
private List<CombinedScanTask> tasks() {
if (tasks == null) {
TableScan scan = table
.newScan()
.caseSensitive(caseSensitive)
.project(lazySchema());
if (snapshotId != null) {
scan = scan.useSnapshot(snapshotId);
}
if (asOfTimestamp != null) {
scan = scan.asOfTime(asOfTimestamp);
}
if (startSnapshotId != null) {
if (endSnapshotId != null) {
scan = scan.appendsBetween(startSnapshotId, endSnapshotId);
} else {
scan = scan.appendsAfter(startSnapshotId);
}
}
if (splitSize != null) {
scan = scan.option(TableProperties.SPLIT_SIZE, splitSize.toString());
}
if (splitLookback != null) {
scan = scan.option(TableProperties.SPLIT_LOOKBACK, splitLookback.toString());
}
if (splitOpenFileCost != null) {
scan = scan.option(TableProperties.SPLIT_OPEN_FILE_COST, splitOpenFileCost.toString());
}
if (filterExpressions != null) {
for (Expression filter : filterExpressions) {
scan = scan.filter(filter);
}
}
try (CloseableIterable<CombinedScanTask> tasksIterable = scan.planTasks()) {
this.tasks = Lists.newArrayList(tasksIterable);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to close table scan: %s", scan);
}
}
return tasks;
}
@Override
public String toString() {
return String.format(
"IcebergScan(table=%s, type=%s, filters=%s, caseSensitive=%s, batchedReads=%s)",
table, lazySchema().asStruct(), filterExpressions, caseSensitive, enableBatchRead());
}
private static class ReadTask<T> implements Serializable, InputPartition<T> {
private final CombinedScanTask task;
private final String tableSchemaString;
private final String expectedSchemaString;
private final String nameMappingString;
private final Broadcast<FileIO> io;
private final Broadcast<EncryptionManager> encryptionManager;
private final boolean caseSensitive;
private final boolean localityPreferred;
private final ReaderFactory<T> readerFactory;
private transient Schema tableSchema = null;
private transient Schema expectedSchema = null;
private transient String[] preferredLocations = null;
private ReadTask(CombinedScanTask task, String tableSchemaString, String expectedSchemaString,
String nameMappingString, Broadcast<FileIO> io, Broadcast<EncryptionManager> encryptionManager,
boolean caseSensitive, boolean localityPreferred, ReaderFactory<T> readerFactory) {
this.task = task;
this.tableSchemaString = tableSchemaString;
this.expectedSchemaString = expectedSchemaString;
this.io = io;
this.encryptionManager = encryptionManager;
this.caseSensitive = caseSensitive;
this.localityPreferred = localityPreferred;
this.preferredLocations = getPreferredLocations();
this.readerFactory = readerFactory;
this.nameMappingString = nameMappingString;
}
@Override
public InputPartitionReader<T> createPartitionReader() {
return readerFactory.create(task, lazyTableSchema(), lazyExpectedSchema(), nameMappingString, io.value(),
encryptionManager.value(), caseSensitive);
}
@Override
public String[] preferredLocations() {
return preferredLocations;
}
private Schema lazyTableSchema() {
if (tableSchema == null) {
this.tableSchema = SchemaParser.fromJson(tableSchemaString);
}
return tableSchema;
}
private Schema lazyExpectedSchema() {
if (expectedSchema == null) {
this.expectedSchema = SchemaParser.fromJson(expectedSchemaString);
}
return expectedSchema;
}
@SuppressWarnings("checkstyle:RegexpSingleline")
private String[] getPreferredLocations() {
if (!localityPreferred) {
return new String[0];
}
Configuration conf = SparkSession.active().sparkContext().hadoopConfiguration();
return Util.blockLocations(task, conf);
}
}
private interface ReaderFactory<T> extends Serializable {
InputPartitionReader<T> create(CombinedScanTask task, Schema tableSchema, Schema expectedSchema,
String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive);
}
private static class InternalRowReaderFactory implements ReaderFactory<InternalRow> {
private static final InternalRowReaderFactory INSTANCE = new InternalRowReaderFactory();
private InternalRowReaderFactory() {
}
@Override
public InputPartitionReader<InternalRow> create(CombinedScanTask task, Schema tableSchema, Schema expectedSchema,
String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive) {
return new RowReader(task, tableSchema, expectedSchema, nameMapping, io, encryptionManager, caseSensitive);
}
}
private static class BatchReaderFactory implements ReaderFactory<ColumnarBatch> {
private final int batchSize;
BatchReaderFactory(int batchSize) {
this.batchSize = batchSize;
}
@Override
public InputPartitionReader<ColumnarBatch> create(CombinedScanTask task, Schema tableSchema, Schema expectedSchema,
String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive) {
return new BatchReader(task, expectedSchema, nameMapping, io, encryptionManager, caseSensitive, batchSize);
}
}
private static class RowReader extends RowDataReader implements InputPartitionReader<InternalRow> {
RowReader(CombinedScanTask task, Schema tableSchema, Schema expectedSchema, String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive) {
super(task, tableSchema, expectedSchema, nameMapping, io, encryptionManager, caseSensitive);
}
}
private static class BatchReader extends BatchDataReader implements InputPartitionReader<ColumnarBatch> {
BatchReader(CombinedScanTask task, Schema expectedSchema, String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive, int size) {
super(task, expectedSchema, nameMapping, io, encryptionManager, caseSensitive, size);
}
}
}
| 1 | 23,661 | We had a wrong predicate before: we compared a list to an expression. | apache-iceberg | java |
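A minimal sketch of the predicate bug that comment calls out, assuming only the Iceberg Expressions API already imported in the file above (the sketch class itself is hypothetical): a List<Expression> can never be the same object as Expressions.alwaysTrue(), so the old fast-stats guard was dead code; folding the pushed filters into a single expression first, as filterExpression() in the reader does, yields a value that can meaningfully equal the alwaysTrue() singleton.
// Sketch only, not part of the patch; assumes iceberg-api on the classpath.
import java.util.Collections;
import java.util.List;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
class PredicateGuardSketch {
  public static void main(String[] args) {
    List<Expression> filterExpressions = Collections.emptyList(); // no filters pushed down
    // Old guard compared the list itself to an expression; the references can never match.
    Object alwaysTrue = Expressions.alwaysTrue();
    boolean oldGuard = filterExpressions == alwaysTrue; // always false
    // Fixed guard folds the list into one Expression first (what filterExpression() does);
    // Expressions.alwaysTrue() returns a singleton, which is what the == in the patch relies on.
    Expression folded =
        filterExpressions.stream().reduce(Expressions.alwaysTrue(), Expressions::and);
    boolean newGuard = folded == Expressions.alwaysTrue(); // true when nothing was pushed down
    System.out.println(oldGuard + " " + newGuard); // prints "false true"
  }
}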
@@ -21,6 +21,13 @@ func Test_acl_decode(t *testing.T) {
},
want: "user::rw-\nuser:0:rwx\nuser:65534:rwx\ngroup::rwx\nmask::rwx\nother::r--\n",
},
+ {
+ name: "decode group",
+ args: args{
+ xattr: []byte{2, 0, 0, 0, 8, 0, 1, 0, 254, 255, 0, 0},
+ },
+ want: "group:65534:--x\n",
+ },
{
name: "decode fail",
args: args{ | 1 | package dump
import (
"reflect"
"testing"
)
func Test_acl_decode(t *testing.T) {
type args struct {
xattr []byte
}
tests := []struct {
name string
args args
want string
}{
{
name: "decode string",
args: args{
xattr: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
},
want: "user::rw-\nuser:0:rwx\nuser:65534:rwx\ngroup::rwx\nmask::rwx\nother::r--\n",
},
{
name: "decode fail",
args: args{
xattr: []byte("abctest"),
},
want: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
a := &acl{}
a.decode(tt.args.xattr)
if tt.want != a.String() {
t.Errorf("acl.decode() = %v, want: %v", a.String(), tt.want)
}
})
}
}
func Test_acl_encode(t *testing.T) {
tests := []struct {
name string
want []byte
args []aclElement
}{
{
name: "encode values",
want: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
args: []aclElement{
{
aclSID: 8589934591,
Perm: 6,
},
{
aclSID: 8589934592,
Perm: 7,
},
{
aclSID: 8590000126,
Perm: 7,
},
{
aclSID: 21474836479,
Perm: 7,
},
{
aclSID: 73014444031,
Perm: 7,
},
{
aclSID: 141733920767,
Perm: 4,
},
},
},
{
name: "encode fail",
want: []byte{2, 0, 0, 0},
args: []aclElement{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
a := &acl{
Version: 2,
List: tt.args,
}
if got := a.encode(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("acl.encode() = %v, want %v", got, tt.want)
}
})
}
}
| 1 | 14,349 | Name copy-pasted from above. "empty"? | restic-restic | go |
@@ -98,7 +98,7 @@ class Bottle2neck(_Bottleneck):
self.stage_type = stage_type
self.scales = scales
self.width = width
- delattr(self, 'conv2')
+ # delattr(self, 'conv2')
delattr(self, self.norm2_name)
def forward(self, x): | 1 | import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
scales=4,
base_width=26,
base_channels=64,
stage_type='normal',
**kwargs):
"""Bottle2neck block for Res2Net.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
width = int(math.floor(self.planes * (base_width / base_channels)))
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width * scales, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width * scales,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
if stage_type == 'stage' and self.conv2_stride != 1:
self.pool = nn.AvgPool2d(
kernel_size=3, stride=self.conv2_stride, padding=1)
convs = []
bns = []
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = build_conv_layer(
self.conv_cfg,
width * scales,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.stage_type = stage_type
self.scales = scales
self.width = width
delattr(self, 'conv2')
delattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
spx = torch.split(out, self.width, 1)
sp = self.convs[0](spx[0].contiguous())
sp = self.relu(self.bns[0](sp))
out = sp
for i in range(1, self.scales - 1):
if self.stage_type == 'stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp.contiguous())
sp = self.relu(self.bns[i](sp))
out = torch.cat((out, sp), 1)
if self.stage_type == 'normal' or self.conv2_stride == 1:
out = torch.cat((out, spx[self.scales - 1]), 1)
elif self.stage_type == 'stage':
out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Res2Layer(nn.Sequential):
"""Res2Layer to build Res2Net style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
scales=4,
base_width=26,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False),
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=1,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
stage_type='stage',
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
**kwargs))
super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
"""Res2Net backbone.
Args:
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
depth (int): Depth of res2net, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Res2net stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
Example:
>>> from mmdet.models import Res2Net
>>> import torch
>>> self = Res2Net(depth=50, scales=4, base_width=26)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 256, 8, 8)
(1, 512, 4, 4)
(1, 1024, 2, 2)
(1, 2048, 1, 1)
"""
arch_settings = {
50: (Bottle2neck, (3, 4, 6, 3)),
101: (Bottle2neck, (3, 4, 23, 3)),
152: (Bottle2neck, (3, 8, 36, 3))
}
def __init__(self,
scales=4,
base_width=26,
style='pytorch',
deep_stem=True,
avg_down=True,
**kwargs):
self.scales = scales
self.base_width = base_width
super(Res2Net, self).__init__(
style='pytorch', deep_stem=True, avg_down=True, **kwargs)
def make_res_layer(self, **kwargs):
return Res2Layer(
scales=self.scales,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
| 1 | 21,235 | May I ask why this was changed? | open-mmlab-mmdetection | py
@@ -23,15 +23,7 @@ namespace Datadog.Trace.Agent.MessagePack
len++;
}
- if (value.Tags != null)
- {
- len++;
- }
-
- if (value.Metrics != null)
- {
- len++;
- }
+ len += 2; // Tags and metrics
int originalOffset = offset;
| 1 | using System;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Vendors.MessagePack;
using Datadog.Trace.Vendors.MessagePack.Formatters;
namespace Datadog.Trace.Agent.MessagePack
{
internal class SpanMessagePackFormatter : IMessagePackFormatter<Span>
{
public int Serialize(ref byte[] bytes, int offset, Span value, IFormatterResolver formatterResolver)
{
// First, pack array length (or map length).
// It should be the number of members of the object to be serialized.
var len = 8;
if (value.Context.ParentId != null)
{
len++;
}
if (value.Error)
{
len++;
}
if (value.Tags != null)
{
len++;
}
if (value.Metrics != null)
{
len++;
}
int originalOffset = offset;
offset += MessagePackBinary.WriteMapHeader(ref bytes, offset, len);
offset += MessagePackBinary.WriteString(ref bytes, offset, "trace_id");
offset += MessagePackBinary.WriteUInt64(ref bytes, offset, value.Context.TraceId);
offset += MessagePackBinary.WriteString(ref bytes, offset, "span_id");
offset += MessagePackBinary.WriteUInt64(ref bytes, offset, value.Context.SpanId);
offset += MessagePackBinary.WriteString(ref bytes, offset, "name");
offset += MessagePackBinary.WriteString(ref bytes, offset, value.OperationName);
offset += MessagePackBinary.WriteString(ref bytes, offset, "resource");
offset += MessagePackBinary.WriteString(ref bytes, offset, value.ResourceName);
offset += MessagePackBinary.WriteString(ref bytes, offset, "service");
offset += MessagePackBinary.WriteString(ref bytes, offset, value.ServiceName);
offset += MessagePackBinary.WriteString(ref bytes, offset, "type");
offset += MessagePackBinary.WriteString(ref bytes, offset, value.Type);
offset += MessagePackBinary.WriteString(ref bytes, offset, "start");
offset += MessagePackBinary.WriteInt64(ref bytes, offset, value.StartTime.ToUnixTimeNanoseconds());
offset += MessagePackBinary.WriteString(ref bytes, offset, "duration");
offset += MessagePackBinary.WriteInt64(ref bytes, offset, value.Duration.ToNanoseconds());
if (value.Context.ParentId != null)
{
offset += MessagePackBinary.WriteString(ref bytes, offset, "parent_id");
offset += MessagePackBinary.WriteUInt64(ref bytes, offset, (ulong)value.Context.ParentId);
}
if (value.Error)
{
offset += MessagePackBinary.WriteString(ref bytes, offset, "error");
offset += MessagePackBinary.WriteByte(ref bytes, offset, 1);
}
if (value.Tags != null)
{
offset += MessagePackBinary.WriteString(ref bytes, offset, "meta");
offset += MessagePackBinary.WriteMapHeader(ref bytes, offset, value.Tags.Count);
foreach (var pair in value.Tags)
{
offset += MessagePackBinary.WriteString(ref bytes, offset, pair.Key);
offset += MessagePackBinary.WriteString(ref bytes, offset, pair.Value);
}
}
if (value.Metrics != null)
{
offset += MessagePackBinary.WriteString(ref bytes, offset, "metrics");
offset += MessagePackBinary.WriteMapHeader(ref bytes, offset, value.Metrics.Count);
foreach (var pair in value.Metrics)
{
offset += MessagePackBinary.WriteString(ref bytes, offset, pair.Key);
offset += MessagePackBinary.WriteDouble(ref bytes, offset, pair.Value);
}
}
return offset - originalOffset;
}
public Span Deserialize(byte[] bytes, int offset, IFormatterResolver formatterResolver, out int readSize)
{
throw new NotImplementedException();
}
}
}
| 1 | 17,733 | Does this mean we now always include the dictionaries even if they're empty? If so, we should make sure that this doesn't break the Agent (even older versions). It's possible that it doesn't handle empty dictionaries well. | DataDog-dd-trace-dotnet | .cs |
@@ -29,6 +29,9 @@ namespace Nethermind.Blockchain
[ConfigItem(Description = "If set to 'true' then the Fast Sync (eth/63) synchronization algorithm will be used.", DefaultValue = "false")]
bool FastSync { get; set; }
+ [ConfigItem(Description = "Relevant only if 'FastSync' is 'true'. If set to value, then it will set a minimum height threshold limit up to witch Full Sync will stay on when chain will be behind network.", DefaultValue = "null")]
+ long? FastSyncCatchUpHeightDelta { get; set; }
+
[ConfigItem(Description = "If set to 'true' then in the Fast Sync mode blocks will be first downloaded from the provided PivotNumber downwards. This allows for parallelization of requests with many sync peers and with no need to worry about syncing a valid branch (syncing downwards to 0). You need to enter the pivot block number, hash and total difficulty from a trusted source (you can use etherscan and confirm with other sources if you wan to change it).", DefaultValue = "false")]
bool FastBlocks { get; set; }
| 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Config;
namespace Nethermind.Blockchain
{
public interface ISyncConfig : IConfig
{
[ConfigItem(Description = "If 'false' then the node does not download/process new blocks..", DefaultValue = "true")]
bool SynchronizationEnabled { get; set; }
[ConfigItem(Description = "Beam Sync - only for DEBUG / DEV - not working in prod yet.", DefaultValue = "false")]
bool BeamSyncEnabled { get; set; }
[ConfigItem(Description = "If set to 'true' then the Fast Sync (eth/63) synchronization algorithm will be used.", DefaultValue = "false")]
bool FastSync { get; set; }
[ConfigItem(Description = "If set to 'true' then in the Fast Sync mode blocks will be first downloaded from the provided PivotNumber downwards. This allows for parallelization of requests with many sync peers and with no need to worry about syncing a valid branch (syncing downwards to 0). You need to enter the pivot block number, hash and total difficulty from a trusted source (you can use etherscan and confirm with other sources if you wan to change it).", DefaultValue = "false")]
bool FastBlocks { get; set; }
[ConfigItem(Description = "If set to 'true' then in the Fast Blocks mode Nethermind generates smaller requests to avoid Geth from disconnecting. On the Geth heavy networks (mainnet) it is desired while on Parity or Nethermind heavy networks (Goerli, AuRa) it slows down the sync by a factor of ~4", DefaultValue = "true")]
public bool UseGethLimitsInFastBlocks { get; set; }
[ConfigItem(Description = "If set to 'true' then the block bodies will be downloaded in the Fast Sync mode.", DefaultValue = "true")]
bool DownloadBodiesInFastSync { get; set; }
[ConfigItem(Description = "If set to 'true' then the receipts will be downloaded in the Fast Sync mode. This will slow down the process by a few hours but will allow you to interact with dApps that execute extensive historical logs searches (like Maker CDPs).", DefaultValue = "true")]
bool DownloadReceiptsInFastSync { get; set; }
[ConfigItem(Description = "Total Difficulty of the pivot block for the Fast Blocks sync (not - this is total difficulty and not difficulty).", DefaultValue = "null")]
string PivotTotalDifficulty { get; }
[ConfigItem(Description = "Number of the pivot block for the Fast Blocks sync.", DefaultValue = "null")]
string PivotNumber { get; }
[ConfigItem(Description = "Hash of the pivot block for the Fast Blocks sync.", DefaultValue = "null")]
string PivotHash { get; }
}
} | 1 | 23,080 | The description and name are unclear. Typo in 'which'. | NethermindEth-nethermind | .cs
@@ -43,7 +43,12 @@ module Selenium
def text
@bridge.getAlertText
end
+
+ def authenticate(username, password)
+ @bridge.setAuthentication username: username, password: password
+ accept
+ end
end # Alert
end # WebDriver
-end # Selenium
+end # Selenium | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
class Alert
def initialize(bridge)
@bridge = bridge
# fail fast if the alert doesn't exist
bridge.getAlertText
end
def accept
@bridge.acceptAlert
end
def dismiss
@bridge.dismissAlert
end
def send_keys(keys)
@bridge.setAlertValue keys
end
def text
@bridge.getAlertText
end
end # Alert
end # WebDriver
end # Selenium | 1 | 13,040 | Files should have an extra line at the end of them. | SeleniumHQ-selenium | js |
@@ -74,6 +74,12 @@ proc_num_simd_saved(void)
return num_simd_saved;
}
+void
+proc_set_num_simd_saved(int num)
+{
+ num_simd_saved = num;
+}
+
DR_API
int
proc_num_simd_registers(void) | 1 | /* **********************************************************
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "../globals.h"
#include "proc.h"
#include "instr.h"
static int num_simd_saved;
static int num_simd_registers;
void
proc_init_arch(void)
{
num_simd_saved = MCXT_NUM_SIMD_SLOTS;
num_simd_registers = MCXT_NUM_SIMD_SLOTS;
/* FIXME i#1569: NYI */
}
bool
proc_has_feature(feature_bit_t f)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return false;
}
void
machine_cache_sync(void *pc_start, void *pc_end, bool flush_icache)
{
clear_icache(pc_start, pc_end);
}
DR_API
size_t
proc_fpstate_save_size(void)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
DR_API
int
proc_num_simd_saved(void)
{
return num_simd_saved;
}
DR_API
int
proc_num_simd_registers(void)
{
return num_simd_registers;
}
int
proc_num_simd_sse_avx_registers(void)
{
CLIENT_ASSERT(false, "Incorrect usage for ARM/AArch64.");
return 0;
}
int
proc_num_simd_sse_avx_saved(void)
{
CLIENT_ASSERT(false, "Incorrect usage for ARM/AArch64.");
return 0;
}
DR_API
size_t
proc_save_fpstate(byte *buf)
{
/* All registers are saved by insert_push_all_registers so nothing extra
* needs to be saved here.
*/
return DR_FPSTATE_BUF_SIZE;
}
DR_API
void
proc_restore_fpstate(byte *buf)
{
/* Nothing to restore. */
}
void
dr_insert_save_fpstate(void *drcontext, instrlist_t *ilist, instr_t *where, opnd_t buf)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
}
void
dr_insert_restore_fpstate(void *drcontext, instrlist_t *ilist, instr_t *where, opnd_t buf)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
}
uint64
proc_get_timestamp(void)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
| 1 | 17,739 | > i#1312 AVX-512 support: Add code cache to/from context switching. s|to/from|| (redundant and mildly confusing (called "enter" and "return" in code)). | DynamoRIO-dynamorio | c |
@@ -521,10 +521,10 @@ void dag_to_cyto(struct dag *d, int condense_display, int change_size)
fprintf(cytograph, "\t<att name = \"layoutAlgorithm\" value = \"Grid Layout\" type = \"string\" cy:hidden = \"1\"/>\n");
if(change_size) {
- hash_table_firstkey(d->completed_files);
- while(hash_table_nextkey(d->completed_files, &label, (void **) &name)) {
- stat(label, &st);
- average += ((double) st.st_size) / ((double) hash_table_size(d->completed_files));
+ hash_table_firstkey(d->files);
+ while(hash_table_nextkey(d->files, &name, (void **) &f) && dag_file_exists(f)) {
+ stat(name, &st);
+ average += ((double) st.st_size) / ((double) d->completed_files);
}
}
| 1 | /*
Copyright (C) 2013- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <inttypes.h>
#include <ctype.h>
#include <limits.h>
#include <pwd.h>
#include <time.h>
#include <unistd.h>
#include "hash_table.h"
#include "xxmalloc.h"
#include "list.h"
#include "itable.h"
#include "debug.h"
#include "path.h"
#include "set.h"
#include "stringtools.h"
#include "dag.h"
#include "dag_variable.h"
#include "dag_visitors.h"
#include "rmsummary.h"
/*
* BUG: Error handling is not very good.
* BUG: Integrate more with dttools (use DEBUG, etc.)
*/
/* Writes 'var=value' pairs for special vars to the stream */
int dag_to_file_var(const char *name, struct hash_table *vars, int nodeid, FILE * dag_stream, const char *prefix)
{
struct dag_variable_value *v;
v = dag_variable_get_value(name, vars, nodeid);
if(v && !string_null_or_empty(v->value))
fprintf(dag_stream, "%s%s=\"%s\"\n", prefix, name, (char *) v->value);
return 0;
}
int dag_to_file_vars(struct set *var_names, struct hash_table *vars, int nodeid, FILE * dag_stream, const char *prefix)
{
char *name;
set_first_element(var_names);
while((name = set_next_element(var_names)))
{
dag_to_file_var(name, vars, nodeid, dag_stream, prefix);
}
return 0;
}
/* Writes 'export var' tokens from the dag to the stream */
int dag_to_file_exports(const struct dag *d, FILE * dag_stream, const char *prefix)
{
char *name;
struct set *vars = d->export_vars;
struct dag_variable_value *v;
set_first_element(vars);
for(name = set_next_element(vars); name; name = set_next_element(vars))
{
v = hash_table_lookup(d->variables, name);
if(v)
{
fprintf(dag_stream, "%s%s=", prefix, name);
if(!string_null_or_empty(v->value))
fprintf(dag_stream, "\"%s\"", (char *) v->value);
fprintf(dag_stream, "\n");
fprintf(dag_stream, "export %s\n", name);
}
}
return 0;
}
/* Writes a list of files to the stream */
int dag_to_file_files(struct dag_node *n, struct list *fs, FILE * dag_stream, char *(*rename) (struct dag_node * n, const char *filename))
{
//here we may want to call the linker renaming function,
//instead of using f->remotename
const struct dag_file *f;
list_first_item(fs);
while((f = list_next_item(fs)))
if(rename)
fprintf(dag_stream, "%s ", rename(n, f->filename));
else {
const char *remotename = dag_node_get_remote_name(n, f->filename);
if(remotename)
fprintf(dag_stream, "%s->%s ", f->filename, remotename);
else
fprintf(dag_stream, "%s ", f->filename);
}
return 0;
}
/* Writes a production rule to the stream, using remotenames when
* available.
*
* Eventually, we would like to pass a 'convert_name' function,
* instead of using just the remotenames.
*
 * BUG: Currently, expansions are written instead of variables.
*
* The entry function is dag_to_file(dag, filename).
* */
int dag_to_file_node(struct dag_node *n, FILE * dag_stream, char *(*rename) (struct dag_node * n, const char *filename))
{
dag_to_file_files(n, n->target_files, dag_stream, rename);
fprintf(dag_stream, ": ");
dag_to_file_files(n, n->source_files, dag_stream, rename);
fprintf(dag_stream, "\n");
dag_to_file_vars(n->d->special_vars, n->variables, n->nodeid, dag_stream, "@");
dag_to_file_vars(n->d->export_vars, n->variables, n->nodeid, dag_stream, "@");
if(n->local_job)
fprintf(dag_stream, "\tLOCAL %s", n->command);
else
fprintf(dag_stream, "\t%s\n", n->command);
fprintf(dag_stream, "\n");
return 0;
}
/* Writes all the rules to the stream, per category, plus any variables from the category */
int dag_to_file_category(struct dag_task_category *c, FILE * dag_stream, char *(*rename) (struct dag_node * n, const char *filename))
{
struct dag_node *n;
list_first_item(c->nodes);
while((n = list_next_item(c->nodes)))
{
dag_to_file_vars(n->d->special_vars, n->d->variables, n->nodeid, dag_stream, "");
dag_to_file_vars(n->d->export_vars, n->d->variables, n->nodeid, dag_stream, "");
dag_to_file_node(n, dag_stream, rename);
}
return 0;
}
int dag_to_file_categories(const struct dag *d, FILE * dag_stream, char *(*rename) (struct dag_node * n, const char *filename))
{
char *name;
struct dag_task_category *c;
hash_table_firstkey(d->task_categories);
while(hash_table_nextkey(d->task_categories, &name, (void *) &c))
dag_to_file_category(c, dag_stream, rename);
return 0;
}
/* Entry point of the dag_to_file* functions. Writes a dag as an
* equivalent makeflow file. */
int dag_to_file(const struct dag *d, const char *dag_file, char *(*rename) (struct dag_node * n, const char *filename))
{
FILE *dag_stream;
if(dag_file)
dag_stream = fopen(dag_file, "w");
else
dag_stream = stdout;
if(!dag_stream)
return 1;
// For the collect list, use their final value (the value at node with id nodeid_counter).
dag_to_file_var("GC_COLLECT_LIST", d->variables, d->nodeid_counter, dag_stream, "");
dag_to_file_var("GC_PRESERVE_LIST", d->variables, d->nodeid_counter, dag_stream, "");
dag_to_file_exports(d, dag_stream, "");
dag_to_file_categories(d, dag_stream, rename);
if(dag_file)
fclose(dag_stream);
return 0;
}
/* Writes the xml header incantation for DAX */
static void dag_to_dax_header( const char *name, FILE *output )
{
fprintf(output,"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
time_t current_raw_time;
struct tm *time_info;
char buffer[64];
time(&current_raw_time);
time_info = localtime(&current_raw_time);
strftime(buffer, 64, "%Y-%m-%d %T", time_info);
fprintf(output,"<!-- generated: %s -->\n", buffer);
uid_t uid = getuid();
struct passwd *current_user_info;
current_user_info = getpwuid(uid);
fprintf(output,"<!-- generated by: %s -->\n", current_user_info->pw_name);
fprintf(output,"<!-- generator: Makeflow -->\n");
fprintf(output,"<adag ");
fprintf(output,"xmlns=\"http://pegasus.isi.edu/schema/DAX\" ");
fprintf(output,"xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" ");
fprintf(output,"xsi:schemaLocation=\"http://pegasus.isi.edu/schema/DAX http://pegasus.isi.edu/schema/dax-3.4.xsd\" ");
fprintf(output,"version=\"3.4\" ");
fprintf(output,"name=\"%s\">\n", name);
}
/* Write list of files in DAX format for a given node
* @param type 0 for input 1 for output
*/
static void dag_to_dax_files(struct list *fs, int type, FILE *output)
{
const struct dag_file *f;
list_first_item(fs);
while((f = list_next_item(fs))) {
if(type == 0)
fprintf(output, "\t\t<uses name=\"%s\" link=\"input\" />\n", f->filename);
else
fprintf(output, "\t\t<uses name=\"%s\" link=\"output\" register=\"false\" transfer=\"true\" />\n", f->filename);
}
}
/* Extract the executable from a node */
const char *node_executable(const struct dag_node *n)
{
int first_space = strpos(n->command, ' ');
char *executable_path = string_front(n->command, first_space);
int executable_path_length = strlen(executable_path);
int last_slash = strrpos(executable_path, '/');
return string_back(executable_path, executable_path_length - last_slash - 1);
}
const char *node_executable_arguments(const struct dag_node *n)
{
int command_length = strlen(n->command);
int first_space = strpos(n->command, ' ');
const char *before_redirection = string_back(n->command, command_length - first_space - 1);
int first_redirect = strpos(n->command, '>');
if(first_redirect < 0) return before_redirection;
return string_trim_spaces(string_front(before_redirection, first_redirect - first_space - 1));
}
const char *node_executable_redirect(const struct dag_node *n)
{
int command_length = strlen(n->command);
int last_redirect = strrpos(n->command, '>');
int first_redirect = strpos(n->command, '>');
if(last_redirect < 0) return NULL;
if(last_redirect != first_redirect) fatal("makeflow: One of your tasks (%s) contains multiple redirects. Currently Makeflow does not support DAX export with multiple redirects.\n", n->command);
char *raw_redirect = (char *) string_back(n->command, command_length - last_redirect - 1);
return string_trim_spaces(raw_redirect);
}
/* Writes the DAX representation of a node */
void dag_to_dax_individual_node(const struct dag_node *n, UINT64_T node_id, FILE *output)
{
fprintf(output, "\t<job id=\"ID%07" PRIu64 "\" name=\"%s\">\n", node_id, node_executable(n));
fprintf(output, "\t\t<argument>%s</argument>\n", node_executable_arguments(n));
const char *redirection = node_executable_redirect(n);
if(redirection) fprintf(output, "\t\t<stdout name=\"%s\" link=\"output\" />\n", redirection);
dag_to_dax_files(n->source_files, 0, output);
dag_to_dax_files(n->target_files, 1, output);
fprintf(output, "\t</job>\n");
}
/* Iterates over each node to output as DAX */
void dag_to_dax_nodes(const struct dag *d, FILE *output)
{
struct dag_node *n;
UINT64_T node_id;
itable_firstkey(d->node_table);
while(itable_nextkey(d->node_table, &node_id, (void *) &n))
dag_to_dax_individual_node(n, node_id, output);
}
/* Writes the DAX for a node's parent relationships */
void dag_to_dax_parents(const struct dag_node *n, FILE *output)
{
struct dag_node *p;
if(set_size(n->ancestors) > 0){
fprintf(output, "\t<child ref=\"ID%07d\">\n", n->nodeid);
set_first_element(n->ancestors);
while((p = set_next_element(n->ancestors)))
fprintf(output, "\t\t<parent ref=\"ID%07d\" />\n", p->nodeid);
fprintf(output, "\t</child>\n");
}
}
/* Writes the DAX version of each relationship in the dag */
void dag_to_dax_relationships(const struct dag *d, FILE *output)
{
struct dag_node *n;
UINT64_T node_id;
itable_firstkey(d->node_table);
while(itable_nextkey(d->node_table, &node_id, (void *) &n))
dag_to_dax_parents(n, output);
}
/* Writes the xml footer for DAX */
void dag_to_dax_footer(FILE *output)
{
fprintf(output, "</adag>\n");
}
/* Write replica catalog to file */
void dag_to_dax_replica_catalog(const struct dag *d, FILE *output)
{
struct dag_file *f = NULL;
char fn[PATH_MAX];
struct list *input_files = dag_input_files((struct dag*) d);
list_first_item(input_files);
while((f = (struct dag_file*)list_next_item(input_files)))
{
realpath(f->filename, fn);
fprintf(output, "%s\tfile://%s\t%s\n", path_basename(f->filename), fn, "pool=\"local\"");
}
}
/* Write transform catalog to file */
void dag_to_dax_transform_catalog(const struct dag *d, FILE *output)
{
struct dag_node *n;
uint64_t id;
char *fn, *pfn;
char *type;
pfn = (char *) malloc(PATH_MAX * sizeof(char));
struct utsname *name = malloc(sizeof(struct utsname));
uname(name);
struct list *transforms = list_create();
itable_firstkey(d->node_table);
while(itable_nextkey(d->node_table, &id, (void *) &n))
{
fn = xxstrdup(node_executable(n));
if(!list_find(transforms, (int (*)(void *, const void*)) string_equal, fn))
list_push_tail(transforms, fn);
}
list_first_item(transforms);
while((fn = list_next_item(transforms))) {
if(path_lookup(getenv("PATH"), fn, pfn, PATH_MAX)){
realpath(fn, pfn);
type = "STAGEABLE";
} else {
type = "INSTALLED";
}
fprintf(output, "tr %s {\n", fn);
fprintf(output, " site local {\n");
fprintf(output, " pfn \"%s\"\n", pfn);
fprintf(output, " arch \"%s\"\n", name->machine);
fprintf(output, " os \"%s\"\n", name->sysname);
fprintf(output, " type \"%s\"\n", type);
fprintf(output, " }\n");
fprintf(output, "}\n\n");
}
list_free(transforms);
list_delete(transforms);
free(pfn);
}
void dag_to_dax_print_usage(const char *name)
{
printf( "To plan your workflow try:\n");
printf( "\tpegasus-plan -Dpegasus.catalog.replica.file=%s.rc \\\n", name);
printf( "\t -Dpegasus.catalog.transformation.file=%s.rc \\\n", name);
printf( "\t -d %s.dax\n\n", name);
}
/* Entry Point of the dag_to_dax* functions.
* Writes a dag in DAX format to file.
* see: http://pegasus.isi.edu/wms/docs/schemas/dax-3.4/dax-3.4.html
*/
int dag_to_dax( const struct dag *d, const char *name )
{
char dax_filename[PATH_MAX];
sprintf(dax_filename, "%s.dax", name);
FILE *dax = fopen(dax_filename, "w");
dag_to_dax_header(name, dax);
dag_to_dax_nodes(d, dax);
dag_to_dax_relationships(d, dax);
dag_to_dax_footer(dax);
fclose(dax);
sprintf(dax_filename, "%s.rc", name);
dax = fopen(dax_filename, "w");
dag_to_dax_replica_catalog(d, dax);
fclose(dax);
sprintf(dax_filename, "%s.tc", name);
dax = fopen(dax_filename, "w");
dag_to_dax_transform_catalog(d, dax);
fclose(dax);
dag_to_dax_print_usage(name);
return 0;
}
/* The following functions and structures are used to write a dot
* file (graphviz) that shows the graphical presentation of the
* workflow. */
struct dot_node {
int id;
int count;
int print;
};
struct file_node {
int id;
char *name;
double size;
};
void write_node_to_xgmml(FILE *f, char idheader, int id, char* nodename, int process)
{
//file *f must already be open!
fprintf(f,"\t<node id=\"%c%d\" label=\"%s\">\n", idheader, id, nodename);
fprintf(f,"\t\t<att name=\"shared name\" value=\"%s\" type=\"string\"/>\n", nodename);
fprintf(f,"\t\t<att name=\"name\" value=\"%s\" type=\"string\"/>\n", nodename);
fprintf(f,"\t\t<att name=\"process\" value=\"%d\" type=\"boolean\"/>\n", process);
fprintf(f,"\t</node>\n");
}
void write_edge_to_xgmml(FILE *f, char sourceheader, int sourceid, char targetheader, int targetid, int directed)
{
//file *f must already be open!
fprintf(f, "\t<edge id=\"%c%d-%c%d\" label=\"%c%d-%c%d\" source=\"%c%d\" target=\"%c%d\" cy:directed=\"%d\">\n", sourceheader, sourceid, targetheader, targetid, sourceheader, sourceid, targetheader, targetid, sourceheader, sourceid, targetheader, targetid, directed);
fprintf(f,"\t\t<att name=\"shared name\" value=\"%c%d-%c%d\" type=\"string\"/>\n", sourceheader, sourceid, targetheader, targetid);
fprintf(f,"\t\t<att name=\"shared interaction\" value=\"\" type=\"string\"/>\n");
fprintf(f,"\t\t<att name=\"name\" value=\"%c%d-%c%d\" type=\"string\"/>\n", sourceheader, sourceid, targetheader, targetid);
fprintf(f,"\t\t<att name=\"selected\" value=\"0\" type=\"boolean\"/>\n");
fprintf(f,"\t\t<att name=\"interaction\" value=\"\" type=\"string\"/>\n");
fprintf(f,"\t\t<att name=\"weight\" value=\"8\" type=\"integer\"/>\n");
fprintf(f,"\t</edge>\n");
}
void write_styles_file()
{
FILE *styles = fopen("style.xml", "w");
fprintf(styles, "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<vizmap documentVersion=\"3.0\" id=\"VizMap-2015_02_24-21_58\">\n <visualStyle name=\"BioPAX_SIF\">\n <network>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericRnaReference\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"Generic\"/>\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"ComplexGroup\"/>\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericDnaRegionReference\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"GenericSmallMoleculeReference\"/>\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericDnaReference\"/>\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"GenericProteinReference\"/>\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericRnaRegionReference\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"GenericEntityReference\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FF9999\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericRnaReference\"/>\n <discreteMappingEntry value=\"#CCCCFF\" attributeValue=\"Generic\"/>\n <discreteMappingEntry value=\"#99CCFF\" attributeValue=\"ComplexGroup\"/>\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericDnaRegionReference\"/>\n <discreteMappingEntry value=\"#CCCCFF\" attributeValue=\"GenericSmallMoleculeReference\"/>\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericDnaReference\"/>\n <discreteMappingEntry value=\"#99CCFF\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"#FF3300\" attributeValue=\"GenericProteinReference\"/>\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericRnaRegionReference\"/>\n <discreteMappingEntry value=\"#CCCCFF\" attributeValue=\"GenericEntityReference\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"125\"/>\n <visualProperty name=\"NODE_LABEL\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"true\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#000000\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"#00B0F0\" attributeValue=\"METABOLIC_CATALYSIS\"/>\n <discreteMappingEntry value=\"#7F7F7F\" attributeValue=\"SEQUENTIAL_CATALYSIS\"/>\n <discreteMappingEntry value=\"#FF0000\" attributeValue=\"CO_CONTROL\"/>\n <discreteMappingEntry value=\"#000000\" attributeValue=\"GENERIC_OF\"/>\n <discreteMappingEntry value=\"#0070C0\" attributeValue=\"STATE_CHANGE\"/>\n <discreteMappingEntry value=\"#FFC000\" attributeValue=\"COMPONENT_OF\"/>\n <discreteMappingEntry value=\"#7030A0\" attributeValue=\"INTERACTS_WITH\"/>\n <discreteMappingEntry value=\"#FFFF00\" attributeValue=\"IN_SAME_COMPONENT\"/>\n <discreteMappingEntry value=\"#CCC1DA\" 
attributeValue=\"REACTS_WITH\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#000000\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"#00B0F0\" attributeValue=\"METABOLIC_CATALYSIS\"/>\n <discreteMappingEntry value=\"#7F7F7F\" attributeValue=\"SEQUENTIAL_CATALYSIS\"/>\n <discreteMappingEntry value=\"#FF0000\" attributeValue=\"CO_CONTROL\"/>\n <discreteMappingEntry value=\"#000000\" attributeValue=\"GENERIC_OF\"/>\n <discreteMappingEntry value=\"#0070C0\" attributeValue=\"STATE_CHANGE\"/>\n <discreteMappingEntry value=\"#FFC000\" attributeValue=\"COMPONENT_OF\"/>\n <discreteMappingEntry value=\"#7030A0\" attributeValue=\"INTERACTS_WITH\"/>\n <discreteMappingEntry value=\"#FFFF00\" attributeValue=\"IN_SAME_COMPONENT\"/>\n <discreteMappingEntry value=\"#CCC1DA\" attributeValue=\"REACTS_WITH\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_WIDTH\" default=\"4.0\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"METABOLIC_CATALYSIS\"/>\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"SEQUENTIAL_CATALYSIS\"/>\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"COMPONENT_OF\"/>\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"STATE_CHANGE\"/>\n </discreteMapping>\n </visualProperty>\n </edge>\n </visualStyle>\n <visualStyle name=\"Big Labels_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty 
name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty 
name=\"NODE_LABEL_FONT_SIZE\" default=\"24\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#333333\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FF0066\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#666666\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Solid\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n 
<visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"70.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#999999\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"70.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty 
name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"14\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"40.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#CCCCCC\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"interaction\"/>\n 
</visualProperty>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"12.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Big Labels\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" 
default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"24\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#333333\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty 
name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FF0066\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#666666\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Ripple_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty 
name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"80.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"80.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#CCFFFF\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n 
<visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"8\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#666666\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFFCC\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#66CCFF\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"3.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty 
name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Sample1\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#CCCCFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"70.0\"/>\n 
<visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"40.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" 
default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"interaction\"/>\n </visualProperty>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"SOLID\" attributeValue=\"pp\"/>\n <discreteMappingEntry value=\"LONG_DASH\" attributeValue=\"pd\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"1.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Nested Network Style\">\n <network>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"false\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ 
Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"40.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#C80000\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"#FFFFFF\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"SE,NW,c,0.00,0.00\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"60.0\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"#0066CC\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"shared name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n 
<visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"2.0\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\"/>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"#0066CC\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"1.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Ripple\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty 
name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"80.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"80.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" 
default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#CCFFFF\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"8\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#666666\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFFCC\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#66CCFF\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" 
default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"3.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Directed_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" 
default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"5.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"8\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#0099CC\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty 
name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FF0066\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"interaction\"/>\n </visualProperty>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"ARROW\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Universe\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" 
default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"20.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"20.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty 
name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"24\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#FFFFCC\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"40.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Monospaced.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"LONG_DASH\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty 
name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"default black\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"S,NW,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"70.0\"/>\n 
<visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#1E90FF\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#009900\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#CCCCCC\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"15.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" 
default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#009900\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Minimal_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#EBEBEB\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ROUND_RECTANGLE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" 
default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty 
name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"8\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#333333\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#999999\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Solid_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" 
default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"70.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#999999\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"70.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" 
default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"14\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"40.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n 
<visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#CCCCCC\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"interaction\"/>\n </visualProperty>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"12.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Sample1_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" 
default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#CCCCFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"70.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n 
<visualProperty name=\"NODE_SIZE\" default=\"40.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"interaction\"/>\n </visualProperty>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"SOLID\" attributeValue=\"pp\"/>\n <discreteMappingEntry value=\"LONG_DASH\" attributeValue=\"pd\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"1.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"BioPAX_SIF_0\">\n <network>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" 
default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericRnaReference\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"Generic\"/>\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"ComplexGroup\"/>\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericDnaRegionReference\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"GenericSmallMoleculeReference\"/>\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericDnaReference\"/>\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"GenericProteinReference\"/>\n <discreteMappingEntry value=\"ROUND_RECTANGLE\" attributeValue=\"GenericRnaRegionReference\"/>\n <discreteMappingEntry value=\"OCTAGON\" attributeValue=\"GenericEntityReference\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"40.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FF9999\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericRnaReference\"/>\n <discreteMappingEntry value=\"#CCCCFF\" attributeValue=\"Generic\"/>\n <discreteMappingEntry value=\"#99CCFF\" attributeValue=\"ComplexGroup\"/>\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericDnaRegionReference\"/>\n <discreteMappingEntry value=\"#CCCCFF\" attributeValue=\"GenericSmallMoleculeReference\"/>\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericDnaReference\"/>\n <discreteMappingEntry value=\"#99CCFF\" 
attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"#FF3300\" attributeValue=\"GenericProteinReference\"/>\n <discreteMappingEntry value=\"#33CCCC\" attributeValue=\"GenericRnaRegionReference\"/>\n <discreteMappingEntry value=\"#CCCCFF\" attributeValue=\"GenericEntityReference\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"60.0\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"125\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"0.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"0.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"true\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n 
<visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#000000\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"#00B0F0\" attributeValue=\"METABOLIC_CATALYSIS\"/>\n <discreteMappingEntry value=\"#7F7F7F\" attributeValue=\"SEQUENTIAL_CATALYSIS\"/>\n <discreteMappingEntry value=\"#FF0000\" attributeValue=\"CO_CONTROL\"/>\n <discreteMappingEntry value=\"#000000\" attributeValue=\"GENERIC_OF\"/>\n <discreteMappingEntry value=\"#0070C0\" attributeValue=\"STATE_CHANGE\"/>\n <discreteMappingEntry value=\"#FFC000\" attributeValue=\"COMPONENT_OF\"/>\n <discreteMappingEntry value=\"#7030A0\" attributeValue=\"INTERACTS_WITH\"/>\n <discreteMappingEntry value=\"#FFFF00\" attributeValue=\"IN_SAME_COMPONENT\"/>\n <discreteMappingEntry value=\"#CCC1DA\" attributeValue=\"REACTS_WITH\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#000000\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"#00B0F0\" attributeValue=\"METABOLIC_CATALYSIS\"/>\n <discreteMappingEntry value=\"#7F7F7F\" attributeValue=\"SEQUENTIAL_CATALYSIS\"/>\n <discreteMappingEntry value=\"#FF0000\" attributeValue=\"CO_CONTROL\"/>\n <discreteMappingEntry value=\"#000000\" attributeValue=\"GENERIC_OF\"/>\n <discreteMappingEntry value=\"#0070C0\" attributeValue=\"STATE_CHANGE\"/>\n <discreteMappingEntry value=\"#FFC000\" attributeValue=\"COMPONENT_OF\"/>\n <discreteMappingEntry value=\"#7030A0\" attributeValue=\"INTERACTS_WITH\"/>\n <discreteMappingEntry value=\"#FFFF00\" attributeValue=\"IN_SAME_COMPONENT\"/>\n <discreteMappingEntry value=\"#CCC1DA\" attributeValue=\"REACTS_WITH\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"4.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"METABOLIC_CATALYSIS\"/>\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"SEQUENTIAL_CATALYSIS\"/>\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"COMPONENT_OF\"/>\n <discreteMappingEntry value=\"ARROW\" attributeValue=\"STATE_CHANGE\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"default_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" 
default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ROUND_RECTANGLE\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"process\">\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"false\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"65.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"65.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty 
name=\"NODE_PAINT\" default=\"#1E90FF\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"9\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"35.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n 
<visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#666666\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"3.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"default black_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n 
<visualProperty name=\"NODE_HEIGHT\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"S,NW,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"70.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#1E90FF\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#009900\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty 
name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#CCCCCC\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"15.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#009900\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"BioPAX\">\n <network/>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"SimplePhysicalEntity\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Rna\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"GeneticInteraction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"BiochemicalReaction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Interaction\"/>\n 
<discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"TransportWithBiochemicalReaction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Conversion\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"ComplexAssembly\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Protein\"/>\n <discreteMappingEntry value=\"DIAMOND\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"RnaRegion\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Degradation\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"Control\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"TemplateReactionRegulation\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"PhysicalEntity\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"DnaRegion\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"SmallMolecule\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Dna\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"TemplateReaction\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"Modulation\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"MolecularInteraction\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"Catalysis\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Transport\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Protein-phosphorylated\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_HEIGHT\" default=\"20.0\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"13.4\" attributeValue=\"GeneticInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"BiochemicalReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Interaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TransportWithBiochemicalReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Conversion\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"ComplexAssembly\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Degradation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Control\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReactionRegulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Modulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"MolecularInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Catalysis\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Transport\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"#FFFFFF\" attributeValue=\"Complex\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_WIDTH\" default=\"20.0\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"13.4\" attributeValue=\"GeneticInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"BiochemicalReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Interaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TransportWithBiochemicalReaction\"/>\n 
<discreteMappingEntry value=\"13.4\" attributeValue=\"Conversion\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"ComplexAssembly\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Degradation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Control\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReactionRegulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Modulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"MolecularInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Catalysis\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Transport\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#006666\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"#006666\" attributeValue=\"Complex\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"true\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_NONCOMPETITIVE\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_OTHER\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_UNCOMPETITIVE\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"cofactor\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION_ALLOSTERIC\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"right\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_ALLOSTERIC\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"controlled\"/>\n <discreteMappingEntry value=\"CIRCLE\" attributeValue=\"contains\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_UNKMECH\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_IRREVERSIBLE\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_COMPETITIVE\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION_UNKMECH\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION_NONALLOSTERIC\"/>\n </discreteMapping>\n </visualProperty>\n </edge>\n </visualStyle>\n <visualStyle name=\"Minimal\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#EBEBEB\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" 
default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ROUND_RECTANGLE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty 
name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"8\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#333333\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#999999\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" 
default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"makeflow\">\n <network>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"process\">\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"false\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"90.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"process\">\n <discreteMappingEntry value=\"#00CCFF\" attributeValue=\"false\"/>\n <discreteMappingEntry value=\"#00FF00\" attributeValue=\"true\"/>\n 
</discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"90.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"0.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" 
default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"0.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"1.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"DELTA\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Nested Network Style_0\">\n <network>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"false\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"true\"/>\n </discreteMapping>\n 
</visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"40.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#C80000\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"#FFFFFF\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"SE,NW,c,0.00,0.00\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"60.0\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"#0066CC\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"shared name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" 
default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"2.0\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\"/>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\">\n <discreteMapping attributeType=\"boolean\" attributeName=\"has_nested_network\">\n <discreteMappingEntry value=\"#0066CC\" attributeValue=\"true\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"1.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Universe_0\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty 
name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"20.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"20.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" 
default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"24\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#FFFFCC\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"40.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Monospaced.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n 
<visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"LONG_DASH\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"default\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ROUND_RECTANGLE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"30.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" 
default=\"#0099CC\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"70.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#1E90FF\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#006699\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"4.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"35.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" 
default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#666666\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#323232\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"3.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"Dialog.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"Directed\">\n <network>\n <visualProperty name=\"NETWORK_SIZE\" default=\"550.0\"/>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeSizeLocked\" value=\"true\"/>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <visualProperty name=\"NODE_SELECTED\" 
default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_8\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_8, name=Node Custom Paint 8)\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_7\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_7, name=Node Custom Paint 7)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_1\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_1, name=Node Custom Paint 1)\"/>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_PAINT\" default=\"#787878\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_5\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_5, name=Node Custom Paint 5)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_3\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_3, name=Node Custom Paint 3)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_4\" 
default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_4, name=Node Custom Paint 4)\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"50.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"5.0\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_2\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_2, name=Node Custom Paint 2)\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"8\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#0099CC\"/>\n <visualProperty name=\"NODE_SIZE\" default=\"50.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_6\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_6, name=Node Custom Paint 6)\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"50.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMPAINT_9\" default=\"DefaultVisualizableVisualProperty(id=NODE_CUSTOMPAINT_9, name=Node Custom Paint 9)\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FF0066\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"50.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_BEND\" default=\"\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_PAINT\" default=\"#808080\"/>\n <visualProperty name=\"EDGE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"interaction\"/>\n </visualProperty>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n 
<visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#333333\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"ARROW\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n </edge>\n </visualStyle>\n <visualStyle name=\"BioPAX_0\">\n <network>\n <visualProperty name=\"NETWORK_EDGE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_HEIGHT\" default=\"400.0\"/>\n <visualProperty name=\"NETWORK_CENTER_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_CENTER_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_SCALE_FACTOR\" default=\"1.0\"/>\n <visualProperty name=\"NETWORK_BACKGROUND_PAINT\" default=\"#FFFFFF\"/>\n <visualProperty name=\"NETWORK_TITLE\" default=\"\"/>\n <visualProperty name=\"NETWORK_NODE_SELECTION\" default=\"true\"/>\n <visualProperty name=\"NETWORK_CENTER_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NETWORK_WIDTH\" default=\"550.0\"/>\n </network>\n <node>\n <dependency name=\"nodeCustomGraphicsSizeSync\" value=\"true\"/>\n <dependency name=\"nodeSizeLocked\" value=\"false\"/>\n <visualProperty name=\"NODE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"SimplePhysicalEntity\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Rna\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"GeneticInteraction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"BiochemicalReaction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Interaction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"TransportWithBiochemicalReaction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"ComplexAssembly\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Conversion\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Protein\"/>\n <discreteMappingEntry value=\"DIAMOND\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"RnaRegion\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Degradation\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"TemplateReactionRegulation\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"Control\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"DnaRegion\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"PhysicalEntity\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"SmallMolecule\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Dna\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"Modulation\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"TemplateReaction\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"MolecularInteraction\"/>\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"Catalysis\"/>\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"Transport\"/>\n <discreteMappingEntry value=\"ELLIPSE\" attributeValue=\"Protein-phosphorylated\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty 
name=\"NODE_CUSTOMGRAPHICS_POSITION_2\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_3\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_5\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_8\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_9\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_6\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_NESTED_NETWORK_IMAGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_1\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_9\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_1\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_HEIGHT\" default=\"20.0\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"13.4\" attributeValue=\"GeneticInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"BiochemicalReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Interaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TransportWithBiochemicalReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"ComplexAssembly\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Conversion\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Degradation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReactionRegulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Control\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Modulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"MolecularInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Catalysis\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Transport\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_3\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_9\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_BORDER_STROKE\" default=\"SOLID\"/>\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#FFFFFF\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"#FFFFFF\" attributeValue=\"Complex\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_6\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_LABEL_POSITION\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_7\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"NODE_WIDTH\" default=\"20.0\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"13.4\" attributeValue=\"GeneticInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"BiochemicalReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Interaction\"/>\n 
<discreteMappingEntry value=\"13.4\" attributeValue=\"TransportWithBiochemicalReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"ComplexAssembly\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Conversion\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Complex\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Degradation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReactionRegulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Control\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Modulation\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"TemplateReaction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"MolecularInteraction\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Catalysis\"/>\n <discreteMappingEntry value=\"13.4\" attributeValue=\"Transport\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_Z_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_1\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_DEPTH\" default=\"0.0\"/>\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_2\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_4\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_7\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL_WIDTH\" default=\"200.0\"/>\n <visualProperty name=\"NODE_BORDER_PAINT\" default=\"#006666\">\n <discreteMapping attributeType=\"string\" attributeName=\"BIOPAX_TYPE\">\n <discreteMappingEntry value=\"#006666\" attributeValue=\"Complex\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"NODE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_6\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL\" default=\"\">\n <passthroughMapping attributeType=\"string\" attributeName=\"name\"/>\n </visualProperty>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_7\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_Y_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_POSITION_8\" default=\"C,C,c,0.00,0.00\"/>\n <visualProperty name=\"NODE_X_LOCATION\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_3\" default=\"0.0\"/>\n <visualProperty name=\"NODE_BORDER_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"NODE_BORDER_WIDTH\" default=\"2.0\"/>\n <visualProperty name=\"NODE_LABEL_FONT_SIZE\" default=\"12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_2\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_8\" default=\"0.0\"/>\n <visualProperty name=\"NODE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"NODE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,12\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_5\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_SIZE_5\" default=\"0.0\"/>\n <visualProperty name=\"NODE_CUSTOMGRAPHICS_4\" default=\"org.cytoscape.ding.customgraphics.NullCustomGraphics,0,[ Remove Graphics ],\"/>\n <visualProperty name=\"NODE_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty 
name=\"NODE_CUSTOMGRAPHICS_SIZE_4\" default=\"0.0\"/>\n </node>\n <edge>\n <dependency name=\"arrowColorMatchesEdge\" value=\"true\"/>\n <visualProperty name=\"EDGE_LABEL_COLOR\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_CURVED\" default=\"true\"/>\n <visualProperty name=\"EDGE_STROKE_SELECTED_PAINT\" default=\"#FF0000\"/>\n <visualProperty name=\"EDGE_STROKE_UNSELECTED_PAINT\" default=\"#404040\"/>\n <visualProperty name=\"EDGE_LABEL\" default=\"\"/>\n <visualProperty name=\"EDGE_SELECTED\" default=\"false\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_SIZE\" default=\"10\"/>\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\"/>\n <visualProperty name=\"EDGE_VISIBLE\" default=\"true\"/>\n <visualProperty name=\"EDGE_TOOLTIP\" default=\"\"/>\n <visualProperty name=\"EDGE_WIDTH\" default=\"1.0\"/>\n <visualProperty name=\"EDGE_LABEL_TRANSPARENCY\" default=\"255\"/>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SHAPE\" default=\"NONE\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SELECTED_PAINT\" default=\"#FFFF00\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_UNSELECTED_PAINT\" default=\"#000000\"/>\n <visualProperty name=\"EDGE_LABEL_FONT_FACE\" default=\"SansSerif.plain,plain,10\"/>\n <visualProperty name=\"EDGE_TARGET_ARROW_SHAPE\" default=\"NONE\">\n <discreteMapping attributeType=\"string\" attributeName=\"interaction\">\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_NONCOMPETITIVE\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_OTHER\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_UNCOMPETITIVE\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"cofactor\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION_ALLOSTERIC\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"right\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_ALLOSTERIC\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"controlled\"/>\n <discreteMappingEntry value=\"CIRCLE\" attributeValue=\"contains\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_UNKMECH\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_IRREVERSIBLE\"/>\n <discreteMappingEntry value=\"T\" attributeValue=\"INHIBITION_COMPETITIVE\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION_UNKMECH\"/>\n <discreteMappingEntry value=\"DELTA\" attributeValue=\"ACTIVATION_NONALLOSTERIC\"/>\n </discreteMapping>\n </visualProperty>\n <visualProperty name=\"EDGE_SOURCE_ARROW_SELECTED_PAINT\" default=\"#FFFF00\\\"/>\n </edge>\n </visualStyle>\n</vizmap>");
fclose(styles);
}
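// Write the DAG as a Cytoscape XGMML graph on stdout: one node per rule
// (labelled with the first token of its command), one node per file, and an
// edge for every source/target relationship. The companion vizmap styles file
// is produced afterwards by write_styles_file().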
void dag_to_cyto(struct dag *d, int condense_display, int change_size)
{
struct dag_node *n;
struct dag_file *f;
struct hash_table *h, *g;
struct dot_node *t;
struct file_node *e;
struct stat st;
const char *fn;
char *name;
char *label;
double average = 0;
double width = 0;
FILE *cytograph = stdout;
//FILE *cytograph = fopen("cytoscape.xgmml", "w");
fprintf(cytograph, "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n");
fprintf(cytograph, "<graph id=\"1\" label=\"small example\" directed=\"1\" cy:documentVersion=\"3.0\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" xmlns:cy=\"http://www.cytoscape.org/\" xmlns=\"http://www.cs.rpi.edu/XGMML\">\n");
fprintf(cytograph, "\t<att name=\"networkMetadata\">\n");
fprintf(cytograph, "\t\t<rdf:RDF>\n");
fprintf(cytograph, "\t\t\t<rdf:Description rdf:about=\"http://ccl.cse.nd.edu/\">\n");
fprintf(cytograph, "\t\t\t\t<dc:type>Makeflow Structure</dc:type>\n");
fprintf(cytograph, "\t\t\t\t<dc:description>N/A</dc:description>\n");
fprintf(cytograph, "\t\t\t\t<dc:identifier>N/A</dc:identifier>\n");
time_t timer;
time(&timer);
struct tm* currenttime = localtime(&timer);
char timestring[20];
strftime(timestring, sizeof(timestring), "%Y-%m-%d %H:%M:%S", currenttime);
fprintf(cytograph, "\t\t\t\t<dc:date>%s</dc:date>\n", timestring);
fprintf(cytograph, "\t\t\t\t<dc:title>Makeflow Visualization</dc:title>\n");
fprintf(cytograph, "\t\t\t\t<dc:source>http://ccl.cse.nd.edu/</dc:source>\n");
fprintf(cytograph, "\t\t\t\t<dc:format>Cytoscape-XGMML</dc:format>\n");
fprintf(cytograph, "\t\t\t</rdf:Description>\n");
fprintf(cytograph, "\t\t</rdf:RDF>\n");
fprintf(cytograph, "\t</att>\n");
fprintf(cytograph, "\t<att name=\"shared name\" value=\"Makeflow Visualization\" type=\"string\"/>\n");
fprintf(cytograph, "\t<att name=\"name\" value=\"Makeflow Visualization\" type=\"string\"/>\n");
fprintf(cytograph, "\t<att name=\"selected\" value=\"1\" type=\"boolean\"/>\n");
fprintf(cytograph, "\t<att name=\"__Annotations\" type=\"list\">\n");
fprintf(cytograph, "\t</att>\n");
fprintf(cytograph, "\t<att name = \"layoutAlgorithm\" value = \"Grid Layout\" type = \"string\" cy:hidden = \"1\"/>\n");
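	// If node resizing was requested, compute the average size of the
	// completed files; file widths computed below are scaled against it.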
if(change_size) {
hash_table_firstkey(d->completed_files);
while(hash_table_nextkey(d->completed_files, &label, (void **) &name)) {
			if(stat(label, &st) == 0) {
				average += ((double) st.st_size) / ((double) hash_table_size(d->completed_files));
			}
}
}
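	// Index rules by the first token of their command, counting how many
	// rules share each command (used by the condense_display option).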
h = hash_table_create(0, 0);
for(n = d->nodes; n; n = n->next) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
t = hash_table_lookup(h, label);
if(!t) {
t = malloc(sizeof(*t));
t->id = n->nodeid;
t->count = 1;
t->print = 1;
hash_table_insert(h, label, t);
} else {
t->count++;
}
free(name);
}
for(n = d->nodes; n; n = n->next) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
t = hash_table_lookup(h, label);
if(!condense_display || t->print) {
t->print = 0;
}
		write_node_to_xgmml(cytograph, 'N', n->nodeid, label, 1);
free(name);
}
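	// Gather every source and target file into a table of file_node entries,
	// recording each file's size, or -1 if it cannot be stat()ed.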
g = hash_table_create(0, 0);
for(n = d->nodes; n; n = n->next) {
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
fn = f->filename;
e = hash_table_lookup(g, fn);
if(!e) {
e = malloc(sizeof(*e));
e->id = hash_table_size(g);
e->name = xxstrdup(fn);
if(stat(fn, &st) == 0) {
e->size = (double) (st.st_size);
} else
e->size = -1;
hash_table_insert(g, fn, e);
}
}
list_first_item(n->target_files);
while((f = list_next_item(n->target_files))) {
fn = f->filename;
e = hash_table_lookup(g, fn);
if(!e) {
e = malloc(sizeof(*e));
e->id = hash_table_size(g);
e->name = xxstrdup(fn);
if(stat(fn, &st) == 0) {
e->size = (double) (st.st_size);
} else
e->size = -1;
hash_table_insert(g, fn, e);
}
}
}
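	// Emit one XGMML node per file; when change_size is set, derive a width
	// from the file's size relative to the average, clamped to [2.5, 25].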
hash_table_firstkey(g);
while(hash_table_nextkey(g, &label, (void **) &e)) {
fn = e->name;
write_node_to_xgmml(cytograph, 'F', e->id, (char *)fn, 0);
if(change_size) {
if(e->size >= 0) {
width = 5 * (e->size / average);
if(width < 2.5)
width = 2.5;
if(width > 25)
width = 25;
}
}
}
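	// Emit the edges: file -> rule for each source file, and rule -> file for
	// each target file.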
for(n = d->nodes; n; n = n->next) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
t = hash_table_lookup(h, label);
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
e = hash_table_lookup(g, f->filename);
write_edge_to_xgmml(cytograph, 'F', e->id, 'N', n->nodeid, 1);
}
list_first_item(n->target_files);
while((f = list_next_item(n->target_files))) {
e = hash_table_lookup(g, f->filename);
write_edge_to_xgmml(cytograph, 'N', n->nodeid, 'F', e->id, 1);
}
free(name);
}
fprintf(cytograph, "</graph>\n");
fclose(cytograph);
write_styles_file();
hash_table_firstkey(h);
while(hash_table_nextkey(h, &label, (void **) &t)) {
free(t);
hash_table_remove(h, label);
}
hash_table_firstkey(g);
while(hash_table_nextkey(g, &label, (void **) &e)) {
free(e);
hash_table_remove(g, label);
}
hash_table_delete(g);
hash_table_delete(h);
}
void dag_to_dot(struct dag *d, int condense_display, int change_size, int with_labels, int with_details )
{
struct dag_node *n;
struct dag_file *f;
struct hash_table *h, *g;
struct dot_node *t;
struct file_node *e;
struct stat st;
const char *fn;
char *name;
char *label;
//Dot Details Variables
int i;
int j;
struct file_node *src;
struct file_node *tar;
double average = 0;
double width = 0;
printf( "digraph {\n");
if(change_size) {
hash_table_firstkey(d->files);
while(hash_table_nextkey(d->files, &name, (void**)&f )) {
if(stat(name,&st)==0) {
average += ((double) st.st_size) / ((double) hash_table_size(d->completed_files));
}
}
}
h = hash_table_create(0, 0);
printf( "node [shape=ellipse,color = green,style = %s,fixedsize = false];\n", with_labels ? "unfilled" : "filled" );
for(n = d->nodes; n; n = n->next) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
t = hash_table_lookup(h, label);
if(!t) {
t = malloc(sizeof(*t));
t->id = n->nodeid;
t->count = 1;
t->print = 1;
hash_table_insert(h, label, t);
} else {
t->count++;
}
free(name);
}
g = hash_table_create(0, 0);
for(n = d->nodes; n; n = n->next) {
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
fn = f->filename;
e = hash_table_lookup(g, fn);
if(!e) {
e = malloc(sizeof(*e));
e->id = hash_table_size(g);
e->name = xxstrdup(fn);
if(stat(fn, &st) == 0) {
e->size = (double) (st.st_size);
} else
e->size = -1;
hash_table_insert(g, fn, e);
}
}
list_first_item(n->target_files);
while((f = list_next_item(n->target_files))) {
fn = f->filename;
e = hash_table_lookup(g, fn);
if(!e) {
e = malloc(sizeof(*e));
e->id = hash_table_size(g);
e->name = xxstrdup(fn);
if(stat(fn, &st) == 0) {
e->size = (double) (st.st_size);
} else
e->size = -1;
hash_table_insert(g, fn, e);
}
}
}
for(n = d->nodes; n; n = n->next) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
t = hash_table_lookup(h, label);
if(!condense_display || t->print) {
//Dot Details
if(with_details) {
printf("subgraph cluster_S%d { \n", condense_display ? t->id : n->nodeid);
printf("\tstyle=unfilled;\n\tcolor=red\n");
printf("\tcores%d [style=filled, color=white, label=\"Cores: %"PRId64"\"]\n", condense_display ? t->id : n->nodeid, n->resources->cores);
printf("\tresMem%d [style=filled, color=white, label=\"Memory: %"PRId64" MB\"]\n", condense_display ? t->id : n->nodeid, n->resources->resident_memory);
printf("\tworkDirFtprnt%d [style=filled, color=white, label=\"Footprint: %"PRId64" MB\"]\n", condense_display ? t->id : n->nodeid, n->resources->workdir_footprint);
printf("\tcores%d -> resMem%d -> workDirFtprnt%d [color=white]", condense_display ? t->id : n->nodeid, condense_display ? t->id : n->nodeid, condense_display ? t->id : n->nodeid);
//Source Files
list_first_item(n->source_files);
i = 0;
while((f = list_next_item(n->source_files))) {
fn = f->filename;
e = hash_table_lookup(g, fn);
if(e) {
i++;
printf("\tsrc_%d_%d [label=\"%s\", style=unfilled, color=purple, shape=box];\n", condense_display ? t->id : n->nodeid, e->id, e->name);
printf("\tsrc_%d_%d -> N%d;\n", condense_display ? t->id : n->nodeid, e->id, condense_display ? t->id : n->nodeid);
}
}
//Target Files
list_first_item(n->target_files);
j = 0;
while((f = list_next_item(n->target_files))) {
fn = f->filename;
e = hash_table_lookup(g, fn);
if(e) {
j++;
printf("\ttar_%d_%d [label=\"%s\", style=dotted, color=purple, shape=box];\n", condense_display ? t->id : n->nodeid, e->id, e->name);
printf("\tN%d -> tar_%d_%d;\n", condense_display ? t->id : n->nodeid, condense_display ? t->id : n->nodeid, e->id);
}
}
}
if((t->count == 1) || !condense_display) {
printf( "N%d [label=\"%s\"];\n", condense_display ? t->id : n->nodeid, with_labels ? label : "");
} else {
printf( "N%d [label=\"%s x%d\"];\n", t->id, with_labels ? label : "", t->count);
}
if(with_details) {
printf( "}\n" );
}
t->print = 0;
}
free(name);
}
printf( "node [shape=box,color=blue,style=%s,fixedsize=false];\n",with_labels ? "unfilled" : "filled" );
hash_table_firstkey(g);
while(hash_table_nextkey(g, &label, (void **) &e)) {
fn = e->name;
printf( "F%d [label = \"%s", e->id, with_labels ? fn : "" );
char cytoid[6];
sprintf(cytoid, "F%d", e->id);
if(change_size) {
if(e->size >= 0) {
width = 5 * (e->size / average);
if(width < 2.5)
width = 2.5;
if(width > 25)
width = 25;
printf( "\\nsize:%.0lfkb\", style=filled, fillcolor=skyblue1, fixedsize=true, width=%lf, height=0.75", e->size / 1024, width);
} else {
printf( "\", fixedsize = false, style = unfilled, ");
}
} else
printf( "\"");
printf( "];\n");
}
printf( "\n");
for(n = d->nodes; n; n = n->next) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
t = hash_table_lookup(h, label);
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
e = hash_table_lookup(g, f->filename);
if(with_details) {
src = hash_table_lookup(g, f->filename);
if(src) {
printf( "F%d -> src_%d_%d;\n", e->id, condense_display ? t->id : n->nodeid, e->id );
}
}
else {
printf( "F%d -> N%d;\n", e->id, condense_display ? t->id : n->nodeid);
}
}
list_first_item(n->target_files);
while((f = list_next_item(n->target_files))) {
e = hash_table_lookup(g, f->filename);
if(with_details) {
tar = hash_table_lookup(g, f->filename);
if(tar) {
printf( "tar_%d_%d -> F%d;\n", condense_display ? t->id : n->nodeid, e->id, e->id );
}
}
else {
printf( "N%d -> F%d;\n", condense_display ? t->id : n->nodeid, e->id);
}
}
free(name);
}
printf( "}\n");
hash_table_firstkey(h);
while(hash_table_nextkey(h, &label, (void **) &t)) {
free(t);
hash_table_remove(h, label);
}
hash_table_firstkey(g);
while(hash_table_nextkey(g, &label, (void **) &e)) {
free(e);
hash_table_remove(g, label);
}
hash_table_delete(g);
hash_table_delete(h);
}
void ppm_color_parser(struct dag_node *n, char *color_array, int ppm_mode, char (*ppm_option), int current_level, int whitespace_on)
{
if(whitespace_on) {
color_array[0] = 1;
color_array[1] = 1;
color_array[2] = 1;
return;
}
struct dag_file *f;
int ppm_option_int;
char *name, *label;
memset(color_array, 0, 3 * sizeof(char));
if(ppm_mode == 1) {
switch (n->state) {
case DAG_NODE_STATE_WAITING:
break;
case DAG_NODE_STATE_RUNNING:
color_array[0] = 1;
color_array[1] = 1;
color_array[2] = 0;
break;
case DAG_NODE_STATE_COMPLETE:
color_array[0] = 0;
color_array[1] = 1;
color_array[2] = 0;
break;
case DAG_NODE_STATE_FAILED:
color_array[0] = 1;
color_array[1] = 0;
color_array[2] = 0;
break;
case DAG_NODE_STATE_ABORTED:
color_array[0] = 1;
color_array[1] = 0;
color_array[2] = 0;
break;
default:
color_array[0] = 0;
color_array[1] = 0;
color_array[2] = 1;
break;
}
}
if(ppm_mode == 2) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
if(strcmp(label, ppm_option) == 0) {
//node name is matched, set to yellow
color_array[0] = 0;
color_array[1] = 1;
color_array[2] = 1;
}
}
if(ppm_mode == 3) {
//searches the files for a result file named such
list_first_item(n->target_files);
while((f = list_next_item(n->target_files))) {
if(strcmp(f->filename, ppm_option) == 0) {
//makes this file, set to purple
color_array[0] = 1;
color_array[1] = 0;
color_array[2] = 1;
break;
}
}
}
if(ppm_mode == 4) {
ppm_option_int = atoi(ppm_option);
if(current_level == ppm_option_int) {
//sets everything at that level to yellow
color_array[0] = 0;
color_array[1] = 1;
color_array[2] = 1;
}
}
if(ppm_mode == 5) {
color_array[current_level % 3] = 1;
}
}
void dag_to_ppm(struct dag *d, int ppm_mode, char *ppm_option)
{
int count, count_row, max_ancestor = 0, max_size = 0;
UINT64_T key;
struct dag_node *n;
char *name;
char *label;
struct hash_table *h;
dag_find_ancestor_depth(d);
h = hash_table_create(0, 0);
itable_firstkey(d->node_table);
while(itable_nextkey(d->node_table, &key, (void **) &n)) {
name = xxstrdup(n->command);
label = strtok(name, " \t\n");
if(max_ancestor < n->ancestor_depth)
max_ancestor = n->ancestor_depth;
sprintf(name, "%d", n->nodeid);
hash_table_insert(h, name, n);
}
struct list **ancestor_count_list = malloc((max_ancestor + 1) * sizeof(struct list *));
//initialize all of the lists
for(count = 0; count <= max_ancestor; count++) {
ancestor_count_list[count] = list_create();
}
hash_table_firstkey(h);
while(hash_table_nextkey(h, &label, (void **) &n)) {
list_push_tail(ancestor_count_list[n->ancestor_depth], n);
if(list_size(ancestor_count_list[n->ancestor_depth]) > max_size)
max_size = list_size(ancestor_count_list[n->ancestor_depth]);
}
int i;
int node_num_rows = 0;
int max_image_width = 1200;
int node_width = max_image_width / max_size;
if(node_width < 5)
node_width = 5;
for(i = 0; i <= max_ancestor; i++) {
node_num_rows = node_num_rows + ((node_width * list_size(ancestor_count_list[i])) - 1) / (max_image_width) + 1;
}
int max_image_height = 800;
int row_height = max_image_height / node_num_rows;
if(row_height < 5)
row_height = 5;
//calculate the column size so that we can center the data
int x_length = (max_image_width / node_width) * node_width;
int y_length = row_height * (node_num_rows);
int current_depth_width;
int current_depth_nodesPrinted;
int current_depth_pixel_nodesPrinted;
int nodesCanBePrinted = x_length / node_width;
int current_depth_nodesCanBePrinted;
int current_depth_numRows;
int numRows;
int pixel_count_col;
int pixel_count_height;
int whitespace;
int whitespace_left;
int whitespace_right;
int whitespace_on;
printf( "P6\n"); //"Magic Number", don't change
printf( "%d %d\n", x_length, y_length); //Width and Height
printf( "1\n"); //maximum color value
char color_array[3];
for(count_row = 0; count_row <= max_ancestor; count_row++) { //each ancestor depth in the dag
current_depth_width = list_size(ancestor_count_list[count_row]); //the width of this particular level of the dag
current_depth_numRows = (node_width * current_depth_width - 1) / (x_length) + 1;
current_depth_nodesPrinted = 0;
for(numRows = 0; numRows < current_depth_numRows; numRows++) {
if((current_depth_width - current_depth_nodesPrinted) < nodesCanBePrinted)
current_depth_nodesCanBePrinted = current_depth_width - current_depth_nodesPrinted;
else
current_depth_nodesCanBePrinted = nodesCanBePrinted;
whitespace = x_length - (current_depth_nodesCanBePrinted * node_width);
whitespace_left = whitespace / 2;
whitespace_right = x_length - (whitespace - whitespace_left);
for(pixel_count_height = 0; pixel_count_height < row_height; pixel_count_height++) { //each pixel row of said ancestor height
list_first_item(ancestor_count_list[count_row]);
current_depth_pixel_nodesPrinted = 0;
for(pixel_count_col = 0; pixel_count_col < x_length; pixel_count_col++) { //for each node in the width
if((pixel_count_col < whitespace_left) || (pixel_count_col >= whitespace_right)) {
whitespace_on = 1;
} else {
whitespace_on = 0;
if((pixel_count_col - whitespace_left - (current_depth_pixel_nodesPrinted * node_width)) == 0) {
n = list_next_item(ancestor_count_list[count_row]);
current_depth_pixel_nodesPrinted++;
if(pixel_count_height == 0)
current_depth_nodesPrinted++;
}
}
ppm_color_parser(n, color_array, ppm_mode, ppm_option, count_row, whitespace_on);
printf( "%c%c%c", color_array[0], color_array[1], color_array[2]);
}
}
}
}
hash_table_delete(h);
free(ancestor_count_list);
}
/* vim: set noexpandtab tabstop=4: */
| 1 | 12,046 | This stat may fail, right? I think dag_file_exists does not actually check the file is there. | cooperative-computing-lab-cctools | c |
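The check the reviewer is asking about would look roughly like the sketch below. checked_file_size is a hypothetical helper introduced only for illustration (it is not part of the cctools sources); the point is simply to test stat()'s return value before trusting st.st_size, so files that are not actually on disk are skipped instead of contributing garbage to the average.

#include <sys/stat.h>

/* Hypothetical helper: return the file size, or -1 when stat() fails. */
static double checked_file_size(const char *path)
{
	struct stat st;
	if(stat(path, &st) == 0)
		return (double) st.st_size;
	return -1;
}

/* Usage sketch at the averaging loop above:
 *   double sz = checked_file_size(label);
 *   if(sz >= 0)
 *       average += sz / (double) hash_table_size(d->completed_files);
 */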
@@ -16,7 +16,11 @@ describe('Options Validation', function() {
});
const testObject = { a: 1 };
- const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
+ const validatedObject = objectValidator(
+ testObject,
+ {},
+ { validationLevel: testValidationLevel }
+ );
expect(validatedObject).to.deep.equal({ a: 1 });
expect(validatedObject).to.be.frozen; | 1 | 'use strict';
const expect = require('chai').expect;
const createValidationFunction = require('../../lib/options_validator').createValidationFunction;
const sinonChai = require('sinon-chai');
const sinon = require('sinon');
const chai = require('chai');
chai.use(sinonChai);
describe('Options Validation', function() {
const testValidationLevel = 'error';
it('Should validate a basic object with type number', function() {
const objectValidator = createValidationFunction({
a: { type: 'number' }
});
const testObject = { a: 1 };
const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
expect(validatedObject).to.deep.equal({ a: 1 });
expect(validatedObject).to.be.frozen;
});
it('Should validate a basic object with type object', function() {
const objectValidator = createValidationFunction({
a: { type: 'object' }
});
const testObject = { a: { b: 1 } };
const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
});
it('Should validate a basic object with array of types', function() {
const objectValidator = createValidationFunction({
a: { type: ['number', 'object'] }
});
const testObject1 = { a: 1 };
const validatedObject1 = objectValidator(testObject1, { validationLevel: testValidationLevel });
expect(validatedObject1).to.deep.equal(testObject1);
expect(validatedObject1).to.be.frozen;
const testObject2 = { a: { b: true } };
const validatedObject2 = objectValidator(testObject2, { validationLevel: testValidationLevel });
expect(validatedObject2).to.deep.equal(testObject2);
expect(validatedObject2).to.be.frozen;
});
it('Should validate a basic object with custom type', function() {
function CustomType() {
this.type = 'custom';
}
const objectValidator = createValidationFunction({ a: { type: CustomType } });
const testObject = { a: new CustomType() };
const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
});
it('Should ignore fields not in schema', function() {
const objectValidator = createValidationFunction({
a: { type: 'boolean' }
});
const testObject = { b: 1 };
const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
});
it('Should use default validationLevel', function() {
const objectValidator = createValidationFunction({
a: { type: 'boolean' }
});
const testObject = { b: 1 };
const validatedObject = objectValidator(testObject);
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
});
it('Should skip validation if validationLevel is none', function() {
const objectValidator = createValidationFunction({
a: { type: 'boolean' }
});
const testObject = { a: 45 };
const validatedObject = objectValidator(testObject, { validationLevel: 'none' });
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
});
it('Should warn if validationLevel is warn', function() {
const stub = sinon.stub(console, 'warn');
const objectValidator = createValidationFunction({
a: { type: 'boolean' }
});
const testObject = { a: 45 };
const validatedObject = objectValidator(testObject, { validationLevel: 'warn' });
expect(stub).to.have.been.calledOnce;
expect(stub).to.have.been.calledWith('a should be of type boolean, but is of type number.');
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
console.warn.restore();
});
it('Should error if validationLevel is error', function() {
const objectValidator = createValidationFunction({
a: { type: 'boolean' }
});
const testObject = { a: 45 };
try {
const validatedObject = objectValidator(testObject, { validationLevel: 'error' });
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
} catch (err) {
expect(err).to.not.be.null;
expect(err.message).to.equal('a should be of type boolean, but is of type number.');
}
});
it('Should fail validation if required option is not present', function() {
const stub = sinon.stub(console, 'warn');
const objectValidator = createValidationFunction({
a: { required: true }
});
const testObject = { b: 45 };
const validatedObject = objectValidator(testObject, { validationLevel: 'warn' });
expect(stub).to.have.been.calledOnce;
expect(stub).to.have.been.calledWith('required option [a] was not found.');
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
console.warn.restore();
});
it('Should validate an object with required and type fields', function() {
const objectValidator = createValidationFunction({
a: { type: 'boolean', required: true }
});
const testObject = { a: true };
const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
});
it('Should fail validation if required or type fails', function() {
const objectValidator = createValidationFunction({
a: { type: 'boolean', required: true }
});
const testObject = { b: 1 };
try {
const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
} catch (err) {
expect(err).to.not.be.null;
expect(err.message).to.equal('required option [a] was not found.');
}
});
it('Should set defaults', function() {
const objectValidator = createValidationFunction({
a: { default: true }
});
const testObject = { b: 3 };
const validatedObject = objectValidator(testObject, { validationLevel: testValidationLevel });
expect(validatedObject.a).to.equal(true);
expect(validatedObject.b).to.equal(3);
expect(validatedObject).to.be.frozen;
});
it('Should deprecate options', function() {
const stub = process.emitWarning
? sinon.stub(process, 'emitWarning')
: sinon.stub(console, 'error');
const objectValidator = createValidationFunction({
a: { deprecated: true }
});
const testObject = { a: 3 };
const validatedObject = objectValidator(testObject);
expect(stub).to.have.been.calledOnce;
expect(stub).to.have.been.calledWith(
'option [a] is deprecated and will be removed in a later version.'
);
expect(validatedObject).to.deep.equal(testObject);
expect(validatedObject).to.be.frozen;
process.emitWarning ? process.emitWarning.restore() : console.error.restore();
});
});
| 1 | 14,844 | I think we can make a safe assumption that if only two values are passed in then you have `(optionsToValidate, optionsForValidation)`, if its three then you have `(optionsToValidate, overrideOptions, optionsForValidation)` | mongodb-node-mongodb-native | js |
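A minimal sketch of the arity-based dispatch the comment describes is shown below. The parameter names follow the review thread; the body is illustrative only and is not the driver's actual implementation (the real validator is produced by createValidationFunction).

// Sketch only: resolve the two supported call shapes by argument count.
function objectValidator(optionsToValidate, overrideOptions, optionsForValidation) {
  if (arguments.length === 2) {
    // two-argument form: (optionsToValidate, optionsForValidation)
    optionsForValidation = overrideOptions;
    overrideOptions = {};
  }
  optionsForValidation = optionsForValidation || {};
  const merged = Object.assign({}, optionsToValidate, overrideOptions);
  // ... schema checks driven by optionsForValidation.validationLevel go here ...
  return Object.freeze(merged);
}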
@@ -141,12 +141,14 @@ func runExperimentalBeamPipeline(ctx context.Context) error {
entries := beamio.ReadEntries(s, *entriesFile)
k := pipeline.FromEntries(s, entries)
shards := 8 // TODO(schroederc): better determine number of shards
+ edgeSets, edgePages := k.Edges()
xrefSets, xrefPages := k.CrossReferences()
beamio.WriteLevelDB(s, *tablePath, shards,
k.CorpusRoots(),
k.Directories(),
k.Decorations(),
xrefSets, xrefPages,
+ edgeSets, edgePages,
)
return beamx.Run(ctx, p)
} | 1 | /*
* Copyright 2015 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Binary write_tables creates a combined xrefs/filetree/search serving table
// based on a given GraphStore.
package main
import (
"context"
"errors"
"flag"
"log"
"kythe.io/kythe/go/platform/vfs"
"kythe.io/kythe/go/services/graphstore"
"kythe.io/kythe/go/serving/pipeline"
"kythe.io/kythe/go/serving/pipeline/beamio"
"kythe.io/kythe/go/storage/gsutil"
"kythe.io/kythe/go/storage/leveldb"
"kythe.io/kythe/go/storage/stream"
"kythe.io/kythe/go/util/flagutil"
"kythe.io/kythe/go/util/profile"
spb "kythe.io/kythe/proto/storage_go_proto"
"github.com/apache/beam/sdks/go/pkg/beam"
"github.com/apache/beam/sdks/go/pkg/beam/x/beamx"
_ "kythe.io/kythe/go/services/graphstore/proxy"
_ "kythe.io/third_party/beam/sdks/go/pkg/beam/runners/disksort"
)
var (
gs graphstore.Service
entriesFile = flag.String("entries", "", "Path to GraphStore-ordered entries file (mutually exclusive with --graphstore)")
tablePath = flag.String("out", "", "Directory path to output serving table")
maxPageSize = flag.Int("max_page_size", 4000,
"If positive, edge/cross-reference pages are restricted to under this number of edges/references")
compressShards = flag.Bool("compress_shards", false,
"Determines whether intermediate data written to disk should be compressed.")
maxShardSize = flag.Int("max_shard_size", 32000,
"Maximum number of elements (edges, decoration fragments, etc.) to keep in-memory before flushing an intermediary data shard to disk.")
verbose = flag.Bool("verbose", false, "Whether to emit extra, and possibly excessive, log messages")
experimentalBeamPipeline = flag.Bool("experimental_beam_pipeline", false, "Whether to use the Beam experimental pipeline implementation")
)
func init() {
gsutil.Flag(&gs, "graphstore", "GraphStore to read (mutually exclusive with --entries)")
flag.Usage = flagutil.SimpleUsage(
"Creates a combined xrefs/filetree/search serving table based on a given GraphStore or stream of GraphStore-ordered entries",
"(--graphstore spec | --entries path) --out path")
}
func main() {
flag.Parse()
ctx := context.Background()
if *experimentalBeamPipeline {
if err := runExperimentalBeamPipeline(ctx); err != nil {
log.Fatalf("Pipeline error: %v", err)
}
return
}
if gs == nil && *entriesFile == "" {
flagutil.UsageError("missing --graphstore or --entries")
} else if gs != nil && *entriesFile != "" {
flagutil.UsageError("--graphstore and --entries are mutually exclusive")
} else if *tablePath == "" {
flagutil.UsageError("missing required --out flag")
}
db, err := leveldb.Open(*tablePath, nil)
if err != nil {
log.Fatal(err)
}
defer db.Close()
if err := profile.Start(ctx); err != nil {
log.Fatal(err)
}
defer profile.Stop()
var rd stream.EntryReader
if gs != nil {
rd = func(f func(e *spb.Entry) error) error {
defer gs.Close(ctx)
return gs.Scan(ctx, &spb.ScanRequest{}, f)
}
} else {
f, err := vfs.Open(ctx, *entriesFile)
if err != nil {
log.Fatalf("Error opening %q: %v", *entriesFile, err)
}
defer f.Close()
rd = stream.NewReader(f)
}
if err := pipeline.Run(ctx, rd, db, &pipeline.Options{
Verbose: *verbose,
MaxPageSize: *maxPageSize,
CompressShards: *compressShards,
MaxShardSize: *maxShardSize,
}); err != nil {
log.Fatal("FATAL ERROR: ", err)
}
}
func runExperimentalBeamPipeline(ctx context.Context) error {
beam.Init()
if runnerFlag := flag.Lookup("runner"); runnerFlag.Value.String() == "direct" {
runnerFlag.Value.Set("disksort")
}
if gs != nil {
return errors.New("--graphstore input not supported with --experimental_beam_pipeline")
} else if *entriesFile == "" {
return errors.New("--entries file path required")
} else if *tablePath == "" {
return errors.New("--out table path required")
}
p, s := beam.NewPipelineWithRoot()
entries := beamio.ReadEntries(s, *entriesFile)
k := pipeline.FromEntries(s, entries)
shards := 8 // TODO(schroederc): better determine number of shards
xrefSets, xrefPages := k.CrossReferences()
beamio.WriteLevelDB(s, *tablePath, shards,
k.CorpusRoots(),
k.Directories(),
k.Decorations(),
xrefSets, xrefPages,
)
return beamx.Run(ctx, p)
}
| 1 | 8,494 | If there are more items to add to what's being written out here, please add a TODO. | kythe-kythe | go |
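The reviewer is only asking for a marker next to the sink; a sketch of what that might look like, reusing the names already present in the pipeline above, is:

edgeSets, edgePages := k.Edges()
xrefSets, xrefPages := k.CrossReferences()
// TODO(maintainer): list any new serving-table collections here so they are
// written alongside the existing ones.
beamio.WriteLevelDB(s, *tablePath, shards,
	k.CorpusRoots(),
	k.Directories(),
	k.Decorations(),
	xrefSets, xrefPages,
	edgeSets, edgePages,
)

The "maintainer" placeholder in the TODO is illustrative; the project's convention appears to be a username, as in the existing TODO(schroederc) comment.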
@@ -106,8 +106,12 @@ class SuperSocket(six.with_metaclass(_SuperSocket_metaclass)):
pkt = pkt[:12] + tag + pkt[12:]
elif cmsg_lvl == socket.SOL_SOCKET and \
cmsg_type == SO_TIMESTAMPNS:
- tmp = struct.unpack("iiii", cmsg_data)
- timestamp = tmp[0] + tmp[2] * 1e-9
+ length = len(cmsg_data)
+ if length == 16: # __kernel_timespec
+ tmp = struct.unpack("ll", cmsg_data)
+ elif length == 8: # timespec
+ tmp = struct.unpack("ii", cmsg_data)
+ timestamp = tmp[0] + tmp[1] * 1e-9
return pkt, sa_ll, timestamp
def recv_raw(self, x=MTU): | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
SuperSocket.
"""
from __future__ import absolute_import
from select import select, error as select_error
import ctypes
import errno
import os
import socket
import struct
import time
from scapy.config import conf
from scapy.consts import LINUX, DARWIN, WINDOWS
from scapy.data import MTU, ETH_P_IP, SOL_PACKET, SO_TIMESTAMPNS
from scapy.compat import raw, bytes_encode
from scapy.error import warning, log_runtime
import scapy.modules.six as six
import scapy.packet
from scapy.utils import PcapReader, tcpdump
# Utils
class _SuperSocket_metaclass(type):
def __repr__(self):
if self.desc is not None:
return "<%s: %s>" % (self.__name__, self.desc)
else:
return "<%s>" % self.__name__
# Used to get ancillary data
PACKET_AUXDATA = 8
ETH_P_8021Q = 0x8100
TP_STATUS_VLAN_VALID = 1 << 4
class tpacket_auxdata(ctypes.Structure):
_fields_ = [
("tp_status", ctypes.c_uint),
("tp_len", ctypes.c_uint),
("tp_snaplen", ctypes.c_uint),
("tp_mac", ctypes.c_ushort),
("tp_net", ctypes.c_ushort),
("tp_vlan_tci", ctypes.c_ushort),
("tp_padding", ctypes.c_ushort),
]
# SuperSocket
class SuperSocket(six.with_metaclass(_SuperSocket_metaclass)):
desc = None
closed = 0
nonblocking_socket = False
read_allowed_exceptions = ()
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): # noqa: E501
self.ins = socket.socket(family, type, proto)
self.outs = self.ins
self.promisc = None
def send(self, x):
sx = raw(x)
try:
x.sent_time = time.time()
except AttributeError:
pass
return self.outs.send(sx)
if six.PY2:
def _recv_raw(self, sock, x):
"""Internal function to receive a Packet"""
pkt, sa_ll = sock.recvfrom(x)
return pkt, sa_ll, None
else:
def _recv_raw(self, sock, x):
"""Internal function to receive a Packet,
and process ancillary data.
"""
flags_len = socket.CMSG_LEN(4096)
timestamp = None
pkt, ancdata, flags, sa_ll = sock.recvmsg(x, flags_len)
if not pkt:
return pkt, sa_ll
for cmsg_lvl, cmsg_type, cmsg_data in ancdata:
# Check available ancillary data
if (cmsg_lvl == SOL_PACKET and cmsg_type == PACKET_AUXDATA):
# Parse AUXDATA
auxdata = tpacket_auxdata.from_buffer_copy(cmsg_data)
if auxdata.tp_vlan_tci != 0 or \
auxdata.tp_status & TP_STATUS_VLAN_VALID:
# Insert VLAN tag
tag = struct.pack(
"!HH",
ETH_P_8021Q,
auxdata.tp_vlan_tci
)
pkt = pkt[:12] + tag + pkt[12:]
elif cmsg_lvl == socket.SOL_SOCKET and \
cmsg_type == SO_TIMESTAMPNS:
tmp = struct.unpack("iiii", cmsg_data)
timestamp = tmp[0] + tmp[2] * 1e-9
return pkt, sa_ll, timestamp
def recv_raw(self, x=MTU):
"""Returns a tuple containing (cls, pkt_data, time)"""
return conf.raw_layer, self.ins.recv(x), None
def recv(self, x=MTU):
cls, val, ts = self.recv_raw(x)
if not val or not cls:
return
try:
pkt = cls(val)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
from scapy.sendrecv import debug
debug.crashed_on = (cls, val)
raise
pkt = conf.raw_layer(val)
if ts:
pkt.time = ts
return pkt
def fileno(self):
return self.ins.fileno()
def close(self):
if self.closed:
return
self.closed = True
if getattr(self, "outs", None):
if getattr(self, "ins", None) != self.outs:
if WINDOWS or self.outs.fileno() != -1:
self.outs.close()
if getattr(self, "ins", None):
if WINDOWS or self.ins.fileno() != -1:
self.ins.close()
def sr(self, *args, **kargs):
from scapy import sendrecv
return sendrecv.sndrcv(self, *args, **kargs)
def sr1(self, *args, **kargs):
from scapy import sendrecv
a, b = sendrecv.sndrcv(self, *args, **kargs)
if len(a) > 0:
return a[0][1]
else:
return None
def sniff(self, *args, **kargs):
from scapy import sendrecv
return sendrecv.sniff(opened_socket=self, *args, **kargs)
def tshark(self, *args, **kargs):
from scapy import sendrecv
return sendrecv.tshark(opened_socket=self, *args, **kargs)
@staticmethod
def select(sockets, remain=conf.recv_poll_rate):
"""This function is called during sendrecv() routine to select
the available sockets.
:param sockets: an array of sockets that need to be selected
:returns: an array of sockets that were selected and
the function to be called next to get the packets (i.g. recv)
"""
try:
inp, _, _ = select(sockets, [], [], remain)
except (IOError, select_error) as exc:
# select.error has no .errno attribute
if exc.args[0] != errno.EINTR:
raise
return inp, None
def __del__(self):
"""Close the socket"""
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Close the socket"""
self.close()
class L3RawSocket(SuperSocket):
desc = "Layer 3 using Raw sockets (PF_INET/SOCK_RAW)"
def __init__(self, type=ETH_P_IP, filter=None, iface=None, promisc=None, nofilter=0): # noqa: E501
self.outs = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW) # noqa: E501
self.outs.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) # noqa: E501
if iface is not None:
self.ins.bind((iface, type))
if not six.PY2:
# Receive Auxiliary Data (VLAN tags)
self.ins.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
self.ins.setsockopt(
socket.SOL_SOCKET,
SO_TIMESTAMPNS,
1
)
def recv(self, x=MTU):
pkt, sa_ll, ts = self._recv_raw(self.ins, x)
if sa_ll[2] == socket.PACKET_OUTGOING:
return None
if sa_ll[3] in conf.l2types:
cls = conf.l2types[sa_ll[3]]
lvl = 2
elif sa_ll[1] in conf.l3types:
cls = conf.l3types[sa_ll[1]]
lvl = 3
else:
cls = conf.default_l2
warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s", sa_ll[0], sa_ll[1], sa_ll[3], cls.name) # noqa: E501
lvl = 3
try:
pkt = cls(pkt)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(pkt)
if lvl == 2:
pkt = pkt.payload
if pkt is not None:
if ts is None:
from scapy.arch import get_last_packet_timestamp
ts = get_last_packet_timestamp(self.ins)
pkt.time = ts
return pkt
def send(self, x):
try:
sx = raw(x)
x.sent_time = time.time()
return self.outs.sendto(sx, (x.dst, 0))
except socket.error as msg:
log_runtime.error(msg)
class SimpleSocket(SuperSocket):
desc = "wrapper around a classic socket"
def __init__(self, sock):
self.ins = sock
self.outs = sock
class StreamSocket(SimpleSocket):
desc = "transforms a stream socket into a layer 2"
nonblocking_socket = True
def __init__(self, sock, basecls=None):
if basecls is None:
basecls = conf.raw_layer
SimpleSocket.__init__(self, sock)
self.basecls = basecls
def recv(self, x=MTU):
pkt = self.ins.recv(x, socket.MSG_PEEK)
x = len(pkt)
if x == 0:
return None
pkt = self.basecls(pkt)
pad = pkt.getlayer(conf.padding_layer)
if pad is not None and pad.underlayer is not None:
del(pad.underlayer.payload)
from scapy.packet import NoPayload
while pad is not None and not isinstance(pad, NoPayload):
x -= len(pad.load)
pad = pad.payload
self.ins.recv(x)
return pkt
class SSLStreamSocket(StreamSocket):
desc = "similar usage than StreamSocket but specialized for handling SSL-wrapped sockets" # noqa: E501
def __init__(self, sock, basecls=None):
self._buf = b""
super(SSLStreamSocket, self).__init__(sock, basecls)
# 65535, the default value of x is the maximum length of a TLS record
def recv(self, x=65535):
pkt = None
if self._buf != b"":
try:
pkt = self.basecls(self._buf)
except Exception:
# We assume that the exception is generated by a buffer underflow # noqa: E501
pass
if not pkt:
buf = self.ins.recv(x)
if len(buf) == 0:
raise socket.error((100, "Underlying stream socket tore down"))
self._buf += buf
x = len(self._buf)
pkt = self.basecls(self._buf)
pad = pkt.getlayer(conf.padding_layer)
if pad is not None and pad.underlayer is not None:
del(pad.underlayer.payload)
while pad is not None and not isinstance(pad, scapy.packet.NoPayload):
x -= len(pad.load)
pad = pad.payload
self._buf = self._buf[x:]
return pkt
class L2ListenTcpdump(SuperSocket):
desc = "read packets at layer 2 using tcpdump"
def __init__(self, iface=None, promisc=None, filter=None, nofilter=False,
prog=None, *arg, **karg):
self.outs = None
args = ['-w', '-', '-s', '65535']
if iface is not None:
if WINDOWS:
try:
args.extend(['-i', iface.pcap_name])
except AttributeError:
args.extend(['-i', iface])
else:
args.extend(['-i', iface])
elif WINDOWS or DARWIN:
args.extend(['-i', conf.iface.pcap_name if WINDOWS else conf.iface]) # noqa: E501
if not promisc:
args.append('-p')
if not nofilter:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter is not None:
args.append(filter)
self.tcpdump_proc = tcpdump(None, prog=prog, args=args, getproc=True)
self.ins = PcapReader(self.tcpdump_proc.stdout)
def recv(self, x=MTU):
return self.ins.recv(x)
def close(self):
SuperSocket.close(self)
self.tcpdump_proc.kill()
class TunTapInterface(SuperSocket):
"""A socket to act as the host's peer of a tun / tap interface.
"""
desc = "Act as the host's peer of a tun / tap interface"
def __init__(self, iface=None, mode_tun=None, *arg, **karg):
self.iface = conf.iface if iface is None else iface
self.mode_tun = ("tun" in self.iface) if mode_tun is None else mode_tun
self.closed = True
self.open()
def open(self):
"""Open the TUN or TAP device."""
if not self.closed:
return
self.outs = self.ins = open(
"/dev/net/tun" if LINUX else ("/dev/%s" % self.iface), "r+b",
buffering=0
)
if LINUX:
from fcntl import ioctl
# TUNSETIFF = 0x400454ca
# IFF_TUN = 0x0001
# IFF_TAP = 0x0002
# IFF_NO_PI = 0x1000
ioctl(self.ins, 0x400454ca, struct.pack(
"16sH", bytes_encode(self.iface),
0x0001 if self.mode_tun else 0x1002,
))
self.closed = False
def __call__(self, *arg, **karg):
"""Needed when using an instantiated TunTapInterface object for
conf.L2listen, conf.L2socket or conf.L3socket.
"""
return self
def recv(self, x=MTU):
if self.mode_tun:
data = os.read(self.ins.fileno(), x + 4)
proto = struct.unpack('!H', data[2:4])[0]
return conf.l3types.get(proto, conf.raw_layer)(data[4:])
return conf.l2types.get(1, conf.raw_layer)(
os.read(self.ins.fileno(), x)
)
def send(self, x):
sx = raw(x)
if self.mode_tun:
try:
proto = conf.l3types[type(x)]
except KeyError:
log_runtime.warning(
"Cannot find layer 3 protocol value to send %s in "
"conf.l3types, using 0",
x.name if hasattr(x, "name") else type(x).__name__
)
proto = 0
sx = struct.pack('!HH', 0, proto) + sx
try:
try:
x.sent_time = time.time()
except AttributeError:
pass
return os.write(self.outs.fileno(), sx)
except socket.error:
log_runtime.error("%s send", self.__class__.__name__, exc_info=True) # noqa: E501
| 1 | 16,762 | Can you add an `else:` case to handle an invalid length? That will prevent weird errors. | secdev-scapy | py |
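A sketch of the requested `else:` branch is below. It reuses the warning helper that the file above already imports from scapy.error; this is illustrative and not necessarily the code that was eventually merged.

length = len(cmsg_data)
if length == 16:    # 64-bit struct __kernel_timespec
    tmp = struct.unpack("ll", cmsg_data)
elif length == 8:   # 32-bit struct timespec
    tmp = struct.unpack("ii", cmsg_data)
else:
    # Unknown layout: skip the timestamp instead of leaving tmp undefined.
    warning("Unexpected SO_TIMESTAMPNS payload of %d bytes; ignoring timestamp", length)
    tmp = None
if tmp is not None:
    timestamp = tmp[0] + tmp[1] * 1e-9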
@@ -9,14 +9,10 @@ public class ASTAttribute extends AbstractJspNode {
private String name;
- public ASTAttribute(int id) {
+ ASTAttribute(int id) {
super(id);
}
- public ASTAttribute(JspParser p, int id) {
- super(p, id);
- }
-
/**
* @return Returns the name.
*/ | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
/* Generated By:JJTree: Do not edit this line. ASTAttribute.java */
package net.sourceforge.pmd.lang.jsp.ast;
public class ASTAttribute extends AbstractJspNode {
private String name;
public ASTAttribute(int id) {
super(id);
}
public ASTAttribute(JspParser p, int id) {
super(p, id);
}
/**
* @return Returns the name.
*/
public String getName() {
return name;
}
/**
* @param name
* The name to set.
*/
public void setName(String name) {
this.name = name;
}
/**
* @return boolean - true if the element has a namespace-prefix, false
* otherwise
*/
public boolean isHasNamespacePrefix() {
return name.indexOf(':') >= 0;
}
/**
* @return String - the part of the name that is before the (first) colon
* (":")
*/
public String getNamespacePrefix() {
int colonIndex = name.indexOf(':');
return colonIndex >= 0 ? name.substring(0, colonIndex) : "";
}
/**
* @return String - The part of the name that is after the first colon
* (":"). If the name does not contain a colon, the full name is
* returned.
*/
public String getLocalName() {
int colonIndex = name.indexOf(':');
return colonIndex >= 0 ? name.substring(colonIndex + 1) : name;
}
/**
* Accept the visitor. *
*/
@Override
public Object jjtAccept(JspParserVisitor visitor, Object data) {
return visitor.visit(this, data);
}
}
| 1 | 17,006 | * We need to deprecate/internalize first on master. * We should directly make the AST node final now * The setter `setName()` can be package-private. | pmd-pmd | java |
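Taken together, the three review points give the node roughly the shape sketched below. This is illustrative rather than the change that was actually committed; the superclass and parser types are the ones already present in the file above.

// Sketch only: final node class, deprecated legacy constructor kept during the
// transition on master, and a package-private setter.
public final class ASTAttribute extends AbstractJspNode {

    private String name;

    ASTAttribute(int id) {
        super(id);
    }

    @Deprecated // kept temporarily so external callers get a warning before removal
    public ASTAttribute(JspParser p, int id) {
        super(p, id);
    }

    public String getName() {
        return name;
    }

    void setName(String name) { // package-private: only the parser needs to set this
        this.name = name;
    }
}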
@@ -137,12 +137,6 @@ public class InitCodeTransformer {
// Remove the request object for flattened method
orderedItems.remove(orderedItems.size() - 1);
}
- for (InitCodeNode param :
- sampleFuncParams(
- root, initCodeContext.sampleArgStrings(), initCodeContext.sampleParamConfigMap())) {
- List<InitCodeNode> paramInits = param.listInInitializationOrder();
- orderedItems.removeAll(paramInits);
- }
return orderedItems;
}
| 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.MethodContext;
import com.google.api.codegen.config.ProtoTypeRef;
import com.google.api.codegen.config.ResourceNameConfig;
import com.google.api.codegen.config.ResourceNameOneofConfig;
import com.google.api.codegen.config.ResourceNameType;
import com.google.api.codegen.config.SampleParameterConfig;
import com.google.api.codegen.config.SingleResourceNameConfig;
import com.google.api.codegen.config.TypeModel;
import com.google.api.codegen.metacode.FieldStructureParser;
import com.google.api.codegen.metacode.InitCodeContext;
import com.google.api.codegen.metacode.InitCodeContext.InitCodeOutputType;
import com.google.api.codegen.metacode.InitCodeLineType;
import com.google.api.codegen.metacode.InitCodeNode;
import com.google.api.codegen.metacode.InitValue;
import com.google.api.codegen.metacode.InitValueConfig;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.Scanner;
import com.google.api.codegen.util.SymbolTable;
import com.google.api.codegen.util.testing.TestValueGenerator;
import com.google.api.codegen.viewmodel.FieldSettingView;
import com.google.api.codegen.viewmodel.FormattedInitValueView;
import com.google.api.codegen.viewmodel.InitCodeLineView;
import com.google.api.codegen.viewmodel.InitCodeView;
import com.google.api.codegen.viewmodel.InitValueView;
import com.google.api.codegen.viewmodel.ListInitCodeLineView;
import com.google.api.codegen.viewmodel.MapEntryView;
import com.google.api.codegen.viewmodel.MapInitCodeLineView;
import com.google.api.codegen.viewmodel.OneofConfigView;
import com.google.api.codegen.viewmodel.ReadFileInitCodeLineView;
import com.google.api.codegen.viewmodel.RepeatedResourceNameInitValueView;
import com.google.api.codegen.viewmodel.ResourceNameInitValueView;
import com.google.api.codegen.viewmodel.ResourceNameOneofInitValueView;
import com.google.api.codegen.viewmodel.SampleFunctionParameterView;
import com.google.api.codegen.viewmodel.SimpleInitCodeLineView;
import com.google.api.codegen.viewmodel.SimpleInitValueView;
import com.google.api.codegen.viewmodel.StructureInitCodeLineView;
import com.google.api.codegen.viewmodel.testing.ClientTestAssertView;
import com.google.api.pathtemplate.PathTemplate;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* InitCodeTransformer generates initialization code for a given method and then transforms it to a
* view object which can be rendered by a template engine.
*/
public class InitCodeTransformer {
private static final String FORMAT_SPEC_PLACEHOLDER = "FORMAT_SPEC_PLACEHOLDER";
// Note: Markdown backticks for code reference should be converted to an idiomatic representation
// by the language-appropriate CommentReformatter when this String is formatted.
private static final String UNINITIALIZED_REQUIRED_FIELD_COMMENT = "TODO: Initialize `%s`:";
private final ImportSectionTransformer importSectionTransformer;
// Whether the initialization code should include non-configurable comments like TODOs. This
// should only be true when generating in-code samples.
//
// This field should be set to false when generating tests since comments in unit tests are
// unnecessary.
//
// This field must be set to false when generating standalone samples because comments in
// standalone samples should be derived from user configurations, not hard-coded.
private final boolean generateStandardComments;
public InitCodeTransformer() {
this(new StandardImportSectionTransformer(), true);
}
public InitCodeTransformer(ImportSectionTransformer importSectionTransformer) {
this(importSectionTransformer, true);
}
public InitCodeTransformer(boolean generateStandardComments) {
this(new StandardImportSectionTransformer(), generateStandardComments);
}
public InitCodeTransformer(
ImportSectionTransformer importSectionTransformer, boolean generateStandardComments) {
this.importSectionTransformer = importSectionTransformer;
this.generateStandardComments = generateStandardComments;
}
public ImportSectionTransformer getImportSectionTransformer() {
return this.importSectionTransformer;
}
/** Generates initialization code from the given MethodContext and InitCodeContext objects. */
public InitCodeView generateInitCode(
MethodContext methodContext, InitCodeContext initCodeContext) {
InitCodeNode rootNode = InitCodeNode.createTree(initCodeContext);
if (initCodeContext.outputType() == InitCodeOutputType.FieldList) {
return buildInitCodeViewFlattened(methodContext, initCodeContext, rootNode);
} else {
return buildInitCodeViewRequestObject(methodContext, initCodeContext, rootNode);
}
}
public List<InitCodeNode> getInitCodeNodes(
MethodContext methodContext, InitCodeContext initCodeContext) {
InitCodeNode root = InitCodeNode.createTree(initCodeContext);
List<InitCodeNode> orderedItems = root.listInInitializationOrder();
if (initCodeContext.outputType() == InitCodeOutputType.FieldList) {
// Remove the request object for flattened method
orderedItems.remove(orderedItems.size() - 1);
}
for (InitCodeNode param :
sampleFuncParams(
root, initCodeContext.sampleArgStrings(), initCodeContext.sampleParamConfigMap())) {
List<InitCodeNode> paramInits = param.listInInitializationOrder();
orderedItems.removeAll(paramInits);
}
return orderedItems;
}
public InitCodeContext createRequestInitCodeContext(
MethodContext context,
SymbolTable symbolTable,
Collection<FieldConfig> fieldConfigs,
InitCodeOutputType outputType,
TestValueGenerator valueGenerator) {
return InitCodeContext.newBuilder()
.initObjectType(context.getMethodModel().getInputType())
.symbolTable(symbolTable)
.suggestedName(Name.from("request"))
.initFieldConfigStrings(context.getMethodConfig().getSampleCodeInitFields())
.initValueConfigMap(InitCodeTransformer.createCollectionMap(context))
.initFields(FieldConfig.toFieldTypeIterable(fieldConfigs))
.fieldConfigMap(FieldConfig.toFieldConfigMap(fieldConfigs))
.outputType(outputType)
.valueGenerator(valueGenerator)
.build();
}
/** Generates assert views for the test of the tested method and its fields. */
List<ClientTestAssertView> generateRequestAssertViews(
MethodContext methodContext, InitCodeContext initContext) {
InitCodeNode rootNode =
InitCodeNode.createTree(
InitCodeContext.newBuilder()
.initObjectType(methodContext.getMethodModel().getInputType())
.initFields(initContext.initFields())
.initValueConfigMap(createCollectionMap(methodContext))
.suggestedName(Name.from("request"))
.fieldConfigMap(initContext.fieldConfigMap())
.build());
List<ClientTestAssertView> assertViews = new ArrayList<>();
SurfaceNamer namer = methodContext.getNamer();
// Add request fields checking
for (InitCodeNode fieldItemTree : rootNode.getChildren().values()) {
FieldConfig fieldConfig = fieldItemTree.getFieldConfig();
String getterMethod =
namer.getFieldGetFunctionName(methodContext.getFeatureConfig(), fieldConfig);
String expectedValueIdentifier = getVariableName(methodContext, fieldItemTree);
String expectedTransformFunction = null;
String actualTransformFunction = null;
if (methodContext.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) {
if (fieldConfig.requiresParamTransformationFromAny()) {
expectedTransformFunction = namer.getToStringMethod();
actualTransformFunction = namer.getToStringMethod();
} else if (fieldConfig.requiresParamTransformation()) {
if (methodContext.getFeatureConfig().useResourceNameConverters(fieldConfig)) {
expectedTransformFunction = namer.getToStringMethod();
} else {
expectedTransformFunction =
namer.getResourceOneofCreateMethod(methodContext.getTypeTable(), fieldConfig);
}
} else if (methodContext.getFeatureConfig().useResourceNameConverters(fieldConfig)) {
if (fieldConfig.getField().isRepeated()) {
actualTransformFunction =
namer.getResourceTypeParseListMethodName(methodContext.getTypeTable(), fieldConfig);
} else if (fieldConfig.getResourceNameConfig().getResourceNameType()
== ResourceNameType.ONEOF) {
actualTransformFunction =
namer.getResourceTypeParentParseMethod(methodContext.getTypeTable(), fieldConfig);
} else {
actualTransformFunction =
namer.getResourceTypeParseMethodName(methodContext.getTypeTable(), fieldConfig);
}
}
}
boolean isMap = fieldConfig.getField().isMap();
boolean isArray = fieldConfig.getField().isRepeated() && !isMap;
TypeModel fieldType = fieldItemTree.getType();
String messageTypeName = null;
if (fieldType.isMessage()) {
messageTypeName = methodContext.getTypeTable().getFullNameForMessageType(fieldType);
}
assertViews.add(
createAssertView(
expectedValueIdentifier,
expectedTransformFunction,
actualTransformFunction,
isMap,
isArray,
getterMethod,
messageTypeName));
}
return assertViews;
}
/**
* A utility method which creates the InitValueConfig map that contains the collection config
* data.
*/
public static ImmutableMap<String, InitValueConfig> createCollectionMap(MethodContext context) {
ImmutableMap.Builder<String, InitValueConfig> mapBuilder = ImmutableMap.builder();
Map<String, String> fieldNamePatterns = context.getMethodConfig().getFieldNamePatterns();
for (Map.Entry<String, String> fieldNamePattern : fieldNamePatterns.entrySet()) {
SingleResourceNameConfig resourceNameConfig =
context.getSingleResourceNameConfig(fieldNamePattern.getValue());
String apiWrapperClassName =
context.getNamer().getApiWrapperClassName(context.getInterfaceConfig());
InitValueConfig initValueConfig =
InitValueConfig.create(apiWrapperClassName, resourceNameConfig);
mapBuilder.put(fieldNamePattern.getKey(), initValueConfig);
}
return mapBuilder.build();
}
private ClientTestAssertView createAssertView(
String expected,
String expectedTransformFunction,
String actualTransformFunction,
boolean isMap,
boolean isArray,
String actual,
String messageTypeName) {
return ClientTestAssertView.newBuilder()
.expectedValueIdentifier(expected)
.isMap(isMap)
.isArray(isArray)
.expectedValueTransformFunction(expectedTransformFunction)
.actualValueTransformFunction(actualTransformFunction)
.actualValueGetter(actual)
.messageTypeName(messageTypeName)
.build();
}
private InitCodeView buildInitCodeViewFlattened(
MethodContext context, InitCodeContext initCodeContext, InitCodeNode root) {
assertNoOverlap(root, initCodeContext.sampleArgStrings());
// Remove the request object for flattened method
List<InitCodeNode> orderedItems = root.listInInitializationOrder();
orderedItems.remove(orderedItems.size() - 1);
return buildInitCodeView(
context,
orderedItems,
ImmutableList.copyOf(root.getChildren().values()),
sampleFuncParams(
root, initCodeContext.sampleArgStrings(), initCodeContext.sampleParamConfigMap()));
}
private InitCodeView buildInitCodeViewRequestObject(
MethodContext context, InitCodeContext initCodeContext, InitCodeNode root) {
assertNoOverlap(root, initCodeContext.sampleArgStrings());
return buildInitCodeView(
context,
root.listInInitializationOrder(),
ImmutableList.of(root),
sampleFuncParams(
root, initCodeContext.sampleArgStrings(), initCodeContext.sampleParamConfigMap()));
}
/**
* Returns all the nodes to be rendered as sample function parameters.
*
* <p>If path is:
* <li>a normal node, returns that node.
* <li>a ReadFile node, returns the child node of that node.
* <li>a resource path, returns the child node whose key equals the entity name in the path.
*
* @param paramConfigMap the sample parameter configurations derived from {@code InitCodeContext}
*/
private List<InitCodeNode> sampleFuncParams(
InitCodeNode root, List<String> paths, Map<String, SampleParameterConfig> paramConfigMap) {
List<InitCodeNode> params = new ArrayList<>();
for (String path : paths) {
Scanner scanner = new Scanner(path);
InitCodeNode node = FieldStructureParser.parsePath(root, scanner);
int token = scanner.lastToken();
if (token == '%') {
scanner.scan();
node = node.getChildren().get(scanner.tokenStr());
node.setDescription(paramConfigMap.get(path).description());
params.add(node);
} else if (node.getLineType() == InitCodeLineType.ReadFileInitLine) {
node = node.getChildren().get(InitCodeNode.FILE_NAME_KEY);
node.setDescription(paramConfigMap.get(path).description());
params.add(node);
} else {
node.setDescription(paramConfigMap.get(path).description());
params.add(node);
}
}
return params;
}
/**
* Given node `root` and `paths` describing subtrees of `root`, verify that all subtrees are
* disjoint. i.e., no two subtrees are the same, and no subtrees are themselves part of other
* subtrees.
*/
@VisibleForTesting
static void assertNoOverlap(InitCodeNode root, List<String> paths) {
// Keep track of the path that adds a node. If we detect collision we can report the two paths
// that reference the same nodes.
HashMap<InitCodeNode, String> refFrom = new HashMap<>();
// Keep track of the resource name entities. Configuring an entity twice or configuring an
// entity and the parent node at the same time will cause collision. Configuring two different
// entities will not.
Multimap<InitCodeNode, String> nodeEntities = HashMultimap.create();
// Below we'll perform depth-first search, keep a list of nodes we've seen but have not
// descended into. It doesn't really matter if we search breadth- or depth-first; DFS is a little
// more efficient on average.
ArrayDeque<InitCodeNode> subNodes = new ArrayDeque<>();
for (String path : paths) {
subNodes.add(root.subTree(path));
String entity = FieldStructureParser.parseEntityName(path);
while (!subNodes.isEmpty()) {
InitCodeNode node = subNodes.pollLast();
String oldPath = refFrom.put(node, path);
if (oldPath == null) {
// The node has not been specified before, thus check if entity has been specified
checkArgument(
entity == null || nodeEntities.put(node, entity),
"Entity %s in path %s specified multiple times",
entity,
path);
} else {
// The node has been specified before. The will be no overlap if and only if:
// All previous paths are configuring entities
// This path is configuraing an entity
// The same entity is never specified before
checkArgument(
entity != null && nodeEntities.containsKey(node),
"SampleInitAttribute %s overlaps with %s",
oldPath,
path);
checkArgument(
nodeEntities.put(node, entity),
"Entity %s in path %s specified multiple times",
entity,
path);
}
subNodes.addAll(node.getChildren().values());
}
}
}
/**
* Transform {@code InitCodeNode}s into {@code InitCodeView}.
*
* @param orderedItems These nodes are converted into request-initialization code. It contains all
* initializations regardless of whether they are parameters to the sample function. The
* initialization is "shallow": children nodes are not initialized. If children nodes should
* also be initialized, callers must also include them in the list.
* @param libArguments Used by samples for flattened client lib methods. These nodes contain
* values that become arguments to the method.
* @param sampleFuncParams Subset of {@code orderedItems} containing only items that are function
* parameters. Unlike {@code orderedItems}, the {@code sampleFuncParams} are "deep". The init
* code for these nodes and their children are commented out so that they don't clobber the
* function arguments.
*/
private InitCodeView buildInitCodeView(
MethodContext context,
List<InitCodeNode> orderedItems,
List<InitCodeNode> libArguments,
List<InitCodeNode> sampleFuncParams) {
ImportTypeTable typeTable = context.getTypeTable();
SurfaceNamer namer = context.getNamer();
// Initialize the type table with the apiClassName since each sample will be using the
// apiClass.
typeTable.getAndSaveNicknameFor(
namer.getFullyQualifiedApiWrapperClassName(context.getInterfaceConfig()));
List<FieldSettingView> fieldSettings = getFieldSettings(context, libArguments);
List<FieldSettingView> optionalFieldSettings =
fieldSettings.stream().filter(f -> !f.required()).collect(Collectors.toList());
List<FieldSettingView> requiredFieldSettings =
fieldSettings.stream().filter(FieldSettingView::required).collect(Collectors.toList());
List<SampleFunctionParameterView> argDefaultParams = new ArrayList<>();
List<InitCodeLineView> argDefaultLines = new ArrayList<>();
for (InitCodeNode param : sampleFuncParams) {
List<InitCodeNode> paramInits = param.listInInitializationOrder();
argDefaultLines.addAll(generateSurfaceInitCodeLines(context, paramInits));
// The param itself is always at the end.
InitCodeLineView initLine = argDefaultLines.get(argDefaultLines.size() - 1);
checkArgument(
initLine.lineType() == InitCodeLineType.SimpleInitLine,
"Standalone samples only support primitive types for CLI arguments for now.");
SimpleInitCodeLineView simpleInitLine = (SimpleInitCodeLineView) initLine;
argDefaultParams.add(
SampleFunctionParameterView.newBuilder()
.initValue(simpleInitLine.initValue())
.identifier(simpleInitLine.identifier())
.typeName(simpleInitLine.typeName())
.cliFlagName(param.getIdentifier().toLowerUnderscore())
.description(param.getDescription())
.build());
// Since we're going to write the inits for the params here,
// remove so we don't init twice.
orderedItems.removeAll(paramInits);
}
return InitCodeView.newBuilder()
.argDefaultLines(argDefaultLines)
.argDefaultParams(argDefaultParams)
.lines(generateSurfaceInitCodeLines(context, orderedItems))
.topLevelLines(generateSurfaceInitCodeLines(context, libArguments))
.fieldSettings(fieldSettings)
.optionalFieldSettings(optionalFieldSettings)
.requiredFieldSettings(requiredFieldSettings)
.importSection(importSectionTransformer.generateImportSection(context, orderedItems))
.topLevelIndexFileImportName(namer.getTopLevelIndexFileImportName())
.build();
}
private List<InitCodeLineView> generateSurfaceInitCodeLines(
MethodContext context, Iterable<InitCodeNode> specItemNode) {
boolean isFirstReadFileView = true;
List<InitCodeLineView> surfaceLines = new ArrayList<>();
for (InitCodeNode item : specItemNode) {
surfaceLines.add(
generateSurfaceInitCodeLine(context, item, surfaceLines.isEmpty(), isFirstReadFileView));
isFirstReadFileView =
isFirstReadFileView && item.getLineType() != InitCodeLineType.ReadFileInitLine;
}
return surfaceLines;
}
private InitCodeLineView generateSurfaceInitCodeLine(
MethodContext context,
InitCodeNode specItemNode,
boolean isFirstItem,
boolean isFirstReadFileView) {
switch (specItemNode.getLineType()) {
case StructureInitLine:
return generateStructureInitCodeLine(context, specItemNode);
case ListInitLine:
return generateListInitCodeLine(context, specItemNode);
case SimpleInitLine:
return generateSimpleInitCodeLine(context, specItemNode, isFirstItem);
case MapInitLine:
return generateMapInitCodeLine(context, specItemNode);
case ReadFileInitLine:
return generateReadFileInitCodeLine(context, specItemNode, isFirstReadFileView);
default:
throw new RuntimeException("unhandled line type: " + specItemNode.getLineType());
}
}
private InitCodeLineView generateSimpleInitCodeLine(
MethodContext context, InitCodeNode item, boolean isFirstItem) {
SimpleInitCodeLineView.Builder surfaceLine = SimpleInitCodeLineView.newBuilder();
FieldConfig fieldConfig = item.getFieldConfig();
SurfaceNamer namer = context.getNamer();
ImportTypeTable typeTable = context.getTypeTable();
surfaceLine.lineType(InitCodeLineType.SimpleInitLine);
if (context.getFeatureConfig().useResourceNameFormatOptionInSample(context, fieldConfig)) {
if (!context.isFlattenedMethodContext()) {
// In a non-flattened context, we always use the resource name type set on the message
// instead of set on the flattened method
fieldConfig = fieldConfig.getMessageFieldConfig();
}
if (item.getType().isRepeated()) {
surfaceLine.typeName(namer.getAndSaveResourceTypeName(typeTable, fieldConfig));
} else {
surfaceLine.typeName(namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig));
}
} else {
surfaceLine.typeName(typeTable.getAndSaveNicknameFor(item.getType()));
}
surfaceLine.identifier(getVariableName(context, item));
setInitValueAndComments(surfaceLine, context, item, isFirstItem);
return surfaceLine.build();
}
private InitCodeLineView generateStructureInitCodeLine(MethodContext context, InitCodeNode item) {
StructureInitCodeLineView.Builder surfaceLine = StructureInitCodeLineView.newBuilder();
SurfaceNamer namer = context.getNamer();
ImportTypeTable typeTable = context.getTypeTable();
surfaceLine.lineType(InitCodeLineType.StructureInitLine);
surfaceLine.identifier(namer.localVarName(item.getIdentifier()));
String typeName = typeTable.getAndSaveNicknameFor(item.getType());
surfaceLine.typeName(typeName);
surfaceLine.typeConstructor(namer.getTypeConstructor(typeName));
surfaceLine.fieldSettings(getFieldSettings(context, item.getChildren().values()));
surfaceLine.descriptions(context.getNamer().getWrappedDocLines(item.getDescription(), false));
return surfaceLine.build();
}
private InitCodeLineView generateListInitCodeLine(MethodContext context, InitCodeNode item) {
ListInitCodeLineView.Builder surfaceLine = ListInitCodeLineView.newBuilder();
FieldConfig fieldConfig = item.getFieldConfig();
SurfaceNamer namer = context.getNamer();
ImportTypeTable typeTable = context.getTypeTable();
surfaceLine.lineType(InitCodeLineType.ListInitLine);
surfaceLine.identifier(namer.localVarName(item.getIdentifier()));
if (context.getFeatureConfig().useResourceNameFormatOptionInSample(context, fieldConfig)) {
surfaceLine.elementTypeName(namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig));
} else {
surfaceLine.elementTypeName(
typeTable.getAndSaveNicknameForElementType(item.getType().makeOptional()));
}
List<String> entries = new ArrayList<>();
List<InitCodeLineView> elements = new ArrayList<>();
for (InitCodeNode child : item.getChildren().values()) {
entries.add(namer.localVarName(child.getIdentifier()));
elements.add(generateSurfaceInitCodeLine(context, child, elements.isEmpty(), false));
}
surfaceLine.elementIdentifiers(entries);
surfaceLine.elements(elements);
surfaceLine.descriptions(context.getNamer().getWrappedDocLines(item.getDescription(), false));
return surfaceLine.build();
}
private InitCodeLineView generateMapInitCodeLine(MethodContext context, InitCodeNode item) {
MapInitCodeLineView.Builder surfaceLine = MapInitCodeLineView.newBuilder();
SurfaceNamer namer = context.getNamer();
ImportTypeTable typeTable = context.getTypeTable();
surfaceLine.lineType(InitCodeLineType.MapInitLine);
surfaceLine.identifier(namer.localVarName(item.getIdentifier()));
surfaceLine.keyTypeName(typeTable.getAndSaveNicknameFor(item.getType().getMapKeyType()));
surfaceLine.valueTypeName(typeTable.getAndSaveNicknameFor(item.getType().getMapValueType()));
List<MapEntryView> entries = new ArrayList<>();
for (Map.Entry<String, InitCodeNode> entry : item.getChildren().entrySet()) {
MapEntryView.Builder mapEntry = MapEntryView.newBuilder();
mapEntry.key(typeTable.renderPrimitiveValue(item.getType().getMapKeyType(), entry.getKey()));
mapEntry.valueString(context.getNamer().localVarName(entry.getValue().getIdentifier()));
mapEntry.value(
generateSurfaceInitCodeLine(context, entry.getValue(), entries.isEmpty(), false));
entries.add(mapEntry.build());
}
surfaceLine.initEntries(entries);
surfaceLine.descriptions(context.getNamer().getWrappedDocLines(item.getDescription(), false));
return surfaceLine.build();
}
/**
* @param isFirstReadFileView Used in Java. We need to reuse local variables "path" and "data" if
* we have rendered ReadFileViews before so that we don't declare them twice.
*/
private InitCodeLineView generateReadFileInitCodeLine(
MethodContext context, InitCodeNode item, boolean isFirstReadFileView) {
ReadFileInitCodeLineView.Builder surfaceLine = ReadFileInitCodeLineView.newBuilder();
SurfaceNamer namer = context.getNamer();
ImportTypeTable typeTable = context.getTypeTable();
checkState(
item.getType().isBytesType(),
"Error setting %s to be read from file. Replacing field value with file contents is only"
+ " allowed for fields of type 'bytes', but the type is %s.",
item.getIdentifier(),
item.getType());
typeTable.getAndSaveNicknameFor(item.getType());
String value = item.getInitValueConfig().getInitialValue().getValue();
switch (item.getInitValueConfig().getInitialValue().getType()) {
case Literal:
// File names are always strings
value =
typeTable.renderPrimitiveValue(
ProtoTypeRef.create(TypeRef.fromPrimitiveName("string")), value);
break;
case Variable:
value = namer.localVarReference(Name.anyLower(value));
break;
default:
throw new IllegalArgumentException("Unhandled init value type");
}
return surfaceLine
.identifier(namer.localVarName(item.getIdentifier()))
.fileName(SimpleInitValueView.newBuilder().initialValue(value).build())
.isFirstReadFileView(isFirstReadFileView)
.descriptions(namer.getWrappedDocLines(item.getDescription(), false))
.build();
}
private void setInitValueAndComments(
SimpleInitCodeLineView.Builder surfaceLine,
MethodContext context,
InitCodeNode item,
boolean isFirstItem) {
SurfaceNamer namer = context.getNamer();
ImportTypeTable typeTable = context.getTypeTable();
InitValueConfig initValueConfig = item.getInitValueConfig();
FieldConfig fieldConfig = item.getFieldConfig();
// Output variables
InitValueView initValue;
String comment = "";
if (context.getFeatureConfig().useResourceNameFormatOptionInSample(context, fieldConfig)) {
if (!context.isFlattenedMethodContext()) {
ResourceNameConfig messageResNameConfig = fieldConfig.getMessageResourceNameConfig();
if (messageResNameConfig == null
|| messageResNameConfig.getResourceNameType() != ResourceNameType.ANY) {
// In a non-flattened context, we always use the resource name type set on the message
// instead of set on the flattened method, unless the resource name type on message
// is ANY.
fieldConfig = fieldConfig.getMessageFieldConfig();
}
}
if (item.getType().isRepeated()) {
initValue =
RepeatedResourceNameInitValueView.newBuilder()
.resourceTypeName(
namer.getAndSaveElementResourceTypeName(context.getTypeTable(), fieldConfig))
.build();
} else {
initValue = createInitValueView(context, fieldConfig, namer, typeTable, item, false);
}
} else if (initValueConfig.hasFormattingConfig() && !item.getType().isRepeated()) {
if (context.getFeatureConfig().enableStringFormatFunctions()
|| fieldConfig.getResourceNameConfig() == null) {
FormattedInitValueView.Builder formattedInitValue = FormattedInitValueView.newBuilder();
formattedInitValue.apiVariableName(
context.getNamer().getApiWrapperVariableName(context.getInterfaceConfig()));
formattedInitValue.apiWrapperName(
context.getNamer().getApiWrapperClassName(context.getInterfaceConfig()));
formattedInitValue.fullyQualifiedApiWrapperName(
context.getNamer().getFullyQualifiedApiWrapperClassName(context.getInterfaceConfig()));
formattedInitValue.formatFunctionName(
context
.getNamer()
.getFormatFunctionName(
context.getInterfaceConfig(), initValueConfig.getSingleResourceNameConfig()));
PathTemplate template = initValueConfig.getSingleResourceNameConfig().getNameTemplate();
String[] encodeArgs = new String[template.vars().size()];
Arrays.fill(encodeArgs, FORMAT_SPEC_PLACEHOLDER);
// Format spec usually contains reserved character, escaped by path template.
// So we first encode using FORMAT_SPEC_PLACEHOLDER, then do straight string replace.
formattedInitValue.formatSpec(
template
.withoutVars()
.encode(encodeArgs)
.replace(FORMAT_SPEC_PLACEHOLDER, context.getNamer().formatSpec()));
List<String> varList =
Lists.newArrayList(
initValueConfig.getSingleResourceNameConfig().getNameTemplate().vars());
formattedInitValue.formatArgs(getFormatFunctionArgs(context, varList, initValueConfig));
initValue = formattedInitValue.build();
} else {
initValue = createInitValueView(context, fieldConfig, namer, typeTable, item, true);
}
} else {
SimpleInitValueView.Builder simpleInitValue = SimpleInitValueView.newBuilder();
if (initValueConfig.hasSimpleInitialValue()) {
String value = initValueConfig.getInitialValue().getValue();
switch (initValueConfig.getInitialValue().getType()) {
case Literal:
if (item.getType().isEnum()) {
value = context.getTypeTable().getEnumValue(item.getType(), value);
} else {
value = context.getTypeTable().renderPrimitiveValue(item.getType(), value);
}
break;
case Random:
value = context.getNamer().injectRandomStringGeneratorCode(value);
break;
case Variable:
value = context.getNamer().localVarReference(Name.anyLower(value));
break;
default:
throw new IllegalArgumentException("Unhandled init value type");
}
simpleInitValue.initialValue(value);
} else {
simpleInitValue.initialValue(
context.getTypeTable().getSnippetZeroValueAndSaveNicknameFor(item.getType()));
simpleInitValue.isRepeated(item.getType().isRepeated());
if (isRequired(item.getFieldConfig(), context)) {
String name = getVariableName(context, item);
comment = String.format(UNINITIALIZED_REQUIRED_FIELD_COMMENT, name);
}
}
initValue = simpleInitValue.build();
}
surfaceLine.initValue(initValue);
surfaceLine.needsLeadingNewline(!isFirstItem);
if (generateStandardComments) {
surfaceLine.doc(context.getNamer().getDocLines(comment));
} else {
surfaceLine.doc(ImmutableList.of());
}
surfaceLine.descriptions(context.getNamer().getWrappedDocLines(item.getDescription(), false));
}
private InitValueView createInitValueView(
MethodContext context,
FieldConfig fieldConfig,
SurfaceNamer namer,
ImportTypeTable typeTable,
InitCodeNode item,
boolean convertToString) {
SingleResourceNameConfig singleResourceNameConfig;
switch (fieldConfig.getResourceNameType()) {
case ANY:
// TODO(michaelbausor): handle case where there are no other resource names at all...
singleResourceNameConfig =
Iterables.get(context.getProductConfig().getSingleResourceNameConfigs(), 0);
FieldConfig anyResourceNameFieldConfig =
fieldConfig.withResourceNameConfig(singleResourceNameConfig);
return createResourceNameInitValueView(context, anyResourceNameFieldConfig, item)
.convertToString(convertToString)
.build();
case FIXED:
throw new UnsupportedOperationException("entity name invalid");
case ONEOF:
ResourceNameOneofConfig oneofConfig =
(ResourceNameOneofConfig) fieldConfig.getResourceNameConfig();
singleResourceNameConfig = Iterables.get(oneofConfig.getSingleResourceNameConfigs(), 0);
FieldConfig singleResourceNameFieldConfig =
fieldConfig.withResourceNameConfig(singleResourceNameConfig);
ResourceNameInitValueView initView =
createResourceNameInitValueView(context, singleResourceNameFieldConfig, item)
.convertToString(convertToString)
.build();
return ResourceNameOneofInitValueView.newBuilder()
.resourceOneofTypeName(namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig))
.specificResourceNameView(initView)
.build();
case SINGLE:
return createResourceNameInitValueView(context, fieldConfig, item)
.convertToString(convertToString)
.build();
case NONE:
// Fall-through
default:
throw new UnsupportedOperationException(
"unexpected entity name type '" + fieldConfig.getResourceNameType() + "'");
}
}
private ResourceNameInitValueView.Builder createResourceNameInitValueView(
MethodContext context, FieldConfig fieldConfig, InitCodeNode item) {
String resourceName =
context.getNamer().getAndSaveElementResourceTypeName(context.getTypeTable(), fieldConfig);
SingleResourceNameConfig singleResourceNameConfig =
(SingleResourceNameConfig) fieldConfig.getResourceNameConfig();
List<String> varList = Lists.newArrayList(singleResourceNameConfig.getNameTemplate().vars());
return ResourceNameInitValueView.newBuilder()
.resourceTypeName(resourceName)
.formatArgs(getFormatFunctionArgs(context, varList, item.getInitValueConfig()));
}
private static List<String> getFormatFunctionArgs(
MethodContext context, List<String> varList, InitValueConfig initValueConfig) {
List<String> formatFunctionArgs = new ArrayList<>();
for (String entityName : varList) {
String entityValue =
context
.getTypeTable()
.renderValueAsString("[" + Name.anyLower(entityName).toUpperUnderscore() + "]");
if (initValueConfig.getResourceNameBindingValues().containsKey(entityName)) {
InitValue initValue = initValueConfig.getResourceNameBindingValues().get(entityName);
switch (initValue.getType()) {
case Variable:
entityValue = context.getNamer().localVarReference(Name.anyLower(initValue.getValue()));
break;
case Random:
entityValue = context.getNamer().injectRandomStringGeneratorCode(initValue.getValue());
break;
case Literal:
entityValue =
context
.getTypeTable()
.renderPrimitiveValue(
ProtoTypeRef.create(TypeRef.fromPrimitiveName("string")),
initValue.getValue());
break;
default:
throw new IllegalArgumentException("Unhandled init value type");
}
}
formatFunctionArgs.add(entityValue);
}
return formatFunctionArgs;
}
private List<FieldSettingView> getFieldSettings(
MethodContext context, Iterable<InitCodeNode> childItems) {
SurfaceNamer namer = context.getNamer();
List<FieldSettingView> allSettings = new ArrayList<>();
for (InitCodeNode item : childItems) {
FieldSettingView.Builder fieldSetting = FieldSettingView.newBuilder();
FieldConfig fieldConfig = item.getFieldConfig();
if (context.getFeatureConfig().useResourceNameProtoAccessor(fieldConfig)) {
fieldSetting.fieldSetFunction(
namer.getResourceNameFieldSetFunctionName(fieldConfig.getMessageFieldConfig()));
} else {
fieldSetting.fieldSetFunction(
namer.getFieldSetFunctionName(item.getType(), Name.anyLower(item.getVarName())));
}
fieldSetting.fieldAddFunction(
namer.getFieldAddFunctionName(item.getType(), Name.anyLower(item.getVarName())));
fieldSetting.fieldGetFunction(
namer.getFieldGetFunctionName(item.getType(), Name.anyLower(item.getVarName())));
fieldSetting.identifier(getVariableName(context, item));
fieldSetting.initCodeLine(
generateSurfaceInitCodeLine(context, item, allSettings.isEmpty(), false));
fieldSetting.fieldName(context.getNamer().publicFieldName(Name.anyLower(item.getVarName())));
fieldSetting.isMap(item.getType().isMap());
fieldSetting.isArray(!item.getType().isMap() && item.getType().isRepeated());
fieldSetting.elementTypeName(context.getTypeTable().getFullNameFor(item.getType()));
if (item.getOneofConfig() != null) {
fieldSetting.oneofConfig(
OneofConfigView.newBuilder()
.groupName(namer.publicFieldName(item.getOneofConfig().groupName()))
.variantType(namer.getOneofVariantTypeName(item.getOneofConfig()))
.build());
}
fieldSetting.required(isRequired(fieldConfig, context));
String formatMethodName = "";
String transformParamFunctionName = "";
// If resource name converters should only be used in the sample, we need to convert the
// resource name to a string before passing it or setting it on the next thing
boolean needsConversion =
context.getFeatureConfig().useResourceNameConvertersInSampleOnly(context, fieldConfig);
// If resource name converters should be used and this is not a flattened method context
// (i.e. it is for setting fields on a proto object), we need to convert the resource name
// to a string.
// For flattened method contexts, if the resource names are used in more than just the sample
// (i.e. in the flattened method signature), then we don't convert (that will be done in the
// flattened method implementation when setting fields on the proto object).
if (context.getFeatureConfig().useResourceNameConverters(fieldConfig)
&& !context.isFlattenedMethodContext()) {
needsConversion = true;
}
if (needsConversion) {
if (fieldConfig.getField().isRepeated()) {
// TODO (https://github.com/googleapis/toolkit/issues/1806) support repeated one-ofs
transformParamFunctionName =
namer.getResourceTypeFormatListMethodName(context.getTypeTable(), fieldConfig);
} else {
formatMethodName = namer.getResourceNameFormatMethodName();
}
}
fieldSetting.transformParamFunctionName(transformParamFunctionName);
fieldSetting.formatMethodName(formatMethodName);
allSettings.add(fieldSetting.build());
}
return allSettings;
}
/** Determines whether a field is required */
private static boolean isRequired(FieldConfig fieldConfig, MethodContext context) {
return fieldConfig != null
&& context
.getMethodConfig()
.getRequiredFieldConfigs()
.stream()
.anyMatch(
fc -> fc.getField().getSimpleName().equals(fieldConfig.getField().getSimpleName()));
}
private static String getVariableName(MethodContext context, InitCodeNode item) {
if (!context
.getFeatureConfig()
.useResourceNameFormatOptionInSample(context, item.getFieldConfig())
&& item.getInitValueConfig().hasFormattingConfig()) {
return context.getNamer().getFormattedVariableName(item.getIdentifier());
}
return context.getNamer().localVarName(item.getIdentifier());
}
}
| 1 | 29,396 | Where did this functionality move to? | googleapis-gapic-generator | java |
@@ -68,6 +68,19 @@ public class TiDBJDBCClient implements AutoCloseable {
}
}
+ // SPLIT TABLE table_name [INDEX index_name] BETWEEN (lower_value) AND (upper_value) REGIONS
+ // region_num
+ public boolean splitTableRegion(
+ String dbName, String tblName, long minVal, long maxVal, long regionNum) throws SQLException {
+ try (Statement tidbStmt = connection.createStatement()) {
+ String sql =
+ String.format(
+ "split table %s.%s between (%d) and (%d) regions %d",
+ dbName, tblName, minVal, maxVal, regionNum);
+ return tidbStmt.execute(sql);
+ }
+ }
+
public boolean isClosed() throws SQLException {
return connection.isClosed();
} | 1 | /*
* Copyright 2019 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
public class TiDBJDBCClient implements AutoCloseable {
private Connection connection;
private static final String UNLOCK_TABLES_SQL = "unlock tables";
private static final String SELECT_TIDB_CONFIG_SQL = "select @@tidb_config";
private static final String ENABLE_TABLE_LOCK_KEY = "enable-table-lock";
private static final Boolean ENABLE_TABLE_LOCK_DEFAULT = false;
public TiDBJDBCClient(Connection connection) {
this.connection = connection;
}
public boolean isEnableTableLock() throws IOException, SQLException {
String configJSON = (String) queryTiDBViaJDBC(SELECT_TIDB_CONFIG_SQL).get(0).get(0);
ObjectMapper objectMapper = new ObjectMapper();
TypeReference<HashMap<String, Object>> typeRef =
new TypeReference<HashMap<String, Object>>() {};
HashMap<String, Object> configMap = objectMapper.readValue(configJSON, typeRef);
Object enableTableLock =
configMap.getOrDefault(ENABLE_TABLE_LOCK_KEY, ENABLE_TABLE_LOCK_DEFAULT);
return (Boolean) enableTableLock;
}
public boolean lockTableWriteLocal(String databaseName, String tableName) throws SQLException {
try (Statement tidbStmt = connection.createStatement()) {
String sql = "lock tables `" + databaseName + "`.`" + tableName + "` write local";
int result = tidbStmt.executeUpdate(sql);
return result == 0;
}
}
public boolean unlockTables() throws SQLException {
try (Statement tidbStmt = connection.createStatement()) {
int result = tidbStmt.executeUpdate(UNLOCK_TABLES_SQL);
return result == 0;
}
}
public boolean dropTable(String databaseName, String tableName) throws SQLException {
try (Statement tidbStmt = connection.createStatement()) {
String sql = "drop table if exists `" + databaseName + "`.`" + tableName + "`";
return tidbStmt.execute(sql);
}
}
public boolean isClosed() throws SQLException {
return connection.isClosed();
}
@Override
public void close() throws Exception {
connection.close();
}
private List<List<Object>> queryTiDBViaJDBC(String query) throws SQLException {
ArrayList<List<Object>> result = new ArrayList<>();
try (Statement tidbStmt = connection.createStatement()) {
ResultSet resultSet = tidbStmt.executeQuery(query);
ResultSetMetaData rsMetaData = resultSet.getMetaData();
while (resultSet.next()) {
ArrayList<Object> row = new ArrayList<>();
for (int i = 1; i <= rsMetaData.getColumnCount(); i++) {
row.add(resultSet.getObject(i));
}
result.add(row);
}
}
return result;
}
}
| 1 | 10,439 | maybe should firstly check whether current tidb support `split table region`? | pingcap-tispark | java |
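The review comment above suggests verifying that the connected TiDB server actually understands `SPLIT TABLE ... REGIONS` before `splitTableRegion` issues the statement. A minimal sketch of such a guard follows; it is not part of the patch under review, the `tidb_version()` query and the major-version-3 cutoff are assumptions for illustration, and the class and method names are hypothetical.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical guard (not part of TiDBJDBCClient or the patch above): returns true when the
// connected server's reported release looks new enough to accept SPLIT TABLE ... REGIONS.
// The tidb_version() query and the >= 3 major-version threshold are assumptions.
public final class SplitRegionSupportCheck {
  private static final Pattern RELEASE = Pattern.compile("v(\\d+)\\.(\\d+)\\.(\\d+)");

  public static boolean supportsSplitTableRegion(Connection connection) throws SQLException {
    try (Statement stmt = connection.createStatement();
        ResultSet rs = stmt.executeQuery("select tidb_version()")) {
      if (!rs.next()) {
        return false;
      }
      // tidb_version() returns a multi-line banner such as "Release Version: v3.0.3 ...".
      Matcher matcher = RELEASE.matcher(rs.getString(1));
      return matcher.find() && Integer.parseInt(matcher.group(1)) >= 3;
    }
  }
}

A caller could then fail fast, e.g. throw a SQLException when supportsSplitTableRegion(connection) returns false, before invoking splitTableRegion.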
@@ -27,7 +27,8 @@ import { Fragment } from '@wordpress/element';
import Data from 'googlesitekit-data';
import Logo from './Logo';
import UserMenu from './UserMenu';
-import ErrorNotification from '../components/notifications/error-notification';
+import LegacyErrorNotification from './legacy-notifications/error-notification';
+import ErrorNotification from './notifications/ErrorNotifications';
import { STORE_NAME as CORE_USER } from '../googlesitekit/datastore/user/constants';
const { useSelect } = Data;
| 1 | /**
* Header component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { Fragment } from '@wordpress/element';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import Logo from './Logo';
import UserMenu from './UserMenu';
import ErrorNotification from '../components/notifications/error-notification';
import { STORE_NAME as CORE_USER } from '../googlesitekit/datastore/user/constants';
const { useSelect } = Data;
const Header = () => {
const isAuthenticated = useSelect( ( select ) => select( CORE_USER ).isAuthenticated() );
return (
<Fragment>
<header className="googlesitekit-header">
<section className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--align-middle
mdc-layout-grid__cell--span-3-phone
mdc-layout-grid__cell--span-4-tablet
mdc-layout-grid__cell--span-6-desktop
">
<Logo />
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--align-middle
mdc-layout-grid__cell--align-right-phone
mdc-layout-grid__cell--span-1-phone
mdc-layout-grid__cell--span-4-tablet
mdc-layout-grid__cell--span-6-desktop
">
{ isAuthenticated && <UserMenu /> }
</div>
</div>
</section>
</header>
<ErrorNotification />
</Fragment>
);
};
export default Header;
| 1 | 34,118 | Please use the same (plural) name for the new component since it renders multiple notifications. | google-site-kit-wp | js |
@@ -466,6 +466,7 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
}
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
+ cs.log.Infof("Failed to download block %d. %v", topBlock.Round()-basics.Round(blocksFetched), err)
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload failed after multiple blocks download attempts")) | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package catchup
import (
"context"
"fmt"
"sync"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
)
// CatchpointCatchupNodeServices defines the external node support needed
// for the catchpoint service to switch the node between "regular" operational mode and catchup mode.
type CatchpointCatchupNodeServices interface {
SetCatchpointCatchupMode(bool) (newContextCh <-chan context.Context)
}
// CatchpointCatchupStats is used for querying and reporting the current state of the catchpoint catchup process
type CatchpointCatchupStats struct {
CatchpointLabel string
TotalAccounts uint64
ProcessedAccounts uint64
TotalBlocks uint64
AcquiredBlocks uint64
VerifiedBlocks uint64
ProcessedBytes uint64
StartTime time.Time
}
// CatchpointCatchupService represents the catchpoint catchup service.
type CatchpointCatchupService struct {
// stats is the statistics object, updated async while downloading the ledger
stats CatchpointCatchupStats
// statsMu synchronizes access to stats, as we could attempt to update it while querying for its current state
statsMu deadlock.Mutex
node CatchpointCatchupNodeServices
// ctx is the node cancelation context, used when the node is being stopped.
ctx context.Context
cancelCtxFunc context.CancelFunc
// running is a waitgroup counting the running goroutine(1), and allows us to exit cleanly.
running sync.WaitGroup
// ledgerAccessor is the ledger accessor used to perform ledger-level operation on the database
ledgerAccessor ledger.CatchpointCatchupAccessor
// stage is the current stage of the catchpoint catchup process
stage ledger.CatchpointCatchupState
// log is the logger object
log logging.Logger
// newService indicates whether this service was created after the node was running ( i.e. true ) or the node just started to find that it was previously performing catchup
newService bool
// net is the underlying network module
net network.GossipNode
// ledger points to the ledger object
ledger *ledger.Ledger
// lastBlockHeader is the latest block we have before going into catchpoint catchup mode. We use it to serve the node status requests instead of going to the ledger.
lastBlockHeader bookkeeping.BlockHeader
// config is a copy of the node configuration
config config.Local
// abortCtx is used as a synchronized flag to let us know when the user asked us to abort the catchpoint catchup process. note that it's not being used when we decided to abort
// the catchup due to an internal issue ( such as exceeding number of retries )
abortCtx context.Context
abortCtxFunc context.CancelFunc
}
// MakeResumedCatchpointCatchupService creates a catchpoint catchup service for a node that is already in catchpoint catchup mode
func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
service = &CatchpointCatchupService{
stats: CatchpointCatchupStats{
StartTime: time.Now(),
},
node: node,
ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
log: log,
newService: false,
net: net,
ledger: l,
config: cfg,
}
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
}
err = service.loadStateVariables(ctx)
if err != nil {
return nil, err
}
return service, nil
}
// MakeNewCatchpointCatchupService creates a new catchpoint catchup service for a node that is not in catchpoint catchup mode
func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
if catchpoint == "" {
return nil, fmt.Errorf("MakeNewCatchpointCatchupService: catchpoint is invalid")
}
service = &CatchpointCatchupService{
stats: CatchpointCatchupStats{
CatchpointLabel: catchpoint,
StartTime: time.Now(),
},
node: node,
ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
stage: ledger.CatchpointCatchupStateInactive,
log: log,
newService: true,
net: net,
ledger: l,
config: cfg,
}
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
}
return service, nil
}
// Start starts the catchpoint catchup service ( continue in the process )
func (cs *CatchpointCatchupService) Start(ctx context.Context) {
cs.ctx, cs.cancelCtxFunc = context.WithCancel(ctx)
cs.abortCtx, cs.abortCtxFunc = context.WithCancel(context.Background())
cs.running.Add(1)
go cs.run()
}
// Abort aborts the catchpoint catchup process
func (cs *CatchpointCatchupService) Abort() {
// In order to abort the catchpoint catchup process, we need to first set the flag of abortCtxFunc, and follow that by canceling the main context.
// The order of these calls is crucial : The various stages are blocked on the main context. When that one expires, it uses the abort context to determine
// whether the cancellation means that we want to shut down the process or abort the catchpoint catchup completely.
cs.abortCtxFunc()
cs.cancelCtxFunc()
}
// Stop stops the catchpoint catchup service - unlike Abort, this is not intended to abort the process but rather to allow
// cleanup of in-memory resources for the purpose of clean shutdown.
func (cs *CatchpointCatchupService) Stop() {
// signal the running goroutine that we want to stop
cs.cancelCtxFunc()
// wait for the running goroutine to terminate.
cs.running.Wait()
// cancel the abort context, just to release its goroutine.
cs.abortCtxFunc()
}
// GetLatestBlockHeader returns the last block header that was available at the time the catchpoint catchup service started
func (cs *CatchpointCatchupService) GetLatestBlockHeader() bookkeeping.BlockHeader {
return cs.lastBlockHeader
}
// run is the main stage-switching background service function. It dispatches the current stage to the correct stage handler.
func (cs *CatchpointCatchupService) run() {
defer cs.running.Done()
var err error
for {
// check if we need to abort.
select {
case <-cs.ctx.Done():
return
default:
}
switch cs.stage {
case ledger.CatchpointCatchupStateInactive:
err = cs.processStageInactive()
case ledger.CatchpointCatchupStateLedgerDownload:
err = cs.processStageLedgerDownload()
case ledger.CatchpointCatchupStateLastestBlockDownload:
err = cs.processStageLastestBlockDownload()
case ledger.CatchpointCatchupStateBlocksDownload:
err = cs.processStageBlocksDownload()
case ledger.CatchpointCatchupStateSwitch:
err = cs.processStageSwitch()
default:
err = cs.abort(fmt.Errorf("unexpected catchpoint catchup stage encountered : %v", cs.stage))
}
if cs.ctx.Err() != nil {
if err != nil {
cs.log.Warnf("catchpoint catchup stage error : %v", err)
}
continue
}
if err != nil {
cs.log.Warnf("catchpoint catchup stage error : %v", err)
time.Sleep(200 * time.Millisecond)
}
}
}
// loadStateVariables loads the current stage and catchpoint label from disk. It's used only in the case of catchpoint catchup recovery.
// ( i.e. the node never completed the catchup, and the node was shutdown )
func (cs *CatchpointCatchupService) loadStateVariables(ctx context.Context) (err error) {
var label string
label, err = cs.ledgerAccessor.GetLabel(ctx)
if err != nil {
return err
}
cs.statsMu.Lock()
cs.stats.CatchpointLabel = label
cs.statsMu.Unlock()
cs.stage, err = cs.ledgerAccessor.GetState(ctx)
if err != nil {
return err
}
return nil
}
// processStageInactive is the first catchpoint stage. It stores the desired label for catching up, so that if the catchpoint catchup is interrupted
// it could be resumed from that point.
func (cs *CatchpointCatchupService) processStageInactive() (err error) {
cs.statsMu.Lock()
label := cs.stats.CatchpointLabel
cs.statsMu.Unlock()
err = cs.ledgerAccessor.SetLabel(cs.ctx, label)
if err != nil {
return cs.abort(fmt.Errorf("processStageInactive failed to set a catchpoint label : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateLedgerDownload)
if err != nil {
return cs.abort(fmt.Errorf("processStageInactive failed to update stage : %v", err))
}
if cs.newService {
// we need to let the node know that it should shut down all the unneeded services to avoid clashes.
cs.updateNodeCatchupMode(true)
}
return nil
}
// processStageLedgerDownload is the second catchpoint catchup stage. It downloads the ledger.
func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
cs.statsMu.Lock()
label := cs.stats.CatchpointLabel
cs.statsMu.Unlock()
round, _, err0 := ledger.ParseCatchpointLabel(label)
if err0 != nil {
return cs.abort(fmt.Errorf("processStageLedgerDownload failed to patse label : %v", err0))
}
// download balances file.
ledgerFetcher := makeLedgerFetcher(cs.net, cs.ledgerAccessor, cs.log, cs)
attemptsCount := 0
for {
attemptsCount++
err = cs.ledgerAccessor.ResetStagingBalances(cs.ctx, true)
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
return cs.abort(fmt.Errorf("processStageLedgerDownload failed to reset staging balances : %v", err))
}
err = ledgerFetcher.downloadLedger(cs.ctx, round)
if err == nil {
break
}
// instead of testing for err == cs.ctx.Err() , we'll check on the context itself.
// this is more robust, as the http client library sometimes wraps the context canceled
// error with other errors.
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount >= cs.config.CatchupLedgerDownloadRetryAttempts {
err = fmt.Errorf("catchpoint catchup exceeded number of attempts to retrieve ledger")
return cs.abort(err)
}
cs.log.Warnf("unable to download ledger : %v", err)
}
err = cs.updateStage(ledger.CatchpointCatchupStateLastestBlockDownload)
if err != nil {
return cs.abort(fmt.Errorf("processStageLedgerDownload failed to update stage to CatchpointCatchupStateLastestBlockDownload : %v", err))
}
return nil
}
// processStageLastestBlockDownload is the third catchpoint catchup stage. It downloads the latest block and verifies it against the previously downloaded ledger.
func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err error) {
blockRound, err := cs.ledgerAccessor.GetCatchupBlockRound(cs.ctx)
if err != nil {
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to retrieve catchup block round : %v", err))
}
fetcherFactory := MakeNetworkFetcherFactory(cs.net, 10, nil, &cs.config)
attemptsCount := 0
var blk *bookkeeping.Block
var client FetcherClient
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
if ledgerBlock, err := cs.ledger.Block(blockRound); err == nil {
blk = &ledgerBlock
}
for {
attemptsCount++
if blk == nil {
fetcher := fetcherFactory.New()
blk, _, client, err = fetcher.FetchBlock(cs.ctx, blockRound)
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to get block %d : %v", blockRound, err))
}
// success
client.Close()
}
// check block protocol version support.
if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
cs.log.Warnf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
}
// check to see that the block header and the block payset aligns
if !blk.ContentsMatchHeader() {
cs.log.Warnf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header")
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header"))
}
// verify that the catchpoint is valid.
err = cs.ledgerAccessor.VerifyCatchpoint(cs.ctx, blk)
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling VerifyCatchpoint : %v", err))
}
err = cs.ledgerAccessor.StoreBalancesRound(cs.ctx, blk)
if err != nil {
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreBalancesRound : %v", err))
}
err = cs.ledgerAccessor.StoreFirstBlock(cs.ctx, blk)
if err != nil {
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreFirstBlock : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateBlocksDownload)
if err != nil {
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to update stage : %v", err))
}
// great ! everything is ready for next stage.
break
}
return nil
}
// processStageBlocksDownload is the fourth catchpoint catchup stage. It downloads all the remainder of the blocks, verifying each one of them against its predecessor.
func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
topBlock, err := cs.ledgerAccessor.EnsureFirstBlock(cs.ctx)
if err != nil {
return cs.abort(fmt.Errorf("processStageBlocksDownload failed, unable to ensure first block : %v", err))
}
// pick the lookback with the greater of either MaxTxnLife or MaxBalLookback
lookback := config.Consensus[topBlock.CurrentProtocol].MaxTxnLife
if lookback < config.Consensus[topBlock.CurrentProtocol].MaxBalLookback {
lookback = config.Consensus[topBlock.CurrentProtocol].MaxBalLookback
}
// in case the effective lookback is going before our rounds count, trim it there.
// ( a catchpoint is generated starting at round MaxBalLookback, and this is possible for any round in the range of MaxBalLookback..MaxTxnLife)
if lookback >= uint64(topBlock.Round()) {
lookback = uint64(topBlock.Round() - 1)
}
cs.statsMu.Lock()
cs.stats.TotalBlocks = uint64(lookback)
cs.stats.AcquiredBlocks = 0
cs.stats.VerifiedBlocks = 0
cs.statsMu.Unlock()
prevBlock := &topBlock
fetcherFactory := MakeNetworkFetcherFactory(cs.net, 10, nil, &cs.config)
blocksFetched := uint64(1) // we already got the first block in the previous step.
var blk *bookkeeping.Block
var client FetcherClient
for attemptsCount := uint64(1); blocksFetched <= lookback; attemptsCount++ {
if err := cs.ctx.Err(); err != nil {
return cs.stopOrAbort()
}
blk = nil
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
if ledgerBlock, err := cs.ledger.Block(topBlock.Round() - basics.Round(blocksFetched)); err == nil {
blk = &ledgerBlock
} else {
switch err.(type) {
case ledger.ErrNoEntry:
// this is expected, ignore this one.
default:
cs.log.Warnf("processStageBlocksDownload encountered the following error when attempting to retrieve the block for round %d : %v", topBlock.Round()-basics.Round(blocksFetched), err)
}
}
if blk == nil {
fetcher := fetcherFactory.New()
blk, _, client, err = fetcher.FetchBlock(cs.ctx, topBlock.Round()-basics.Round(blocksFetched))
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload failed after multiple blocks download attempts"))
}
// success
client.Close()
}
cs.updateBlockRetrievalStatistics(1, 0)
// validate :
if prevBlock.BlockHeader.Branch != blk.Hash() {
// not identical, retry download.
cs.log.Warnf("processStageBlocksDownload downloaded block(%d) did not match it's successor(%d) block hash %v != %v", blk.Round(), prevBlock.Round(), blk.Hash(), prevBlock.BlockHeader.Branch)
cs.updateBlockRetrievalStatistics(-1, 0)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload downloaded block(%d) did not match it's successor(%d) block hash %v != %v", blk.Round(), prevBlock.Round(), blk.Hash(), prevBlock.BlockHeader.Branch))
}
// check block protocol version support.
if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
cs.log.Warnf("processStageBlocksDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
cs.updateBlockRetrievalStatistics(-1, 0)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
}
// check to see that the block header and the block payset aligns
if !blk.ContentsMatchHeader() {
cs.log.Warnf("processStageBlocksDownload: downloaded block content does not match downloaded block header")
// try again.
cs.updateBlockRetrievalStatistics(-1, 0)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload: downloaded block content does not match downloaded block header"))
}
cs.updateBlockRetrievalStatistics(0, 1)
// all good, persist and move on.
err = cs.ledgerAccessor.StoreBlock(cs.ctx, blk)
if err != nil {
cs.log.Warnf("processStageBlocksDownload failed to store downloaded staging block for round %d", blk.Round())
cs.updateBlockRetrievalStatistics(-1, -1)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload failed to store downloaded staging block for round %d", blk.Round()))
}
prevBlock = blk
blocksFetched++
}
err = cs.updateStage(ledger.CatchpointCatchupStateSwitch)
if err != nil {
return cs.abort(fmt.Errorf("processStageBlocksDownload failed to update stage : %v", err))
}
return nil
}
// processStageSwitch is the fifth catchpoint catchup stage. It completes the catchup process, swaps in the new tables and restarts the node functionality.
func (cs *CatchpointCatchupService) processStageSwitch() (err error) {
err = cs.ledgerAccessor.CompleteCatchup(cs.ctx)
if err != nil {
return cs.abort(fmt.Errorf("processStageSwitch failed to complete catchup : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateInactive)
if err != nil {
return cs.abort(fmt.Errorf("processStageSwitch failed to update stage : %v", err))
}
cs.updateNodeCatchupMode(false)
// we've completed the catchup, so we want to cancel the context so that the
// run function would exit.
cs.cancelCtxFunc()
return nil
}
// stopOrAbort is called when any of the stage processing function sees that cs.ctx has been canceled. It can be
// due to the end user attempting to abort the current catchpoint catchup operation or due to a node shutdown.
func (cs *CatchpointCatchupService) stopOrAbort() error {
if cs.abortCtx.Err() == context.Canceled {
return cs.abort(context.Canceled)
}
return nil
}
// abort aborts the current catchpoint catchup process, reverting the node to standard operation.
func (cs *CatchpointCatchupService) abort(originatingErr error) error {
outError := originatingErr
err0 := cs.ledgerAccessor.ResetStagingBalances(cs.ctx, false)
if err0 != nil {
outError = fmt.Errorf("unable to reset staging balances : %v; %v", err0, outError)
}
cs.updateNodeCatchupMode(false)
// we want to abort the catchpoint catchup process, and the node already reverted to normal operation.
// as part of the returning to normal operation, we've re-created our context. This context need to be
// canceled so that when we go back to run(), we would exit from there right away.
cs.cancelCtxFunc()
return outError
}
// updateStage updates the current catchpoint catchup stage to the provided new stage.
func (cs *CatchpointCatchupService) updateStage(newStage ledger.CatchpointCatchupState) (err error) {
err = cs.ledgerAccessor.SetState(cs.ctx, newStage)
if err != nil {
return err
}
cs.stage = newStage
return nil
}
// updateNodeCatchupMode requests the node to change its operational mode from
// catchup mode to normal mode and vice versa.
func (cs *CatchpointCatchupService) updateNodeCatchupMode(catchupModeEnabled bool) {
newCtxCh := cs.node.SetCatchpointCatchupMode(catchupModeEnabled)
select {
case newCtx, open := <-newCtxCh:
if open {
cs.ctx, cs.cancelCtxFunc = context.WithCancel(newCtx)
} else {
// channel is closed, this means that the node is stopping
}
case <-cs.ctx.Done():
// the node context was canceled before the SetCatchpointCatchupMode goroutine had
// a chance to complete. At this point, the service is shutting down. However,
// we don't know how long it would take until the node mutex becomes available.
// given that SetCatchpointCatchupMode gave us a non-buffered channel, it might get blocked
// if we don't drain that channel. To resolve that, we will create another goroutine here
// which would drain that channel.
go func() {
// We'll wait here for the above goroutine to complete :
<-newCtxCh
}()
}
}
func (cs *CatchpointCatchupService) updateLedgerFetcherProgress(fetcherStats *ledger.CatchpointCatchupAccessorProgress) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
cs.stats.TotalAccounts = fetcherStats.TotalAccounts
cs.stats.ProcessedAccounts = fetcherStats.ProcessedAccounts
cs.stats.ProcessedBytes = fetcherStats.ProcessedBytes
}
// GetStatistics returns a copy of the current catchpoint catchup statistics
func (cs *CatchpointCatchupService) GetStatistics() (out CatchpointCatchupStats) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
out = cs.stats
return
}
// updateBlockRetrievalStatistics updates the blocks retrieval statistics by applying the provided deltas
func (cs *CatchpointCatchupService) updateBlockRetrievalStatistics(aquiredBlocksDelta, verifiedBlocksDelta int64) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + aquiredBlocksDelta)
cs.stats.VerifiedBlocks = uint64(int64(cs.stats.VerifiedBlocks) + verifiedBlocksDelta)
}
| 1 | 40,342 | Perhaps show attempt x of y in message ie: Failed to download block %d (attempt %d of %d). %v | algorand-go-algorand | go |
@@ -94,8 +94,7 @@ public class PrivateTransactionHandler {
}
}
- public String getPrivacyGroup(final String key, final PrivateTransaction privateTransaction)
- throws Exception {
+ public String getPrivacyGroup(final String key, final PrivateTransaction privateTransaction) {
if (privateTransaction.getPrivacyGroupId().isPresent()) {
return BytesValues.asBase64String(privateTransaction.getPrivacyGroupId().get());
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.privacy;
import org.hyperledger.besu.enclave.Enclave;
import org.hyperledger.besu.enclave.types.ReceiveRequest;
import org.hyperledger.besu.enclave.types.ReceiveResponse;
import org.hyperledger.besu.enclave.types.SendRequest;
import org.hyperledger.besu.enclave.types.SendRequestBesu;
import org.hyperledger.besu.enclave.types.SendRequestLegacy;
import org.hyperledger.besu.enclave.types.SendResponse;
import org.hyperledger.besu.ethereum.core.Account;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.mainnet.TransactionValidator;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.privacy.markertransaction.PrivateMarkerTransactionFactory;
import org.hyperledger.besu.ethereum.privacy.storage.PrivateStateStorage;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.util.bytes.BytesValues;
import java.math.BigInteger;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class PrivateTransactionHandler {
private static final Logger LOG = LogManager.getLogger();
private final Enclave enclave;
private final String enclavePublicKey;
private final PrivateStateStorage privateStateStorage;
private final WorldStateArchive privateWorldStateArchive;
private final PrivateTransactionValidator privateTransactionValidator;
private final PrivateMarkerTransactionFactory privateMarkerTransactionFactory;
public PrivateTransactionHandler(
final PrivacyParameters privacyParameters,
final Optional<BigInteger> chainId,
final PrivateMarkerTransactionFactory privateMarkerTransactionFactory) {
this(
new Enclave(privacyParameters.getEnclaveUri()),
privacyParameters.getEnclavePublicKey(),
privacyParameters.getPrivateStateStorage(),
privacyParameters.getPrivateWorldStateArchive(),
new PrivateTransactionValidator(chainId),
privateMarkerTransactionFactory);
}
public PrivateTransactionHandler(
final Enclave enclave,
final String enclavePublicKey,
final PrivateStateStorage privateStateStorage,
final WorldStateArchive privateWorldStateArchive,
final PrivateTransactionValidator privateTransactionValidator,
final PrivateMarkerTransactionFactory privateMarkerTransactionFactory) {
this.enclave = enclave;
this.enclavePublicKey = enclavePublicKey;
this.privateStateStorage = privateStateStorage;
this.privateWorldStateArchive = privateWorldStateArchive;
this.privateTransactionValidator = privateTransactionValidator;
this.privateMarkerTransactionFactory = privateMarkerTransactionFactory;
}
public String sendToOrion(final PrivateTransaction privateTransaction) throws Exception {
final SendRequest sendRequest = createSendRequest(privateTransaction);
final SendResponse sendResponse;
try {
LOG.trace("Storing private transaction in enclave");
sendResponse = enclave.send(sendRequest);
return sendResponse.getKey();
} catch (Exception e) {
LOG.error("Failed to store private transaction in enclave", e);
throw e;
}
}
public String getPrivacyGroup(final String key, final PrivateTransaction privateTransaction)
throws Exception {
if (privateTransaction.getPrivacyGroupId().isPresent()) {
return BytesValues.asBase64String(privateTransaction.getPrivacyGroupId().get());
}
final ReceiveRequest receiveRequest =
new ReceiveRequest(key, BytesValues.asBase64String(privateTransaction.getPrivateFrom()));
LOG.debug(
"Getting privacy group for {}",
BytesValues.asBase64String(privateTransaction.getPrivateFrom()));
final ReceiveResponse receiveResponse;
try {
receiveResponse = enclave.receive(receiveRequest);
return receiveResponse.getPrivacyGroupId();
} catch (Exception e) {
LOG.error("Failed to retrieve private transaction in enclave", e);
throw e;
}
}
public Transaction createPrivacyMarkerTransaction(
final String transactionEnclaveKey, final PrivateTransaction privateTransaction) {
return privateMarkerTransactionFactory.create(transactionEnclaveKey, privateTransaction);
}
public ValidationResult<TransactionValidator.TransactionInvalidReason> validatePrivateTransaction(
final PrivateTransaction privateTransaction, final String privacyGroupId) {
return privateTransactionValidator.validate(
privateTransaction, getSenderNonce(privateTransaction.getSender(), privacyGroupId));
}
private SendRequest createSendRequest(final PrivateTransaction privateTransaction) {
final BytesValueRLPOutput bvrlp = new BytesValueRLPOutput();
privateTransaction.writeTo(bvrlp);
final String payload = BytesValues.asBase64String(bvrlp.encoded());
if (privateTransaction.getPrivacyGroupId().isPresent()) {
return new SendRequestBesu(
payload,
enclavePublicKey,
BytesValues.asBase64String(privateTransaction.getPrivacyGroupId().get()));
} else {
final List<String> privateFor =
privateTransaction.getPrivateFor().get().stream()
.map(BytesValues::asBase64String)
.collect(Collectors.toList());
// FIXME: orion should accept empty privateFor
if (privateFor.isEmpty()) {
privateFor.add(BytesValues.asBase64String(privateTransaction.getPrivateFrom()));
}
return new SendRequestLegacy(
payload, BytesValues.asBase64String(privateTransaction.getPrivateFrom()), privateFor);
}
}
public long getSenderNonce(final Address sender, final String privacyGroupId) {
return privateStateStorage
.getLatestStateRoot(BytesValues.fromBase64(privacyGroupId))
.map(
lastRootHash ->
privateWorldStateArchive
.getMutable(lastRootHash)
.map(
worldState -> {
final Account maybePrivateSender = worldState.get(sender);
if (maybePrivateSender != null) {
return maybePrivateSender.getNonce();
}
// account has not interacted in this private state
return Account.DEFAULT_NONCE;
})
// private state does not exist
.orElse(Account.DEFAULT_NONCE))
.orElse(
// private state does not exist
Account.DEFAULT_NONCE);
}
}
| 1 | 20,427 | why did you took `Exception` out? Is this method only throwing runtime exceptions? | hyperledger-besu | java |
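The reviewer's question above is whether getPrivacyGroup can safely drop `throws Exception`, i.e. whether the enclave call now only surfaces unchecked exceptions. The diff alone does not show that, so the sketch below only illustrates the general pattern that usually makes such a signature change possible: catching a checked exception and rethrowing it wrapped in an unchecked one. All names in it (EnclaveClientException, CheckedReceiver, fetchPrivacyGroupId) are hypothetical and it is not Besu code.

// Illustration only: a 'throws Exception' clause becomes removable when the underlying call
// either throws unchecked exceptions already or is wrapped into one.
public final class UncheckedWrappingExample {

  static final class EnclaveClientException extends RuntimeException {
    EnclaveClientException(final String message, final Throwable cause) {
      super(message, cause);
    }
  }

  interface CheckedReceiver {
    String receive(String key) throws Exception; // stand-in for a checked enclave API
  }

  static String fetchPrivacyGroupId(final CheckedReceiver enclave, final String key) {
    try {
      return enclave.receive(key);
    } catch (final Exception e) {
      // Rethrow as unchecked so callers no longer need a 'throws Exception' clause.
      throw new EnclaveClientException("Failed to retrieve payload from enclave", e);
    }
  }
}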