patch (string, 17 to 31.2k chars) | y (int64, 1 to 1) | oldf (string, 0 to 2.21M chars) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (string, 8 to 843 chars) | proj (212 classes) | lang (9 classes)
---|---|---|---|---|---|---|---|
@@ -58,6 +58,12 @@ var (
Value: metadata.DefaultNetwork.AccessPolicyOracleAddress,
})
+ mmnAddressFlag = altsrc.NewStringFlag(cli.StringFlag{
+ Name: "mmn-address",
+ Usage: "URL of my.mysterium.network API",
+ Value: metadata.DefaultNetwork.MMNAddress,
+ })
+
brokerAddressFlag = altsrc.NewStringFlag(cli.StringFlag{
Name: "broker-address",
Usage: "URI of message broker", | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cmd
import (
"fmt"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/metadata"
"gopkg.in/urfave/cli.v1"
"gopkg.in/urfave/cli.v1/altsrc"
)
var (
testFlag = altsrc.NewBoolFlag(cli.BoolFlag{
Name: "testnet",
Usage: "Defines test network configuration",
})
localnetFlag = altsrc.NewBoolFlag(cli.BoolFlag{
Name: "localnet",
Usage: "Defines network configuration which expects locally deployed broker and discovery services",
})
identityCheckFlag = altsrc.NewBoolFlag(cli.BoolFlag{
Name: "experiment-identity-check",
Usage: "Enables experimental identity check",
})
apiAddressFlag = altsrc.NewStringFlag(cli.StringFlag{
Name: "api.address",
Usage: "URL of Mysterium API",
Value: metadata.DefaultNetwork.MysteriumAPIAddress,
})
apiAddressFlagDeprecated = altsrc.NewStringFlag(cli.StringFlag{
Name: "discovery-address",
Usage: fmt.Sprintf("URL of Mysterium API (DEPRECATED, start using '--%s')", apiAddressFlag.Name),
Value: apiAddressFlag.Value,
})
accessPolicyAddressFlag = altsrc.NewStringFlag(cli.StringFlag{
Name: "access-policy-address",
Usage: "URL of trust oracle endpoint for retrieving lists of access policies",
Value: metadata.DefaultNetwork.AccessPolicyOracleAddress,
})
brokerAddressFlag = altsrc.NewStringFlag(cli.StringFlag{
Name: "broker-address",
Usage: "URI of message broker",
Value: metadata.DefaultNetwork.BrokerAddress,
})
etherRPCFlag = altsrc.NewStringFlag(cli.StringFlag{
Name: "ether.client.rpc",
Usage: "URL or IPC socket to connect to ethereum node, anything what ethereum client accepts - works",
Value: metadata.DefaultNetwork.EtherClientRPC,
})
etherContractPaymentsFlag = altsrc.NewStringFlag(cli.StringFlag{
Name: "ether.contract.payments",
Usage: "Address of payments contract",
Value: metadata.DefaultNetwork.PaymentsContractAddress.String(),
})
qualityOracleFlag = altsrc.NewStringFlag(cli.StringFlag{
Name: "quality-oracle.address",
Usage: "Address of the quality oracle service",
Value: metadata.DefaultNetwork.QualityOracle,
})
natPunchingFlag = altsrc.NewBoolTFlag(cli.BoolTFlag{
Name: "experiment-natpunching",
Usage: "Enables experimental NAT hole punching",
})
)
// RegisterFlagsNetwork function register network flags to flag list
func RegisterFlagsNetwork(flags *[]cli.Flag) {
*flags = append(
*flags,
testFlag, localnetFlag,
identityCheckFlag,
natPunchingFlag,
apiAddressFlag, apiAddressFlagDeprecated,
brokerAddressFlag,
etherRPCFlag, etherContractPaymentsFlag,
qualityOracleFlag, accessPolicyAddressFlag,
)
}
// ParseFlagsNetwork function fills in directory options from CLI context
func ParseFlagsNetwork(ctx *cli.Context) node.OptionsNetwork {
return node.OptionsNetwork{
Testnet: ctx.GlobalBool(testFlag.Name),
Localnet: ctx.GlobalBool(localnetFlag.Name),
ExperimentIdentityCheck: ctx.GlobalBool(identityCheckFlag.Name),
ExperimentNATPunching: ctx.GlobalBool(natPunchingFlag.Name),
MysteriumAPIAddress: ctx.GlobalString(apiAddressFlag.Name),
AccessPolicyEndpointAddress: ctx.GlobalString(accessPolicyAddressFlag.Name),
BrokerAddress: ctx.GlobalString(brokerAddressFlag.Name),
EtherClientRPC: ctx.GlobalString(etherRPCFlag.Name),
EtherPaymentsAddress: ctx.GlobalString(etherContractPaymentsFlag.Name),
QualityOracle: ctx.GlobalString(qualityOracleFlag.Name),
}
}
| 1 | 14,935 | This looks a bit cryptic to me, and also it contains a dash. How about `mymysterium.url`? | mysteriumnetwork-node | go |
@@ -0,0 +1,8 @@
+#include<stdio.h>
+
+void main(){
+ int var = 100;
+ if (&var == 0){
+ print("Got the value");
+ }
+} | 1 | 1 | 12,493 | You did notice the missing newline at the end of this file, right? | Ericsson-codechecker | c
@@ -75,10 +75,10 @@ namespace Microsoft.CodeAnalysis.Sarif
public ISet<Stack> Stacks { get; set; }
/// <summary>
- /// An array of arrays of 'annotatedCodeLocation` objects, each inner array of which comprises a code flow (a possible execution path through the code).
+ /// An array of 'codeFlow' objects relevant to the result.
/// </summary>
[DataMember(Name = "codeFlows", IsRequired = false, EmitDefaultValue = false)]
- public IList<IList<AnnotatedCodeLocation>> CodeFlows { get; set; }
+ public ISet<CodeFlow> CodeFlows { get; set; }
/// <summary>
/// A grouped set of locations and messages, if available, that represent code areas that are related to this result. | 1 | // Copyright (c) Microsoft. All Rights Reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.CodeDom.Compiler;
using System.Collections.Generic;
using System.Runtime.Serialization;
namespace Microsoft.CodeAnalysis.Sarif
{
/// <summary>
/// A result produced by an analysis tool.
/// </summary>
[DataContract]
[GeneratedCode("Microsoft.Json.Schema.ToDotNet", "0.16.0.0")]
public partial class Result : ISarifNode, IEquatable<Result>
{
/// <summary>
/// Gets a value indicating the type of object implementing <see cref="ISarifNode" />.
/// </summary>
public SarifNodeKind SarifNodeKind
{
get
{
return SarifNodeKind.Result;
}
}
/// <summary>
/// A stable, opaque identifier for the rule that was evaluated to produce the result.
/// </summary>
[DataMember(Name = "ruleId", IsRequired = false, EmitDefaultValue = false)]
public string RuleId { get; set; }
/// <summary>
/// The kind of observation this result represents. If this property is not present, its implied value is 'warning'.
/// </summary>
[DataMember(Name = "kind", IsRequired = false, EmitDefaultValue = false)]
public ResultKind Kind { get; set; }
/// <summary>
/// A string that describes the result.
/// </summary>
[DataMember(Name = "fullMessage", IsRequired = false, EmitDefaultValue = false)]
public string FullMessage { get; set; }
/// <summary>
/// A string that describes the result, displayed when visible space is limited to a single line of text.
/// </summary>
[DataMember(Name = "shortMessage", IsRequired = false, EmitDefaultValue = false)]
public string ShortMessage { get; set; }
/// <summary>
/// A 'formattedMessage' object that can be used to construct a formatted message that describes the result. If the 'formattedMessage' property is present on a result, the 'fullMessage' property shall not be present. If the 'fullMessage' property is present on an result, the 'formattedMessage' property shall not be present
/// </summary>
[DataMember(Name = "formattedMessage", IsRequired = false, EmitDefaultValue = false)]
public FormattedMessage FormattedMessage { get; set; }
/// <summary>
/// One or more locations where the result occurred. Specify only one location unless the problem indicated by the result can only be corrected by making a change at every specified location.
/// </summary>
[DataMember(Name = "locations", IsRequired = false, EmitDefaultValue = false)]
public ISet<Location> Locations { get; set; }
/// <summary>
/// A string that contributes to the unique identity of the result.
/// </summary>
[DataMember(Name = "toolFingerprint", IsRequired = false, EmitDefaultValue = false)]
public string ToolFingerprint { get; set; }
/// <summary>
/// An array of 'stack' objects relevant to the result.
/// </summary>
[DataMember(Name = "stacks", IsRequired = false, EmitDefaultValue = false)]
public ISet<Stack> Stacks { get; set; }
/// <summary>
/// An array of arrays of 'annotatedCodeLocation` objects, each inner array of which comprises a code flow (a possible execution path through the code).
/// </summary>
[DataMember(Name = "codeFlows", IsRequired = false, EmitDefaultValue = false)]
public IList<IList<AnnotatedCodeLocation>> CodeFlows { get; set; }
/// <summary>
/// A grouped set of locations and messages, if available, that represent code areas that are related to this result.
/// </summary>
[DataMember(Name = "relatedLocations", IsRequired = false, EmitDefaultValue = false)]
public ISet<AnnotatedCodeLocation> RelatedLocations { get; set; }
/// <summary>
/// A flag indicating whether or not this result was suppressed in source code.
/// </summary>
[DataMember(Name = "isSuppressedInSource", IsRequired = false, EmitDefaultValue = false)]
public bool IsSuppressedInSource { get; set; }
/// <summary>
/// An array of 'fix' objects, each of which represents a proposed fix to the problem indicated by the result.
/// </summary>
[DataMember(Name = "fixes", IsRequired = false, EmitDefaultValue = false)]
public ISet<Fix> Fixes { get; set; }
/// <summary>
/// Key/value pairs that provide additional information about the result.
/// </summary>
[DataMember(Name = "properties", IsRequired = false, EmitDefaultValue = false)]
public IDictionary<string, string> Properties { get; set; }
/// <summary>
/// A set of distinct strings that provide additional information about the result.
/// </summary>
[DataMember(Name = "tags", IsRequired = false, EmitDefaultValue = false)]
public ISet<string> Tags { get; set; }
public override bool Equals(object other)
{
return Equals(other as Result);
}
public override int GetHashCode()
{
int result = 17;
unchecked
{
if (RuleId != null)
{
result = (result * 31) + RuleId.GetHashCode();
}
result = (result * 31) + Kind.GetHashCode();
if (FullMessage != null)
{
result = (result * 31) + FullMessage.GetHashCode();
}
if (ShortMessage != null)
{
result = (result * 31) + ShortMessage.GetHashCode();
}
if (FormattedMessage != null)
{
result = (result * 31) + FormattedMessage.GetHashCode();
}
if (Locations != null)
{
foreach (var value_0 in Locations)
{
result = result * 31;
if (value_0 != null)
{
result = (result * 31) + value_0.GetHashCode();
}
}
}
if (ToolFingerprint != null)
{
result = (result * 31) + ToolFingerprint.GetHashCode();
}
if (Stacks != null)
{
foreach (var value_1 in Stacks)
{
result = result * 31;
if (value_1 != null)
{
result = (result * 31) + value_1.GetHashCode();
}
}
}
if (CodeFlows != null)
{
foreach (var value_2 in CodeFlows)
{
result = result * 31;
if (value_2 != null)
{
foreach (var value_3 in value_2)
{
result = result * 31;
if (value_3 != null)
{
result = (result * 31) + value_3.GetHashCode();
}
}
}
}
}
if (RelatedLocations != null)
{
foreach (var value_4 in RelatedLocations)
{
result = result * 31;
if (value_4 != null)
{
result = (result * 31) + value_4.GetHashCode();
}
}
}
result = (result * 31) + IsSuppressedInSource.GetHashCode();
if (Fixes != null)
{
foreach (var value_5 in Fixes)
{
result = result * 31;
if (value_5 != null)
{
result = (result * 31) + value_5.GetHashCode();
}
}
}
if (Properties != null)
{
// Use xor for dictionaries to be order-independent.
int xor_0 = 0;
foreach (var value_6 in Properties)
{
xor_0 ^= value_6.Key.GetHashCode();
if (value_6.Value != null)
{
xor_0 ^= value_6.Value.GetHashCode();
}
}
result = (result * 31) + xor_0;
}
if (Tags != null)
{
foreach (var value_7 in Tags)
{
result = result * 31;
if (value_7 != null)
{
result = (result * 31) + value_7.GetHashCode();
}
}
}
}
return result;
}
public bool Equals(Result other)
{
if (other == null)
{
return false;
}
if (RuleId != other.RuleId)
{
return false;
}
if (Kind != other.Kind)
{
return false;
}
if (FullMessage != other.FullMessage)
{
return false;
}
if (ShortMessage != other.ShortMessage)
{
return false;
}
if (!Object.Equals(FormattedMessage, other.FormattedMessage))
{
return false;
}
if (!Object.ReferenceEquals(Locations, other.Locations))
{
if (Locations == null || other.Locations == null)
{
return false;
}
if (!Locations.SetEquals(other.Locations))
{
return false;
}
}
if (ToolFingerprint != other.ToolFingerprint)
{
return false;
}
if (!Object.ReferenceEquals(Stacks, other.Stacks))
{
if (Stacks == null || other.Stacks == null)
{
return false;
}
if (!Stacks.SetEquals(other.Stacks))
{
return false;
}
}
if (!Object.ReferenceEquals(CodeFlows, other.CodeFlows))
{
if (CodeFlows == null || other.CodeFlows == null)
{
return false;
}
if (CodeFlows.Count != other.CodeFlows.Count)
{
return false;
}
for (int index_0 = 0; index_0 < CodeFlows.Count; ++index_0)
{
if (!Object.ReferenceEquals(CodeFlows[index_0], other.CodeFlows[index_0]))
{
if (CodeFlows[index_0] == null || other.CodeFlows[index_0] == null)
{
return false;
}
if (CodeFlows[index_0].Count != other.CodeFlows[index_0].Count)
{
return false;
}
for (int index_1 = 0; index_1 < CodeFlows[index_0].Count; ++index_1)
{
if (!Object.Equals(CodeFlows[index_0][index_1], other.CodeFlows[index_0][index_1]))
{
return false;
}
}
}
}
}
if (!Object.ReferenceEquals(RelatedLocations, other.RelatedLocations))
{
if (RelatedLocations == null || other.RelatedLocations == null)
{
return false;
}
if (!RelatedLocations.SetEquals(other.RelatedLocations))
{
return false;
}
}
if (IsSuppressedInSource != other.IsSuppressedInSource)
{
return false;
}
if (!Object.ReferenceEquals(Fixes, other.Fixes))
{
if (Fixes == null || other.Fixes == null)
{
return false;
}
if (!Fixes.SetEquals(other.Fixes))
{
return false;
}
}
if (!Object.ReferenceEquals(Properties, other.Properties))
{
if (Properties == null || other.Properties == null || Properties.Count != other.Properties.Count)
{
return false;
}
foreach (var value_0 in Properties)
{
string value_1;
if (!other.Properties.TryGetValue(value_0.Key, out value_1))
{
return false;
}
if (value_0.Value != value_1)
{
return false;
}
}
}
if (!Object.ReferenceEquals(Tags, other.Tags))
{
if (Tags == null || other.Tags == null)
{
return false;
}
if (!Tags.SetEquals(other.Tags))
{
return false;
}
}
return true;
}
/// <summary>
/// Initializes a new instance of the <see cref="Result" /> class.
/// </summary>
public Result()
{
}
/// <summary>
/// Initializes a new instance of the <see cref="Result" /> class from the supplied values.
/// </summary>
/// <param name="ruleId">
/// An initialization value for the <see cref="P: RuleId" /> property.
/// </param>
/// <param name="kind">
/// An initialization value for the <see cref="P: Kind" /> property.
/// </param>
/// <param name="fullMessage">
/// An initialization value for the <see cref="P: FullMessage" /> property.
/// </param>
/// <param name="shortMessage">
/// An initialization value for the <see cref="P: ShortMessage" /> property.
/// </param>
/// <param name="formattedMessage">
/// An initialization value for the <see cref="P: FormattedMessage" /> property.
/// </param>
/// <param name="locations">
/// An initialization value for the <see cref="P: Locations" /> property.
/// </param>
/// <param name="toolFingerprint">
/// An initialization value for the <see cref="P: ToolFingerprint" /> property.
/// </param>
/// <param name="stacks">
/// An initialization value for the <see cref="P: Stacks" /> property.
/// </param>
/// <param name="codeFlows">
/// An initialization value for the <see cref="P: CodeFlows" /> property.
/// </param>
/// <param name="relatedLocations">
/// An initialization value for the <see cref="P: RelatedLocations" /> property.
/// </param>
/// <param name="isSuppressedInSource">
/// An initialization value for the <see cref="P: IsSuppressedInSource" /> property.
/// </param>
/// <param name="fixes">
/// An initialization value for the <see cref="P: Fixes" /> property.
/// </param>
/// <param name="properties">
/// An initialization value for the <see cref="P: Properties" /> property.
/// </param>
/// <param name="tags">
/// An initialization value for the <see cref="P: Tags" /> property.
/// </param>
public Result(string ruleId, ResultKind kind, string fullMessage, string shortMessage, FormattedMessage formattedMessage, ISet<Location> locations, string toolFingerprint, ISet<Stack> stacks, IEnumerable<IEnumerable<AnnotatedCodeLocation>> codeFlows, ISet<AnnotatedCodeLocation> relatedLocations, bool isSuppressedInSource, ISet<Fix> fixes, IDictionary<string, string> properties, ISet<string> tags)
{
Init(ruleId, kind, fullMessage, shortMessage, formattedMessage, locations, toolFingerprint, stacks, codeFlows, relatedLocations, isSuppressedInSource, fixes, properties, tags);
}
/// <summary>
/// Initializes a new instance of the <see cref="Result" /> class from the specified instance.
/// </summary>
/// <param name="other">
/// The instance from which the new instance is to be initialized.
/// </param>
/// <exception cref="ArgumentNullException">
/// Thrown if <paramref name="other" /> is null.
/// </exception>
public Result(Result other)
{
if (other == null)
{
throw new ArgumentNullException(nameof(other));
}
Init(other.RuleId, other.Kind, other.FullMessage, other.ShortMessage, other.FormattedMessage, other.Locations, other.ToolFingerprint, other.Stacks, other.CodeFlows, other.RelatedLocations, other.IsSuppressedInSource, other.Fixes, other.Properties, other.Tags);
}
ISarifNode ISarifNode.DeepClone()
{
return DeepCloneCore();
}
/// <summary>
/// Creates a deep copy of this instance.
/// </summary>
public Result DeepClone()
{
return (Result)DeepCloneCore();
}
private ISarifNode DeepCloneCore()
{
return new Result(this);
}
private void Init(string ruleId, ResultKind kind, string fullMessage, string shortMessage, FormattedMessage formattedMessage, ISet<Location> locations, string toolFingerprint, ISet<Stack> stacks, IEnumerable<IEnumerable<AnnotatedCodeLocation>> codeFlows, ISet<AnnotatedCodeLocation> relatedLocations, bool isSuppressedInSource, ISet<Fix> fixes, IDictionary<string, string> properties, ISet<string> tags)
{
RuleId = ruleId;
Kind = kind;
FullMessage = fullMessage;
ShortMessage = shortMessage;
if (formattedMessage != null)
{
FormattedMessage = new FormattedMessage(formattedMessage);
}
if (locations != null)
{
var destination_0 = new HashSet<Location>();
foreach (var value_0 in locations)
{
if (value_0 == null)
{
destination_0.Add(null);
}
else
{
destination_0.Add(new Location(value_0));
}
}
Locations = destination_0;
}
ToolFingerprint = toolFingerprint;
if (stacks != null)
{
var destination_1 = new HashSet<Stack>();
foreach (var value_1 in stacks)
{
if (value_1 == null)
{
destination_1.Add(null);
}
else
{
destination_1.Add(new Stack(value_1));
}
}
Stacks = destination_1;
}
if (codeFlows != null)
{
var destination_2 = new List<IList<AnnotatedCodeLocation>>();
foreach (var value_2 in codeFlows)
{
if (value_2 == null)
{
destination_2.Add(null);
}
else
{
var destination_3 = new List<AnnotatedCodeLocation>();
foreach (var value_3 in value_2)
{
if (value_3 == null)
{
destination_3.Add(null);
}
else
{
destination_3.Add(new AnnotatedCodeLocation(value_3));
}
}
destination_2.Add(destination_3);
}
}
CodeFlows = destination_2;
}
if (relatedLocations != null)
{
var destination_4 = new HashSet<AnnotatedCodeLocation>();
foreach (var value_4 in relatedLocations)
{
if (value_4 == null)
{
destination_4.Add(null);
}
else
{
destination_4.Add(new AnnotatedCodeLocation(value_4));
}
}
RelatedLocations = destination_4;
}
IsSuppressedInSource = isSuppressedInSource;
if (fixes != null)
{
var destination_5 = new HashSet<Fix>();
foreach (var value_5 in fixes)
{
if (value_5 == null)
{
destination_5.Add(null);
}
else
{
destination_5.Add(new Fix(value_5));
}
}
Fixes = destination_5;
}
if (properties != null)
{
Properties = new Dictionary<string, string>(properties);
}
if (tags != null)
{
var destination_6 = new HashSet<string>();
foreach (var value_6 in tags)
{
destination_6.Add(value_6);
}
Tags = destination_6;
}
}
}
} | 1 | 10,597 | Here's the object model change. `CodeFlows` is a set of objects of type `CodeFlow`, rather than a list of lists of `AnnotatedCodeLocation`. Exactly parallel with `Stacks`. | microsoft-sarif-sdk | .cs
@@ -31,6 +31,7 @@ from .core import decode_node, encode_node, find_object_hashes, hash_contents, F
from .models import (Access, Customer, Instance, Invitation, Log, Package,
S3Blob, Tag, UTF8_GENERAL_CI, Version)
from .schemas import LOG_SCHEMA, PACKAGE_SCHEMA
+from .config import BAN_PUBLIC_USERS
QUILT_CDN = 'https://cdn.quiltdata.com/'
| 1 | # Copyright (c) 2017 Quilt Data, Inc. All rights reserved.
"""
API routes.
"""
from datetime import timedelta, timezone
from functools import wraps
import json
import time
from urllib.parse import urlencode
import boto3
from flask import abort, g, redirect, render_template, request, Response
from flask_cors import CORS
from flask_json import as_json, jsonify
import httpagentparser
from jsonschema import Draft4Validator, ValidationError
from oauthlib.oauth2 import OAuth2Error
import requests
from requests_oauthlib import OAuth2Session
import sqlalchemy as sa
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import undefer
import stripe
from . import app, db
from .analytics import MIXPANEL_EVENT, mp
from .const import EMAILREGEX, PaymentPlan, PUBLIC
from .core import decode_node, encode_node, find_object_hashes, hash_contents, FileNode, GroupNode
from .models import (Access, Customer, Instance, Invitation, Log, Package,
S3Blob, Tag, UTF8_GENERAL_CI, Version)
from .schemas import LOG_SCHEMA, PACKAGE_SCHEMA
QUILT_CDN = 'https://cdn.quiltdata.com/'
DEPLOYMENT_ID = app.config['DEPLOYMENT_ID']
OAUTH_ACCESS_TOKEN_URL = app.config['OAUTH']['access_token_url']
OAUTH_AUTHORIZE_URL = app.config['OAUTH']['authorize_url']
OAUTH_CLIENT_ID = app.config['OAUTH']['client_id']
OAUTH_CLIENT_SECRET = app.config['OAUTH']['client_secret']
OAUTH_REDIRECT_URL = app.config['OAUTH']['redirect_url']
OAUTH_USER_API = app.config['OAUTH']['user_api']
OAUTH_PROFILE_API = app.config['OAUTH']['profile_api']
OAUTH_HAVE_REFRESH_TOKEN = app.config['OAUTH']['have_refresh_token']
CATALOG_REDIRECT_URLS = app.config['CATALOG_REDIRECT_URLS']
AUTHORIZATION_HEADER = 'Authorization'
INVITE_SEND_URL = app.config['INVITE_SEND_URL']
PACKAGE_BUCKET_NAME = app.config['PACKAGE_BUCKET_NAME']
PACKAGE_URL_EXPIRATION = app.config['PACKAGE_URL_EXPIRATION']
S3_HEAD_OBJECT = 'head_object'
S3_GET_OBJECT = 'get_object'
S3_PUT_OBJECT = 'put_object'
OBJ_DIR = 'objs'
# Limit the JSON metadata to 100MB.
# This is mostly a sanity check; it's already limited by app.config['MAX_CONTENT_LENGTH'].
MAX_METADATA_SIZE = 100 * 1024 * 1024
PREVIEW_MAX_CHILDREN = 10
PREVIEW_MAX_DEPTH = 4
s3_client = boto3.client(
's3',
endpoint_url=app.config.get('S3_ENDPOINT'),
aws_access_key_id=app.config.get('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=app.config.get('AWS_SECRET_ACCESS_KEY')
)
stripe.api_key = app.config['STRIPE_SECRET_KEY']
HAVE_PAYMENTS = stripe.api_key is not None
class QuiltCli(httpagentparser.Browser):
look_for = 'quilt-cli'
version_markers = [('/', '')]
httpagentparser.detectorshub.register(QuiltCli())
### Web routes ###
def _create_session(next=''):
return OAuth2Session(
client_id=OAUTH_CLIENT_ID,
redirect_uri=OAUTH_REDIRECT_URL,
state=json.dumps(dict(next=next))
)
@app.route('/healthcheck')
def healthcheck():
"""ELB health check; just needs to return a 200 status code."""
return Response("ok", content_type='text/plain')
ROBOTS_TXT = '''
User-agent: *
Disallow: /
'''.lstrip()
@app.route('/robots.txt')
def robots():
"""Disallow crawlers; there's nothing useful for them here."""
return Response(ROBOTS_TXT, mimetype='text/plain')
def _valid_catalog_redirect(next):
return next is None or any(next.startswith(url) for url in CATALOG_REDIRECT_URLS)
@app.route('/login')
def login():
next = request.args.get('next')
if not _valid_catalog_redirect(next):
return render_template('oauth_fail.html', error="Invalid redirect", QUILT_CDN=QUILT_CDN)
session = _create_session(next=next)
url, state = session.authorization_url(url=OAUTH_AUTHORIZE_URL)
return redirect(url)
@app.route('/oauth_callback')
def oauth_callback():
# TODO: Check `state`? Do we need CSRF protection here?
try:
state = json.loads(request.args.get('state', '{}'))
except ValueError:
abort(requests.codes.bad_request)
if not isinstance(state, dict):
abort(requests.codes.bad_request)
next = state.get('next')
if not _valid_catalog_redirect(next):
abort(requests.codes.bad_request)
error = request.args.get('error')
if error is not None:
return render_template('oauth_fail.html', error=error, QUILT_CDN=QUILT_CDN)
code = request.args.get('code')
if code is None:
abort(requests.codes.bad_request)
session = _create_session()
try:
resp = session.fetch_token(
token_url=OAUTH_ACCESS_TOKEN_URL,
code=code,
client_secret=OAUTH_CLIENT_SECRET
)
if next:
return redirect('%s#%s' % (next, urlencode(resp)))
else:
token = resp['refresh_token' if OAUTH_HAVE_REFRESH_TOKEN else 'access_token']
return render_template('oauth_success.html', code=token, QUILT_CDN=QUILT_CDN)
except OAuth2Error as ex:
return render_template('oauth_fail.html', error=ex.error, QUILT_CDN=QUILT_CDN)
@app.route('/api/token', methods=['POST'])
@as_json
def token():
refresh_token = request.values.get('refresh_token')
if refresh_token is None:
abort(requests.codes.bad_request)
if not OAUTH_HAVE_REFRESH_TOKEN:
return dict(
refresh_token='',
access_token=refresh_token,
expires_at=float('inf')
)
session = _create_session()
try:
resp = session.refresh_token(
token_url=OAUTH_ACCESS_TOKEN_URL,
client_id=OAUTH_CLIENT_ID, # Why??? The session object already has it!
client_secret=OAUTH_CLIENT_SECRET,
refresh_token=refresh_token
)
except OAuth2Error as ex:
return dict(error=ex.error)
return dict(
refresh_token=resp['refresh_token'],
access_token=resp['access_token'],
expires_at=resp['expires_at']
)
### API routes ###
# Allow CORS requests to API routes.
# The "*" origin is more secure than specific origins because it blocks cookies.
# Cache the settings for a day to avoid pre-flight requests.
CORS(app, resources={"/api/*": {"origins": "*", "max_age": timedelta(days=1)}})
class Auth:
"""
Info about the user making the API request.
"""
def __init__(self, user, email):
self.user = user
self.email = email
class ApiException(Exception):
"""
Base class for API exceptions.
"""
def __init__(self, status_code, message):
super().__init__()
self.status_code = status_code
self.message = message
class PackageNotFoundException(ApiException):
"""
API exception for missing packages.
"""
def __init__(self, owner, package, logged_in=True):
message = "Package %s/%s does not exist" % (owner, package)
if not logged_in:
message = "%s (do you need to log in?)" % message
super().__init__(requests.codes.not_found, message)
@app.errorhandler(ApiException)
def handle_api_exception(error):
"""
Converts an API exception into an error response.
"""
_mp_track(
type="exception",
status_code=error.status_code,
message=error.message,
)
response = jsonify(dict(
message=error.message
))
response.status_code = error.status_code
return response
def api(require_login=True, schema=None):
"""
Decorator for API requests.
Handles auth and adds the username as the first argument.
"""
if schema is not None:
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema)
else:
validator = None
def innerdec(f):
@wraps(f)
def wrapper(*args, **kwargs):
g.auth = Auth(PUBLIC, None)
user_agent_str = request.headers.get('user-agent', '')
g.user_agent = httpagentparser.detect(user_agent_str, fill_none=True)
if validator is not None:
try:
validator.validate(request.get_json(cache=True))
except ValidationError as ex:
raise ApiException(requests.codes.bad_request, ex.message)
auth = request.headers.get(AUTHORIZATION_HEADER)
g.auth_header = auth
if auth is None:
if require_login:
raise ApiException(requests.codes.unauthorized, "Not logged in")
else:
headers = {
AUTHORIZATION_HEADER: auth
}
try:
resp = requests.get(OAUTH_USER_API, headers=headers)
resp.raise_for_status()
data = resp.json()
# TODO(dima): Generalize this.
user = data.get('current_user', data.get('login'))
assert user
email = data['email']
g.auth = Auth(user, email)
except requests.HTTPError as ex:
if resp.status_code == requests.codes.unauthorized:
raise ApiException(
requests.codes.unauthorized,
"Invalid credentials"
)
else:
raise ApiException(requests.codes.server_error, "Server error")
except (ConnectionError, requests.RequestException) as ex:
raise ApiException(requests.codes.server_error, "Server error")
return f(*args, **kwargs)
return wrapper
return innerdec
def _get_package(auth, owner, package_name):
"""
Helper for looking up a package and checking permissions.
Only useful for *_list functions; all others should use more efficient queries.
"""
package = (
Package.query
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(Access.user.in_([auth.user, PUBLIC]))
.one_or_none()
)
if package is None:
raise PackageNotFoundException(owner, package_name, auth.user is not PUBLIC)
return package
def _get_instance(auth, owner, package_name, package_hash):
instance = (
Instance.query
.filter_by(hash=package_hash)
.options(undefer('contents')) # Contents is deferred by default.
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(Access.user.in_([auth.user, PUBLIC]))
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Package hash does not exist"
)
return instance
def _utc_datetime_to_ts(dt):
"""
Convert a UTC datetime object to a UNIX timestamp.
"""
return dt.replace(tzinfo=timezone.utc).timestamp()
def _mp_track(**kwargs):
if g.user_agent['browser']['name'] == 'QuiltCli':
source = 'cli'
else:
source = 'web'
# Use the user ID if the user is logged in; otherwise, let MP use the IP address.
distinct_id = g.auth.user if g.auth.user != PUBLIC else None
# Try to get the ELB's forwarded IP, and fall back to the actual IP (in dev).
ip_addr = request.headers.get('x-forwarded-for', request.remote_addr)
# Set common attributes sent with each event. kwargs cannot override these.
all_args = dict(
kwargs,
time=time.time(),
ip=ip_addr,
user=g.auth.user,
source=source,
browser_name=g.user_agent['browser']['name'],
browser_version=g.user_agent['browser']['version'],
platform_name=g.user_agent['platform']['name'],
platform_version=g.user_agent['platform']['version'],
deployment_id=DEPLOYMENT_ID,
)
mp.track(distinct_id, MIXPANEL_EVENT, all_args)
def _generate_presigned_url(method, owner, blob_hash):
return s3_client.generate_presigned_url(
method,
Params=dict(
Bucket=PACKAGE_BUCKET_NAME,
Key='%s/%s/%s' % (OBJ_DIR, owner, blob_hash)
),
ExpiresIn=PACKAGE_URL_EXPIRATION
)
def _get_or_create_customer():
assert HAVE_PAYMENTS, "Payments are not enabled"
assert g.auth.user != PUBLIC
db_customer = Customer.query.filter_by(id=g.auth.user).one_or_none()
if db_customer is None:
try:
# Insert a placeholder with no Stripe ID just to lock the row.
db_customer = Customer(id=g.auth.user)
db.session.add(db_customer)
db.session.flush()
except IntegrityError:
# Someone else just created it, so look it up.
db.session.rollback()
db_customer = Customer.query.filter_by(id=g.auth.user).one()
else:
# Create a new customer.
plan = PaymentPlan.FREE.value
customer = stripe.Customer.create(
email=g.auth.email,
description=g.auth.user,
)
stripe.Subscription.create(
customer=customer.id,
plan=plan,
)
db_customer.stripe_customer_id = customer.id
db.session.commit()
customer = stripe.Customer.retrieve(db_customer.stripe_customer_id)
assert customer.subscriptions.total_count == 1
return customer
def _get_customer_plan(customer):
return PaymentPlan(customer.subscriptions.data[0].plan.id)
@app.route('/api/blob/<owner>/<blob_hash>', methods=['GET'])
@api()
@as_json
def blob_get(owner, blob_hash):
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the owner can upload objects.")
return dict(
head=_generate_presigned_url(S3_HEAD_OBJECT, owner, blob_hash),
get=_generate_presigned_url(S3_GET_OBJECT, owner, blob_hash),
put=_generate_presigned_url(S3_PUT_OBJECT, owner, blob_hash),
)
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['PUT'])
@api(schema=PACKAGE_SCHEMA)
@as_json
def package_put(owner, package_name, package_hash):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the package owner can push packages.")
# TODO: Description.
data = json.loads(request.data.decode('utf-8'), object_hook=decode_node)
dry_run = data.get('dry_run', False)
public = data.get('public', False)
contents = data['contents']
if hash_contents(contents) != package_hash:
raise ApiException(requests.codes.bad_request, "Wrong contents hash")
all_hashes = set(find_object_hashes(contents))
# Insert a package if it doesn't already exist.
# TODO: Separate endpoint for just creating a package with no versions?
package = (
Package.query
.with_for_update()
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if package is None:
# Check for case-insensitive matches, and reject the push.
package_ci = (
Package.query
.filter(
sa.and_(
sa.sql.collate(Package.owner, UTF8_GENERAL_CI) == owner,
sa.sql.collate(Package.name, UTF8_GENERAL_CI) == package_name
)
)
.one_or_none()
)
if package_ci is not None:
raise ApiException(
requests.codes.forbidden,
"Package already exists: %s/%s" % (package_ci.owner, package_ci.name)
)
if HAVE_PAYMENTS and not public:
customer = _get_or_create_customer()
plan = _get_customer_plan(customer)
if plan == PaymentPlan.FREE:
raise ApiException(
requests.codes.payment_required,
("Insufficient permissions. Run `quilt push --public %s/%s` to make " +
"this package public, or upgrade your service plan to create " +
"private packages: https://quiltdata.com/profile.") %
(owner, package_name)
)
package = Package(owner=owner, name=package_name)
db.session.add(package)
owner_access = Access(package=package, user=owner)
db.session.add(owner_access)
if public:
public_access = Access(package=package, user=PUBLIC)
db.session.add(public_access)
else:
if public:
public_access = (
Access.query
.filter(sa.and_(
Access.package == package,
Access.user == PUBLIC
))
.one_or_none()
)
if public_access is None:
raise ApiException(
requests.codes.forbidden,
("%(user)s/%(pkg)s is private. To make it public, " +
"run `quilt access add %(user)s/%(pkg)s public`.") %
dict(user=owner, pkg=package_name)
)
# Insert an instance if it doesn't already exist.
instance = (
Instance.query
.with_for_update()
.filter_by(package=package, hash=package_hash)
.one_or_none()
)
contents_str = json.dumps(contents, default=encode_node)
if len(contents_str) > MAX_METADATA_SIZE:
# Should never actually happen because of nginx limits.
raise ApiException(
requests.codes.server_error,
"Metadata size too large"
)
# No more error checking at this point, so return from dry-run early.
if dry_run:
db.session.rollback()
# List of signed URLs is potentially huge, so stream it.
def _generate():
yield '{"upload_urls":{'
for idx, blob_hash in enumerate(all_hashes):
comma = ('' if idx == 0 else ',')
value = dict(
head=_generate_presigned_url(S3_HEAD_OBJECT, owner, blob_hash),
put=_generate_presigned_url(S3_PUT_OBJECT, owner, blob_hash)
)
yield '%s%s:%s' % (comma, json.dumps(blob_hash), json.dumps(value))
yield '}}'
return Response(_generate(), content_type='application/json')
if instance is None:
instance = Instance(
package=package,
contents=contents_str,
hash=package_hash,
created_by=g.auth.user,
updated_by=g.auth.user
)
# Add all the hashes that don't exist yet.
blobs = (
S3Blob.query
.with_for_update()
.filter(
sa.and_(
S3Blob.owner == owner,
S3Blob.hash.in_(all_hashes)
)
)
.all()
) if all_hashes else []
existing_hashes = {blob.hash for blob in blobs}
for blob_hash in all_hashes:
if blob_hash not in existing_hashes:
instance.blobs.append(S3Blob(owner=owner, hash=blob_hash))
else:
# Just update the contents dictionary.
# Nothing else could've changed without invalidating the hash.
instance.contents = contents_str
instance.updated_by = g.auth.user
db.session.add(instance)
# Insert a log.
log = Log(
package=package,
instance=instance,
author=owner,
)
db.session.add(log)
db.session.commit()
_mp_track(
type="push",
package_owner=owner,
package_name=package_name,
public=public,
)
return dict()
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_get(owner, package_name, package_hash):
subpath = request.args.get('subpath')
instance = _get_instance(g.auth, owner, package_name, package_hash)
contents = json.loads(instance.contents, object_hook=decode_node)
subnode = contents
for component in subpath.split('/') if subpath else []:
try:
subnode = subnode.children[component]
except (AttributeError, KeyError):
raise ApiException(requests.codes.not_found, "Invalid subpath: %r" % component)
all_hashes = set(find_object_hashes(subnode))
urls = {
blob_hash: _generate_presigned_url(S3_GET_OBJECT, owner, blob_hash)
for blob_hash in all_hashes
}
_mp_track(
type="install",
package_owner=owner,
package_name=package_name,
subpath=subpath,
)
return dict(
contents=contents,
urls=urls,
created_by=instance.created_by,
created_at=_utc_datetime_to_ts(instance.created_at),
updated_by=instance.updated_by,
updated_at=_utc_datetime_to_ts(instance.updated_at),
)
def _generate_preview(node, max_depth=PREVIEW_MAX_DEPTH):
if isinstance(node, GroupNode):
max_children = PREVIEW_MAX_CHILDREN if max_depth else 0
children_preview = [
(name, _generate_preview(child, max_depth - 1))
for name, child in sorted(node.children.items())[:max_children]
]
if len(node.children) > max_children:
children_preview.append(('...', None))
return children_preview
else:
return None
@app.route('/api/package_preview/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_preview(owner, package_name, package_hash):
instance = _get_instance(g.auth, owner, package_name, package_hash)
contents = json.loads(instance.contents, object_hook=decode_node)
readme = contents.children.get('README')
if isinstance(readme, FileNode):
assert len(readme.hashes) == 1
readme_url = _generate_presigned_url(S3_GET_OBJECT, owner, readme.hashes[0])
else:
readme_url = None
contents_preview = _generate_preview(contents)
_mp_track(
type="preview",
package_owner=owner,
package_name=package_name,
)
return dict(
preview=contents_preview,
readme_url=readme_url,
created_by=instance.created_by,
created_at=_utc_datetime_to_ts(instance.created_at),
updated_by=instance.updated_by,
updated_at=_utc_datetime_to_ts(instance.updated_at),
)
@app.route('/api/package/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def package_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
instances = (
Instance.query
.filter_by(package=package)
)
return dict(
hashes=[instance.hash for instance in instances]
)
@app.route('/api/package/<owner>/<package_name>/', methods=['DELETE'])
@api()
@as_json
def package_delete(owner, package_name):
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the package owner can delete packages.")
package = _get_package(g.auth, owner, package_name)
db.session.delete(package)
db.session.commit()
return dict()
@app.route('/api/package/<owner>/', methods=['GET'])
@api(require_login=False)
@as_json
def user_packages(owner):
packages = (
db.session.query(Package, sa.func.max(Access.user == PUBLIC))
.filter_by(owner=owner)
.join(Package.access)
.filter(Access.user.in_([g.auth.user, PUBLIC]))
.group_by(Package.id)
.order_by(Package.name)
.all()
)
return dict(
packages=[
dict(
name=package.name,
is_public=is_public
)
for package, is_public in packages
]
)
@app.route('/api/log/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def logs_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
logs = (
db.session.query(Log, Instance)
.filter_by(package=package)
.join(Log.instance)
# Sort chronologically, but rely on IDs in case of duplicate created times.
.order_by(Log.created, Log.id)
)
return dict(
logs=[dict(
hash=instance.hash,
created=_utc_datetime_to_ts(log.created),
author=log.author
) for log, instance in logs]
)
VERSION_SCHEMA = {
'type': 'object',
'properties': {
'hash': {
'type': 'string'
}
},
'required': ['hash']
}
def normalize_version(version):
try:
version = Version.normalize(version)
except ValueError:
raise ApiException(requests.codes.bad_request, "Malformed version")
return version
@app.route('/api/version/<owner>/<package_name>/<package_version>', methods=['PUT'])
@api(schema=VERSION_SCHEMA)
@as_json
def version_put(owner, package_name, package_version):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can create versions"
)
user_version = package_version
package_version = normalize_version(package_version)
data = request.get_json()
package_hash = data['hash']
instance = (
Instance.query
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if instance is None:
raise ApiException(requests.codes.not_found, "Package hash does not exist")
version = Version(
package_id=instance.package_id,
version=package_version,
user_version=user_version,
instance=instance
)
try:
db.session.add(version)
db.session.commit()
except IntegrityError:
raise ApiException(requests.codes.conflict, "Version already exists")
return dict()
@app.route('/api/version/<owner>/<package_name>/<package_version>', methods=['GET'])
@api(require_login=False)
@as_json
def version_get(owner, package_name, package_version):
package_version = normalize_version(package_version)
package = _get_package(g.auth, owner, package_name)
instance = (
Instance.query
.join(Instance.versions)
.filter_by(package=package, version=package_version)
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Version %s does not exist" % package_version
)
_mp_track(
type="get_hash",
package_owner=owner,
package_name=package_name,
package_version=package_version,
)
return dict(
hash=instance.hash,
created_by=instance.created_by,
created_at=_utc_datetime_to_ts(instance.created_at),
updated_by=instance.updated_by,
updated_at=_utc_datetime_to_ts(instance.updated_at),
)
@app.route('/api/version/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def version_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
versions = (
db.session.query(Version, Instance)
.filter_by(package=package)
.join(Version.instance)
.all()
)
sorted_versions = sorted(versions, key=lambda row: row.Version.sort_key())
return dict(
versions=[
dict(
version=version.user_version,
hash=instance.hash
) for version, instance in sorted_versions
]
)
TAG_SCHEMA = {
'type': 'object',
'properties': {
'hash': {
'type': 'string'
}
},
'required': ['hash']
}
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['PUT'])
@api(schema=TAG_SCHEMA)
@as_json
def tag_put(owner, package_name, package_tag):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can modify tags"
)
data = request.get_json()
package_hash = data['hash']
instance = (
Instance.query
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if instance is None:
raise ApiException(requests.codes.not_found, "Package hash does not exist")
# Update an existing tag or create a new one.
tag = (
Tag.query
.with_for_update()
.filter_by(package_id=instance.package_id, tag=package_tag)
.one_or_none()
)
if tag is None:
tag = Tag(
package_id=instance.package_id,
tag=package_tag,
instance=instance
)
db.session.add(tag)
else:
tag.instance = instance
db.session.commit()
return dict()
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['GET'])
@api(require_login=False)
@as_json
def tag_get(owner, package_name, package_tag):
package = _get_package(g.auth, owner, package_name)
instance = (
Instance.query
.join(Instance.tags)
.filter_by(package=package, tag=package_tag)
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Tag %r does not exist" % package_tag
)
_mp_track(
type="get_hash",
package_owner=owner,
package_name=package_name,
package_tag=package_tag,
)
return dict(
hash=instance.hash,
created_by=instance.created_by,
created_at=_utc_datetime_to_ts(instance.created_at),
updated_by=instance.updated_by,
updated_at=_utc_datetime_to_ts(instance.updated_at),
)
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['DELETE'])
@api()
@as_json
def tag_delete(owner, package_name, package_tag):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can delete tags"
)
tag = (
Tag.query
.with_for_update()
.filter_by(tag=package_tag)
.join(Tag.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if tag is None:
raise ApiException(
requests.codes.not_found,
"Package %s/%s tag %r does not exist" % (owner, package_name, package_tag)
)
db.session.delete(tag)
db.session.commit()
return dict()
@app.route('/api/tag/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def tag_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
tags = (
db.session.query(Tag, Instance)
.filter_by(package=package)
.order_by(Tag.tag)
.join(Tag.instance)
.all()
)
return dict(
tags=[
dict(
tag=tag.tag,
hash=instance.hash
) for tag, instance in tags
]
)
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['PUT'])
@api()
@as_json
def access_put(owner, package_name, user):
# TODO: use re to check for valid username (e.g., not ../, etc.)
if not user:
raise ApiException(requests.codes.bad_request, "A valid user is required")
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can grant access"
)
package = (
Package.query
.with_for_update()
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if package is None:
raise PackageNotFoundException(owner, package_name)
if EMAILREGEX.match(user):
email = user
invitation = Invitation(package=package, email=email)
db.session.add(invitation)
db.session.commit()
# Call to Django to send invitation email
headers = {
AUTHORIZATION_HEADER: g.auth_header
}
resp = requests.post(INVITE_SEND_URL,
headers=headers,
data=dict(email=email,
owner=g.auth.user,
package=package.name,
client_id=OAUTH_CLIENT_ID,
client_secret=OAUTH_CLIENT_SECRET,
callback_url=OAUTH_REDIRECT_URL))
if resp.status_code == requests.codes.unauthorized:
raise ApiException(
requests.codes.unauthorized,
"Invalid credentials"
)
elif resp.status_code != requests.codes.ok:
raise ApiException(requests.codes.server_error, "Server error")
return dict()
else:
if user != PUBLIC:
resp = requests.get(OAUTH_PROFILE_API % user)
if resp.status_code == requests.codes.not_found:
raise ApiException(
requests.codes.not_found,
"User %s does not exist" % user
)
elif resp.status_code != requests.codes.ok:
raise ApiException(
requests.codes.server_error,
"Unknown error"
)
try:
access = Access(package=package, user=user)
db.session.add(access)
db.session.commit()
except IntegrityError:
raise ApiException(requests.codes.conflict, "The user already has access")
return dict()
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['GET'])
@api()
@as_json
def access_get(owner, package_name, user):
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can view access"
)
access = (
db.session.query(Access)
.filter_by(user=user)
.join(Access.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if access is None:
raise PackageNotFoundException(owner, package_name)
return dict()
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['DELETE'])
@api()
@as_json
def access_delete(owner, package_name, user):
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can revoke access"
)
if user == owner:
raise ApiException(
requests.codes.forbidden,
"Cannot revoke the owner's access"
)
if HAVE_PAYMENTS and user == PUBLIC:
customer = _get_or_create_customer()
plan = _get_customer_plan(customer)
if plan == PaymentPlan.FREE:
raise ApiException(
requests.codes.payment_required,
"Insufficient permissions. " +
"Upgrade your plan to create private packages: https://quiltdata.com/profile."
)
access = (
Access.query
.with_for_update()
.filter_by(user=user)
.join(Access.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if access is None:
raise PackageNotFoundException(owner, package_name)
db.session.delete(access)
db.session.commit()
return dict()
@app.route('/api/access/<owner>/<package_name>/', methods=['GET'])
@api()
@as_json
def access_list(owner, package_name):
accesses = (
Access.query
.join(Access.package)
.filter_by(owner=owner, name=package_name)
)
can_access = [access.user for access in accesses]
is_collaborator = g.auth.user in can_access
is_public = PUBLIC in can_access
if is_public or is_collaborator:
return dict(users=can_access)
else:
raise PackageNotFoundException(owner, package_name)
@app.route('/api/recent_packages/', methods=['GET'])
@api(require_login=False)
@as_json
def recent_packages():
try:
count = int(request.args.get('count', ''))
except ValueError:
count = 10
results = (
db.session.query(Package, sa.func.max(Instance.updated_at))
.join(Package.access)
.filter_by(user=PUBLIC)
.join(Package.instances)
.group_by(Package.id)
.order_by(sa.func.max(Instance.updated_at).desc())
.limit(count)
.all()
)
return dict(
packages=[
dict(
owner=package.owner,
name=package.name,
updated_at=updated_at
) for package, updated_at in results
]
)
@app.route('/api/search/', methods=['GET'])
@api(require_login=False)
@as_json
def search():
query = request.args.get('q', '')
keywords = query.split()
if len(keywords) > 5:
# Let's not overload the DB with crazy queries.
raise ApiException(requests.codes.bad_request, "Too many search terms (max is 5)")
filter_list = [
sa.func.instr(
sa.sql.collate(sa.func.concat(Package.owner, '/', Package.name), UTF8_GENERAL_CI),
keyword
) > 0
for keyword in keywords
]
results = (
db.session.query(Package, sa.func.max(Access.user == PUBLIC))
.filter(sa.and_(*filter_list))
.join(Package.access)
.filter(Access.user.in_([g.auth.user, PUBLIC]))
.group_by(Package.id)
.order_by(
sa.sql.collate(Package.owner, UTF8_GENERAL_CI),
sa.sql.collate(Package.name, UTF8_GENERAL_CI)
)
.all()
)
return dict(
packages=[
dict(
owner=package.owner,
name=package.name,
is_public=is_public,
) for package, is_public in results
]
)
@app.route('/api/profile', methods=['GET'])
@api()
@as_json
def profile():
if HAVE_PAYMENTS:
customer = _get_or_create_customer()
plan = _get_customer_plan(customer).value
have_cc = customer.sources.total_count > 0
else:
plan = None
have_cc = None
public_access = sa.orm.aliased(Access)
# Check for outstanding package sharing invitations
invitations = (
db.session.query(Invitation, Package)
.filter_by(email=g.auth.email)
.join(Invitation.package)
)
for invitation, package in invitations:
access = Access(package=package, user=g.auth.user)
db.session.add(access)
db.session.delete(invitation)
if invitations:
db.session.commit()
packages = (
db.session.query(Package, public_access.user.isnot(None))
.join(Package.access)
.filter(Access.user == g.auth.user)
.outerjoin(public_access, sa.and_(
Package.id == public_access.package_id, public_access.user == PUBLIC))
.order_by(Package.owner, Package.name)
.all()
)
return dict(
packages=dict(
own=[
dict(
owner=package.owner,
name=package.name,
is_public=bool(is_public)
)
for package, is_public in packages if package.owner == g.auth.user
],
shared=[
dict(
owner=package.owner,
name=package.name,
is_public=bool(is_public)
)
for package, is_public in packages if package.owner != g.auth.user
],
),
plan=plan,
have_credit_card=have_cc,
)
@app.route('/api/payments/update_plan', methods=['POST'])
@api()
@as_json
def payments_update_plan():
if not HAVE_PAYMENTS:
raise ApiException(requests.codes.not_found, "Payments not enabled")
plan = request.values.get('plan')
try:
plan = PaymentPlan(plan)
except ValueError:
raise ApiException(requests.codes.bad_request, "Invalid plan: %r" % plan)
if plan not in (PaymentPlan.FREE, PaymentPlan.INDIVIDUAL, PaymentPlan.BUSINESS_ADMIN):
# Cannot switch to the BUSINESS_MEMBER plan manually.
raise ApiException(requests.codes.forbidden, "Not allowed to switch to plan: %r" % plan)
stripe_token = request.values.get('token')
customer = _get_or_create_customer()
if _get_customer_plan(customer) == PaymentPlan.BUSINESS_MEMBER:
raise ApiException(
requests.codes.forbidden,
"Not allowed to leave Business plan; contact your admin."
)
if stripe_token is not None:
customer.source = stripe_token
try:
customer.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.bad_request, str(ex))
assert customer.sources.total_count
if plan != PaymentPlan.FREE and not customer.sources.total_count:
# No payment info.
raise ApiException(
requests.codes.payment_required,
"Payment information required to upgrade to %r" % plan.value
)
subscription = customer.subscriptions.data[0]
subscription.plan = plan.value
try:
subscription.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.server_error, str(ex))
return dict(
plan=plan.value
)
@app.route('/api/payments/update_payment', methods=['POST'])
@api()
@as_json
def payments_update_payment():
if not HAVE_PAYMENTS:
raise ApiException(requests.codes.not_found, "Payments not enabled")
stripe_token = request.values.get('token')
if not stripe_token:
raise ApiException(requests.codes.bad_request, "Missing token")
customer = _get_or_create_customer()
customer.source = stripe_token
try:
customer.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.bad_request, str(ex))
return dict()
@app.route('/api/invite/', methods=['GET'])
@api(require_login=False)
@as_json
def invitation_user_list():
invitations = (
db.session.query(Invitation, Package)
.filter_by(email=g.auth.email)
.join(Invitation.package)
.all()
)
return dict(invitations=[dict(invitation_id=invite.id,
owner=package.owner,
package=package.name,
email=invite.email,
invited_at=invite.invited_at)
for invite, package in invitations])
@app.route('/api/invite/<owner>/<package_name>/', methods=['GET'])
@api()
@as_json
def invitation_package_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
invitations = (
Invitation.query
.filter_by(package_id=package.id)
)
return dict(invitations=[dict(invitation_id=invite.id,
owner=package.owner,
package=package.name,
email=invite.email,
invited_at=invite.invited_at)
for invite in invitations])
@app.route('/api/log', methods=['POST'])
@api(require_login=False, schema=LOG_SCHEMA)
@as_json
def client_log():
data = request.get_json()
for event in data:
_mp_track(**event)
return dict()
| 1 | 15,646 | Sorry, one more thing... You should use `app.config` instead of importing it directly. See the code below. | quiltdata-quilt | py |
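For context, the review points at the pattern this module already uses for its other settings, which are read from `app.config` rather than imported from `.config`. A minimal sketch of the suggested change; the `BAN_PUBLIC_USERS` config key name is an assumption carried over from the import in the diff:

```python
# Instead of:  from .config import BAN_PUBLIC_USERS
# read the setting from the Flask app config, matching DEPLOYMENT_ID etc. above.
# The 'BAN_PUBLIC_USERS' key name is assumed here, not confirmed by the diff.
BAN_PUBLIC_USERS = app.config['BAN_PUBLIC_USERS']
```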
@@ -44,8 +44,13 @@ public interface VectorizedReader<T> {
void setRowGroupInfo(PageReadStore pages, Map<ColumnPath, ColumnChunkMetaData> metadata);
/**
- * Set up the reader to reuse the underlying containers used for storing batches
+ * Setup the reader to reuse the underlying containers used for storing batches
*/
void reuseContainers(boolean reuse);
+
+ /**
+ * Release any resources allocated
+ */
+ void close();
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.parquet;
import java.util.Map;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.metadata.ColumnPath;
/**
* Interface for vectorized Iceberg readers.
*/
public interface VectorizedReader<T> {
/**
* Reads a batch of type @param <T> and of size numRows
* @param numRows number of rows to read
* @return batch of records of type @param <T>
*/
T read(int numRows);
/**
*
* @param pages row group information for all the columns
* @param metadata map of {@link ColumnPath} -> {@link ColumnChunkMetaData} for the row group
*/
void setRowGroupInfo(PageReadStore pages, Map<ColumnPath, ColumnChunkMetaData> metadata);
/**
* Set up the reader to reuse the underlying containers used for storing batches
*/
void reuseContainers(boolean reuse);
}
| 1 | 17,450 | This was correct before; "setup" is a noun and "set up" is the verb form. | apache-iceberg | java |
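For context, here is a rough caller-side sketch of the reader lifecycle with the new close() method; only the interface methods come from the file above, while the helper itself (its name, loop bounds and call site) is assumed for illustration.

  // Hypothetical usage sketch; the reader instance, pages and metadata are assumed to exist.
  static <T> void readAllBatches(VectorizedReader<T> reader,
                                 PageReadStore pages,
                                 Map<ColumnPath, ColumnChunkMetaData> metadata,
                                 long totalRows, int batchSize) {
    reader.setRowGroupInfo(pages, metadata);
    reader.reuseContainers(true);  // reuse the underlying batch containers
    try {
      for (long rowsRead = 0; rowsRead < totalRows; rowsRead += batchSize) {
        int numRows = (int) Math.min(batchSize, totalRows - rowsRead);
        T batch = reader.read(numRows);
        // ... hand the batch to the consumer ...
      }
    } finally {
      reader.close();  // release any resources the reader allocated
    }
  }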
@@ -61,7 +61,7 @@ class AdSenseSetupWidget extends Component {
async getAccounts() {
try {
- const responseData = await data.get( TYPE_MODULES, 'adsense', 'accounts' );
+ const responseData = await data.get( TYPE_MODULES, 'adsense', 'accounts', { maybeSetAccount: true } );
/**
* Defines the account status. Possible values: | 1 | /**
* AdSenseSetupWidget component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import data, { TYPE_MODULES } from 'GoogleComponents/data';
/**
* Internal dependencies
*/
import AdSenseSetupAuthFlowWidget from './setup-auth-flow-widget';
import Spinner from 'GoogleComponents/spinner';
/**
* WordPress dependencies
*/
import { Component, Fragment } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
class AdSenseSetupWidget extends Component {
constructor( props ) {
super( props );
this.state = {
isLoading: true,
error: false,
message: '',
accounts: [],
accountStatus: '',
};
}
componentDidMount() {
const {
isOpen,
onSettingsPage,
} = this.props;
// If on settings page, only run the rest if the module is "open".
if ( onSettingsPage && ! isOpen ) {
return;
}
this.getAccounts();
}
async getAccounts() {
try {
const responseData = await data.get( TYPE_MODULES, 'adsense', 'accounts' );
/**
* Defines the account status. Possible values:
* no-account, incomplete, under-verification, denied, completed.
*/
let accountStatus = '';
if ( ! responseData || ! responseData.length ) {
accountStatus = 'no-account';
}
const accounts = responseData;
this.setState( {
isLoading: false,
accountStatus,
accounts,
error: false,
} );
} catch ( err ) {
this.setState( {
isLoading: false,
error: err.code,
message: err.message,
} );
}
}
renderErrorMessage() {
const {
error,
message,
} = this.state;
return error && 0 < message.length
? <div className="googlesitekit-error-text">
<p>{ __( 'Error:', 'google-site-kit' ) } { message }</p>
</div> : null;
}
render() {
const {
isLoading,
accounts,
accountStatus,
} = this.state;
return (
<Fragment>
<div className="googlesitekit-module-page googlesitekit-module-page--adsense">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
{ isLoading ? <Spinner isSaving={ isLoading } /> : <AdSenseSetupAuthFlowWidget accountStatus={ accountStatus } accounts={ accounts } /> }
{ ! isLoading ? this.renderErrorMessage() : null }
</div>
</div>
</div>
</Fragment>
);
}
}
export default AdSenseSetupWidget;
| 1 | 26,815 | What's the reason for this change here? I didn't see it mentioned in the PR and it's a bit of a confusing param name | google-site-kit-wp | js |
@@ -12,7 +12,9 @@ function Search() {
}
Search.prototype.query = function(q) {
- return this.index.search(q)
+ return q === '~'
+ ? this.storage.config.localList.get().map( function( package ){ return { ref: package, score: 1 }; } )
+ : this.index.search(q);
}
Search.prototype.add = function(package) { | 1 | var lunr = require('lunr')
function Search() {
var self = Object.create(Search.prototype)
self.index = lunr(function() {
this.field('name' , { boost: 10 })
this.field('description' , { boost: 4 })
this.field('author' , { boost: 6 })
this.field('readme')
})
return self
}
Search.prototype.query = function(q) {
return this.index.search(q)
}
Search.prototype.add = function(package) {
this.index.add({
id: package.name,
name: package.name,
description: package.description,
author: package._npmUser ? package._npmUser.name : '???',
})
},
Search.prototype.remove = function(name) {
this.index.remove({ id: name })
}
Search.prototype.reindex = function() {
var self = this
this.storage.get_local(function(err, packages) {
if (err) throw err // that function shouldn't produce any
var i = packages.length
while (i--) {
self.add(packages[i])
}
})
}
Search.prototype.configureStorage = function(storage) {
this.storage = storage
this.reindex()
}
module.exports = Search()
| 1 | 16,942 | I wonder why ~ and not a wildcard instead? | verdaccio-verdaccio | js |
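For context, the patched query() above special-cases '~' to return every locally published package with a fixed score, while any other string still goes through the lunr index. An illustrative call, with the package names invented for the example:

// Illustrative only; the package names below are made up.
var search = require('./search')
search.configureStorage(storage) // must run first so config.localList is populated

search.query('~')
// => [ { ref: 'my-private-pkg', score: 1 }, { ref: 'another-pkg', score: 1 } ]

search.query('express')
// => normal lunr hits, e.g. [ { ref: 'express-wrapper', score: 0.42 } ]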
@@ -63,6 +63,9 @@ public class PojoProducers implements BeanPostProcessor {
     // After AOP, the superclass of the new instance may be the original class, or it may just be a proxy whose superclass is not the original class
     // So we need to extract the original class first, and then read the annotation
Class<?> beanCls = BeanUtils.getImplClassFromBean(bean);
+ if(beanCls == null) {
+ return;
+ }
RpcSchema rpcSchema = beanCls.getAnnotation(RpcSchema.class);
if (rpcSchema == null) {
return; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.provider.pojo.schema;
import java.util.Collection;
import org.apache.servicecomb.foundation.common.RegisterManager;
import org.apache.servicecomb.foundation.common.utils.BeanUtils;
import org.apache.servicecomb.provider.pojo.RpcSchema;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
@Component
public class PojoProducers implements BeanPostProcessor {
  // key is the schemaId
private RegisterManager<String, PojoProducerMeta> pojoMgr = new RegisterManager<>("pojo service manager");
public void registerPojoProducer(PojoProducerMeta pojoProducer) {
pojoMgr.register(pojoProducer.getSchemaId(), pojoProducer);
}
public Collection<PojoProducerMeta> getProducers() {
return pojoMgr.values();
}
/**
* @deprecated Replaced by {@link #getProducers()}
*/
@Deprecated
public Collection<PojoProducerMeta> getProcucers() {
return getProducers();
}
@Override
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
return bean;
}
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
processProvider(beanName, bean);
return bean;
}
protected void processProvider(String beanName, Object bean) {
    // After AOP, the superclass of the new instance may be the original class, or it may just be a proxy whose superclass is not the original class
    // So we need to extract the original class first, and then read the annotation
Class<?> beanCls = BeanUtils.getImplClassFromBean(bean);
RpcSchema rpcSchema = beanCls.getAnnotation(RpcSchema.class);
if (rpcSchema == null) {
return;
}
String schemaId = rpcSchema.schemaId();
if (StringUtils.isEmpty(schemaId)) {
Class<?>[] intfs = beanCls.getInterfaces();
if (intfs.length == 1) {
schemaId = intfs[0].getName();
} else {
throw new Error("Must be schemaId or implements only one interface");
}
}
PojoProducerMeta pojoProducerMeta = new PojoProducerMeta();
pojoProducerMeta.setSchemaId(schemaId);
pojoProducerMeta.setInstance(bean);
pojoProducerMeta.setInstanceClass(beanCls);
registerPojoProducer(pojoProducerMeta);
}
}
| 1 | 10,458 | when will this happen? And if it does happen, should we just ignore it silently? | apache-servicecomb-java-chassis | java |
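The review question for this patch is when a null implementation class can occur and whether it should be skipped silently. One possible alternative, sketched purely as an assumption rather than the project's actual change, is to log a warning before returning; the SLF4J logger and its imports (org.slf4j.Logger, org.slf4j.LoggerFactory) are assumed additions.

  // Hypothetical variant of the new null check with a warning instead of a silent return.
  private static final Logger LOGGER = LoggerFactory.getLogger(PojoProducers.class);

  protected void processProvider(String beanName, Object bean) {
    Class<?> beanCls = BeanUtils.getImplClassFromBean(bean);
    if (beanCls == null) {
      LOGGER.warn("skip bean {}, unable to resolve its implementation class.", beanName);
      return;
    }
    // ... the rest of the method is unchanged ...
  }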
@@ -239,8 +239,10 @@ public class TestMergeAppend extends TableTestBase {
public void testManifestMergeMinCount() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "2")
- // each manifest file is 5227 bytes, so 12000 bytes limit will give us 2 bins with 3 manifest/data files.
- .set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "12000")
+ // Each initial v1/v2 ManifestFile is 5661/6397 bytes respectively. Merging two of the given
+ // manifests make one v1/v2 ManifestFile of 5672/6408 bytes respectively, so 12850 bytes
+ // limit will give us two bins with three manifest/data files.
+ .set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "12850")
.commit();
TableMetadata base = readMetadata(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.ManifestEntry.Status;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.apache.iceberg.relocated.com.google.common.collect.Iterators.concat;
@RunWith(Parameterized.class)
public class TestMergeAppend extends TableTestBase {
@Parameterized.Parameters(name = "formatVersion = {0}")
public static Object[] parameters() {
return new Object[] { 1, 2 };
}
public TestMergeAppend(int formatVersion) {
super(formatVersion);
}
@Test
public void testEmptyTableAppend() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Assert.assertEquals("Last sequence number should be 0", 0, base.lastSequenceNumber());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Snapshot committedSnapshot = table.currentSnapshot();
Assert.assertNotNull("Should create a snapshot", table.currentSnapshot());
V1Assert.assertEquals("Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber());
Assert.assertEquals("Should create 1 manifest for initial write",
1, committedSnapshot.allManifests().size());
long snapshotId = committedSnapshot.snapshotId();
validateManifest(committedSnapshot.allManifests().get(0),
seqs(1, 1),
ids(snapshotId, snapshotId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
}
@Test
public void testEmptyTableAppendManifest() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Assert.assertEquals("Last sequence number should be 0", 0, base.lastSequenceNumber());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
table.newAppend()
.appendManifest(manifest)
.commit();
Snapshot committedSnapshot = table.currentSnapshot();
Assert.assertNotNull("Should create a snapshot", table.currentSnapshot());
V1Assert.assertEquals("Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber());
Assert.assertEquals("Should create 1 manifest for initial write", 1, committedSnapshot.allManifests().size());
long snapshotId = committedSnapshot.snapshotId();
validateManifest(committedSnapshot.allManifests().get(0),
seqs(1, 1),
ids(snapshotId, snapshotId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
// validate that the metadata summary is correct when using appendManifest
Assert.assertEquals("Summary metadata should include 2 added files",
"2", committedSnapshot.summary().get("added-data-files"));
}
@Test
public void testEmptyTableAppendFilesAndManifest() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Assert.assertEquals("Last sequence number should be 0", 0, base.lastSequenceNumber());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.appendManifest(manifest)
.commit();
Snapshot committedSnapshot = table.currentSnapshot();
Assert.assertNotNull("Should create a snapshot", table.currentSnapshot());
V1Assert.assertEquals("Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber());
Assert.assertEquals("Should create 2 manifests for initial write",
2, committedSnapshot.allManifests().size());
long snapshotId = committedSnapshot.snapshotId();
validateManifest(committedSnapshot.allManifests().get(0),
seqs(1, 1),
ids(snapshotId, snapshotId),
files(FILE_C, FILE_D),
statuses(Status.ADDED, Status.ADDED));
validateManifest(committedSnapshot.allManifests().get(1),
seqs(1, 1),
ids(snapshotId, snapshotId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
}
@Test
public void testMergeWithAppendFilesAndManifest() throws IOException {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Assert.assertEquals("Last sequence number should be 0", 0, base.lastSequenceNumber());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.appendManifest(manifest)
.commit();
Snapshot committedSnapshot = table.currentSnapshot();
Assert.assertNotNull("Should create a snapshot", table.currentSnapshot());
V1Assert.assertEquals("Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber());
long snapshotId = committedSnapshot.snapshotId();
Assert.assertEquals("Should create 1 merged manifest", 1, committedSnapshot.allManifests().size());
validateManifest(committedSnapshot.allManifests().get(0),
seqs(1, 1, 1, 1),
ids(snapshotId, snapshotId, snapshotId, snapshotId),
files(FILE_C, FILE_D, FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED, Status.ADDED, Status.ADDED)
);
}
@Test
public void testMergeWithExistingManifest() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertNotNull("Should create a snapshot", table.currentSnapshot());
V1Assert.assertEquals("Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber());
TableMetadata base = readMetadata();
Snapshot commitBefore = table.currentSnapshot();
long baseId = commitBefore.snapshotId();
validateSnapshot(null, commitBefore, 1, FILE_A, FILE_B);
Assert.assertEquals("Should create 1 manifest for initial write",
1, commitBefore.allManifests().size());
ManifestFile initialManifest = base.currentSnapshot().allManifests().get(0);
validateManifest(initialManifest,
seqs(1, 1),
ids(baseId, baseId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.commit();
V1Assert.assertEquals("Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, table.ops().current().lastSequenceNumber());
Snapshot committedAfter = table.currentSnapshot();
Assert.assertEquals("Should contain 1 merged manifest for second write",
1, committedAfter.allManifests().size());
ManifestFile newManifest = committedAfter.allManifests().get(0);
Assert.assertNotEquals("Should not contain manifest from initial write",
initialManifest, newManifest);
long snapshotId = committedAfter.snapshotId();
validateManifest(newManifest,
seqs(2, 2, 1, 1),
ids(snapshotId, snapshotId, baseId, baseId),
concat(files(FILE_C, FILE_D), files(initialManifest)),
statuses(Status.ADDED, Status.ADDED, Status.EXISTING, Status.EXISTING)
);
}
@Test
public void testManifestMergeMinCount() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "2")
// each manifest file is 5227 bytes, so 12000 bytes limit will give us 2 bins with 3 manifest/data files.
.set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "12000")
.commit();
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Assert.assertEquals("Last sequence number should be 0", 0, base.lastSequenceNumber());
ManifestFile manifest = writeManifest(FILE_A);
ManifestFile manifest2 = writeManifestWithName("FILE_C", FILE_C);
ManifestFile manifest3 = writeManifestWithName("FILE_D", FILE_D);
table.newAppend()
.appendManifest(manifest)
.appendManifest(manifest2)
.appendManifest(manifest3)
.commit();
Snapshot snap1 = table.currentSnapshot();
long commitId1 = snap1.snapshotId();
base = readMetadata();
V2Assert.assertEquals("Snapshot sequence number should be 1", 1, snap1.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, base.lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, base.lastSequenceNumber());
Assert.assertEquals("Should contain 2 merged manifest for first write",
2, readMetadata().currentSnapshot().allManifests().size());
validateManifest(snap1.allManifests().get(0),
seqs(1),
ids(commitId1),
files(FILE_A),
statuses(Status.ADDED));
validateManifest(snap1.allManifests().get(1),
seqs(1, 1),
ids(commitId1, commitId1),
files(FILE_C, FILE_D),
statuses(Status.ADDED, Status.ADDED));
table.newAppend()
.appendManifest(manifest)
.appendManifest(manifest2)
.appendManifest(manifest3)
.commit();
Snapshot snap2 = table.currentSnapshot();
long commitId2 = snap2.snapshotId();
base = readMetadata();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, snap2.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, base.lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, base.lastSequenceNumber());
Assert.assertEquals("Should contain 3 merged manifest for second write",
3, readMetadata().currentSnapshot().allManifests().size());
validateManifest(snap2.allManifests().get(0),
seqs(2),
ids(commitId2),
files(FILE_A),
statuses(Status.ADDED));
validateManifest(snap2.allManifests().get(1),
seqs(2, 2),
ids(commitId2, commitId2),
files(FILE_C, FILE_D),
statuses(Status.ADDED, Status.ADDED));
validateManifest(snap2.allManifests().get(2),
seqs(1, 1, 1),
ids(commitId1, commitId1, commitId1),
files(FILE_A, FILE_C, FILE_D),
statuses(Status.EXISTING, Status.EXISTING, Status.EXISTING));
// validate that the metadata summary is correct when using appendManifest
Assert.assertEquals("Summary metadata should include 3 added files",
"3", readMetadata().currentSnapshot().summary().get("added-data-files"));
}
@Test
public void testManifestsMergeIntoOne() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend().appendFile(FILE_A).commit();
Snapshot snap1 = table.currentSnapshot();
TableMetadata base = readMetadata();
V2Assert.assertEquals("Snapshot sequence number should be 1", 1, snap1.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, base.lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, base.lastSequenceNumber());
long commitId1 = snap1.snapshotId();
Assert.assertEquals("Should contain 1 manifest", 1, snap1.allManifests().size());
validateManifest(snap1.allManifests().get(0), seqs(1), ids(commitId1), files(FILE_A), statuses(Status.ADDED));
table.newAppend().appendFile(FILE_B).commit();
Snapshot snap2 = table.currentSnapshot();
long commitId2 = snap2.snapshotId();
base = readMetadata();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, snap2.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, base.lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, base.lastSequenceNumber());
Assert.assertEquals("Should contain 2 manifests", 2, snap2.allManifests().size());
validateManifest(snap2.allManifests().get(0),
seqs(2),
ids(commitId2),
files(FILE_B),
statuses(Status.ADDED));
validateManifest(snap2.allManifests().get(1),
seqs(1),
ids(commitId1),
files(FILE_A),
statuses(Status.ADDED));
table.newAppend()
.appendManifest(writeManifest("input-m0.avro",
manifestEntry(ManifestEntry.Status.ADDED, null, FILE_C)))
.commit();
Snapshot snap3 = table.currentSnapshot();
base = readMetadata();
V2Assert.assertEquals("Snapshot sequence number should be 3", 3, snap3.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 3", 3, base.lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, base.lastSequenceNumber());
Assert.assertEquals("Should contain 3 manifests", 3, snap3.allManifests().size());
long commitId3 = snap3.snapshotId();
validateManifest(snap3.allManifests().get(0),
seqs(3),
ids(commitId3),
files(FILE_C),
statuses(Status.ADDED));
validateManifest(snap3.allManifests().get(1),
seqs(2),
ids(commitId2),
files(FILE_B),
statuses(Status.ADDED));
validateManifest(snap3.allManifests().get(2),
seqs(1),
ids(commitId1),
files(FILE_A),
statuses(Status.ADDED));
table.updateProperties()
.set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1")
.commit();
table.newAppend()
.appendManifest(writeManifest("input-m1.avro",
manifestEntry(ManifestEntry.Status.ADDED, null, FILE_D)))
.commit();
Snapshot snap4 = table.currentSnapshot();
base = readMetadata();
V2Assert.assertEquals("Snapshot sequence number should be 4", 4, snap4.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 4", 4, base.lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, base.lastSequenceNumber());
long commitId4 = snap4.snapshotId();
Assert.assertEquals("Should only contains 1 merged manifest", 1, snap4.allManifests().size());
validateManifest(snap4.allManifests().get(0),
seqs(4, 3, 2, 1),
ids(commitId4, commitId3, commitId2, commitId1),
files(FILE_D, FILE_C, FILE_B, FILE_A),
statuses(Status.ADDED, Status.EXISTING, Status.EXISTING, Status.EXISTING));
}
@Test
public void testManifestDoNotMergeMinCount() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.updateProperties().set("commit.manifest.min-count-to-merge", "4").commit();
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Assert.assertEquals("Last sequence number should be 0", 0, base.lastSequenceNumber());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
ManifestFile manifest2 = writeManifestWithName("FILE_C", FILE_C);
ManifestFile manifest3 = writeManifestWithName("FILE_D", FILE_D);
table.newAppend()
.appendManifest(manifest)
.appendManifest(manifest2)
.appendManifest(manifest3)
.commit();
Assert.assertNotNull("Should create a snapshot", table.currentSnapshot());
V1Assert.assertEquals("Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber());
Snapshot committed = table.currentSnapshot();
Assert.assertEquals("Should contain 3 merged manifest after 1st write write",
3, committed.allManifests().size());
long snapshotId = table.currentSnapshot().snapshotId();
validateManifest(committed.allManifests().get(0),
seqs(1, 1),
ids(snapshotId, snapshotId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED)
);
validateManifest(committed.allManifests().get(1),
seqs(1),
ids(snapshotId),
files(FILE_C),
statuses(Status.ADDED)
);
validateManifest(committed.allManifests().get(2),
seqs(1),
ids(snapshotId),
files(FILE_D),
statuses(Status.ADDED)
);
// validate that the metadata summary is correct when using appendManifest
Assert.assertEquals("Summary metadata should include 4 added files",
"4", committed.summary().get("added-data-files"));
}
@Test
public void testMergeWithExistingManifestAfterDelete() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Snapshot snap = table.currentSnapshot();
validateSnapshot(null, snap, 1, FILE_A, FILE_B);
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().allManifests().size());
ManifestFile initialManifest = base.currentSnapshot().allManifests().get(0);
validateManifest(initialManifest,
seqs(1, 1),
ids(baseId, baseId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
table.newDelete()
.deleteFile(FILE_A)
.commit();
Snapshot deleteSnapshot = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, deleteSnapshot.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
TableMetadata delete = readMetadata();
long deleteId = delete.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 filtered manifest for delete",
1, delete.currentSnapshot().allManifests().size());
ManifestFile deleteManifest = delete.currentSnapshot().allManifests().get(0);
validateManifest(deleteManifest,
seqs(2, 1),
ids(deleteId, baseId),
files(FILE_A, FILE_B),
statuses(Status.DELETED, Status.EXISTING));
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.commit();
Snapshot committedSnapshot = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 3", 3, committedSnapshot.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 3", 3, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Should contain 1 merged manifest for second write",
1, committedSnapshot.allManifests().size());
ManifestFile newManifest = committedSnapshot.allManifests().get(0);
Assert.assertNotEquals("Should not contain manifest from initial write",
initialManifest, newManifest);
long snapshotId = committedSnapshot.snapshotId();
// the deleted entry from the previous manifest should be removed
validateManifestEntries(newManifest,
ids(snapshotId, snapshotId, baseId),
files(FILE_C, FILE_D, FILE_B),
statuses(Status.ADDED, Status.ADDED, Status.EXISTING));
}
@Test
public void testMinMergeCount() {
// only merge when there are at least 4 manifests
table.updateProperties().set("commit.manifest.min-count-to-merge", "4").commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newFastAppend()
.appendFile(FILE_A)
.commit();
Snapshot snap1 = table.currentSnapshot();
long idFileA = snap1.snapshotId();
validateSnapshot(null, snap1, 1, FILE_A);
table.newFastAppend()
.appendFile(FILE_B)
.commit();
Snapshot snap2 = table.currentSnapshot();
long idFileB = snap2.snapshotId();
validateSnapshot(snap1, snap2, 2, FILE_B);
Assert.assertEquals("Should have 2 manifests from setup writes",
2, readMetadata().currentSnapshot().allManifests().size());
table.newAppend()
.appendFile(FILE_C)
.commit();
Snapshot snap3 = table.currentSnapshot();
long idFileC = snap3.snapshotId();
validateSnapshot(snap2, snap3, 3, FILE_C);
TableMetadata base = readMetadata();
Assert.assertEquals("Should have 3 unmerged manifests",
3, base.currentSnapshot().allManifests().size());
Set<ManifestFile> unmerged = Sets.newHashSet(base.currentSnapshot().allManifests());
table.newAppend()
.appendFile(FILE_D)
.commit();
Snapshot committed = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 4", 4, committed.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 4", 4, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Should contain 1 merged manifest after the 4th write",
1, committed.allManifests().size());
ManifestFile newManifest = committed.allManifests().get(0);
Assert.assertFalse("Should not contain previous manifests", unmerged.contains(newManifest));
long lastSnapshotId = committed.snapshotId();
validateManifest(newManifest,
seqs(4, 3, 2, 1),
ids(lastSnapshotId, idFileC, idFileB, idFileA),
files(FILE_D, FILE_C, FILE_B, FILE_A),
statuses(Status.ADDED, Status.EXISTING, Status.EXISTING, Status.EXISTING)
);
}
@Test
public void testMergeSizeTargetWithExistingManifest() {
// use a small limit on manifest size to prevent merging
table.updateProperties()
.set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "10")
.commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Snapshot snap = table.currentSnapshot();
validateSnapshot(null, snap, 1, FILE_A, FILE_B);
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().allManifests().size());
ManifestFile initialManifest = base.currentSnapshot().allManifests().get(0);
validateManifest(initialManifest,
seqs(1, 1),
ids(baseId, baseId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.commit();
Snapshot committed = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, committed.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Should contain 2 unmerged manifests after second write",
2, committed.allManifests().size());
ManifestFile newManifest = committed.allManifests().get(0);
Assert.assertNotEquals("Should not contain manifest from initial write",
initialManifest, newManifest);
long pendingId = committed.snapshotId();
validateManifest(newManifest,
seqs(2, 2),
ids(pendingId, pendingId),
files(FILE_C, FILE_D),
statuses(Status.ADDED, Status.ADDED)
);
validateManifest(committed.allManifests().get(1),
seqs(1, 1),
ids(baseId, baseId),
files(initialManifest),
statuses(Status.ADDED, Status.ADDED)
);
}
@Test
public void testChangedPartitionSpec() {
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Snapshot snap = table.currentSnapshot();
long commitId = snap.snapshotId();
validateSnapshot(null, snap, 1, FILE_A, FILE_B);
TableMetadata base = readMetadata();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().allManifests().size());
ManifestFile initialManifest = base.currentSnapshot().allManifests().get(0);
validateManifest(initialManifest,
seqs(1, 1),
ids(commitId, commitId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
// build the new spec using the table's schema, which uses fresh IDs
PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
.bucket("data", 16)
.bucket("id", 4)
.build();
// commit the new partition spec to the table manually
table.ops().commit(base, base.updatePartitionSpec(newSpec));
Snapshot snap2 = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 1", 1, snap2.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
DataFile newFileY = DataFiles.builder(newSpec)
.withPath("/path/to/data-y.parquet")
.withFileSizeInBytes(10)
.withPartitionPath("data_bucket=2/id_bucket=3")
.withRecordCount(1)
.build();
table.newAppend()
.appendFile(newFileY)
.commit();
Snapshot lastSnapshot = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, lastSnapshot.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Should use 2 manifest files",
2, lastSnapshot.allManifests().size());
// new manifest comes first
validateManifest(lastSnapshot.allManifests().get(0),
seqs(2),
ids(lastSnapshot.snapshotId()),
files(newFileY),
statuses(Status.ADDED)
);
Assert.assertEquals("Second manifest should be the initial manifest with the old spec",
initialManifest, lastSnapshot.allManifests().get(1));
}
@Test
public void testChangedPartitionSpecMergeExisting() {
table.newAppend()
.appendFile(FILE_A)
.commit();
Snapshot snap1 = table.currentSnapshot();
long id1 = snap1.snapshotId();
validateSnapshot(null, snap1, 1, FILE_A);
// create a second compatible manifest
table.newFastAppend()
.appendFile(FILE_B)
.commit();
Snapshot snap2 = table.currentSnapshot();
long id2 = snap2.snapshotId();
validateSnapshot(snap1, snap2, 2, FILE_B);
TableMetadata base = readMetadata();
Assert.assertEquals("Should contain 2 manifests",
2, base.currentSnapshot().allManifests().size());
ManifestFile manifest = base.currentSnapshot().allManifests().get(0);
// build the new spec using the table's schema, which uses fresh IDs
PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
.bucket("data", 16)
.bucket("id", 4)
.build();
// commit the new partition spec to the table manually
table.ops().commit(base, base.updatePartitionSpec(newSpec));
Snapshot snap3 = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, snap3.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
DataFile newFileY = DataFiles.builder(newSpec)
.withPath("/path/to/data-y.parquet")
.withFileSizeInBytes(10)
.withPartitionPath("data_bucket=2/id_bucket=3")
.withRecordCount(1)
.build();
table.newAppend()
.appendFile(newFileY)
.commit();
Snapshot lastSnapshot = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 3", 3, lastSnapshot.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 3", 3, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Should use 2 manifest files",
2, lastSnapshot.allManifests().size());
Assert.assertFalse("First manifest should not be in the new snapshot",
lastSnapshot.allManifests().contains(manifest));
validateManifest(lastSnapshot.allManifests().get(0),
seqs(3),
ids(lastSnapshot.snapshotId()),
files(newFileY),
statuses(Status.ADDED)
);
validateManifest(lastSnapshot.allManifests().get(1),
seqs(2, 1),
ids(id2, id1),
files(FILE_B, FILE_A),
statuses(Status.EXISTING, Status.EXISTING)
);
}
@Test
public void testFailure() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
table.newAppend()
.appendFile(FILE_A)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
V2Assert.assertEquals("Last sequence number should be 1", 1, base.lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, base.lastSequenceNumber());
ManifestFile initialManifest = base.currentSnapshot().allManifests().get(0);
validateManifest(initialManifest, seqs(1), ids(baseId), files(FILE_A), statuses(Status.ADDED));
table.ops().failCommits(5);
AppendFiles append = table.newAppend().appendFile(FILE_B);
Snapshot pending = append.apply();
Assert.assertEquals("Should merge to 1 manifest", 1, pending.allManifests().size());
ManifestFile newManifest = pending.allManifests().get(0);
Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
validateManifest(newManifest,
ids(pending.snapshotId(), baseId),
concat(files(FILE_B), files(initialManifest)));
AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
CommitFailedException.class, "Injected failure", append::commit);
V2Assert.assertEquals("Last sequence number should be 1", 1, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Should only contain 1 manifest file",
1, table.currentSnapshot().allManifests().size());
validateManifest(table.currentSnapshot().allManifests().get(0),
seqs(1),
ids(baseId),
files(initialManifest),
statuses(Status.ADDED)
);
Assert.assertFalse("Should clean up new manifest", new File(newManifest.path()).exists());
}
@Test
public void testAppendManifestCleanup() throws IOException {
// inject 5 failures
TestTables.TestTableOperations ops = table.ops();
ops.failCommits(5);
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
AppendFiles append = table.newAppend().appendManifest(manifest);
Snapshot pending = append.apply();
ManifestFile newManifest = pending.allManifests().get(0);
Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
CommitFailedException.class, "Injected failure", append::commit);
V2Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertFalse("Should clean up new manifest", new File(newManifest.path()).exists());
}
@Test
public void testRecovery() {
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
table.newAppend()
.appendFile(FILE_A)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
V2Assert.assertEquals("Last sequence number should be 1", 1, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
ManifestFile initialManifest = base.currentSnapshot().allManifests().get(0);
validateManifest(initialManifest, seqs(1), ids(baseId), files(FILE_A), statuses(Status.ADDED));
table.ops().failCommits(3);
AppendFiles append = table.newAppend().appendFile(FILE_B);
Snapshot pending = append.apply();
Assert.assertEquals("Should merge to 1 manifest", 1, pending.allManifests().size());
ManifestFile newManifest = pending.allManifests().get(0);
Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
validateManifest(newManifest,
ids(pending.snapshotId(), baseId),
concat(files(FILE_B), files(initialManifest)));
V2Assert.assertEquals("Snapshot sequence number should be 1", 1, table.currentSnapshot().sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 1", 1, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
append.commit();
Snapshot snapshot = table.currentSnapshot();
long snapshotId = snapshot.snapshotId();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, table.currentSnapshot().sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
TableMetadata metadata = readMetadata();
Assert.assertTrue("Should reuse the new manifest", new File(newManifest.path()).exists());
Assert.assertEquals("Should commit the same new manifest during retry",
Lists.newArrayList(newManifest), metadata.currentSnapshot().allManifests());
Assert.assertEquals("Should only contain 1 merged manifest file",
1, table.currentSnapshot().allManifests().size());
ManifestFile manifestFile = snapshot.allManifests().get(0);
validateManifest(manifestFile,
seqs(2, 1),
ids(snapshotId, baseId),
files(FILE_B, FILE_A),
statuses(Status.ADDED, Status.EXISTING));
}
@Test
public void testAppendManifestWithSnapshotIdInheritance() throws IOException {
table.updateProperties()
.set(TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED, "true")
.commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
table.newAppend()
.appendManifest(manifest)
.commit();
Snapshot snapshot = table.currentSnapshot();
long snapshotId = snapshot.snapshotId();
validateSnapshot(null, snapshot, 1, FILE_A, FILE_B);
List<ManifestFile> manifests = table.currentSnapshot().allManifests();
Assert.assertEquals("Should have 1 committed manifest", 1, manifests.size());
ManifestFile manifestFile = snapshot.allManifests().get(0);
validateManifest(manifestFile,
seqs(1, 1),
ids(snapshotId, snapshotId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
// validate that the metadata summary is correct when using appendManifest
Assert.assertEquals("Summary metadata should include 2 added files",
"2", snapshot.summary().get("added-data-files"));
Assert.assertEquals("Summary metadata should include 2 added records",
"2", snapshot.summary().get("added-records"));
Assert.assertEquals("Summary metadata should include 2 files in total",
"2", snapshot.summary().get("total-data-files"));
Assert.assertEquals("Summary metadata should include 2 records in total",
"2", snapshot.summary().get("total-records"));
}
@Test
public void testMergedAppendManifestCleanupWithSnapshotIdInheritance() throws IOException {
table.updateProperties()
.set(TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED, "true")
.commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
table.updateProperties()
.set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1")
.commit();
ManifestFile manifest1 = writeManifestWithName("manifest-file-1.avro", FILE_A, FILE_B);
table.newAppend()
.appendManifest(manifest1)
.commit();
Snapshot snap1 = table.currentSnapshot();
long commitId1 = snap1.snapshotId();
validateSnapshot(null, snap1, 1, FILE_A, FILE_B);
Assert.assertEquals("Should have only 1 manifest", 1, snap1.allManifests().size());
validateManifest(table.currentSnapshot().allManifests().get(0),
seqs(1, 1),
ids(commitId1, commitId1),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
Assert.assertTrue("Unmerged append manifest should not be deleted", new File(manifest1.path()).exists());
ManifestFile manifest2 = writeManifestWithName("manifest-file-2.avro", FILE_C, FILE_D);
table.newAppend()
.appendManifest(manifest2)
.commit();
Snapshot snap2 = table.currentSnapshot();
long commitId2 = snap2.snapshotId();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, table.currentSnapshot().sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Manifests should be merged into 1", 1, snap2.allManifests().size());
validateManifest(table.currentSnapshot().allManifests().get(0),
seqs(2, 2, 1, 1),
ids(commitId2, commitId2, commitId1, commitId1),
files(FILE_C, FILE_D, FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED, Status.EXISTING, Status.EXISTING));
Assert.assertFalse("Merged append manifest should be deleted", new File(manifest2.path()).exists());
}
@Test
public void testAppendManifestFailureWithSnapshotIdInheritance() throws IOException {
table.updateProperties()
.set(TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED, "true")
.commit();
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
table.updateProperties()
.set(TableProperties.COMMIT_NUM_RETRIES, "1")
.commit();
table.ops().failCommits(5);
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
AppendFiles append = table.newAppend();
append.appendManifest(manifest);
AssertHelpers.assertThrows("Should reject commit",
CommitFailedException.class, "Injected failure",
append::commit);
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
Assert.assertTrue("Append manifest should not be deleted", new File(manifest.path()).exists());
}
@Test
public void testInvalidAppendManifest() throws IOException {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
ManifestFile manifestWithExistingFiles = writeManifest(
"manifest-file-1.avro",
manifestEntry(Status.EXISTING, null, FILE_A));
AssertHelpers.assertThrows("Should reject commit",
IllegalArgumentException.class, "Cannot append manifest with existing files",
() -> table.newAppend()
.appendManifest(manifestWithExistingFiles)
.commit());
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
ManifestFile manifestWithDeletedFiles = writeManifest(
"manifest-file-2.avro",
manifestEntry(Status.DELETED, null, FILE_A));
AssertHelpers.assertThrows("Should reject commit",
IllegalArgumentException.class, "Cannot append manifest with deleted files",
() -> table.newAppend()
.appendManifest(manifestWithDeletedFiles)
.commit());
Assert.assertEquals("Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
}
@Test
public void testUpdatePartitionSpecFieldIdsForV1Table() {
TableMetadata base = readMetadata();
// build the new spec using the table's schema, which uses fresh IDs
PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
.bucket("id", 16)
.identity("data")
.bucket("data", 4)
.bucket("data", 16, "data_partition") // reuse field id although different target name
.build();
// commit the new partition spec to the table manually
table.ops().commit(base, base.updatePartitionSpec(newSpec));
Assert.assertEquals("Last sequence number should be 0", 0, base.lastSequenceNumber());
List<PartitionSpec> partitionSpecs = table.ops().current().specs();
PartitionSpec partitionSpec = partitionSpecs.get(0);
Assert.assertEquals(1000, partitionSpec.lastAssignedFieldId());
Types.StructType structType = partitionSpec.partitionType();
List<Types.NestedField> fields = structType.fields();
Assert.assertEquals(1, fields.size());
Assert.assertEquals("data_bucket", fields.get(0).name());
Assert.assertEquals(1000, fields.get(0).fieldId());
partitionSpec = partitionSpecs.get(1);
Assert.assertEquals(1003, partitionSpec.lastAssignedFieldId());
structType = partitionSpec.partitionType();
fields = structType.fields();
Assert.assertEquals(4, fields.size());
Assert.assertEquals("id_bucket", fields.get(0).name());
Assert.assertEquals(1000, fields.get(0).fieldId());
Assert.assertEquals("data", fields.get(1).name());
Assert.assertEquals(1001, fields.get(1).fieldId());
Assert.assertEquals("data_bucket", fields.get(2).name());
Assert.assertEquals(1002, fields.get(2).fieldId());
Assert.assertEquals("data_partition", fields.get(3).name());
Assert.assertEquals(1003, fields.get(3).fieldId());
}
@Test
public void testManifestEntryFieldIdsForChangedPartitionSpecForV1Table() {
table.newAppend()
.appendFile(FILE_A)
.commit();
Snapshot snap = table.currentSnapshot();
long commitId = snap.snapshotId();
validateSnapshot(null, snap, 1, FILE_A);
TableMetadata base = readMetadata();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().allManifests().size());
ManifestFile initialManifest = base.currentSnapshot().allManifests().get(0);
validateManifest(initialManifest, seqs(1), ids(commitId), files(FILE_A), statuses(Status.ADDED));
// build the new spec using the table's schema, which uses fresh IDs
PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
.bucket("id", 8)
.bucket("data", 8)
.build();
// commit the new partition spec to the table manually
table.ops().commit(base, base.updatePartitionSpec(newSpec));
V2Assert.assertEquals("Last sequence number should be 1", 1, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
    // create a new file with the table's current spec
DataFile newFile = DataFiles.builder(table.spec())
.withPath("/path/to/data-x.parquet")
.withFileSizeInBytes(10)
.withPartitionPath("id_bucket=1/data_bucket=1")
.withRecordCount(1)
.build();
table.newAppend()
.appendFile(newFile)
.commit();
Snapshot committedSnapshot = table.currentSnapshot();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, committedSnapshot.sequenceNumber());
V2Assert.assertEquals("Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals("Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
Assert.assertEquals("Should use 2 manifest files",
2, committedSnapshot.allManifests().size());
// new manifest comes first
validateManifest(committedSnapshot.allManifests().get(0),
seqs(2),
ids(committedSnapshot.snapshotId()), files(newFile),
statuses(Status.ADDED)
);
Assert.assertEquals("Second manifest should be the initial manifest with the old spec",
initialManifest, committedSnapshot.allManifests().get(1));
// field ids of manifest entries in two manifests with different specs of the same source field should be different
ManifestEntry<DataFile> entry = ManifestFiles.read(committedSnapshot.allManifests().get(0), FILE_IO)
.entries().iterator().next();
Types.NestedField field = ((PartitionData) entry.file().partition()).getPartitionType().fields().get(0);
Assert.assertEquals(1000, field.fieldId());
Assert.assertEquals("id_bucket", field.name());
field = ((PartitionData) entry.file().partition()).getPartitionType().fields().get(1);
Assert.assertEquals(1001, field.fieldId());
Assert.assertEquals("data_bucket", field.name());
entry = ManifestFiles.read(committedSnapshot.allManifests().get(1), FILE_IO).entries().iterator().next();
field = ((PartitionData) entry.file().partition()).getPartitionType().fields().get(0);
Assert.assertEquals(1000, field.fieldId());
Assert.assertEquals("data_bucket", field.name());
}
@Test
public void testDefaultPartitionSummaries() {
table.newFastAppend()
.appendFile(FILE_A)
.commit();
Set<String> partitionSummaryKeys = table.currentSnapshot().summary().keySet().stream()
.filter(key -> key.startsWith(SnapshotSummary.CHANGED_PARTITION_PREFIX))
.collect(Collectors.toSet());
Assert.assertEquals("Should include no partition summaries by default", 0, partitionSummaryKeys.size());
String summariesIncluded = table.currentSnapshot().summary()
.getOrDefault(SnapshotSummary.PARTITION_SUMMARY_PROP, "false");
Assert.assertEquals("Should not set partition-summaries-included to true", "false", summariesIncluded);
String changedPartitions = table.currentSnapshot().summary().get(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP);
Assert.assertEquals("Should set changed partition count", "1", changedPartitions);
}
@Test
public void testIncludedPartitionSummaries() {
table.updateProperties()
.set(TableProperties.WRITE_PARTITION_SUMMARY_LIMIT, "1")
.commit();
table.newFastAppend()
.appendFile(FILE_A)
.commit();
Set<String> partitionSummaryKeys = table.currentSnapshot().summary().keySet().stream()
.filter(key -> key.startsWith(SnapshotSummary.CHANGED_PARTITION_PREFIX))
.collect(Collectors.toSet());
Assert.assertEquals("Should include a partition summary", 1, partitionSummaryKeys.size());
String summariesIncluded = table.currentSnapshot().summary()
.getOrDefault(SnapshotSummary.PARTITION_SUMMARY_PROP, "false");
Assert.assertEquals("Should set partition-summaries-included to true", "true", summariesIncluded);
String changedPartitions = table.currentSnapshot().summary().get(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP);
Assert.assertEquals("Should set changed partition count", "1", changedPartitions);
String partitionSummary = table.currentSnapshot().summary()
.get(SnapshotSummary.CHANGED_PARTITION_PREFIX + "data_bucket=0");
Assert.assertEquals("Summary should include 1 file with 1 record that is 10 bytes",
"added-data-files=1,added-records=1,added-files-size=10", partitionSummary);
}
@Test
public void testIncludedPartitionSummaryLimit() {
table.updateProperties()
.set(TableProperties.WRITE_PARTITION_SUMMARY_LIMIT, "1")
.commit();
table.newFastAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Set<String> partitionSummaryKeys = table.currentSnapshot().summary().keySet().stream()
.filter(key -> key.startsWith(SnapshotSummary.CHANGED_PARTITION_PREFIX))
.collect(Collectors.toSet());
Assert.assertEquals("Should include no partition summaries, over limit", 0, partitionSummaryKeys.size());
String summariesIncluded = table.currentSnapshot().summary()
.getOrDefault(SnapshotSummary.PARTITION_SUMMARY_PROP, "false");
Assert.assertEquals("Should not set partition-summaries-included to true", "false", summariesIncluded);
String changedPartitions = table.currentSnapshot().summary().get(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP);
Assert.assertEquals("Should set changed partition count", "2", changedPartitions);
}
}
| 1 | 31,013 | 3x the smaller size would be around 17k, and we need it to be at least about 13k, which is 2x the larger size. I'd probably set this to 15k to split the difference and hopefully avoid needing to update this again as tests change. This is minor, though. | apache-iceberg | java |
@@ -360,7 +360,9 @@ class DateEncoder(Encoder):
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
output[0:] = 0
else:
- assert isinstance(input, datetime.datetime)
+ assert isinstance(input, datetime.datetime), (
+ "Input is type %s, expected datetime. Value: %s" % (type(input),
+ str(input)))
# Get the scalar values for each sub-field
scalars = self.getScalars(input) | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from base import Encoder
import datetime
from scalar import ScalarEncoder
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
class DateEncoder(Encoder):
"""A date encoder encodes a date according to encoding parameters
specified in its constructor.
The input to a date encoder is a datetime.datetime object. The output
is the concatenation of several sub-encodings, each of which encodes
a different aspect of the date. Which sub-encodings are present, and
details of those sub-encodings, are specified in the DateEncoder
constructor.
Each parameter describes one attribute to encode. By default, the attribute
is not encoded.
season (season of the year; units = day):
(int) width of attribute; default radius = 91.5 days (1 season)
(tuple) season[0] = width; season[1] = radius
dayOfWeek (monday = 0; units = day)
(int) width of attribute; default radius = 1 day
(tuple) dayOfWeek[0] = width; dayOfWeek[1] = radius
weekend (boolean: 0, 1)
(int) width of attribute
holiday (boolean: 0, 1)
(int) width of attribute
  timeOfDay (midnight = 0; units = hour)
(int) width of attribute: default radius = 4 hours
(tuple) timeOfDay[0] = width; timeOfDay[1] = radius
"""
############################################################################
def __init__(self, season=0, dayOfWeek=0, weekend=0, holiday=0, timeOfDay=0, customDays=0,
name = ''):
self.width = 0
self.description = []
self.name = name
# This will contain a list of (name, encoder, offset) tuples for use by
# the decode() method
self.encoders = []
self.seasonEncoder = None
if season != 0:
      # Ignore leap year differences -- assume 366 days in a year
# Radius = 91.5 days = length of season
# Value is number of days since beginning of year (0 - 355)
if hasattr(season, "__getitem__"):
w = season[0]
radius = season[1]
else:
w = season
radius = 91.5
self.seasonEncoder = ScalarEncoder(w = w, minval=0, maxval=366,
radius=radius, periodic=True,
name="season")
self.seasonOffset = self.width
self.width += self.seasonEncoder.getWidth()
self.description.append(("season", self.seasonOffset))
self.encoders.append(("season", self.seasonEncoder, self.seasonOffset))
self.dayOfWeekEncoder = None
if dayOfWeek != 0:
# Value is day of week (floating point)
# Radius is 1 day
if hasattr(dayOfWeek, "__getitem__"):
w = dayOfWeek[0]
radius = dayOfWeek[1]
else:
w = dayOfWeek
radius = 1
self.dayOfWeekEncoder = ScalarEncoder(w = w, minval=0, maxval=7,
radius=radius, periodic=True,
name="day of week")
self.dayOfWeekOffset = self.width
self.width += self.dayOfWeekEncoder.getWidth()
self.description.append(("day of week", self.dayOfWeekOffset))
self.encoders.append(("day of week", self.dayOfWeekEncoder, self.dayOfWeekOffset))
self.weekendEncoder = None
if weekend != 0:
# Binary value. Not sure if this makes sense. Also is somewhat redundant
# with dayOfWeek
      # Append radius if it was not provided
if not hasattr(weekend, "__getitem__"):
weekend = (weekend,1)
self.weekendEncoder = ScalarEncoder(w = weekend[0], minval = 0, maxval=1,
periodic=False, radius=weekend[1],
name="weekend")
self.weekendOffset = self.width
self.width += self.weekendEncoder.getWidth()
self.description.append(("weekend", self.weekendOffset))
self.encoders.append(("weekend", self.weekendEncoder, self.weekendOffset))
    # Set up the custom days encoder. The first element of the tuple is the width;
    # the second is either a single day of the week or a list of the days you want
    # encoded as ones.
self.customDaysEncoder = None
if customDays !=0:
customDayEncoderName = ""
daysToParse = []
assert len(customDays)==2, "Please provide a w and the desired days"
if isinstance(customDays[1], list):
for day in customDays[1]:
customDayEncoderName+=str(day)+" "
daysToParse=customDays[1]
elif isinstance(customDays[1], str):
customDayEncoderName+=customDays[1]
daysToParse = [customDays[1]]
else:
assert False, "You must provide either a list of days or a single day"
      # Parse days
self.customDays = []
for day in daysToParse:
if(day.lower() in ["mon","monday"]):
self.customDays+=[0]
elif day.lower() in ["tue","tuesday"]:
self.customDays+=[1]
elif day.lower() in ["wed","wednesday"]:
self.customDays+=[2]
elif day.lower() in ["thu","thursday"]:
self.customDays+=[3]
elif day.lower() in ["fri","friday"]:
self.customDays+=[4]
elif day.lower() in ["sat","saturday"]:
self.customDays+=[5]
elif day.lower() in ["sun","sunday"]:
self.customDays+=[6]
else:
assert False, "Unable to understand %s as a day of week" % str(day)
self.customDaysEncoder = ScalarEncoder(w=customDays[0], minval = 0, maxval=1,
periodic=False, radius=1,
name=customDayEncoderName)
self.customDaysOffset = self.width
self.width += self.customDaysEncoder.getWidth()
self.description.append(("customdays", self.customDaysOffset))
self.encoders.append(("customdays", self.customDaysEncoder, self.customDaysOffset))
self.holidayEncoder = None
if holiday != 0:
# A "continuous" binary value. = 1 on the holiday itself and smooth ramp
# 0->1 on the day before the holiday and 1->0 on the day after the holiday.
self.holidayEncoder = ScalarEncoder(w = holiday, minval = 0, maxval=1,
periodic=False, radius=1,
name="holiday")
self.holidayOffset = self.width
self.width += self.holidayEncoder.getWidth()
self.description.append(("holiday", self.holidayOffset))
self.encoders.append(("holiday", self.holidayEncoder, self.holidayOffset))
self.timeOfDayEncoder = None
if timeOfDay != 0:
# Value is time of day in hours
# Radius = 4 hours, e.g. morning, afternoon, evening, early night,
# late night, etc.
if hasattr(timeOfDay, "__getitem__"):
w = timeOfDay[0]
radius = timeOfDay[1]
else:
w = timeOfDay
radius = 4
self.timeOfDayEncoder = ScalarEncoder(w = w, minval=0, maxval=24,
periodic=True, radius=radius, name="time of day")
self.timeOfDayOffset = self.width
self.width += self.timeOfDayEncoder.getWidth()
self.description.append(("time of day", self.timeOfDayOffset))
self.encoders.append(("time of day", self.timeOfDayEncoder, self.timeOfDayOffset))
############################################################################
def getWidth(self):
return self.width
############################################################################
def getScalarNames(self, parentFieldName=''):
""" See method description in base.py """
names = []
# This forms a name which is the concatenation of the parentFieldName
# passed in and the encoder's own name.
def _formFieldName(encoder):
if parentFieldName == '':
return encoder.name
else:
return '%s.%s' % (parentFieldName, encoder.name)
# -------------------------------------------------------------------------
# Get the scalar values for each sub-field
if self.seasonEncoder is not None:
names.append(_formFieldName(self.seasonEncoder))
if self.dayOfWeekEncoder is not None:
names.append(_formFieldName(self.dayOfWeekEncoder))
if self.customDaysEncoder is not None:
names.append(_formFieldName(self.customDaysEncoder))
if self.weekendEncoder is not None:
names.append(_formFieldName(self.weekendEncoder))
if self.holidayEncoder is not None:
names.append(_formFieldName(self.holidayEncoder))
if self.timeOfDayEncoder is not None:
names.append(_formFieldName(self.timeOfDayEncoder))
return names
############################################################################
def getEncodedValues(self, input):
""" See method description in base.py """
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return numpy.array([None])
assert isinstance(input, datetime.datetime)
values = []
# -------------------------------------------------------------------------
# Get the scalar values for each sub-field
timetuple = input.timetuple()
timeOfDay = timetuple.tm_hour + float(timetuple.tm_min)/60.0
if self.seasonEncoder is not None:
dayOfYear = timetuple.tm_yday
# input.timetuple() computes the day of year 1 based, so convert to 0 based
values.append(dayOfYear-1)
if self.dayOfWeekEncoder is not None:
dayOfWeek = timetuple.tm_wday #+ timeOfDay / 24.0
values.append(dayOfWeek)
if self.weekendEncoder is not None:
# saturday, sunday or friday evening
if timetuple.tm_wday == 6 or timetuple.tm_wday == 5 \
or (timetuple.tm_wday == 4 and timeOfDay > 18):
weekend = 1
else:
weekend = 0
values.append(weekend)
if self.customDaysEncoder is not None:
if timetuple.tm_wday in self.customDays:
customDay = 1
else:
customDay = 0
values.append(customDay)
if self.holidayEncoder is not None:
# A "continuous" binary value. = 1 on the holiday itself and smooth ramp
# 0->1 on the day before the holiday and 1->0 on the day after the holiday.
# Currently the only holiday we know about is December 25
# holidays is a list of holidays that occur on a fixed date every year
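      # Worked example (added for clarity): an input of Dec 24 18:00 is 21600
      # seconds before midnight of Dec 25, so val = 1.0 - 21600/86400 = 0.75;
      # an input of Dec 26 06:00 is 1 day 21600 seconds past it, giving the same
      # val = 0.75 on the way back down.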
holidays = [(12, 25)]
val = 0
for h in holidays:
# hdate is midnight on the holiday
hdate = datetime.datetime(timetuple.tm_year, h[0], h[1], 0, 0, 0)
if input > hdate:
diff = input - hdate
if diff.days == 0:
# return 1 on the holiday itself
val = 1
break
elif diff.days == 1:
# ramp smoothly from 1 -> 0 on the next day
val = 1.0 - (float(diff.seconds) / (86400))
break
else:
diff = hdate - input
if diff.days == 0:
# ramp smoothly from 0 -> 1 on the previous day
val = 1.0 - (float(diff.seconds) / 86400)
values.append(val)
if self.timeOfDayEncoder is not None:
values.append(timeOfDay)
return values
############################################################################
def getScalars(self, input):
""" See method description in base.py
Parameters:
-----------------------------------------------------------------------
input: A datetime object representing the time being encoded
Returns: A numpy array of the corresponding scalar values in
the following order:
[season, dayOfWeek, weekend, holiday, timeOfDay]
Note: some of these fields might be omitted if they were not
specified in the encoder
"""
return numpy.array(self.getEncodedValues(input))
############################################################################
def getBucketIndices(self, input):
""" See method description in base.py """
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      # Return None for each sub-field
return [None] * len(self.encoders)
else:
assert isinstance(input, datetime.datetime)
# Get the scalar values for each sub-field
scalars = self.getScalars(input)
      # Encode each sub-field
result = []
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
result.extend(encoder.getBucketIndices(scalars[i]))
return result
############################################################################
def encodeIntoArray(self, input, output):
""" See method description in base.py """
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
output[0:] = 0
else:
assert isinstance(input, datetime.datetime)
# Get the scalar values for each sub-field
scalars = self.getScalars(input)
      # Encode each sub-field
for i in xrange(len(self.encoders)):
(name, encoder, offset) = self.encoders[i]
encoder.encodeIntoArray(scalars[i], output[offset:])
############################################################################
def getDescription(self):
return self.description
| 1 | 12,916 | @scottpurdy, strictly speaking, should this scenario raise a ValueError exception instead of AssertionError? | numenta-nupic | py |
@@ -0,0 +1,7 @@
+package azkaban.spi;
+
+public enum ExecutorType {
+ // Type of executor where flow runs
+ BAREMETAL,
+ KUBERNETES
+} | 1 | 1 | 21,701 | Can you please add open source disclaimer? | azkaban-azkaban | java |
|
@@ -81,13 +81,11 @@ func (u *unaryHandler) Handle(ctx context.Context, transportRequest *transport.R
var wireError *wirepb.Error
if appErr != nil {
responseWriter.SetApplicationError()
- wireError = &wirepb.Error{
- appErr.Error(),
- }
+ wireError = &wirepb.Error{Message: appErr.Error()}
}
wireResponse := &wirepb.Response{
- responseData,
- wireError,
+ Payload: responseData,
+ Error: wireError,
}
protoBuffer := getBuffer()
defer putBuffer(protoBuffer) | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package protobuf
import (
"context"
apiencoding "go.uber.org/yarpc/api/encoding"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/encoding/x/protobuf/internal/wirepb"
"go.uber.org/yarpc/internal/buffer"
"go.uber.org/yarpc/internal/encoding"
"github.com/gogo/protobuf/proto"
)
type unaryHandler struct {
handle func(context.Context, proto.Message) (proto.Message, error)
newRequest func() proto.Message
}
func newUnaryHandler(
handle func(context.Context, proto.Message) (proto.Message, error),
newRequest func() proto.Message,
) *unaryHandler {
return &unaryHandler{handle, newRequest}
}
func (u *unaryHandler) Handle(ctx context.Context, transportRequest *transport.Request, responseWriter transport.ResponseWriter) error {
if err := encoding.Expect(transportRequest, Encoding); err != nil {
return err
}
ctx, call := apiencoding.NewInboundCall(ctx)
if err := call.ReadFromRequest(transportRequest); err != nil {
return err
}
buf := buffer.Get()
defer buffer.Put(buf)
if _, err := buf.ReadFrom(transportRequest.Body); err != nil {
return err
}
body := buf.Bytes()
request := u.newRequest()
// is this possible?
if body != nil {
if err := proto.Unmarshal(body, request); err != nil {
return encoding.RequestBodyDecodeError(transportRequest, err)
}
}
response, appErr := u.handle(ctx, request)
if err := call.WriteToResponse(responseWriter); err != nil {
return err
}
var responseData []byte
if response != nil {
protoBuffer := getBuffer()
defer putBuffer(protoBuffer)
if err := protoBuffer.Marshal(response); err != nil {
return encoding.ResponseBodyEncodeError(transportRequest, err)
}
responseData = protoBuffer.Bytes()
}
var wireError *wirepb.Error
if appErr != nil {
responseWriter.SetApplicationError()
wireError = &wirepb.Error{
appErr.Error(),
}
}
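	// Assemble the wire-level envelope: the marshaled response payload plus any
	// application error, which is then marshaled and written to the response writer.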
wireResponse := &wirepb.Response{
responseData,
wireError,
}
protoBuffer := getBuffer()
defer putBuffer(protoBuffer)
if err := protoBuffer.Marshal(wireResponse); err != nil {
return encoding.ResponseBodyEncodeError(transportRequest, err)
}
_, err := responseWriter.Write(protoBuffer.Bytes())
return err
}
type onewayHandler struct {
handleOneway func(context.Context, proto.Message) error
newRequest func() proto.Message
}
func newOnewayHandler(
handleOneway func(context.Context, proto.Message) error,
newRequest func() proto.Message,
) *onewayHandler {
return &onewayHandler{handleOneway, newRequest}
}
func (o *onewayHandler) HandleOneway(ctx context.Context, transportRequest *transport.Request) error {
if err := encoding.Expect(transportRequest, Encoding); err != nil {
return err
}
ctx, call := apiencoding.NewInboundCall(ctx)
if err := call.ReadFromRequest(transportRequest); err != nil {
return err
}
buf := buffer.Get()
defer buffer.Put(buf)
if _, err := buf.ReadFrom(transportRequest.Body); err != nil {
return err
}
body := buf.Bytes()
request := o.newRequest()
// is this possible?
if body != nil {
if err := proto.Unmarshal(body, request); err != nil {
return encoding.RequestBodyDecodeError(transportRequest, err)
}
}
return o.handleOneway(ctx, request)
}
| 1 | 12,821 | I dont want to do composite keys on purpose to verify at compile time if the message is completely filled out appropriately @sectioneight | yarpc-yarpc-go | go |
@@ -1170,6 +1170,8 @@ func TestServer_SendAction(t *testing.T) {
}}
chain.EXPECT().ChainID().Return(uint32(1)).Times(2)
+ chain.EXPECT().TipHeight().Return(uint64(4)).Times(2)
+ svr.cfg.Genesis.KamchatkaBlockHeight = 10
ap.EXPECT().Add(gomock.Any(), gomock.Any()).Return(nil).Times(2)
for i, test := range sendActionTests { | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"bytes"
"context"
"encoding/hex"
"math"
"math/big"
"strconv"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/ptypes"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-election/test/mock/mock_committee"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/blockdao"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/pkg/version"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_actpool"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/testutil"
)
const lld = "lifeLongDelegates"
var (
testTransfer, _ = action.SignedTransfer(identityset.Address(28).String(),
identityset.PrivateKey(28), 3, big.NewInt(10), []byte{}, testutil.TestGasLimit,
big.NewInt(testutil.TestGasPriceInt64))
testTransferHash, _ = testTransfer.Hash()
testTransferPb = testTransfer.Proto()
testExecution, _ = action.SignedExecution(identityset.Address(29).String(),
identityset.PrivateKey(29), 1, big.NewInt(0), testutil.TestGasLimit,
big.NewInt(testutil.TestGasPriceInt64), []byte{})
testExecutionHash, _ = testExecution.Hash()
testExecutionPb = testExecution.Proto()
testTransfer1, _ = action.SignedTransfer(identityset.Address(30).String(), identityset.PrivateKey(27), 1,
big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
transferHash1, _ = testTransfer1.Hash()
testTransfer2, _ = action.SignedTransfer(identityset.Address(30).String(), identityset.PrivateKey(30), 5,
big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
transferHash2, _ = testTransfer2.Hash()
testExecution1, _ = action.SignedExecution(identityset.Address(31).String(), identityset.PrivateKey(30), 6,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
executionHash1, _ = testExecution1.Hash()
testExecution3, _ = action.SignedExecution(identityset.Address(31).String(), identityset.PrivateKey(28), 2,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
executionHash3, _ = testExecution3.Hash()
blkHash = map[uint64]string{}
implicitLogs = map[hash.Hash256]*block.TransactionLog{}
)
var (
delegates = []genesis.Delegate{
{
OperatorAddrStr: identityset.Address(0).String(),
VotesStr: "10",
},
{
OperatorAddrStr: identityset.Address(1).String(),
VotesStr: "10",
},
{
OperatorAddrStr: identityset.Address(2).String(),
VotesStr: "10",
},
}
)
var (
getAccountTests = []struct {
in string
address string
balance string
nonce uint64
pendingNonce uint64
numActions uint64
}{
{identityset.Address(30).String(),
"io1d4c5lp4ea4754wy439g2t99ue7wryu5r2lslh2",
"3",
8,
9,
9,
},
{
identityset.Address(27).String(),
"io1mflp9m6hcgm2qcghchsdqj3z3eccrnekx9p0ms",
"9999999999999999999999898950",
5,
6,
6,
},
}
getActionsTests = []struct {
start uint64
count uint64
numActions int
}{
{
1,
11,
11,
},
{
11,
5,
4,
},
{
1,
0,
0,
},
}
getActionTests = []struct {
// Arguments
checkPending bool
in string
// Expected Values
nonce uint64
senderPubKey string
blkNumber uint64
}{
{
checkPending: false,
in: hex.EncodeToString(transferHash1[:]),
nonce: 1,
senderPubKey: testTransfer1.SrcPubkey().HexString(),
blkNumber: 1,
},
{
checkPending: false,
in: hex.EncodeToString(transferHash2[:]),
nonce: 5,
senderPubKey: testTransfer2.SrcPubkey().HexString(),
blkNumber: 2,
},
{
checkPending: false,
in: hex.EncodeToString(executionHash1[:]),
nonce: 6,
senderPubKey: testExecution1.SrcPubkey().HexString(),
blkNumber: 2,
},
}
getActionsByAddressTests = []struct {
address string
start uint64
count uint64
numActions int
}{
{
identityset.Address(27).String(),
0,
3,
2,
},
{
identityset.Address(30).String(),
1,
8,
8,
},
{
identityset.Address(33).String(),
2,
1,
0,
},
}
getUnconfirmedActionsByAddressTests = []struct {
address string
start uint64
count uint64
numActions int
}{
{
identityset.Address(27).String(),
0,
4,
4,
},
{
identityset.Address(27).String(),
2,
0,
0,
},
}
getActionsByBlockTests = []struct {
blkHeight uint64
start uint64
count uint64
numActions int
}{
{
2,
0,
7,
7,
},
{
4,
2,
5,
3,
},
{
3,
0,
0,
0,
},
{
1,
0,
math.MaxUint64,
2,
},
}
getBlockMetasTests = []struct {
start, count uint64
numBlks int
gasLimit, gasUsed uint64
}{
{
1,
4,
4,
20000,
10000,
},
{
2,
5,
3,
120000,
60100,
},
{
1,
0,
0,
20000,
10000,
},
// genesis block
{
0,
1,
1,
0,
0,
},
}
getBlockMetaTests = []struct {
blkHeight uint64
numActions int64
transferAmount string
logsBloom string
}{
{
2,
7,
"6",
"",
},
{
4,
5,
"2",
"",
},
}
getChainMetaTests = []struct {
// Arguments
emptyChain bool
tpsWindow int
pollProtocolType string
// Expected values
height uint64
numActions int64
tps int64
tpsFloat float32
epoch iotextypes.EpochData
}{
{
emptyChain: true,
},
{
false,
1,
lld,
4,
15,
1,
5 / 10.0,
iotextypes.EpochData{
Num: 1,
Height: 1,
GravityChainStartHeight: 1,
},
},
{
false,
5,
"governanceChainCommittee",
4,
15,
2,
15 / 13.0,
iotextypes.EpochData{
Num: 1,
Height: 1,
GravityChainStartHeight: 100,
},
},
}
sendActionTests = []struct {
// Arguments
actionPb *iotextypes.Action
// Expected Values
actionHash string
}{
{
testTransferPb,
hex.EncodeToString(testTransferHash[:]),
},
{
testExecutionPb,
hex.EncodeToString(testExecutionHash[:]),
},
}
getReceiptByActionTests = []struct {
in string
status uint64
blkHeight uint64
}{
{
hex.EncodeToString(transferHash1[:]),
uint64(iotextypes.ReceiptStatus_Success),
1,
},
{
hex.EncodeToString(transferHash2[:]),
uint64(iotextypes.ReceiptStatus_Success),
2,
},
{
hex.EncodeToString(executionHash1[:]),
uint64(iotextypes.ReceiptStatus_Success),
2,
},
{
hex.EncodeToString(executionHash3[:]),
uint64(iotextypes.ReceiptStatus_Success),
4,
},
}
readContractTests = []struct {
execHash string
callerAddr string
actionHash string
retValue string
gasConsumed uint64
}{
{
hex.EncodeToString(executionHash1[:]),
"",
"08b0066e10b5607e47159c2cf7ba36e36d0c980f5108dfca0ec20547a7adace4",
"",
10100,
},
}
suggestGasPriceTests = []struct {
defaultGasPrice uint64
suggestedGasPrice uint64
}{
{
1,
1,
},
}
estimateGasForActionTests = []struct {
actionHash string
estimatedGas uint64
}{
{
hex.EncodeToString(transferHash1[:]),
10000,
},
{
hex.EncodeToString(transferHash2[:]),
10000,
},
}
readUnclaimedBalanceTests = []struct {
// Arguments
protocolID string
methodName string
addr string
// Expected values
returnErr bool
balance *big.Int
}{
{
protocolID: "rewarding",
methodName: "UnclaimedBalance",
addr: identityset.Address(0).String(),
returnErr: false,
balance: unit.ConvertIotxToRau(64), // 4 block * 36 IOTX reward by default = 144 IOTX
},
{
protocolID: "rewarding",
methodName: "UnclaimedBalance",
addr: identityset.Address(1).String(),
returnErr: false,
balance: unit.ConvertIotxToRau(0), // 4 block * 36 IOTX reward by default = 144 IOTX
},
{
protocolID: "Wrong ID",
methodName: "UnclaimedBalance",
addr: identityset.Address(27).String(),
returnErr: true,
},
{
protocolID: "rewarding",
methodName: "Wrong Method",
addr: identityset.Address(27).String(),
returnErr: true,
},
}
readCandidatesByEpochTests = []struct {
// Arguments
protocolID string
protocolType string
methodName string
epoch uint64
// Expected Values
numDelegates int
}{
{
protocolID: "poll",
protocolType: lld,
methodName: "CandidatesByEpoch",
epoch: 1,
numDelegates: 3,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "CandidatesByEpoch",
epoch: 1,
numDelegates: 2,
},
}
readBlockProducersByEpochTests = []struct {
// Arguments
protocolID string
protocolType string
methodName string
epoch uint64
numCandidateDelegates uint64
// Expected Values
numBlockProducers int
}{
{
protocolID: "poll",
protocolType: lld,
methodName: "BlockProducersByEpoch",
epoch: 1,
numBlockProducers: 3,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "BlockProducersByEpoch",
epoch: 1,
numCandidateDelegates: 2,
numBlockProducers: 2,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "BlockProducersByEpoch",
epoch: 1,
numCandidateDelegates: 1,
numBlockProducers: 1,
},
}
readActiveBlockProducersByEpochTests = []struct {
// Arguments
protocolID string
protocolType string
methodName string
epoch uint64
numDelegates uint64
// Expected Values
numActiveBlockProducers int
}{
{
protocolID: "poll",
protocolType: lld,
methodName: "ActiveBlockProducersByEpoch",
epoch: 1,
numActiveBlockProducers: 3,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "ActiveBlockProducersByEpoch",
epoch: 1,
numDelegates: 2,
numActiveBlockProducers: 2,
},
{
protocolID: "poll",
protocolType: "governanceChainCommittee",
methodName: "ActiveBlockProducersByEpoch",
epoch: 1,
numDelegates: 1,
numActiveBlockProducers: 1,
},
}
readRollDPoSMetaTests = []struct {
// Arguments
protocolID string
methodName string
height uint64
// Expected Values
result uint64
}{
{
protocolID: "rolldpos",
methodName: "NumCandidateDelegates",
result: 36,
},
{
protocolID: "rolldpos",
methodName: "NumDelegates",
result: 24,
},
}
readEpochCtxTests = []struct {
// Arguments
protocolID string
methodName string
argument uint64
// Expected Values
result uint64
}{
{
protocolID: "rolldpos",
methodName: "NumSubEpochs",
argument: 1,
result: 2,
},
{
protocolID: "rolldpos",
methodName: "NumSubEpochs",
argument: 1816201,
result: 30,
},
{
protocolID: "rolldpos",
methodName: "EpochNumber",
argument: 100,
result: 3,
},
{
protocolID: "rolldpos",
methodName: "EpochHeight",
argument: 5,
result: 193,
},
{
protocolID: "rolldpos",
methodName: "EpochLastHeight",
argument: 1000,
result: 48000,
},
{
protocolID: "rolldpos",
methodName: "SubEpochNumber",
argument: 121,
result: 1,
},
}
getEpochMetaTests = []struct {
// Arguments
EpochNumber uint64
pollProtocolType string
// Expected Values
epochData iotextypes.EpochData
numBlksInEpoch int
numConsenusBlockProducers int
numActiveCensusBlockProducers int
}{
{
1,
lld,
iotextypes.EpochData{
Num: 1,
Height: 1,
GravityChainStartHeight: 1,
},
4,
24,
24,
},
{
1,
"governanceChainCommittee",
iotextypes.EpochData{
Num: 1,
Height: 1,
GravityChainStartHeight: 100,
},
4,
6,
6,
},
}
getRawBlocksTest = []struct {
// Arguments
startHeight uint64
count uint64
withReceipts bool
// Expected Values
numBlks int
numActions int
numReceipts int
}{
{
1,
1,
false,
1,
2,
0,
},
{
1,
2,
true,
2,
9,
9,
},
// genesis block
{
0,
1,
true,
1,
0,
0,
},
}
getLogsTest = []struct {
// Arguments
address []string
topics []*iotexapi.Topics
fromBlock uint64
count uint64
// Expected Values
numLogs int
}{
{
address: []string{},
topics: []*iotexapi.Topics{},
fromBlock: 1,
count: 100,
numLogs: 4,
},
{
address: []string{},
topics: []*iotexapi.Topics{},
fromBlock: 1,
count: 100,
numLogs: 4,
},
}
getImplicitLogByBlockHeightTest = []struct {
height uint64
code codes.Code
}{
{
1, codes.OK,
},
{
2, codes.OK,
},
{
3, codes.OK,
},
{
4, codes.OK,
},
{
5, codes.InvalidArgument,
},
}
)
func TestServer_GetAccount(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, true)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
// deploy a contract
contractCode := "6080604052348015600f57600080fd5b5060de8061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ee82ac5e14602d575b600080fd5b605660048036036020811015604157600080fd5b8101908080359060200190929190505050606c565b6040518082815260200191505060405180910390f35b60008082409050807f2d93f7749862d33969fb261757410b48065a1bc86a56da5c47820bd063e2338260405160405180910390a28091505091905056fea265627a7a723158200a258cd08ea99ee11aa68c78b6d2bf7ea912615a1e64a81b90a2abca2dd59cfa64736f6c634300050c0032"
contract, err := deployContract(svr, identityset.PrivateKey(13), 1, svr.bc.TipHeight(), contractCode)
require.NoError(err)
require.True(len(contract) > 0)
// read contract address
request := &iotexapi.GetAccountRequest{Address: contract}
res, err := svr.GetAccount(context.Background(), request)
require.NoError(err)
accountMeta := res.AccountMeta
require.Equal(contract, accountMeta.Address)
require.Equal("0", accountMeta.Balance)
require.EqualValues(0, accountMeta.Nonce)
require.EqualValues(1, accountMeta.PendingNonce)
require.EqualValues(0, accountMeta.NumActions)
require.True(accountMeta.IsContract)
require.True(len(accountMeta.ContractByteCode) > 0)
require.Contains(contractCode, hex.EncodeToString(accountMeta.ContractByteCode))
// success
for _, test := range getAccountTests {
request := &iotexapi.GetAccountRequest{Address: test.in}
res, err := svr.GetAccount(context.Background(), request)
require.NoError(err)
accountMeta := res.AccountMeta
require.Equal(test.address, accountMeta.Address)
require.Equal(test.balance, accountMeta.Balance)
require.Equal(test.nonce, accountMeta.Nonce)
require.Equal(test.pendingNonce, accountMeta.PendingNonce)
require.Equal(test.numActions, accountMeta.NumActions)
}
// failure
_, err = svr.GetAccount(context.Background(), &iotexapi.GetAccountRequest{})
require.Error(err)
// success: reward pool
res, err = svr.getProtocolAccount(context.Background(), address.RewardingPoolAddr)
require.NoError(err)
require.Equal(address.RewardingPoolAddr, res.AccountMeta.Address)
require.Equal("200000000000000000000101000", res.AccountMeta.Balance)
	// failure: protocol staking isn't registered
_, err = svr.getProtocolAccount(context.Background(), address.StakingBucketPoolAddr)
require.Contains(err.Error(), "protocol staking isn't registered")
}
func TestServer_GetActions(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getActionsTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByIndex{
ByIndex: &iotexapi.GetActionsByIndexRequest{
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
if test.count == 0 {
require.Error(err)
continue
}
require.NoError(err)
require.Equal(test.numActions, len(res.ActionInfo))
}
}
func TestServer_GetAction(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, true)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getActionTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByHash{
ByHash: &iotexapi.GetActionByHashRequest{
ActionHash: test.in,
CheckPending: test.checkPending,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(1, len(res.ActionInfo))
act := res.ActionInfo[0]
require.Equal(test.nonce, act.Action.GetCore().GetNonce())
require.Equal(test.senderPubKey, hex.EncodeToString(act.Action.SenderPubKey))
if !test.checkPending {
blk, err := svr.dao.GetBlockByHeight(test.blkNumber)
require.NoError(err)
timeStamp := blk.ConvertToBlockHeaderPb().GetCore().GetTimestamp()
blkHash := blk.HashBlock()
require.Equal(hex.EncodeToString(blkHash[:]), act.BlkHash)
require.Equal(test.blkNumber, act.BlkHeight)
require.Equal(timeStamp, act.Timestamp)
} else {
require.Equal(hex.EncodeToString(hash.ZeroHash256[:]), act.BlkHash)
require.Nil(act.Timestamp)
require.Equal(uint64(0), act.BlkHeight)
}
}
}
func TestServer_GetActionsByAddress(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getActionsByAddressTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByAddr{
ByAddr: &iotexapi.GetActionsByAddressRequest{
Address: test.address,
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.ActionInfo))
if test.numActions == 0 {
// returns empty response body in case of no result
require.Equal(&iotexapi.GetActionsResponse{}, res)
}
var prevAct *iotexapi.ActionInfo
for _, act := range res.ActionInfo {
if prevAct != nil {
require.True(act.Timestamp.GetSeconds() >= prevAct.Timestamp.GetSeconds())
}
prevAct = act
}
if test.start > 0 && len(res.ActionInfo) > 0 {
request = &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByAddr{
ByAddr: &iotexapi.GetActionsByAddressRequest{
Address: test.address,
Start: 0,
Count: test.start,
},
},
}
prevRes, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.True(prevRes.ActionInfo[len(prevRes.ActionInfo)-1].Timestamp.GetSeconds() <= res.ActionInfo[0].Timestamp.GetSeconds())
}
}
}
func TestServer_GetUnconfirmedActionsByAddress(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, true)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getUnconfirmedActionsByAddressTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_UnconfirmedByAddr{
UnconfirmedByAddr: &iotexapi.GetUnconfirmedActionsByAddressRequest{
Address: test.address,
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
if test.count == 0 {
require.Error(err)
continue
}
require.NoError(err)
require.Equal(test.numActions, len(res.ActionInfo))
require.Equal(test.address, res.ActionInfo[0].Sender)
}
}
func TestServer_GetActionsByBlock(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getActionsByBlockTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByBlk{
ByBlk: &iotexapi.GetActionsByBlockRequest{
BlkHash: blkHash[test.blkHeight],
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
if test.count == 0 {
require.Error(err)
continue
}
require.NoError(err)
require.Equal(test.numActions, len(res.ActionInfo))
for _, v := range res.ActionInfo {
require.Equal(test.blkHeight, v.BlkHeight)
require.Equal(blkHash[test.blkHeight], v.BlkHash)
}
}
}
func TestServer_GetBlockMetas(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
genesis.SetGenesisTimestamp(cfg.Genesis.Timestamp)
block.LoadGenesisHash(&cfg.Genesis)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getBlockMetasTests {
request := &iotexapi.GetBlockMetasRequest{
Lookup: &iotexapi.GetBlockMetasRequest_ByIndex{
ByIndex: &iotexapi.GetBlockMetasByIndexRequest{
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetBlockMetas(context.Background(), request)
if test.count == 0 {
require.Error(err)
continue
}
require.NoError(err)
require.Equal(test.numBlks, len(res.BlkMetas))
meta := res.BlkMetas[0]
require.Equal(test.gasLimit, meta.GasLimit)
require.Equal(test.gasUsed, meta.GasUsed)
if test.start == 0 {
// genesis block
h := block.GenesisHash()
require.Equal(meta.Hash, hex.EncodeToString(h[:]))
}
var prevBlkPb *iotextypes.BlockMeta
for _, blkPb := range res.BlkMetas {
if prevBlkPb != nil {
require.True(blkPb.Height > prevBlkPb.Height)
}
prevBlkPb = blkPb
}
}
}
func TestServer_GetBlockMeta(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getBlockMetaTests {
header, err := svr.bc.BlockHeaderByHeight(test.blkHeight)
require.NoError(err)
blkHash := header.HashBlock()
request := &iotexapi.GetBlockMetasRequest{
Lookup: &iotexapi.GetBlockMetasRequest_ByHash{
ByHash: &iotexapi.GetBlockMetaByHashRequest{
BlkHash: hex.EncodeToString(blkHash[:]),
},
},
}
res, err := svr.GetBlockMetas(context.Background(), request)
require.NoError(err)
require.Equal(1, len(res.BlkMetas))
blkPb := res.BlkMetas[0]
require.Equal(test.blkHeight, blkPb.Height)
require.Equal(test.numActions, blkPb.NumActions)
require.Equal(test.transferAmount, blkPb.TransferAmount)
require.Equal(header.LogsBloomfilter(), nil)
require.Equal(test.logsBloom, blkPb.LogsBloom)
}
}
func TestServer_GetChainMeta(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
var pol poll.Protocol
for _, test := range getChainMetaTests {
cfg := newConfig(t)
if test.pollProtocolType == lld {
pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
} else if test.pollProtocolType == "governanceChainCommittee" {
committee := mock_committee.NewMockCommittee(ctrl)
slasher, _ := poll.NewSlasher(
func(uint64, uint64) (map[string]uint64, error) {
return nil, nil
},
nil,
nil,
nil,
nil,
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.DardanellesNumSubEpochs,
cfg.Genesis.ProductivityThreshold,
cfg.Genesis.ProbationEpochPeriod,
cfg.Genesis.UnproductiveDelegateMaxCacheSize,
cfg.Genesis.ProbationIntensityRate)
pol, _ = poll.NewGovernanceChainCommitteeProtocol(
nil,
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
cfg.Chain.PollInitialCandidatesInterval,
slasher)
committee.EXPECT().HeightByTime(gomock.Any()).Return(test.epoch.GravityChainStartHeight, nil)
}
cfg.API.TpsWindow = test.tpsWindow
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
if pol != nil {
require.NoError(pol.ForceRegister(svr.registry))
}
if test.emptyChain {
mbc := mock_blockchain.NewMockBlockchain(ctrl)
mbc.EXPECT().TipHeight().Return(uint64(0)).Times(1)
svr.bc = mbc
}
res, err := svr.GetChainMeta(context.Background(), &iotexapi.GetChainMetaRequest{})
require.NoError(err)
chainMetaPb := res.ChainMeta
require.Equal(test.height, chainMetaPb.Height)
require.Equal(test.numActions, chainMetaPb.NumActions)
require.Equal(test.tps, chainMetaPb.Tps)
require.Equal(test.epoch.Num, chainMetaPb.Epoch.Num)
require.Equal(test.epoch.Height, chainMetaPb.Epoch.Height)
require.Equal(test.epoch.GravityChainStartHeight, chainMetaPb.Epoch.GravityChainStartHeight)
}
}
func TestServer_SendAction(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
chain := mock_blockchain.NewMockBlockchain(ctrl)
ap := mock_actpool.NewMockActPool(ctrl)
broadcastHandlerCount := 0
svr := Server{bc: chain, ap: ap, broadcastHandler: func(_ context.Context, _ uint32, _ proto.Message) error {
broadcastHandlerCount++
return nil
}}
chain.EXPECT().ChainID().Return(uint32(1)).Times(2)
ap.EXPECT().Add(gomock.Any(), gomock.Any()).Return(nil).Times(2)
for i, test := range sendActionTests {
request := &iotexapi.SendActionRequest{Action: test.actionPb}
res, err := svr.SendAction(context.Background(), request)
require.NoError(err)
require.Equal(i+1, broadcastHandlerCount)
require.Equal(test.actionHash, res.ActionHash)
}
// 3 failure cases
ctx := context.Background()
tests := []struct {
server func() (*Server, string, error)
action *iotextypes.Action
err string
}{
{
func() (*Server, string, error) {
cfg := newConfig(t)
return createServer(cfg, true)
},
&iotextypes.Action{},
"invalid signature length =",
},
{
func() (*Server, string, error) {
cfg := newConfig(t)
return createServer(cfg, true)
},
&iotextypes.Action{
Signature: action.ValidSig,
},
"empty action proto to load",
},
{
func() (*Server, string, error) {
cfg := newConfig(t)
cfg.ActPool.MaxNumActsPerPool = 8
return createServer(cfg, true)
},
testTransferPb,
"insufficient space for action: invalid actpool",
},
}
for _, test := range tests {
request := &iotexapi.SendActionRequest{Action: test.action}
svr, file, err := test.server()
require.NoError(err)
defer func() {
testutil.CleanupPath(t, file)
}()
_, err = svr.SendAction(ctx, request)
require.Contains(err.Error(), test.err)
}
}
func TestServer_StreamLogs(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, true)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
err = svr.StreamLogs(&iotexapi.StreamLogsRequest{}, nil)
require.Error(err)
}
func TestServer_GetReceiptByAction(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getReceiptByActionTests {
request := &iotexapi.GetReceiptByActionRequest{ActionHash: test.in}
res, err := svr.GetReceiptByAction(context.Background(), request)
require.NoError(err)
receiptPb := res.ReceiptInfo.Receipt
require.Equal(test.status, receiptPb.Status)
require.Equal(test.blkHeight, receiptPb.BlkHeight)
require.NotEqual(hash.ZeroHash256, res.ReceiptInfo.BlkHash)
}
}
func TestServer_ReadContract(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range readContractTests {
hash, err := hash.HexStringToHash256(test.execHash)
require.NoError(err)
ai, err := svr.indexer.GetActionIndex(hash[:])
require.NoError(err)
exec, _, err := svr.dao.GetActionByActionHash(hash, ai.BlockHeight())
require.NoError(err)
request := &iotexapi.ReadContractRequest{
Execution: exec.Proto().GetCore().GetExecution(),
CallerAddress: test.callerAddr,
GasLimit: exec.GasLimit(),
GasPrice: big.NewInt(unit.Qev).String(),
}
res, err := svr.ReadContract(context.Background(), request)
require.NoError(err)
require.Equal(test.retValue, res.Data)
require.EqualValues(1, res.Receipt.Status)
require.Equal(test.actionHash, hex.EncodeToString(res.Receipt.ActHash))
require.Equal(test.gasConsumed, res.Receipt.GasConsumed)
}
}
func TestServer_SuggestGasPrice(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
for _, test := range suggestGasPriceTests {
cfg.API.GasStation.DefaultGas = test.defaultGasPrice
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
res, err := svr.SuggestGasPrice(context.Background(), &iotexapi.SuggestGasPriceRequest{})
require.NoError(err)
require.Equal(test.suggestedGasPrice, res.GasPrice)
}
}
func TestServer_EstimateGasForAction(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range estimateGasForActionTests {
hash, err := hash.HexStringToHash256(test.actionHash)
require.NoError(err)
ai, err := svr.indexer.GetActionIndex(hash[:])
require.NoError(err)
act, _, err := svr.dao.GetActionByActionHash(hash, ai.BlockHeight())
require.NoError(err)
request := &iotexapi.EstimateGasForActionRequest{Action: act.Proto()}
res, err := svr.EstimateGasForAction(context.Background(), request)
require.NoError(err)
require.Equal(test.estimatedGas, res.Gas)
}
}
func TestServer_EstimateActionGasConsumption(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
// test for contract deploy
data := "608060405234801561001057600080fd5b50610123600102600281600019169055503373ffffffffffffffffffffffffffffffffffffffff166001026003816000191690555060035460025417600481600019169055506102ae806100656000396000f300608060405260043610610078576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630cc0e1fb1461007d57806328f371aa146100b05780636b1d752b146100df578063d4b8399214610112578063daea85c514610145578063eb6fd96a14610188575b600080fd5b34801561008957600080fd5b506100926101bb565b60405180826000191660001916815260200191505060405180910390f35b3480156100bc57600080fd5b506100c56101c1565b604051808215151515815260200191505060405180910390f35b3480156100eb57600080fd5b506100f46101d7565b60405180826000191660001916815260200191505060405180910390f35b34801561011e57600080fd5b506101276101dd565b60405180826000191660001916815260200191505060405180910390f35b34801561015157600080fd5b50610186600480360381019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506101e3565b005b34801561019457600080fd5b5061019d61027c565b60405180826000191660001916815260200191505060405180910390f35b60035481565b6000600454600019166001546000191614905090565b60025481565b60045481565b3373ffffffffffffffffffffffffffffffffffffffff166001028173ffffffffffffffffffffffffffffffffffffffff16600102176001816000191690555060016000808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff02191690831515021790555050565b600154815600a165627a7a7230582089b5f99476d642b66a213c12cd198207b2e813bb1caf3bd75e22be535ebf5d130029"
byteCodes, err := hex.DecodeString(data)
require.NoError(err)
execution, err := action.NewExecution("", 1, big.NewInt(0), 0, big.NewInt(0), byteCodes)
require.NoError(err)
request := &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_Execution{
Execution: execution.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err := svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(286579), res.Gas)
// test for transfer
tran, err := action.NewTransfer(0, big.NewInt(0), "", []byte("123"), 0, big.NewInt(0))
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_Transfer{
Transfer: tran.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
var (
gaslimit = uint64(1000000)
gasprice = big.NewInt(10)
canAddress = "io1xpq62aw85uqzrccg9y5hnryv8ld2nkpycc3gza"
payload = []byte("123")
amount = big.NewInt(10)
nonce = uint64(0)
duration = uint32(1000)
autoStake = true
index = uint64(10)
)
// staking related
// case I: test for StakeCreate
cs, err := action.NewCreateStake(nonce, canAddress, amount.String(), duration, autoStake, payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_StakeCreate{
StakeCreate: cs.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// case II: test for StakeUnstake
us, err := action.NewUnstake(nonce, index, payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_StakeUnstake{
StakeUnstake: us.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// case III: test for StakeWithdraw
ws, err := action.NewWithdrawStake(nonce, index, payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_StakeWithdraw{
StakeWithdraw: ws.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// Case IV: test for StakeDeposit
ds, err := action.NewDepositToStake(nonce, 1, amount.String(), payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_StakeAddDeposit{
StakeAddDeposit: ds.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// Case V: test for StakeChangeCandidate
cc, err := action.NewChangeCandidate(nonce, canAddress, index, payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_StakeChangeCandidate{
StakeChangeCandidate: cc.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// Case VI: test for StakeRestake
rs, err := action.NewRestake(nonce, index, duration, autoStake, payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_StakeRestake{
StakeRestake: rs.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// Case VII: test for StakeTransfer
ts, err := action.NewTransferStake(nonce, canAddress, index, payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_StakeTransferOwnership{
StakeTransferOwnership: ts.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// Case VIII: test for CandidateRegister
cr, err := action.NewCandidateRegister(nonce, canAddress, canAddress, canAddress, canAddress, amount.String(), duration, autoStake, payload, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_CandidateRegister{
CandidateRegister: cr.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10300), res.Gas)
// Case IX: test for CandidateUpdate
cu, err := action.NewCandidateUpdate(nonce, canAddress, canAddress, canAddress, gaslimit, gasprice)
require.NoError(err)
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_CandidateUpdate{
CandidateUpdate: cu.Proto(),
},
CallerAddress: identityset.Address(0).String(),
}
res, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.NoError(err)
require.Equal(uint64(10000), res.Gas)
// Case X: test for action nil
request = &iotexapi.EstimateActionGasConsumptionRequest{
Action: nil,
CallerAddress: identityset.Address(0).String(),
}
_, err = svr.EstimateActionGasConsumption(context.Background(), request)
require.Error(err)
}
func TestServer_ReadUnclaimedBalance(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
cfg.Consensus.Scheme = config.RollDPoSScheme
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range readUnclaimedBalanceTests {
out, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(test.addr)},
})
if test.returnErr {
require.Error(err)
continue
}
require.NoError(err)
val, ok := big.NewInt(0).SetString(string(out.Data), 10)
require.True(ok)
require.Equal(test.balance, val)
}
}
func TestServer_TotalBalance(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
out, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte("rewarding"),
MethodName: []byte("TotalBalance"),
Arguments: nil,
})
require.NoError(err)
val, ok := big.NewInt(0).SetString(string(out.Data), 10)
require.True(ok)
require.Equal(unit.ConvertIotxToRau(200000000), val)
}
func TestServer_AvailableBalance(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
cfg.Consensus.Scheme = config.RollDPoSScheme
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
out, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte("rewarding"),
MethodName: []byte("AvailableBalance"),
Arguments: nil,
})
require.NoError(err)
val, ok := big.NewInt(0).SetString(string(out.Data), 10)
require.True(ok)
require.Equal(unit.ConvertIotxToRau(199999936), val)
}
func TestServer_ReadCandidatesByEpoch(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
ctrl := gomock.NewController(t)
committee := mock_committee.NewMockCommittee(ctrl)
candidates := []*state.Candidate{
{
Address: "address1",
Votes: big.NewInt(1),
RewardAddress: "rewardAddress",
},
{
Address: "address2",
Votes: big.NewInt(1),
RewardAddress: "rewardAddress",
},
}
for _, test := range readCandidatesByEpochTests {
var pol poll.Protocol
if test.protocolType == lld {
cfg.Genesis.Delegates = delegates
pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
} else {
indexer, err := poll.NewCandidateIndexer(db.NewMemKVStore())
require.NoError(err)
slasher, _ := poll.NewSlasher(
func(uint64, uint64) (map[string]uint64, error) {
return nil, nil
},
func(protocol.StateReader, uint64, bool, bool) ([]*state.Candidate, uint64, error) {
return candidates, 0, nil
},
nil,
nil,
indexer,
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.DardanellesNumSubEpochs,
cfg.Genesis.ProductivityThreshold,
cfg.Genesis.ProbationEpochPeriod,
cfg.Genesis.UnproductiveDelegateMaxCacheSize,
cfg.Genesis.ProbationIntensityRate)
pol, _ = poll.NewGovernanceChainCommitteeProtocol(
indexer,
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
cfg.Chain.PollInitialCandidatesInterval,
slasher)
}
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
require.NoError(pol.ForceRegister(svr.registry))
res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(strconv.FormatUint(test.epoch, 10))},
})
require.NoError(err)
var delegates state.CandidateList
require.NoError(delegates.Deserialize(res.Data))
require.Equal(test.numDelegates, len(delegates))
}
}
func TestServer_ReadBlockProducersByEpoch(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
ctrl := gomock.NewController(t)
committee := mock_committee.NewMockCommittee(ctrl)
candidates := []*state.Candidate{
{
Address: "address1",
Votes: big.NewInt(1),
RewardAddress: "rewardAddress",
},
{
Address: "address2",
Votes: big.NewInt(1),
RewardAddress: "rewardAddress",
},
}
for _, test := range readBlockProducersByEpochTests {
var pol poll.Protocol
if test.protocolType == lld {
cfg.Genesis.Delegates = delegates
pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
} else {
indexer, err := poll.NewCandidateIndexer(db.NewMemKVStore())
require.NoError(err)
slasher, _ := poll.NewSlasher(
func(uint64, uint64) (map[string]uint64, error) {
return nil, nil
},
func(protocol.StateReader, uint64, bool, bool) ([]*state.Candidate, uint64, error) {
return candidates, 0, nil
},
nil,
nil,
indexer,
test.numCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.DardanellesNumSubEpochs,
cfg.Genesis.ProductivityThreshold,
cfg.Genesis.ProbationEpochPeriod,
cfg.Genesis.UnproductiveDelegateMaxCacheSize,
cfg.Genesis.ProbationIntensityRate)
pol, _ = poll.NewGovernanceChainCommitteeProtocol(
indexer,
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
cfg.Chain.PollInitialCandidatesInterval,
slasher)
}
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
require.NoError(pol.ForceRegister(svr.registry))
res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(strconv.FormatUint(test.epoch, 10))},
})
require.NoError(err)
var blockProducers state.CandidateList
require.NoError(blockProducers.Deserialize(res.Data))
require.Equal(test.numBlockProducers, len(blockProducers))
}
}
func TestServer_ReadActiveBlockProducersByEpoch(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
ctrl := gomock.NewController(t)
committee := mock_committee.NewMockCommittee(ctrl)
candidates := []*state.Candidate{
{
Address: "address1",
Votes: big.NewInt(1),
RewardAddress: "rewardAddress",
},
{
Address: "address2",
Votes: big.NewInt(1),
RewardAddress: "rewardAddress",
},
}
for _, test := range readActiveBlockProducersByEpochTests {
var pol poll.Protocol
if test.protocolType == lld {
cfg.Genesis.Delegates = delegates
pol = poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
} else {
indexer, err := poll.NewCandidateIndexer(db.NewMemKVStore())
require.NoError(err)
slasher, _ := poll.NewSlasher(
func(uint64, uint64) (map[string]uint64, error) {
return nil, nil
},
func(protocol.StateReader, uint64, bool, bool) ([]*state.Candidate, uint64, error) {
return candidates, 0, nil
},
nil,
nil,
indexer,
cfg.Genesis.NumCandidateDelegates,
test.numDelegates,
cfg.Genesis.DardanellesNumSubEpochs,
cfg.Genesis.ProductivityThreshold,
cfg.Genesis.ProbationEpochPeriod,
cfg.Genesis.UnproductiveDelegateMaxCacheSize,
cfg.Genesis.ProbationIntensityRate)
pol, _ = poll.NewGovernanceChainCommitteeProtocol(
indexer,
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
cfg.Chain.PollInitialCandidatesInterval,
slasher)
}
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
require.NoError(pol.ForceRegister(svr.registry))
res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(strconv.FormatUint(test.epoch, 10))},
})
require.NoError(err)
var activeBlockProducers state.CandidateList
require.NoError(activeBlockProducers.Deserialize(res.Data))
require.Equal(test.numActiveBlockProducers, len(activeBlockProducers))
}
}
func TestServer_ReadRollDPoSMeta(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
for _, test := range readRollDPoSMetaTests {
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
})
require.NoError(err)
result, err := strconv.ParseUint(string(res.Data), 10, 64)
require.NoError(err)
require.Equal(test.result, result)
}
}
func TestServer_ReadEpochCtx(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
for _, test := range readEpochCtxTests {
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
res, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(strconv.FormatUint(test.argument, 10))},
})
require.NoError(err)
result, err := strconv.ParseUint(string(res.Data), 10, 64)
require.NoError(err)
require.Equal(test.result, result)
}
}
func TestServer_GetEpochMeta(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
ctrl := gomock.NewController(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getEpochMetaTests {
if test.pollProtocolType == lld {
pol := poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
require.NoError(pol.ForceRegister(svr.registry))
} else if test.pollProtocolType == "governanceChainCommittee" {
committee := mock_committee.NewMockCommittee(ctrl)
mbc := mock_blockchain.NewMockBlockchain(ctrl)
indexer, err := poll.NewCandidateIndexer(db.NewMemKVStore())
require.NoError(err)
slasher, _ := poll.NewSlasher(
func(uint64, uint64) (map[string]uint64, error) {
return nil, nil
},
func(protocol.StateReader, uint64, bool, bool) ([]*state.Candidate, uint64, error) {
return []*state.Candidate{
{
Address: identityset.Address(1).String(),
Votes: big.NewInt(6),
RewardAddress: "rewardAddress",
},
{
Address: identityset.Address(2).String(),
Votes: big.NewInt(5),
RewardAddress: "rewardAddress",
},
{
Address: identityset.Address(3).String(),
Votes: big.NewInt(4),
RewardAddress: "rewardAddress",
},
{
Address: identityset.Address(4).String(),
Votes: big.NewInt(3),
RewardAddress: "rewardAddress",
},
{
Address: identityset.Address(5).String(),
Votes: big.NewInt(2),
RewardAddress: "rewardAddress",
},
{
Address: identityset.Address(6).String(),
Votes: big.NewInt(1),
RewardAddress: "rewardAddress",
},
}, 0, nil
},
nil,
nil,
indexer,
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.DardanellesNumSubEpochs,
cfg.Genesis.ProductivityThreshold,
cfg.Genesis.ProbationEpochPeriod,
cfg.Genesis.UnproductiveDelegateMaxCacheSize,
cfg.Genesis.ProbationIntensityRate)
pol, _ := poll.NewGovernanceChainCommitteeProtocol(
indexer,
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
cfg.Chain.PollInitialCandidatesInterval,
slasher)
require.NoError(pol.ForceRegister(svr.registry))
committee.EXPECT().HeightByTime(gomock.Any()).Return(test.epochData.GravityChainStartHeight, nil)
mbc.EXPECT().TipHeight().Return(uint64(4)).Times(4)
mbc.EXPECT().BlockHeaderByHeight(gomock.Any()).DoAndReturn(func(height uint64) (*block.Header, error) {
if height > 0 && height <= 4 {
pk := identityset.PrivateKey(int(height))
blk, err := block.NewBuilder(
block.NewRunnableActionsBuilder().Build(),
).
SetHeight(height).
SetTimestamp(time.Time{}).
SignAndBuild(pk)
if err != nil {
return &block.Header{}, err
}
return &blk.Header, nil
}
return &block.Header{}, errors.Errorf("invalid block height %d", height)
}).AnyTimes()
svr.bc = mbc
}
res, err := svr.GetEpochMeta(context.Background(), &iotexapi.GetEpochMetaRequest{EpochNumber: test.EpochNumber})
require.NoError(err)
require.Equal(test.epochData.Num, res.EpochData.Num)
require.Equal(test.epochData.Height, res.EpochData.Height)
require.Equal(test.epochData.GravityChainStartHeight, res.EpochData.GravityChainStartHeight)
require.Equal(test.numBlksInEpoch, int(res.TotalBlocks))
require.Equal(test.numConsenusBlockProducers, len(res.BlockProducersInfo))
var numActiveBlockProducers int
var prevInfo *iotexapi.BlockProducerInfo
for _, bp := range res.BlockProducersInfo {
if bp.Active {
numActiveBlockProducers++
}
if prevInfo != nil {
prevVotes, _ := strconv.Atoi(prevInfo.Votes)
currVotes, _ := strconv.Atoi(bp.Votes)
require.True(prevVotes >= currVotes)
}
prevInfo = bp
}
require.Equal(test.numActiveCensusBlockProducers, numActiveBlockProducers)
}
}
func TestServer_GetRawBlocks(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getRawBlocksTest {
request := &iotexapi.GetRawBlocksRequest{
StartHeight: test.startHeight,
Count: test.count,
WithReceipts: test.withReceipts,
}
res, err := svr.GetRawBlocks(context.Background(), request)
require.NoError(err)
blkInfos := res.Blocks
require.Equal(test.numBlks, len(blkInfos))
if test.startHeight == 0 {
// verify genesis block
header := blkInfos[0].Block.Header.Core
require.EqualValues(version.ProtocolVersion, header.Version)
require.Zero(header.Height)
ts, err := ptypes.TimestampProto(time.Unix(genesis.Timestamp(), 0))
require.NoError(err)
require.Equal(ts, header.Timestamp)
require.Equal(0, bytes.Compare(hash.ZeroHash256[:], header.PrevBlockHash))
require.Equal(0, bytes.Compare(hash.ZeroHash256[:], header.TxRoot))
require.Equal(0, bytes.Compare(hash.ZeroHash256[:], header.DeltaStateDigest))
require.Equal(0, bytes.Compare(hash.ZeroHash256[:], header.ReceiptRoot))
}
var numActions, numReceipts int
for _, blkInfo := range blkInfos {
numActions += len(blkInfo.Block.Body.Actions)
numReceipts += len(blkInfo.Receipts)
}
require.Equal(test.numActions, numActions)
require.Equal(test.numReceipts, numReceipts)
}
}
func TestServer_GetLogs(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
for _, test := range getLogsTest {
request := &iotexapi.GetLogsRequest{
Filter: &iotexapi.LogsFilter{
Address: test.address,
Topics: test.topics,
},
Lookup: &iotexapi.GetLogsRequest_ByRange{
ByRange: &iotexapi.GetLogsByRange{
FromBlock: test.fromBlock,
ToBlock: test.fromBlock + test.count - 1,
},
},
}
res, err := svr.GetLogs(context.Background(), request)
require.NoError(err)
logs := res.Logs
require.Equal(test.numLogs, len(logs))
}
}
func TestServer_GetTransactionLogByActionHash(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
request := &iotexapi.GetTransactionLogByActionHashRequest{
ActionHash: hex.EncodeToString(hash.ZeroHash256[:]),
}
_, err = svr.GetTransactionLogByActionHash(context.Background(), request)
require.Error(err)
sta, ok := status.FromError(err)
require.Equal(true, ok)
require.Equal(codes.NotFound, sta.Code())
for h, log := range implicitLogs {
request.ActionHash = hex.EncodeToString(h[:])
res, err := svr.GetTransactionLogByActionHash(context.Background(), request)
require.NoError(err)
require.Equal(log.Proto(), res.TransactionLog)
}
// check implicit transfer receiver balance
state, err := accountutil.LoadAccount(svr.sf, hash.BytesToHash160(identityset.Address(31).Bytes()))
require.NoError(err)
require.Equal(big.NewInt(5), state.Balance)
}
func TestServer_GetEvmTransfersByBlockHeight(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
request := &iotexapi.GetTransactionLogByBlockHeightRequest{}
for _, test := range getImplicitLogByBlockHeightTest {
request.BlockHeight = test.height
res, err := svr.GetTransactionLogByBlockHeight(context.Background(), request)
if test.code != codes.OK {
require.Error(err)
sta, ok := status.FromError(err)
require.Equal(true, ok)
require.Equal(test.code, sta.Code())
} else {
require.NotNil(res)
// verify log
for _, log := range res.TransactionLogs.Logs {
l, ok := implicitLogs[hash.BytesToHash256(log.ActionHash)]
require.True(ok)
require.Equal(l.Proto(), log)
}
require.Equal(test.height, res.BlockIdentifier.Height)
require.Equal(blkHash[test.height], res.BlockIdentifier.Hash)
}
}
}
func addTestingBlocks(bc blockchain.Blockchain, ap actpool.ActPool) error {
ctx := context.Background()
addr0 := identityset.Address(27).String()
addr1 := identityset.Address(28).String()
addr2 := identityset.Address(29).String()
addr3 := identityset.Address(30).String()
priKey3 := identityset.PrivateKey(30)
addr4 := identityset.Address(31).String()
// Add block 1
// Producer transfer--> C
implicitLogs[transferHash1] = block.NewTransactionLog(transferHash1,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_NATIVE_TRANSFER, "10", addr0, addr3)},
)
blk1Time := testutil.TimestampNow()
if err := ap.Add(ctx, testTransfer1); err != nil {
return err
}
blk, err := bc.MintNewBlock(blk1Time)
if err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
ap.Reset()
h := blk.HashBlock()
blkHash[1] = hex.EncodeToString(h[:])
// Add block 2
// Charlie transfer--> A, B, D, P
// Charlie transfer--> C
// Charlie exec--> D
recipients := []string{addr1, addr2, addr4, addr0}
for i, recipient := range recipients {
selp, err := action.SignedTransfer(recipient, priKey3, uint64(i+1), big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
if err := ap.Add(ctx, selp); err != nil {
return err
}
selpHash, err := selp.Hash()
if err != nil {
return err
}
implicitLogs[selpHash] = block.NewTransactionLog(selpHash,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_NATIVE_TRANSFER, "1", addr3, recipient)},
)
}
implicitLogs[transferHash2] = block.NewTransactionLog(transferHash2,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_NATIVE_TRANSFER, "2", addr3, addr3)},
)
if err := ap.Add(ctx, testTransfer2); err != nil {
return err
}
implicitLogs[executionHash1] = block.NewTransactionLog(
executionHash1,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_IN_CONTRACT_TRANSFER, "1", addr3, addr4)},
)
if err := ap.Add(ctx, testExecution1); err != nil {
return err
}
if blk, err = bc.MintNewBlock(blk1Time.Add(time.Second)); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
ap.Reset()
h = blk.HashBlock()
blkHash[2] = hex.EncodeToString(h[:])
// Add block 3
// Empty actions
if blk, err = bc.MintNewBlock(blk1Time.Add(time.Second * 2)); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
ap.Reset()
h = blk.HashBlock()
blkHash[3] = hex.EncodeToString(h[:])
// Add block 4
// Charlie transfer--> C
// Alfa transfer--> A
// Charlie exec--> D
// Alfa exec--> D
tsf1, err := action.SignedTransfer(addr3, priKey3, uint64(7), big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf1Hash, err := tsf1.Hash()
if err != nil {
return err
}
implicitLogs[tsf1Hash] = block.NewTransactionLog(tsf1Hash,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_NATIVE_TRANSFER, "1", addr3, addr3)},
)
if err := ap.Add(ctx, tsf1); err != nil {
return err
}
tsf2, err := action.SignedTransfer(addr1, identityset.PrivateKey(28), uint64(1), big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2Hash, err := tsf2.Hash()
if err != nil {
return err
}
implicitLogs[tsf2Hash] = block.NewTransactionLog(tsf2Hash,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_NATIVE_TRANSFER, "1", addr1, addr1)},
)
if err := ap.Add(ctx, tsf2); err != nil {
return err
}
execution1, err := action.SignedExecution(addr4, priKey3, 8,
big.NewInt(2), testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64), []byte{1})
if err != nil {
return err
}
execution1Hash, err := execution1.Hash()
if err != nil {
return err
}
implicitLogs[execution1Hash] = block.NewTransactionLog(
execution1Hash,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_IN_CONTRACT_TRANSFER, "2", addr3, addr4)},
)
if err := ap.Add(ctx, execution1); err != nil {
return err
}
implicitLogs[executionHash3] = block.NewTransactionLog(
executionHash3,
[]*block.TokenTxRecord{block.NewTokenTxRecord(iotextypes.TransactionLogType_IN_CONTRACT_TRANSFER, "1", addr1, addr4)},
)
if err := ap.Add(ctx, testExecution3); err != nil {
return err
}
if blk, err = bc.MintNewBlock(blk1Time.Add(time.Second * 3)); err != nil {
return err
}
h = blk.HashBlock()
blkHash[4] = hex.EncodeToString(h[:])
return bc.CommitBlock(blk)
}
func deployContract(svr *Server, key crypto.PrivateKey, nonce, height uint64, code string) (string, error) {
data, _ := hex.DecodeString(code)
ex1, err := action.SignedExecution(action.EmptyAddress, key, nonce, big.NewInt(0), 500000, big.NewInt(testutil.TestGasPriceInt64), data)
if err != nil {
return "", err
}
if err := svr.ap.Add(context.Background(), ex1); err != nil {
return "", err
}
blk, err := svr.bc.MintNewBlock(testutil.TimestampNow())
if err != nil {
return "", err
}
if err := svr.bc.CommitBlock(blk); err != nil {
return "", err
}
svr.ap.Reset()
// get deployed contract address
var contract string
if svr.dao != nil {
ex1Hash, err := ex1.Hash()
if err != nil {
return "", err
}
r, err := svr.dao.GetReceiptByActionHash(ex1Hash, height+1)
if err != nil {
return "", err
}
contract = r.ContractAddress
}
return contract, nil
}
func addActsToActPool(ctx context.Context, ap actpool.ActPool) error {
// Producer transfer--> A
tsf1, err := action.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 2, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
// Producer transfer--> P
tsf2, err := action.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 3, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
// Producer transfer--> B
tsf3, err := action.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 4, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
// Producer exec--> D
execution1, err := action.SignedExecution(identityset.Address(31).String(), identityset.PrivateKey(27), 5,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
if err != nil {
return err
}
if err := ap.Add(ctx, tsf1); err != nil {
return err
}
if err := ap.Add(ctx, tsf2); err != nil {
return err
}
if err := ap.Add(ctx, tsf3); err != nil {
return err
}
return ap.Add(ctx, execution1)
}
func setupChain(cfg config.Config) (blockchain.Blockchain, blockdao.BlockDAO, blockindex.Indexer, blockindex.BloomFilterIndexer, factory.Factory, actpool.ActPool, *protocol.Registry, string, error) {
cfg.Chain.ProducerPrivKey = hex.EncodeToString(identityset.PrivateKey(0).Bytes())
registry := protocol.NewRegistry()
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption(), factory.RegistryOption(registry))
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", err
}
ap, err := setupActPool(sf, cfg.ActPool)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", err
}
cfg.Genesis.InitBalanceMap[identityset.Address(27).String()] = unit.ConvertIotxToRau(10000000000).String()
cfg.Genesis.InitBalanceMap[identityset.Address(28).String()] = unit.ConvertIotxToRau(10000000000).String()
// create indexer
indexer, err := blockindex.NewIndexer(db.NewMemKVStore(), cfg.Genesis.Hash())
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", errors.New("failed to create indexer")
}
testPath, _ := testutil.PathOfTempFile("bloomfilter")
cfg.DB.DbPath = testPath
bfIndexer, err := blockindex.NewBloomfilterIndexer(db.NewBoltDB(cfg.DB), cfg.Indexer)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", errors.New("failed to create bloomfilter indexer")
}
// create BlockDAO
dao := blockdao.NewBlockDAOInMemForTest([]blockdao.BlockIndexer{sf, indexer, bfIndexer})
if dao == nil {
return nil, nil, nil, nil, nil, nil, nil, "", errors.New("failed to create blockdao")
}
// create chain
bc := blockchain.NewBlockchain(
cfg,
dao,
factory.NewMinter(sf, ap),
blockchain.BlockValidatorOption(block.NewValidator(
sf,
protocol.NewGenericValidator(sf, accountutil.AccountState),
)),
)
if bc == nil {
return nil, nil, nil, nil, nil, nil, nil, "", errors.New("failed to create blockchain")
}
defer func() {
delete(cfg.Plugins, config.GatewayPlugin)
}()
acc := account.NewProtocol(rewarding.DepositGas)
evm := execution.NewProtocol(dao.GetBlockHash, rewarding.DepositGas)
p := poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)
rolldposProtocol := rolldpos.NewProtocol(
genesis.Default.NumCandidateDelegates,
genesis.Default.NumDelegates,
genesis.Default.NumSubEpochs,
rolldpos.EnableDardanellesSubEpoch(cfg.Genesis.DardanellesBlockHeight, cfg.Genesis.DardanellesNumSubEpochs),
)
r := rewarding.NewProtocol(0, 0)
if err := rolldposProtocol.Register(registry); err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", err
}
if err := acc.Register(registry); err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", err
}
if err := evm.Register(registry); err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", err
}
if err := r.Register(registry); err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", err
}
if err := p.Register(registry); err != nil {
return nil, nil, nil, nil, nil, nil, nil, "", err
}
return bc, dao, indexer, bfIndexer, sf, ap, registry, testPath, nil
}
func setupActPool(sf factory.Factory, cfg config.ActPool) (actpool.ActPool, error) {
ap, err := actpool.NewActPool(sf, cfg, actpool.EnableExperimentalActions())
if err != nil {
return nil, err
}
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
return ap, nil
}
func newConfig(t *testing.T) config.Config {
r := require.New(t)
cfg := config.Default
testTriePath, err := testutil.PathOfTempFile("trie")
r.NoError(err)
testDBPath, err := testutil.PathOfTempFile("db")
r.NoError(err)
testIndexPath, err := testutil.PathOfTempFile("index")
r.NoError(err)
testSystemLogPath, err := testutil.PathOfTempFile("systemlog")
r.NoError(err)
cfg.Plugins[config.GatewayPlugin] = true
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
cfg.System.SystemLogDBPath = testSystemLogPath
cfg.Chain.EnableAsyncIndexWrite = false
cfg.Genesis.EnableGravityChainVoting = true
cfg.ActPool.MinGasPriceStr = "0"
cfg.API.RangeQueryLimit = 100
return cfg
}
func createServer(cfg config.Config, needActPool bool) (*Server, string, error) {
// TODO (zhi): revise
bc, dao, indexer, bfIndexer, sf, ap, registry, bfIndexFile, err := setupChain(cfg)
if err != nil {
return nil, "", err
}
ctx := context.Background()
// Start blockchain
if err := bc.Start(ctx); err != nil {
return nil, "", err
}
// Add testing blocks
if err := addTestingBlocks(bc, ap); err != nil {
return nil, "", err
}
if needActPool {
// Add actions to actpool
ctx = protocol.WithRegistry(ctx, registry)
if err := addActsToActPool(ctx, ap); err != nil {
return nil, "", err
}
}
svr := &Server{
bc: bc,
sf: sf,
dao: dao,
indexer: indexer,
bfIndexer: bfIndexer,
ap: ap,
cfg: cfg,
gs: gasstation.NewGasStation(bc, sf.SimulateExecution, dao, cfg.API),
registry: registry,
hasActionIndex: true,
}
return svr, bfIndexFile, nil
}
func TestServer_GetActPoolActions(t *testing.T) {
require := require.New(t)
cfg := newConfig(t)
ctx := context.Background()
svr, bfIndexFile, err := createServer(cfg, false)
require.NoError(err)
defer func() {
testutil.CleanupPath(t, bfIndexFile)
}()
res, err := svr.GetActPoolActions(ctx, &iotexapi.GetActPoolActionsRequest{})
require.NoError(err)
require.Equal(len(svr.ap.PendingActionMap()[identityset.Address(27).String()]), len(res.Actions))
tsf1, err := action.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 2,
big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
tsf2, err := action.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 3,
big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
tsf3, err := action.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 4,
big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
execution1, err := action.SignedExecution(identityset.Address(31).String(), identityset.PrivateKey(27), 5,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
require.NoError(err)
err = svr.ap.Add(ctx, tsf1)
require.NoError(err)
err = svr.ap.Add(ctx, tsf2)
require.NoError(err)
err = svr.ap.Add(ctx, execution1)
require.NoError(err)
var requests []string
h1, err := tsf1.Hash()
require.NoError(err)
requests = append(requests, hex.EncodeToString(h1[:]))
res, err = svr.GetActPoolActions(context.Background(), &iotexapi.GetActPoolActionsRequest{})
require.NoError(err)
require.Equal(len(svr.ap.PendingActionMap()[identityset.Address(27).String()]), len(res.Actions))
res, err = svr.GetActPoolActions(context.Background(), &iotexapi.GetActPoolActionsRequest{ActionHashes: requests})
require.NoError(err)
require.Equal(1, len(res.Actions))
h2, err := tsf2.Hash()
require.NoError(err)
requests = append(requests, hex.EncodeToString(h2[:]))
res, err = svr.GetActPoolActions(context.Background(), &iotexapi.GetActPoolActionsRequest{ActionHashes: requests})
require.NoError(err)
require.Equal(2, len(res.Actions))
h3, err := tsf3.Hash()
require.NoError(err)
_, err = svr.GetActPoolActions(context.Background(), &iotexapi.GetActPoolActionsRequest{ActionHashes: []string{hex.EncodeToString(h3[:])}})
require.Error(err)
}
| 1 | 23,674 | don't need this line, since it is not tested in api_test? | iotexproject-iotex-core | go |
@@ -164,7 +164,7 @@ func CheckForCStorPoolCRD(clientset clientset.Interface) {
// CheckForCStorVolumeReplicaCRD is a blocking call for checking the status of the CStorVolumeReplica CRD.
func CheckForCStorVolumeReplicaCRD(clientset clientset.Interface) {
for {
- _, err := clientset.OpenebsV1alpha1().CStorVolumeReplicas().List(metav1.ListOptions{})
+ _, err := clientset.OpenebsV1alpha1().CStorVolumeReplicas("").List(metav1.ListOptions{})
if err != nil {
glog.Errorf("CStorVolumeReplica CRD not found. Retrying after %v", CRDRetryInterval)
time.Sleep(CRDRetryInterval) | 1 | /*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"reflect"
"time"
"github.com/golang/glog"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/pool"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
clientset "github.com/openebs/maya/pkg/client/clientset/versioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//EventReason is used as part of the Event reason when a resource goes through different phases
type EventReason string
const (
// SuccessSynced is used as part of the Event 'reason' when a resource is synced
SuccessSynced EventReason = "Synced"
// MessageCreateSynced holds message for corresponding create request sync.
MessageCreateSynced EventReason = "Received Resource create event"
// MessageModifySynced holds message for corresponding modify request sync.
MessageModifySynced EventReason = "Received Resource modify event"
// MessageDestroySynced holds message for corresponding destroy request sync.
MessageDestroySynced EventReason = "Received Resource destroy event"
// SuccessCreated holds status for corresponding created resource.
SuccessCreated EventReason = "Created"
// MessageResourceCreated holds message for corresponding created resource.
MessageResourceCreated EventReason = "Resource created successfully"
// FailureCreate holds status for corresponding failed create resource.
FailureCreate EventReason = "FailCreate"
// MessageResourceFailCreate holds message for corresponding failed create resource.
MessageResourceFailCreate EventReason = "Resource creation failed"
// SuccessImported holds status for corresponding imported resource.
SuccessImported EventReason = "Imported"
// MessageResourceImported holds message for corresponding imported resource.
MessageResourceImported EventReason = "Resource imported successfully"
// FailureImport holds status for corresponding failed import resource.
FailureImport EventReason = "FailImport"
// MessageResourceFailImport holds message for corresponding failed import resource.
MessageResourceFailImport EventReason = "Resource import failed"
// FailureDestroy holds status for corresponding failed destroy resource.
FailureDestroy EventReason = "FailDestroy"
// MessageResourceFailDestroy holds message for corresponding failed destroy resource.
MessageResourceFailDestroy EventReason = "Resource Destroy failed"
// FailureValidate holds status for corresponding failed validate resource.
FailureValidate EventReason = "FailValidate"
// MessageResourceFailValidate holds message for corresponding failed validate resource.
MessageResourceFailValidate EventReason = "Resource validation failed"
// AlreadyPresent holds status for corresponding already present resource.
AlreadyPresent EventReason = "AlreadyPresent"
// MessageResourceAlreadyPresent holds message for corresponding already present resource.
MessageResourceAlreadyPresent EventReason = "Resource already present"
)
// Periodic interval duration.
const (
// CRDRetryInterval is used if CRD is not present.
CRDRetryInterval = 10 * time.Second
// PoolNameHandlerInterval is used when expected pool is not present.
PoolNameHandlerInterval = 5 * time.Second
// SharedInformerInterval is used to sync watcher controller.
SharedInformerInterval = 5 * time.Minute
// ResourceWorkerInterval is used for resource sync.
ResourceWorkerInterval = time.Second
// InitialZreplRetryInterval is used while initially starting controller.
InitialZreplRetryInterval = 3 * time.Second
// ContinuousZreplRetryInterval is used while controller has started running.
ContinuousZreplRetryInterval = 1 * time.Second
)
const (
// NoOfPoolWaitAttempts is number of attempts to wait in case of pod/container restarts.
NoOfPoolWaitAttempts = 30
// PoolWaitInterval is the interval to wait for pod/container restarts.
PoolWaitInterval = 2 * time.Second
)
// InitialImportedPoolVol is to store pool-volume names while pod restart.
var InitialImportedPoolVol []string
// QueueLoad is for storing the key and type of operation before entering workqueue
type QueueLoad struct {
Key string
Operation QueueOperation
}
// Environment is for environment variables passed for cstor-pool-mgmt.
type Environment string
const (
// OpenEBSIOCStorID is the environment variable specified in pod.
OpenEBSIOCStorID Environment = "OPENEBS_IO_CSTOR_ID"
)
//QueueOperation represents the type of operation on resource
type QueueOperation string
//Different type of operations on the controller
const (
QOpAdd QueueOperation = "add"
QOpDestroy QueueOperation = "destroy"
QOpModify QueueOperation = "modify"
)
// IsImported is a channel to block cvr until certain pool import operations are over.
var IsImported chan bool
// PoolNameHandler tries to get the pool name and blocks for a
// particular number of attempts.
func PoolNameHandler(cVR *apis.CStorVolumeReplica, cnt int) bool {
for i := 0; ; i++ {
poolname, _ := pool.GetPoolName()
if reflect.DeepEqual(poolname, []string{}) ||
!CheckIfPresent(poolname, string(pool.PoolPrefix)+cVR.Labels["cstorpool.openebs.io/uid"]) {
glog.Warningf("Attempt %v: No pool found", i+1)
time.Sleep(PoolNameHandlerInterval)
if i > cnt {
return false
}
} else if CheckIfPresent(poolname, string(pool.PoolPrefix)+cVR.Labels["cstorpool.openebs.io/uid"]) {
return true
}
}
}
// CheckForCStorPoolCRD is a blocking call for checking the status of the CStorPool CRD.
func CheckForCStorPoolCRD(clientset clientset.Interface) {
for {
_, err := clientset.OpenebsV1alpha1().CStorPools().List(metav1.ListOptions{})
if err != nil {
glog.Errorf("CStorPool CRD not found. Retrying after %v", CRDRetryInterval)
time.Sleep(CRDRetryInterval)
continue
}
glog.Info("CStorPool CRD found")
break
}
}
// CheckForCStorVolumeReplicaCRD is a blocking call for checking the status of the CStorVolumeReplica CRD.
func CheckForCStorVolumeReplicaCRD(clientset clientset.Interface) {
for {
_, err := clientset.OpenebsV1alpha1().CStorVolumeReplicas().List(metav1.ListOptions{})
if err != nil {
glog.Errorf("CStorVolumeReplica CRD not found. Retrying after %v", CRDRetryInterval)
time.Sleep(CRDRetryInterval)
continue
}
glog.Info("CStorVolumeReplica CRD found")
break
}
}
// CheckForInitialImportedPoolVol checks whether a volume is already
// imported with the pool.
func CheckForInitialImportedPoolVol(InitialImportedPoolVol []string, fullvolname string) bool {
for i, initialVol := range InitialImportedPoolVol {
if initialVol == fullvolname {
if i < len(InitialImportedPoolVol) {
InitialImportedPoolVol = append(InitialImportedPoolVol[:i], InitialImportedPoolVol[i+1:]...)
}
return true
}
}
return false
}
// CheckIfPresent checks whether the search string is present in an array of strings.
func CheckIfPresent(arrStr []string, searchStr string) bool {
for _, str := range arrStr {
if str == searchStr {
return true
}
}
return false
}
// CheckForCStorPool tries to get the pool name and blocks forever because
// a volumereplica can be created only if the pool is present.
func CheckForCStorPool() {
for {
poolname, _ := pool.GetPoolName()
if reflect.DeepEqual(poolname, []string{}) {
glog.Warningf("CStorPool not found. Retrying after %v", PoolNameHandlerInterval)
time.Sleep(PoolNameHandlerInterval)
continue
}
glog.Info("CStorPool found")
break
}
}
| 1 | 8,690 | Does this mean the list operation is done for all the namespaces? How to list the volume replicas per namespace? | openebs-maya | go |
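A note on the review question above: generated Kubernetes clientsets such as the one used in this patch follow the usual client-go convention, where an empty namespace string (metav1.NamespaceAll) lists the resource across every namespace, while a concrete value restricts the list to that namespace. A minimal sketch under that assumption follows; the helper name and the example namespace are illustrative and not part of the repository.

package example

import (
	"github.com/golang/glog"
	clientset "github.com/openebs/maya/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listCVRs lists CStorVolumeReplicas either cluster-wide (namespace == "",
// i.e. metav1.NamespaceAll, as in the patched CRD check above) or within a
// single namespace such as "openebs".
func listCVRs(cs clientset.Interface, namespace string) error {
	cvrs, err := cs.OpenebsV1alpha1().CStorVolumeReplicas(namespace).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	glog.Infof("found %d CStorVolumeReplica(s) in namespace %q", len(cvrs.Items), namespace)
	return nil
}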
@@ -136,10 +136,14 @@ def batched_nms(bboxes, scores, inds, nms_cfg):
Returns:
        tuple: kept bboxes and indices.
"""
- max_coordinate = bboxes.max()
- offsets = inds.to(bboxes) * (max_coordinate + 1)
- bboxes_for_nms = bboxes + offsets[:, None]
nms_cfg_ = nms_cfg.copy()
+ class_agnostic = nms_cfg_.pop('class_agnostic', False)
+ if not class_agnostic:
+ max_coordinate = bboxes.max()
+ offsets = inds.to(bboxes) * (max_coordinate + 1)
+ bboxes_for_nms = bboxes + offsets[:, None]
+ else:
+ bboxes_for_nms = bboxes
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = eval(nms_type)
dets, keep = nms_op( | 1 | import numpy as np
import torch
from . import nms_ext
def nms(dets, iou_thr, device_id=None):
"""Dispatch to either CPU or GPU NMS implementations.
The input can be either a torch tensor or numpy array. GPU NMS will be used
if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
will be used. The returned type will always be the same as inputs.
Arguments:
dets (torch.Tensor or np.ndarray): bboxes with scores.
iou_thr (float): IoU threshold for NMS.
device_id (int, optional): when `dets` is a numpy array, if `device_id`
is None, then cpu nms is used, otherwise gpu_nms will be used.
Returns:
        tuple: kept bboxes and indices, which is always the same data type as
the input.
Example:
>>> dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
>>> [49.3, 32.9, 51.0, 35.3, 0.9],
>>> [49.2, 31.8, 51.0, 35.4, 0.5],
>>> [35.1, 11.5, 39.1, 15.7, 0.5],
>>> [35.6, 11.8, 39.3, 14.2, 0.5],
>>> [35.3, 11.5, 39.9, 14.5, 0.4],
>>> [35.2, 11.7, 39.7, 15.7, 0.3]], dtype=np.float32)
>>> iou_thr = 0.6
>>> suppressed, inds = nms(dets, iou_thr)
>>> assert len(inds) == len(suppressed) == 3
"""
# convert dets (tensor or numpy array) to tensor
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else f'cuda:{device_id}'
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError('dets must be either a Tensor or numpy array, '
f'but got {type(dets)}')
# execute cpu or cuda nms
if dets_th.shape[0] == 0:
inds = dets_th.new_zeros(0, dtype=torch.long)
else:
if dets_th.is_cuda:
inds = nms_ext.nms(dets_th, iou_thr)
else:
inds = nms_ext.nms(dets_th, iou_thr)
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
"""Dispatch to only CPU Soft NMS implementations.
The input can be either a torch tensor or numpy array.
The returned type will always be the same as inputs.
Arguments:
dets (torch.Tensor or np.ndarray): bboxes with scores.
iou_thr (float): IoU threshold for Soft NMS.
method (str): either 'linear' or 'gaussian'
sigma (float): hyperparameter for gaussian method
min_score (float): score filter threshold
Returns:
        tuple: new det bboxes and indices, which is always the same
data type as the input.
Example:
>>> dets = np.array([[4., 3., 5., 3., 0.9],
>>> [4., 3., 5., 4., 0.9],
>>> [3., 1., 3., 1., 0.5],
>>> [3., 1., 3., 1., 0.5],
>>> [3., 1., 3., 1., 0.4],
>>> [3., 1., 3., 1., 0.0]], dtype=np.float32)
>>> iou_thr = 0.6
>>> new_dets, inds = soft_nms(dets, iou_thr, sigma=0.5)
>>> assert len(inds) == len(new_dets) == 5
"""
# convert dets (tensor or numpy array) to tensor
if isinstance(dets, torch.Tensor):
is_tensor = True
dets_t = dets.detach().cpu()
elif isinstance(dets, np.ndarray):
is_tensor = False
dets_t = torch.from_numpy(dets)
else:
raise TypeError('dets must be either a Tensor or numpy array, '
f'but got {type(dets)}')
method_codes = {'linear': 1, 'gaussian': 2}
if method not in method_codes:
raise ValueError(f'Invalid method for SoftNMS: {method}')
results = nms_ext.soft_nms(dets_t, iou_thr, method_codes[method], sigma,
min_score)
new_dets = results[:, :5]
inds = results[:, 5]
if is_tensor:
return new_dets.to(
device=dets.device, dtype=dets.dtype), inds.to(
device=dets.device, dtype=torch.long)
else:
return new_dets.numpy().astype(dets.dtype), inds.numpy().astype(
np.int64)
def batched_nms(bboxes, scores, inds, nms_cfg):
"""Performs non-maximum suppression in a batched fashion.
Modified from https://github.com/pytorch/vision/blob
/505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
In order to perform NMS independently per class, we add an offset to all
the boxes. The offset is dependent only on the class idx, and is large
enough so that boxes from different classes do not overlap.
Arguments:
bboxes (torch.Tensor): bboxes in shape (N, 4).
scores (torch.Tensor): scores in shape (N, ).
inds (torch.Tensor): each index value correspond to a bbox cluster,
and NMS will not be applied between elements of different inds,
shape (N, ).
nms_cfg (dict): specify nms type and other parameters like iou_thr.
Returns:
        tuple: kept bboxes and indices.
"""
max_coordinate = bboxes.max()
offsets = inds.to(bboxes) * (max_coordinate + 1)
bboxes_for_nms = bboxes + offsets[:, None]
nms_cfg_ = nms_cfg.copy()
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = eval(nms_type)
dets, keep = nms_op(
torch.cat([bboxes_for_nms, scores[:, None]], -1), **nms_cfg_)
bboxes = bboxes[keep]
scores = dets[:, -1]
return torch.cat([bboxes, scores[:, None]], -1), keep
| 1 | 19,299 | I suggest adding `class_agnostic` as an argument of `batched_nms()`, with the default value False. | open-mmlab-mmdetection | py |
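For reference, one way the reviewer's suggestion could look is sketched below: `class_agnostic` becomes an explicit keyword argument of `batched_nms()` with a default of False, rather than (or in addition to) being read from `nms_cfg`. This is only an illustration of the suggestion, assuming the same module context as the file above (torch and the nms ops in scope); it is not the change that was merged.

def batched_nms(bboxes, scores, inds, nms_cfg, class_agnostic=False):
    nms_cfg_ = nms_cfg.copy()
    if class_agnostic:
        # Class-agnostic mode: boxes of different classes may suppress each other.
        bboxes_for_nms = bboxes
    else:
        # Per-class mode: shift each class into its own coordinate range so that
        # boxes from different classes can never overlap during NMS.
        max_coordinate = bboxes.max()
        offsets = inds.to(bboxes) * (max_coordinate + 1)
        bboxes_for_nms = bboxes + offsets[:, None]
    nms_type = nms_cfg_.pop('type', 'nms')
    nms_op = eval(nms_type)
    dets, keep = nms_op(
        torch.cat([bboxes_for_nms, scores[:, None]], -1), **nms_cfg_)
    return torch.cat([bboxes[keep], dets[:, -1][:, None]], -1), keep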
@@ -44,6 +44,7 @@ public class MetadataColumns {
Integer.MAX_VALUE - 101, "file_path", Types.StringType.get(), "Path of a file in which a deleted row is stored");
public static final NestedField DELETE_FILE_POS = NestedField.required(
Integer.MAX_VALUE - 102, "pos", Types.LongType.get(), "Ordinal position of a deleted row in the data file");
+ public static final String DELETE_FILE_ROW_FIELD_NAME = "row";
public static final int DELETE_FILE_ROW_FIELD_ID = Integer.MAX_VALUE - 103;
public static final String DELETE_FILE_ROW_DOC = "Deleted row values";
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.Map;
import java.util.Set;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.NestedField;
public class MetadataColumns {
private MetadataColumns() {
}
// IDs Integer.MAX_VALUE - (1-100) are used for metadata columns
public static final NestedField FILE_PATH = NestedField.required(
Integer.MAX_VALUE - 1, "_file", Types.StringType.get(), "Path of the file in which a row is stored");
public static final NestedField ROW_POSITION = NestedField.required(
Integer.MAX_VALUE - 2, "_pos", Types.LongType.get(), "Ordinal position of a row in the source data file");
public static final NestedField IS_DELETED = NestedField.required(
Integer.MAX_VALUE - 3, "_deleted", Types.BooleanType.get(), "Whether the row has been deleted");
// IDs Integer.MAX_VALUE - (101-200) are used for reserved columns
public static final NestedField DELETE_FILE_PATH = NestedField.required(
Integer.MAX_VALUE - 101, "file_path", Types.StringType.get(), "Path of a file in which a deleted row is stored");
public static final NestedField DELETE_FILE_POS = NestedField.required(
Integer.MAX_VALUE - 102, "pos", Types.LongType.get(), "Ordinal position of a deleted row in the data file");
public static final int DELETE_FILE_ROW_FIELD_ID = Integer.MAX_VALUE - 103;
public static final String DELETE_FILE_ROW_DOC = "Deleted row values";
private static final Map<String, NestedField> META_COLUMNS = ImmutableMap.of(
FILE_PATH.name(), FILE_PATH,
ROW_POSITION.name(), ROW_POSITION,
IS_DELETED.name(), IS_DELETED);
private static final Set<Integer> META_IDS = META_COLUMNS.values().stream().map(NestedField::fieldId)
.collect(ImmutableSet.toImmutableSet());
public static Set<Integer> metadataFieldIds() {
return META_IDS;
}
public static NestedField get(String name) {
return META_COLUMNS.get(name);
}
public static boolean isMetadataColumn(String name) {
return META_COLUMNS.containsKey(name);
}
public static boolean nonMetadataColumn(String name) {
return !META_COLUMNS.containsKey(name);
}
}
| 1 | 39,712 | @rdblue, did we not add the name on purpose? | apache-iceberg | java |
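As context for the question above: exposing the field name as a constant lets callers assemble the nested "row" column of a position-delete schema without hard-coding the string. The snippet below is illustrative only and is not code from the repository; it reuses the NestedField factory style visible in the file above, and the Table parameter is an assumption standing in for whichever table's row schema should be embedded.

import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.types.Types;

// Illustrative helper: a pos-delete schema whose third column holds the deleted row.
class PosDeleteSchemaExample {
  static Schema posDeleteSchemaWithRow(Table table) {
    return new Schema(
        MetadataColumns.DELETE_FILE_PATH,
        MetadataColumns.DELETE_FILE_POS,
        Types.NestedField.required(
            MetadataColumns.DELETE_FILE_ROW_FIELD_ID,
            MetadataColumns.DELETE_FILE_ROW_FIELD_NAME,
            table.schema().asStruct(),
            MetadataColumns.DELETE_FILE_ROW_DOC));
  }
}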
@@ -41,12 +41,12 @@ import org.apache.orc.TypeDescription;
*/
public final class ORCSchemaUtil {
- private enum BinaryType {
+ public enum BinaryType {
UUID, FIXED, BINARY
}
- private enum IntegerType {
- TIME, INTEGER
+ public enum LongType {
+ TIME, LONG
}
private static class OrcField { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.orc;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
import org.apache.orc.TypeDescription;
/**
* Utilities for mapping Iceberg to ORC schemas.
*/
public final class ORCSchemaUtil {
private enum BinaryType {
UUID, FIXED, BINARY
}
private enum IntegerType {
TIME, INTEGER
}
private static class OrcField {
private final String name;
private final TypeDescription type;
OrcField(String name, TypeDescription type) {
this.name = name;
this.type = type;
}
public String name() {
return name;
}
public TypeDescription type() {
return type;
}
}
private static final String ICEBERG_ID_ATTRIBUTE = "iceberg.id";
private static final String ICEBERG_REQUIRED_ATTRIBUTE = "iceberg.required";
private static final String ICEBERG_BINARY_TYPE_ATTRIBUTE = "iceberg.binary-type";
private static final String ICEBERG_INTEGER_TYPE_ATTRIBUTE = "iceberg.integer-type";
private static final String ICEBERG_FIELD_LENGTH = "iceberg.length";
private static final ImmutableMap<Type.TypeID, TypeDescription.Category> TYPE_MAPPING =
ImmutableMap.<Type.TypeID, TypeDescription.Category>builder()
.put(Type.TypeID.BOOLEAN, TypeDescription.Category.BOOLEAN)
.put(Type.TypeID.INTEGER, TypeDescription.Category.INT)
.put(Type.TypeID.TIME, TypeDescription.Category.INT)
.put(Type.TypeID.LONG, TypeDescription.Category.LONG)
.put(Type.TypeID.FLOAT, TypeDescription.Category.FLOAT)
.put(Type.TypeID.DOUBLE, TypeDescription.Category.DOUBLE)
.put(Type.TypeID.DATE, TypeDescription.Category.DATE)
.put(Type.TypeID.TIMESTAMP, TypeDescription.Category.TIMESTAMP)
.put(Type.TypeID.STRING, TypeDescription.Category.STRING)
.put(Type.TypeID.UUID, TypeDescription.Category.BINARY)
.put(Type.TypeID.FIXED, TypeDescription.Category.BINARY)
.put(Type.TypeID.BINARY, TypeDescription.Category.BINARY)
.put(Type.TypeID.DECIMAL, TypeDescription.Category.DECIMAL)
.build();
private ORCSchemaUtil() {}
public static TypeDescription convert(Schema schema) {
final TypeDescription root = TypeDescription.createStruct();
final Types.StructType schemaRoot = schema.asStruct();
for (Types.NestedField field : schemaRoot.asStructType().fields()) {
TypeDescription orcColumType = convert(field.fieldId(), field.type(), field.isRequired());
root.addField(field.name(), orcColumType);
}
return root;
}
private static TypeDescription convert(Integer fieldId, Type type, boolean isRequired) {
final TypeDescription orcType;
switch (type.typeId()) {
case BOOLEAN:
orcType = TypeDescription.createBoolean();
break;
case INTEGER:
orcType = TypeDescription.createInt();
orcType.setAttribute(ICEBERG_INTEGER_TYPE_ATTRIBUTE, IntegerType.INTEGER.toString());
break;
case TIME:
orcType = TypeDescription.createInt();
orcType.setAttribute(ICEBERG_INTEGER_TYPE_ATTRIBUTE, IntegerType.TIME.toString());
break;
case LONG:
orcType = TypeDescription.createLong();
break;
case FLOAT:
orcType = TypeDescription.createFloat();
break;
case DOUBLE:
orcType = TypeDescription.createDouble();
break;
case DATE:
orcType = TypeDescription.createDate();
break;
case TIMESTAMP:
orcType = TypeDescription.createTimestamp();
break;
case STRING:
orcType = TypeDescription.createString();
break;
case UUID:
orcType = TypeDescription.createBinary();
orcType.setAttribute(ICEBERG_BINARY_TYPE_ATTRIBUTE, BinaryType.UUID.toString());
break;
case FIXED:
orcType = TypeDescription.createBinary();
orcType.setAttribute(ICEBERG_BINARY_TYPE_ATTRIBUTE, BinaryType.FIXED.toString());
orcType.setAttribute(ICEBERG_FIELD_LENGTH, Integer.toString(((Types.FixedType) type).length()));
break;
case BINARY:
orcType = TypeDescription.createBinary();
orcType.setAttribute(ICEBERG_BINARY_TYPE_ATTRIBUTE, BinaryType.BINARY.toString());
break;
case DECIMAL: {
Types.DecimalType decimal = (Types.DecimalType) type;
orcType = TypeDescription.createDecimal()
.withScale(decimal.scale())
.withPrecision(decimal.precision());
break;
}
case STRUCT: {
orcType = TypeDescription.createStruct();
for (Types.NestedField field : type.asStructType().fields()) {
TypeDescription childType = convert(field.fieldId(), field.type(), field.isRequired());
orcType.addField(field.name(), childType);
}
break;
}
case LIST: {
Types.ListType list = (Types.ListType) type;
TypeDescription elementType = convert(list.elementId(), list.elementType(),
list.isElementRequired());
orcType = TypeDescription.createList(elementType);
break;
}
case MAP: {
Types.MapType map = (Types.MapType) type;
TypeDescription keyType = convert(map.keyId(), map.keyType(), true);
TypeDescription valueType = convert(map.valueId(), map.valueType(), map.isValueRequired());
orcType = TypeDescription.createMap(keyType, valueType);
break;
}
default:
throw new IllegalArgumentException("Unhandled type " + type.typeId());
}
// Set Iceberg column attributes for mapping
orcType.setAttribute(ICEBERG_ID_ATTRIBUTE, String.valueOf(fieldId));
orcType.setAttribute(ICEBERG_REQUIRED_ATTRIBUTE, String.valueOf(isRequired));
return orcType;
}
/**
 * Convert an ORC schema to an Iceberg schema. This method uses the original Iceberg column
 * mapping IDs if they are present in the ORC column attributes; otherwise, ORC column IDs
 * will be assigned following ORC's pre-order ID assignment.
*
* @return the Iceberg schema
*/
public static Schema convert(TypeDescription orcSchema) {
List<TypeDescription> children = orcSchema.getChildren();
List<String> childrenNames = orcSchema.getFieldNames();
Preconditions.checkState(children.size() == childrenNames.size(),
"Error in ORC file, children fields and names do not match.");
List<Types.NestedField> icebergFields = Lists.newArrayListWithExpectedSize(children.size());
AtomicInteger lastColumnId = new AtomicInteger(getMaxIcebergId(orcSchema));
for (int i = 0; i < children.size(); i++) {
icebergFields.add(convertOrcToIceberg(children.get(i), childrenNames.get(i),
lastColumnId::incrementAndGet));
}
return new Schema(icebergFields);
}
/**
* Converts an Iceberg schema to a corresponding ORC schema within the context of an existing
* ORC file schema.
* This method also handles schema evolution from the original ORC file schema
* to the given Iceberg schema. It builds the desired reader schema with the schema
 * evolution rules and passes that down to the ORC reader,
* which would then use its schema evolution to map that to the writer’s schema.
*
* Example:
* <code>
* Iceberg writer ORC writer
* struct<a (1): int, b (2): string> struct<a: int, b: string>
* struct<a (1): struct<b (2): string, c (3): date>> struct<a: struct<b:string, c:date>>
* </code>
*
* Iceberg reader ORC reader
* <code>
* struct<a (2): string, c (3): date> struct<b: string, c: date>
* struct<aa (1): struct<cc (3): date, bb (2): string>> struct<a: struct<c:date, b:string>>
* </code>
*
* @param schema an Iceberg schema
* @param originalOrcSchema an existing ORC file schema
* @return the resulting ORC schema
*/
public static TypeDescription buildOrcProjection(Schema schema,
TypeDescription originalOrcSchema) {
final Map<Integer, OrcField> icebergToOrc = icebergToOrcMapping("root", originalOrcSchema);
return buildOrcProjection(Integer.MIN_VALUE, schema.asStruct(), true, icebergToOrc);
}
private static TypeDescription buildOrcProjection(Integer fieldId, Type type, boolean isRequired,
Map<Integer, OrcField> mapping) {
final TypeDescription orcType;
switch (type.typeId()) {
case STRUCT:
orcType = TypeDescription.createStruct();
for (Types.NestedField nestedField : type.asStructType().fields()) {
// Using suffix _r to avoid potential underlying issues in ORC reader
// with reused column names between ORC and Iceberg;
// e.g. renaming column c -> d and adding new column d
String name = Optional.ofNullable(mapping.get(nestedField.fieldId()))
.map(OrcField::name)
.orElse(nestedField.name() + "_r" + nestedField.fieldId());
TypeDescription childType = buildOrcProjection(nestedField.fieldId(), nestedField.type(),
nestedField.isRequired(), mapping);
orcType.addField(name, childType);
}
break;
case LIST:
Types.ListType list = (Types.ListType) type;
TypeDescription elementType = buildOrcProjection(list.elementId(), list.elementType(),
list.isElementRequired(), mapping);
orcType = TypeDescription.createList(elementType);
break;
case MAP:
Types.MapType map = (Types.MapType) type;
TypeDescription keyType = buildOrcProjection(map.keyId(), map.keyType(), true, mapping);
TypeDescription valueType = buildOrcProjection(map.valueId(), map.valueType(), map.isValueRequired(),
mapping);
orcType = TypeDescription.createMap(keyType, valueType);
break;
default:
if (mapping.containsKey(fieldId)) {
TypeDescription originalType = mapping.get(fieldId).type();
Optional<TypeDescription> promotedType = getPromotedType(type, originalType);
if (promotedType.isPresent()) {
orcType = promotedType.get();
} else {
Preconditions.checkArgument(isSameType(originalType, type),
"Can not promote %s type to %s",
originalType.getCategory(), type.typeId().name());
orcType = originalType.clone();
}
} else {
if (isRequired) {
throw new IllegalArgumentException(
String.format("Field %d of type %s is required and was not found.", fieldId, type.toString()));
}
orcType = convert(fieldId, type, false);
}
}
return orcType;
}
private static Map<Integer, OrcField> icebergToOrcMapping(String name, TypeDescription orcType) {
Map<Integer, OrcField> icebergToOrc = Maps.newHashMap();
switch (orcType.getCategory()) {
case STRUCT:
List<String> childrenNames = orcType.getFieldNames();
List<TypeDescription> children = orcType.getChildren();
for (int i = 0; i < children.size(); i++) {
icebergToOrc.putAll(icebergToOrcMapping(childrenNames.get(i), children.get(i)));
}
break;
case LIST:
icebergToOrc.putAll(icebergToOrcMapping("element", orcType.getChildren().get(0)));
break;
case MAP:
icebergToOrc.putAll(icebergToOrcMapping("key", orcType.getChildren().get(0)));
icebergToOrc.putAll(icebergToOrcMapping("value", orcType.getChildren().get(1)));
break;
}
if (orcType.getId() > 0) {
// Only add to non-root types.
icebergID(orcType)
.ifPresent(integer -> icebergToOrc.put(integer, new OrcField(name, orcType)));
}
return icebergToOrc;
}
private static Optional<TypeDescription> getPromotedType(Type icebergType,
TypeDescription originalOrcType) {
TypeDescription promotedOrcType = null;
if (Type.TypeID.LONG.equals(icebergType.typeId()) &&
TypeDescription.Category.INT.equals(originalOrcType.getCategory())) {
// Promote: int to long
promotedOrcType = TypeDescription.createLong();
} else if (Type.TypeID.DOUBLE.equals(icebergType.typeId()) &&
TypeDescription.Category.FLOAT.equals(originalOrcType.getCategory())) {
// Promote: float to double
promotedOrcType = TypeDescription.createDouble();
} else if (Type.TypeID.DECIMAL.equals(icebergType.typeId()) &&
TypeDescription.Category.DECIMAL.equals(originalOrcType.getCategory())) {
// Promote: decimal(P, S) to decimal(P', S) if P' > P
Types.DecimalType newDecimal = (Types.DecimalType) icebergType;
if (newDecimal.scale() == originalOrcType.getScale() &&
newDecimal.precision() > originalOrcType.getPrecision()) {
promotedOrcType = TypeDescription.createDecimal()
.withScale(newDecimal.scale())
.withPrecision(newDecimal.precision());
}
}
return Optional.ofNullable(promotedOrcType);
}
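  // Illustrative examples of the promotions accepted above: an ORC int column may be read
  // as an Iceberg long, an ORC float as a double, and an ORC decimal(10, 2) as a
  // decimal(12, 2) (same scale, wider precision) -- but not as a decimal(12, 3).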
private static boolean isSameType(TypeDescription orcType, Type icebergType) {
return Objects.equals(TYPE_MAPPING.get(icebergType.typeId()), orcType.getCategory());
}
private static Optional<Integer> icebergID(TypeDescription orcType) {
return Optional.ofNullable(orcType.getAttributeValue(ICEBERG_ID_ATTRIBUTE))
.map(Integer::parseInt);
}
private static boolean isRequired(TypeDescription orcType) {
String isRequiredStr = orcType.getAttributeValue(ICEBERG_REQUIRED_ATTRIBUTE);
if (isRequiredStr != null) {
return Boolean.parseBoolean(isRequiredStr);
}
return false;
}
private static Types.NestedField getIcebergType(int icebergID, String name, Type type,
boolean isRequired) {
return isRequired ?
Types.NestedField.required(icebergID, name, type) :
Types.NestedField.optional(icebergID, name, type);
}
private static Types.NestedField convertOrcToIceberg(TypeDescription orcType, String name,
TypeUtil.NextID nextID) {
final int icebergID = icebergID(orcType).orElseGet(nextID::get);
final boolean isRequired = isRequired(orcType);
switch (orcType.getCategory()) {
case BOOLEAN:
return getIcebergType(icebergID, name, Types.BooleanType.get(), isRequired);
case BYTE:
case SHORT:
case INT:
IntegerType integerType = IntegerType.valueOf(
orcType.getAttributeValue(ICEBERG_INTEGER_TYPE_ATTRIBUTE)
);
switch (integerType) {
case TIME:
return getIcebergType(icebergID, name, Types.TimeType.get(), isRequired);
case INTEGER:
return getIcebergType(icebergID, name, Types.IntegerType.get(), isRequired);
default:
throw new IllegalStateException("Invalid Integer type found in ORC type attribute");
}
case LONG:
return getIcebergType(icebergID, name, Types.LongType.get(), isRequired);
case FLOAT:
return getIcebergType(icebergID, name, Types.FloatType.get(), isRequired);
case DOUBLE:
return getIcebergType(icebergID, name, Types.DoubleType.get(), isRequired);
case STRING:
case CHAR:
case VARCHAR:
return getIcebergType(icebergID, name, Types.StringType.get(), isRequired);
case BINARY:
BinaryType binaryType = BinaryType.valueOf(
orcType.getAttributeValue(ICEBERG_BINARY_TYPE_ATTRIBUTE));
switch (binaryType) {
case UUID:
return getIcebergType(icebergID, name, Types.UUIDType.get(), isRequired);
case FIXED:
int fixedLength = Integer.parseInt(orcType.getAttributeValue(ICEBERG_FIELD_LENGTH));
return getIcebergType(icebergID, name, Types.FixedType.ofLength(fixedLength), isRequired);
case BINARY:
return getIcebergType(icebergID, name, Types.BinaryType.get(), isRequired);
default:
throw new IllegalStateException("Invalid Binary type found in ORC type attribute");
}
case DATE:
return getIcebergType(icebergID, name, Types.DateType.get(), isRequired);
case TIMESTAMP:
return getIcebergType(icebergID, name, Types.TimestampType.withZone(), isRequired);
case DECIMAL:
return getIcebergType(icebergID, name,
Types.DecimalType.of(orcType.getPrecision(), orcType.getScale()),
isRequired);
case STRUCT: {
List<String> fieldNames = orcType.getFieldNames();
List<TypeDescription> fieldTypes = orcType.getChildren();
List<Types.NestedField> fields = new ArrayList<>(fieldNames.size());
for (int c = 0; c < fieldNames.size(); ++c) {
String childName = fieldNames.get(c);
TypeDescription type = fieldTypes.get(c);
Types.NestedField field = convertOrcToIceberg(type, childName, nextID);
fields.add(field);
}
return getIcebergType(icebergID, name, Types.StructType.of(fields), isRequired);
}
case LIST: {
TypeDescription elementType = orcType.getChildren().get(0);
Types.NestedField element = convertOrcToIceberg(elementType, "element", nextID);
Types.ListType listTypeWithElem = isRequired(elementType) ?
Types.ListType.ofRequired(element.fieldId(), element.type()) :
Types.ListType.ofOptional(element.fieldId(), element.type());
return isRequired ?
Types.NestedField.required(icebergID, name, listTypeWithElem) :
Types.NestedField.optional(icebergID, name, listTypeWithElem);
}
case MAP: {
TypeDescription keyType = orcType.getChildren().get(0);
Types.NestedField key = convertOrcToIceberg(keyType, "key", nextID);
TypeDescription valueType = orcType.getChildren().get(1);
Types.NestedField value = convertOrcToIceberg(valueType, "value", nextID);
Types.MapType mapTypeWithKV = isRequired(valueType) ?
Types.MapType.ofRequired(key.fieldId(), value.fieldId(), key.type(), value.type()) :
Types.MapType.ofOptional(key.fieldId(), value.fieldId(), key.type(), value.type());
return getIcebergType(icebergID, name, mapTypeWithKV, isRequired);
}
default:
// We don't have an answer for union types.
throw new IllegalArgumentException("Can't handle " + orcType);
}
}
private static int getMaxIcebergId(TypeDescription originalOrcSchema) {
int maxId = icebergID(originalOrcSchema).orElse(0);
final List<TypeDescription> children = Optional.ofNullable(originalOrcSchema.getChildren())
.orElse(Collections.emptyList());
for (TypeDescription child : children) {
maxId = Math.max(maxId, getMaxIcebergId(child));
}
return maxId;
}
}
| 1 | 17,723 | Why is this now public? | apache-iceberg | java |
@@ -111,9 +111,7 @@ module Blacklight::SearchContext
response, documents = search_service.previous_and_next_documents_for_search index, search_state.reset(current_search_session.query_params).to_hash
search_session['total'] = response.total
- @search_context_response = response
- @previous_document = documents.first
- @next_document = documents.last
+ { prev: documents.first, next: documents.last }
end
rescue Blacklight::Exceptions::InvalidRequest => e
logger.warn "Unable to setup next and previous documents: #{e}" | 1 | # frozen_string_literal: true
module Blacklight::SearchContext
extend ActiveSupport::Concern
# The following code is executed when someone includes blacklight::catalog::search_session in their
# own controller.
included do
helper_method :current_search_session, :search_session
end
module ClassMethods
# Save the submitted search parameters in the search session
def record_search_parameters opts = { only: :index }
before_action :set_current_search_session, opts
end
end
private
# sets up the session[:search] hash if it doesn't already exist
def search_session
session[:search] ||= {}
# Need to call the getter again. The value is mutated
# https://github.com/rails/rails/issues/23884
session[:search]
end
# The current search session
def current_search_session
@current_search_session ||= find_search_session
end
# Persist the current search session id to the user's session
def set_current_search_session
search_session['id'] = current_search_session.id if current_search_session
end
def find_search_session
if agent_is_crawler?
nil
elsif params[:search_context].present?
find_or_initialize_search_session_from_params JSON.parse(params[:search_context])
elsif params[:search_id].present?
begin
# TODO: check the search id signature.
searches_from_history.find(params[:search_id])
rescue ActiveRecord::RecordNotFound
nil
end
elsif start_new_search_session?
find_or_initialize_search_session_from_params search_state.to_h
elsif search_session['id']
begin
searches_from_history.find(search_session['id'])
rescue ActiveRecord::RecordNotFound
nil
end
end
end
##
# If the current action should start a new search session, this should be
# set to true
def start_new_search_session?
false
end
##
# Determine if the current request is coming from an anonymous bot
# or search crawler
#
def agent_is_crawler?
crawler_proc = blacklight_config.crawler_detector
return false if crawler_proc.nil? || current_user.present?
crawler_proc.call(request)
end
def find_or_initialize_search_session_from_params params
params_copy = params.reject { |k, v| blacklisted_search_session_params.include?(k.to_sym) || v.blank? }
return if params_copy.reject { |k, _v| [:action, :controller].include? k.to_sym }.blank?
saved_search = searches_from_history.find { |x| x.query_params == params_copy }
saved_search ||= Search.create(query_params: params_copy).tap do |s|
add_to_search_history(s)
end
end
# Add a search to the in-session search history list
def add_to_search_history search
session[:history] ||= []
session[:history].unshift(search.id)
if session[:history].length > blacklight_config.search_history_window
session[:history] = session[:history].slice(0, blacklight_config.search_history_window)
end
end
# A list of query parameters that should not be persisted for a search
def blacklisted_search_session_params
[:commit, :counter, :total, :search_id, :page, :per_page]
end
# calls setup_previous_document then setup_next_document.
# used in the show action for single view pagination.
def setup_next_and_previous_documents
if search_session['counter'] && current_search_session
index = search_session['counter'].to_i - 1
response, documents = search_service.previous_and_next_documents_for_search index, search_state.reset(current_search_session.query_params).to_hash
search_session['total'] = response.total
@search_context_response = response
@previous_document = documents.first
@next_document = documents.last
end
rescue Blacklight::Exceptions::InvalidRequest => e
logger.warn "Unable to setup next and previous documents: #{e}"
end
end
| 1 | 7,674 | I guess we should backport some deprecation for this? | projectblacklight-blacklight | rb |
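A rough sketch of the kind of backported deprecation the comment above is asking about (illustrative only; the method below is hypothetical and not part of the committed patch -- it keeps the old instance variables alive for one release while returning the new hash):

  # Sketch: return the new hash but keep assigning the deprecated ivars so apps
  # still reading @previous_document/@next_document get one release to migrate.
  def next_and_previous_with_legacy_ivars(documents)
    result = { prev: documents.first, next: documents.last }
    @previous_document = result[:prev] # deprecated; remove in the next major release
    @next_document = result[:next]     # deprecated; remove in the next major release
    result
  end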
@@ -57,11 +57,10 @@ module Selenium
is_relative = Regexp.last_match(1).strip == '1'
when /^Path=(.+)$/
path = Regexp.last_match(1).strip
+ p = path_for(name, is_relative, path)
+ @profile_paths[name] = p if p
end
end
-
- p = path_for(name, is_relative, path)
- @profile_paths[name] = p if p
end
def path_for(name, is_relative, path) | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Firefox
# @api private
class ProfilesIni
def initialize
@ini_path = File.join(Util.app_data_path, 'profiles.ini')
@profile_paths = {}
parse if File.exist?(@ini_path)
end
def [](name)
path = @profile_paths[name]
path && Profile.new(path)
end
def refresh
@profile_paths.clear
parse
end
private
def parse
string = File.read @ini_path
name = nil
is_relative = nil
path = nil
string.split("\n").each do |line|
case line
when /^\[Profile/
name, path = nil if path_for(name, is_relative, path)
when /^Name=(.+)$/
name = Regexp.last_match(1).strip
when /^IsRelative=(.+)$/
is_relative = Regexp.last_match(1).strip == '1'
when /^Path=(.+)$/
path = Regexp.last_match(1).strip
end
end
p = path_for(name, is_relative, path)
@profile_paths[name] = p if p
end
def path_for(name, is_relative, path)
return unless [name, path].any?
is_relative ? File.join(Util.app_data_path, path) : path
end
end # ProfilesIni
end # Firefox
end # WebDriver
end # Selenium
| 1 | 13,901 | Moving this code inside the last case statement doesn't seem right. Why are we defining `name` and `is_relative` variables there if we aren't using them anywhere? | SeleniumHQ-selenium | rb |
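A rough sketch of the restructuring this review comment seems to be pointing at (illustrative only, not the committed change): record the profile as soon as its Path line is seen, and reset the per-section state when a new [Profile ...] header starts, so no locals are left dangling after the loop.

  def parse
    string = File.read @ini_path
    name = nil
    is_relative = nil
    string.split("\n").each do |line|
      case line
      when /^\[Profile/
        name = is_relative = nil # new section: clear any stale state
      when /^Name=(.+)$/
        name = Regexp.last_match(1).strip
      when /^IsRelative=(.+)$/
        is_relative = Regexp.last_match(1).strip == '1'
      when /^Path=(.+)$/
        p = path_for(name, is_relative, Regexp.last_match(1).strip)
        @profile_paths[name] = p if p
      end
    end
  end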
@@ -105,6 +105,7 @@ static byte buf[8192];
#define IMMARG(sz) opnd_create_immed_int(37, sz)
#define TGTARG opnd_create_instr(instrlist_last(ilist))
#define REGARG(reg) opnd_create_reg(REG_##reg)
+#define DR_REGARG(reg) opnd_create_reg(DR_REG_##reg)
#define REGARG_PARTIAL(reg, sz) opnd_create_reg_partial(REG_##reg, sz)
#define VSIBX(sz) (opnd_create_base_disp(REG_XCX, REG_XMM6, 2, 0x42, sz))
#define VSIBY(sz) (opnd_create_base_disp(REG_XDX, REG_YMM6, 2, 0x17, sz)) | 1 | /* **********************************************************
* Copyright (c) 2011-2019 Google, Inc. All rights reserved.
* Copyright (c) 2007-2008 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Define DR_FAST_IR to verify that everything compiles when we call the inline
* versions of these routines.
*/
#ifndef STANDALONE_DECODER
# define DR_FAST_IR 1
#endif
/* Uses the DR CLIENT_INTERFACE API, using DR as a standalone library, rather than
* being a client library working with DR on a target program.
*
* To run, you need to put dynamorio.dll into either the current directory
* or system32.
*/
#ifndef USE_DYNAMO
# error NEED USE_DYNAMO
#endif
#include "configure.h"
#include "dr_api.h"
#include "tools.h"
#ifdef WINDOWS
# define _USE_MATH_DEFINES 1
# include <math.h> /* for M_PI, M_LN2, and M_LN10 for OP_fldpi, etc. */
#endif
#define VERBOSE 0
#ifdef STANDALONE_DECODER
# define ASSERT(x) \
((void)((!(x)) ? (fprintf(stderr, "ASSERT FAILURE: %s:%d: %s\n", __FILE__, \
__LINE__, #x), \
abort(), 0) \
: 0))
#else
# define ASSERT(x) \
((void)((!(x)) ? (dr_fprintf(STDERR, "ASSERT FAILURE: %s:%d: %s\n", __FILE__, \
__LINE__, #x), \
dr_abort(), 0) \
: 0))
#endif
#define BOOLS_MATCH(b1, b2) (!!(b1) == !!(b2))
#define BUFFER_SIZE_BYTES(buf) sizeof(buf)
#define BUFFER_SIZE_ELEMENTS(buf) (BUFFER_SIZE_BYTES(buf) / sizeof(buf[0]))
static byte buf[8192];
/***************************************************************************
* make sure the following are consistent (though they could still all be wrong :))
* with respect to instr length and opcode:
* - decode_fast
* - decode
* - INSTR_CREATE_
* - encode
*/
/* we split testing up to keep VS2010 from taking 25 minutes to compile
* this file.
* we cannot pass on variadic args as separate args to another
* macro, so we must split ours by # args (xref PR 208603).
*/
/* we can encode+fast-decode some instrs cross-platform but we
* leave that testing to the regression run on that platform
*/
/* these are shared among all test_all_opcodes_*() routines: */
#define MEMARG(sz) (opnd_create_base_disp(REG_XCX, REG_NULL, 0, 0x37, sz))
#define IMMARG(sz) opnd_create_immed_int(37, sz)
#define TGTARG opnd_create_instr(instrlist_last(ilist))
#define REGARG(reg) opnd_create_reg(REG_##reg)
#define REGARG_PARTIAL(reg, sz) opnd_create_reg_partial(REG_##reg, sz)
#define VSIBX(sz) (opnd_create_base_disp(REG_XCX, REG_XMM6, 2, 0x42, sz))
#define VSIBY(sz) (opnd_create_base_disp(REG_XDX, REG_YMM6, 2, 0x17, sz))
#define X86_ONLY 1
#define X64_ONLY 2
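/* Illustrative note: each entry in the included opcode tables supplies its operands
 * via the helpers above -- e.g., REGARG(EAX) builds a register operand,
 * MEMARG(OPSZ_4) a 4-byte memory reference through 0x37(%xcx), and IMMARG(OPSZ_1)
 * a 1-byte immediate with value 37 (the exact table macro signatures live in the
 * ir_x86_*args.h include files).
 */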
static void
test_all_opcodes_0(void *dc)
{
#define INCLUDE_NAME "ir_x86_0args.h"
#define OPCODE_FOR_CREATE(name, opc, icnm, flags) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, INSTR_CREATE_##icnm(dc)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
#define XOPCODE_FOR_CREATE(name, opc, icnm, flags) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, XINST_CREATE_##icnm(dc)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
#include "ir_x86_all_opc.h"
#undef OPCODE_FOR_CREATE
#undef XOPCODE_FOR_CREATE
#undef INCLUDE_NAME
}
#ifndef STANDALONE_DECODER
/* vs2005 cl takes many minutes to compile w/ static drdecode lib
* so we disable part of this test since for static we just want a
* sanity check
*/
static void
test_all_opcodes_1(void *dc)
{
# define INCLUDE_NAME "ir_x86_1args.h"
# define OPCODE_FOR_CREATE(name, opc, icnm, flags, arg1) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, INSTR_CREATE_##icnm(dc, arg1)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# define XOPCODE_FOR_CREATE(name, opc, icnm, flags, arg1) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, XINST_CREATE_##icnm(dc, arg1)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# include "ir_x86_all_opc.h"
# undef OPCODE_FOR_CREATE
# undef XOPCODE_FOR_CREATE
# undef INCLUDE_NAME
}
static void
test_all_opcodes_2(void *dc)
{
# define INCLUDE_NAME "ir_x86_2args.h"
# define OPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, INSTR_CREATE_##icnm(dc, arg1, arg2)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# define XOPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, XINST_CREATE_##icnm(dc, arg1, arg2)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# include "ir_x86_all_opc.h"
# undef OPCODE_FOR_CREATE
# undef XOPCODE_FOR_CREATE
# undef INCLUDE_NAME
}
static void
test_all_opcodes_2_mm(void *dc)
{
# define INCLUDE_NAME "ir_x86_2args_mm.h"
# define OPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, INSTR_CREATE_##icnm(dc, arg1, arg2)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# include "ir_x86_all_opc.h"
# undef OPCODE_FOR_CREATE
# undef INCLUDE_NAME
}
static void
test_all_opcodes_3(void *dc)
{
# define INCLUDE_NAME "ir_x86_3args.h"
# define OPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2, arg3) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, INSTR_CREATE_##icnm(dc, arg1, arg2, arg3)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# define XOPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2, arg3) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, XINST_CREATE_##icnm(dc, arg1, arg2, arg3)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# include "ir_x86_all_opc.h"
# undef OPCODE_FOR_CREATE
# undef XOPCODE_FOR_CREATE
# undef INCLUDE_NAME
}
static void
test_all_opcodes_3_avx(void *dc)
{
# define INCLUDE_NAME "ir_x86_3args_avx.h"
# define OPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2, arg3) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, INSTR_CREATE_##icnm(dc, arg1, arg2, arg3)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# include "ir_x86_all_opc.h"
# undef OPCODE_FOR_CREATE
# undef INCLUDE_NAME
}
static void
test_all_opcodes_4(void *dc)
{
# define INCLUDE_NAME "ir_x86_4args.h"
# define OPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2, arg3, arg4) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, \
INSTR_CREATE_##icnm(dc, arg1, arg2, arg3, arg4)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# define XOPCODE_FOR_CREATE(name, opc, icnm, flags, arg1, arg2, arg3, arg4) \
do { \
if ((flags & IF_X64_ELSE(X86_ONLY, X64_ONLY)) == 0) { \
instrlist_append(ilist, \
XINST_CREATE_##icnm(dc, arg1, arg2, arg3, arg4)); \
len_##name = instr_length(dc, instrlist_last(ilist)); \
} \
} while (0);
# include "ir_x86_all_opc.h"
# undef OPCODE_FOR_CREATE
# undef INCLUDE_NAME
}
#endif /* !STANDALONE_DECODER */
/*
***************************************************************************/
static void
test_disp_control_helper(void *dc, int disp, bool encode_zero_disp, bool force_full_disp,
bool disp16, uint len_expect)
{
byte *pc;
uint len;
instr_t *instr = INSTR_CREATE_mov_ld(
dc, opnd_create_reg(REG_ECX),
opnd_create_base_disp_ex(disp16 ? IF_X64_ELSE(REG_EBX, REG_BX) : REG_XBX,
REG_NULL, 0, disp, OPSZ_4, encode_zero_disp,
force_full_disp, disp16));
pc = instr_encode(dc, instr, buf);
len = (int)(pc - (byte *)buf);
#if VERBOSE
pc = disassemble_with_info(dc, buf, STDOUT, true, true);
#endif
ASSERT(len == len_expect);
instr_reset(dc, instr);
decode(dc, buf, instr);
ASSERT(
instr_num_srcs(instr) == 1 && opnd_is_base_disp(instr_get_src(instr, 0)) &&
BOOLS_MATCH(encode_zero_disp,
opnd_is_disp_encode_zero(instr_get_src(instr, 0))) &&
BOOLS_MATCH(force_full_disp, opnd_is_disp_force_full(instr_get_src(instr, 0))) &&
BOOLS_MATCH(disp16, opnd_is_disp_short_addr(instr_get_src(instr, 0))));
instr_destroy(dc, instr);
}
/* Test encode_zero_disp and force_full_disp control from case 4457 */
static void
test_disp_control(void *dc)
{
/*
0x004275b4 8b 0b mov (%ebx) -> %ecx
0x004275b4 8b 4b 00 mov $0x00(%ebx) -> %ecx
0x004275b4 8b 8b 00 00 00 00 mov $0x00000000 (%ebx) -> %ecx
0x004275b4 8b 4b 7f mov $0x7f(%ebx) -> %ecx
0x004275b4 8b 8b 7f 00 00 00 mov $0x0000007f (%ebx) -> %ecx
0x00430258 67 8b 4f 7f addr16 mov 0x7f(%bx) -> %ecx
0x00430258 67 8b 8f 7f 00 addr16 mov 0x007f(%bx) -> %ecx
*/
test_disp_control_helper(dc, 0, false, false, false, 2);
test_disp_control_helper(dc, 0, true, false, false, 3);
test_disp_control_helper(dc, 0, true, true, false, 6);
test_disp_control_helper(dc, 0x7f, false, false, false, 3);
test_disp_control_helper(dc, 0x7f, false, true, false, 6);
test_disp_control_helper(dc, 0x7f, false, false, true, 4);
test_disp_control_helper(dc, 0x7f, false, true, true, IF_X64_ELSE(7, 5));
}
/* emits the instruction to buf (for tests that wish to do additional checks on
* the output) */
static void
test_instr_encode(void *dc, instr_t *instr, uint len_expect)
{
instr_t *decin;
uint len;
byte *pc = instr_encode(dc, instr, buf);
len = (int)(pc - (byte *)buf);
#if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
#endif
ASSERT(len == len_expect);
decin = instr_create(dc);
decode(dc, buf, decin);
ASSERT(instr_same(instr, decin));
instr_destroy(dc, instr);
instr_destroy(dc, decin);
}
static void
test_instr_decode(void *dc, instr_t *instr, byte *bytes, uint bytes_len, bool size_match)
{
instr_t *decin;
if (size_match) {
uint len;
byte *pc = instr_encode(dc, instr, buf);
len = (int)(pc - (byte *)buf);
#if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
#endif
ASSERT(len == bytes_len);
ASSERT(memcmp(buf, bytes, bytes_len) == 0);
}
decin = instr_create(dc);
decode(dc, bytes, decin);
#if VERBOSE
print("Comparing |");
instr_disassemble(dc, instr, STDOUT);
print("|\n to |");
instr_disassemble(dc, decin, STDOUT);
print("|\n");
#endif
ASSERT(instr_same(instr, decin));
instr_destroy(dc, instr);
instr_destroy(dc, decin);
}
/* emits the instruction to buf (for tests that wish to do additional checks on
* the output) */
static void
test_instr_encode_and_decode(void *dc, instr_t *instr, uint len_expect,
/* also checks one operand's size */
bool src, uint opnum, opnd_size_t sz, uint bytes)
{
opnd_t op;
opnd_size_t opsz;
instr_t *decin;
uint len;
byte *pc = instr_encode(dc, instr, buf);
len = (int)(pc - (byte *)buf);
#if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
#endif
ASSERT(len == len_expect);
decin = instr_create(dc);
decode(dc, buf, decin);
ASSERT(instr_same(instr, decin));
/* PR 245805: variable sizes should be resolved on decode */
if (src)
op = instr_get_src(decin, opnum);
else
op = instr_get_dst(decin, opnum);
opsz = opnd_get_size(op);
ASSERT(opsz == sz && opnd_size_in_bytes(opsz) == bytes);
instr_destroy(dc, instr);
instr_destroy(dc, decin);
}
static void
test_indirect_cti(void *dc)
{
/*
0x004275f4 ff d1 call %ecx %esp -> %esp (%esp)
0x004275f4 66 ff d1 data16 call %cx %esp -> %esp (%esp)
0x004275f4 67 ff d1 addr16 call %ecx %esp -> %esp (%esp)
0x00427794 ff 19 lcall (%ecx) %esp -> %esp (%esp)
0x00427794 66 ff 19 data16 lcall (%ecx) %esp -> %esp (%esp)
0x00427794 67 ff 1f addr16 lcall (%bx) %esp -> %esp (%esp)
*/
instr_t *instr;
byte bytes_addr16_call[] = { 0x67, 0xff, 0xd1 };
instr = INSTR_CREATE_call_ind(dc, opnd_create_reg(REG_XCX));
test_instr_encode(dc, instr, 2);
#ifndef X64 /* only on AMD can we shorten, so we don't test it */
instr =
instr_create_2dst_2src(dc, OP_call_ind, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -2, OPSZ_2),
opnd_create_reg(REG_CX), opnd_create_reg(REG_XSP));
test_instr_encode(dc, instr, 3);
#endif
/* addr16 prefix does nothing here */
instr = INSTR_CREATE_call_ind(dc, opnd_create_reg(REG_XCX));
test_instr_decode(dc, instr, bytes_addr16_call, sizeof(bytes_addr16_call), false);
/* invalid to have far call go through reg since needs 6 bytes */
instr = INSTR_CREATE_call_far_ind(
dc, opnd_create_base_disp(REG_XCX, REG_NULL, 0, 0, OPSZ_6));
test_instr_encode(dc, instr, 2);
instr = instr_create_2dst_2src(
dc, OP_call_far_ind, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -4, OPSZ_4),
opnd_create_base_disp(REG_XCX, REG_NULL, 0, 0, OPSZ_4), opnd_create_reg(REG_XSP));
test_instr_encode(dc, instr, 3);
instr = instr_create_2dst_2src(
dc, OP_call_far_ind, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -8, OPSZ_8_rex16_short4),
opnd_create_base_disp(IF_X64_ELSE(REG_EBX, REG_BX), REG_NULL, 0, 0, OPSZ_6),
opnd_create_reg(REG_XSP));
test_instr_encode(dc, instr, 3);
/* case 10710: make sure we can encode these guys
0x00428844 0e push %cs %esp -> %esp (%esp)
0x00428844 1e push %ds %esp -> %esp (%esp)
0x00428844 16 push %ss %esp -> %esp (%esp)
0x00428844 06 push %es %esp -> %esp (%esp)
0x00428844 0f a0 push %fs %esp -> %esp (%esp)
0x00428844 0f a8 push %gs %esp -> %esp (%esp)
0x00428844 1f pop %esp (%esp) -> %ds %esp
0x00428844 17 pop %esp (%esp) -> %ss %esp
0x00428844 07 pop %esp (%esp) -> %es %esp
0x00428844 0f a1 pop %esp (%esp) -> %fs %esp
0x00428844 0f a9 pop %esp (%esp) -> %gs %esp
*/
#ifndef X64
test_instr_encode(dc, INSTR_CREATE_push(dc, opnd_create_reg(SEG_CS)), 1);
test_instr_encode(dc, INSTR_CREATE_push(dc, opnd_create_reg(SEG_DS)), 1);
test_instr_encode(dc, INSTR_CREATE_push(dc, opnd_create_reg(SEG_SS)), 1);
test_instr_encode(dc, INSTR_CREATE_push(dc, opnd_create_reg(SEG_ES)), 1);
#endif
test_instr_encode(dc, INSTR_CREATE_push(dc, opnd_create_reg(SEG_FS)), 2);
test_instr_encode(dc, INSTR_CREATE_push(dc, opnd_create_reg(SEG_GS)), 2);
#ifndef X64
test_instr_encode(dc, INSTR_CREATE_pop(dc, opnd_create_reg(SEG_DS)), 1);
test_instr_encode(dc, INSTR_CREATE_pop(dc, opnd_create_reg(SEG_SS)), 1);
test_instr_encode(dc, INSTR_CREATE_pop(dc, opnd_create_reg(SEG_ES)), 1);
#endif
test_instr_encode(dc, INSTR_CREATE_pop(dc, opnd_create_reg(SEG_FS)), 2);
test_instr_encode(dc, INSTR_CREATE_pop(dc, opnd_create_reg(SEG_GS)), 2);
}
static void
test_cti_prefixes(void *dc)
{
/* case 10689: test decoding jmp/call w/ 16-bit prefixes
* 0x00428844 66 e9 ab cd data16 jmp $0x55f3
* 0x00428844 67 e9 ab cd ef 12 addr16 jmp $0x133255f5
*/
buf[0] = 0x66;
buf[1] = 0xe9;
buf[2] = 0xab;
buf[3] = 0xcd;
buf[4] = 0xef;
buf[5] = 0x12;
/* data16 (0x66) == 4 bytes, while addr16 (0x67) == 6 bytes */
#ifndef X64 /* no jmp16 for x64 */
# if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
# endif
ASSERT(decode_next_pc(dc, buf) == (byte *)&buf[4]);
#endif
buf[0] = 0x67;
#if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
#endif
ASSERT(decode_next_pc(dc, buf) == (byte *)&buf[6]);
}
static void
test_modrm16_helper(void *dc, reg_id_t base, reg_id_t scale, uint disp, uint len)
{
instr_t *instr;
/* Avoid REG_EAX b/c of the special 0xa0-0xa3 opcodes */
instr = INSTR_CREATE_mov_ld(dc, opnd_create_reg(REG_EBX),
opnd_create_base_disp(base, scale,
(scale == REG_NULL ? 0 : 1),
/* we need OPSZ_4_short2 to match
* instr_same on decode! */
disp, OPSZ_4_short2));
if (base == REG_NULL && scale == REG_NULL) {
/* Don't need _ex unless abs addr, in which case should get 32-bit
* disp! Test both sides. */
test_instr_encode(dc, instr, len + 1 /*32-bit disp but no prefix*/);
instr = INSTR_CREATE_mov_ld(
dc, opnd_create_reg(REG_EBX),
opnd_create_base_disp_ex(base, scale, (scale == REG_NULL ? 0 : 1),
/* we need OPSZ_4_short2 to match
* instr_same on decode! */
disp, OPSZ_4_short2, false, false, true));
test_instr_encode(dc, instr, len);
} else {
test_instr_encode(dc, instr, len);
}
}
static void
test_modrm16(void *dc)
{
/*
* 0x00428964 67 8b 18 addr16 mov (%bx,%si,1) -> %ebx
* 0x00428964 67 8b 19 addr16 mov (%bx,%di,1) -> %ebx
* 0x00428964 67 8b 1a addr16 mov (%bp,%si,1) -> %ebx
* 0x00428964 67 8b 1b addr16 mov (%bp,%di,1) -> %ebx
* 0x00428964 67 8b 1c addr16 mov (%si) -> %ebx
* 0x00428964 67 8b 1d addr16 mov (%di) -> %ebx
* 0x004289c4 8b 1d 7f 00 00 00 mov 0x7f -> %ebx
* 0x004289c4 67 8b 1e 7f 00 addr16 mov 0x7f -> %ebx
* 0x004289c4 67 8b 5e 00 addr16 mov (%bp) -> %ebx
* 0x004289c4 67 8b 1f addr16 mov (%bx) -> %ebx
* 0x004289c4 67 8b 58 7f addr16 mov 0x7f(%bx,%si,1) -> %ebx
* 0x004289c4 67 8b 59 7f addr16 mov 0x7f(%bx,%di,1) -> %ebx
* 0x004289c4 67 8b 5a 7f addr16 mov 0x7f(%bp,%si,1) -> %ebx
* 0x004289c4 67 8b 5b 7f addr16 mov 0x7f(%bp,%di,1) -> %ebx
* 0x004289c4 67 8b 5c 7f addr16 mov 0x7f(%si) -> %ebx
* 0x004289c4 67 8b 5d 7f addr16 mov 0x7f(%di) -> %ebx
* 0x004289c4 67 8b 5e 7f addr16 mov 0x7f(%bp) -> %ebx
* 0x004289c4 67 8b 5f 7f addr16 mov 0x7f(%bx) -> %ebx
* 0x004289c4 67 8b 98 80 00 addr16 mov 0x0080(%bx,%si,1) -> %ebx
* 0x004289c4 67 8b 99 80 00 addr16 mov 0x0080(%bx,%di,1) -> %ebx
* 0x004289c4 67 8b 9a 80 00 addr16 mov 0x0080(%bp,%si,1) -> %ebx
* 0x004289c4 67 8b 9b 80 00 addr16 mov 0x0080(%bp,%di,1) -> %ebx
* 0x004289c4 67 8b 9c 80 00 addr16 mov 0x0080(%si) -> %ebx
* 0x004289c4 67 8b 9d 80 00 addr16 mov 0x0080(%di) -> %ebx
* 0x004289c4 67 8b 9e 80 00 addr16 mov 0x0080(%bp) -> %ebx
* 0x004289c4 67 8b 9f 80 00 addr16 mov 0x0080(%bx) -> %ebx
*/
test_modrm16_helper(dc, REG_BX, REG_SI, 0, 3);
test_modrm16_helper(dc, REG_BX, REG_DI, 0, 3);
test_modrm16_helper(dc, REG_BP, REG_SI, 0, 3);
test_modrm16_helper(dc, REG_BP, REG_DI, 0, 3);
test_modrm16_helper(dc, REG_SI, REG_NULL, 0, 3);
test_modrm16_helper(dc, REG_DI, REG_NULL, 0, 3);
test_modrm16_helper(dc, REG_NULL, REG_NULL, 0x7f, 5); /* must do disp16 */
test_modrm16_helper(dc, REG_BP, REG_NULL, 0, 4); /* must do disp8 */
test_modrm16_helper(dc, REG_BX, REG_NULL, 0, 3);
test_modrm16_helper(dc, REG_BX, REG_SI, 0x7f, 4);
test_modrm16_helper(dc, REG_BX, REG_DI, 0x7f, 4);
test_modrm16_helper(dc, REG_BP, REG_SI, 0x7f, 4);
test_modrm16_helper(dc, REG_BP, REG_DI, 0x7f, 4);
test_modrm16_helper(dc, REG_SI, REG_NULL, 0x7f, 4);
test_modrm16_helper(dc, REG_DI, REG_NULL, 0x7f, 4);
test_modrm16_helper(dc, REG_BP, REG_NULL, 0x7f, 4);
test_modrm16_helper(dc, REG_BX, REG_NULL, 0x7f, 4);
test_modrm16_helper(dc, REG_BX, REG_SI, 0x80, 5);
test_modrm16_helper(dc, REG_BX, REG_DI, 0x80, 5);
test_modrm16_helper(dc, REG_BP, REG_SI, 0x80, 5);
test_modrm16_helper(dc, REG_BP, REG_DI, 0x80, 5);
test_modrm16_helper(dc, REG_SI, REG_NULL, 0x80, 5);
test_modrm16_helper(dc, REG_DI, REG_NULL, 0x80, 5);
test_modrm16_helper(dc, REG_BP, REG_NULL, 0x80, 5);
test_modrm16_helper(dc, REG_BX, REG_NULL, 0x80, 5);
}
/* PR 215143: auto-magically add size prefixes */
static void
test_size_changes(void *dc)
{
/*
* 0x004299d4 66 51 data16 push %cx %esp -> %esp (%esp)
* 0x004298a4 e3 fe jecxz $0x004298a4 %ecx
* 0x004298a4 67 e3 fd addr16 jecxz $0x004298a4 %cx
* 0x080a5260 67 e2 fd addr16 loop $0x080a5260 %cx -> %cx
* 0x080a5260 67 e1 fd addr16 loope $0x080a5260 %cx -> %cx
* 0x080a5260 67 e0 fd addr16 loopne $0x080a5260 %cx -> %cx
*/
instr_t *instr;
/* addr16 doesn't affect push so we only test data16 here */
#ifndef X64 /* can only shorten on AMD */
/* push data16 */
instr =
instr_create_2dst_2src(dc, OP_push, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -2, OPSZ_2),
opnd_create_reg(REG_CX), opnd_create_reg(REG_XSP));
test_instr_encode(dc, instr, 2);
#endif
/* jecxz and jcxz */
test_instr_encode(dc, INSTR_CREATE_jecxz(dc, opnd_create_pc(buf)), 2);
/* test non-default count register size (requires addr prefix) */
instr = instr_create_0dst_2src(dc, OP_jecxz, opnd_create_pc(buf),
opnd_create_reg(IF_X64_ELSE(REG_ECX, REG_CX)));
test_instr_encode(dc, instr, 3);
instr = instr_create_1dst_2src(
dc, OP_loop, opnd_create_reg(IF_X64_ELSE(REG_ECX, REG_CX)), opnd_create_pc(buf),
opnd_create_reg(IF_X64_ELSE(REG_ECX, REG_CX)));
test_instr_encode(dc, instr, 3);
instr = instr_create_1dst_2src(
dc, OP_loope, opnd_create_reg(IF_X64_ELSE(REG_ECX, REG_CX)), opnd_create_pc(buf),
opnd_create_reg(IF_X64_ELSE(REG_ECX, REG_CX)));
test_instr_encode(dc, instr, 3);
instr = instr_create_1dst_2src(
dc, OP_loopne, opnd_create_reg(IF_X64_ELSE(REG_ECX, REG_CX)), opnd_create_pc(buf),
opnd_create_reg(IF_X64_ELSE(REG_ECX, REG_CX)));
test_instr_encode(dc, instr, 3);
/*
     * 0x004ee0b8 a6 cmps %ds:(%esi) %es:(%edi) %esi %edi -> %esi %edi
     * 0x004ee0b8 67 a6 addr16 cmps %ds:(%si) %es:(%di) %si %di -> %si %di
     * 0x004ee0b8 66 a7 data16 cmps %ds:(%esi) %es:(%edi) %esi %edi -> %esi %edi
     * 0x004ee0b8 d7 xlat %ds:(%ebx,%al,1) -> %al
* 0x004ee0b8 67 d7 addr16 xlat %ds:(%bx,%al,1) -> %al
* 0x004ee0b8 0f f7 c1 maskmovq %mm0 %mm1 -> %ds:(%edi)
* 0x004ee0b8 67 0f f7 c1 addr16 maskmovq %mm0 %mm1 -> %ds:(%di)
* 0x004ee0b8 66 0f f7 c1 maskmovdqu %xmm0 %xmm1 -> %ds:(%edi)
* 0x004ee0b8 67 66 0f f7 c1 addr16 maskmovdqu %xmm0 %xmm1 -> %ds:(%di)
*/
test_instr_encode(dc, INSTR_CREATE_cmps_1(dc), 1);
instr = instr_create_2dst_4src(
dc, OP_cmps, opnd_create_reg(IF_X64_ELSE(REG_ESI, REG_SI)),
opnd_create_reg(IF_X64_ELSE(REG_EDI, REG_DI)),
opnd_create_far_base_disp(SEG_DS, IF_X64_ELSE(REG_ESI, REG_SI), REG_NULL, 0, 0,
OPSZ_1),
opnd_create_far_base_disp(SEG_ES, IF_X64_ELSE(REG_EDI, REG_DI), REG_NULL, 0, 0,
OPSZ_1),
opnd_create_reg(IF_X64_ELSE(REG_ESI, REG_SI)),
opnd_create_reg(IF_X64_ELSE(REG_EDI, REG_DI)));
test_instr_encode(dc, instr, 2);
instr = instr_create_2dst_4src(
dc, OP_cmps, opnd_create_reg(REG_XSI), opnd_create_reg(REG_XDI),
opnd_create_far_base_disp(SEG_DS, REG_XSI, REG_NULL, 0, 0, OPSZ_2),
opnd_create_far_base_disp(SEG_ES, REG_XDI, REG_NULL, 0, 0, OPSZ_2),
opnd_create_reg(REG_XSI), opnd_create_reg(REG_XDI));
test_instr_encode_and_decode(dc, instr, 2, true /*src*/, 0, OPSZ_2, 2);
test_instr_encode(dc, INSTR_CREATE_xlat(dc), 1);
instr = instr_create_1dst_1src(dc, OP_xlat, opnd_create_reg(REG_AL),
opnd_create_far_base_disp(SEG_DS,
IF_X64_ELSE(REG_EBX, REG_BX),
REG_AL, 1, 0, OPSZ_1));
test_instr_encode(dc, instr, 2);
instr = INSTR_CREATE_maskmovq(dc, opnd_create_reg(REG_MM0), opnd_create_reg(REG_MM1));
test_instr_encode(dc, instr, 3);
instr = INSTR_PRED(instr_create_1dst_2src(
dc, OP_maskmovq,
opnd_create_far_base_disp(SEG_DS, IF_X64_ELSE(REG_EDI, REG_DI),
REG_NULL, 0, 0, OPSZ_8),
opnd_create_reg(REG_MM0), opnd_create_reg(REG_MM1)),
DR_PRED_COMPLEX);
test_instr_encode(dc, instr, 4);
instr =
INSTR_CREATE_maskmovdqu(dc, opnd_create_reg(REG_XMM0), opnd_create_reg(REG_XMM1));
test_instr_encode(dc, instr, 4);
instr = INSTR_PRED(instr_create_1dst_2src(
dc, OP_maskmovdqu,
opnd_create_far_base_disp(SEG_DS, IF_X64_ELSE(REG_EDI, REG_DI),
REG_NULL, 0, 0, OPSZ_16),
opnd_create_reg(REG_XMM0), opnd_create_reg(REG_XMM1)),
DR_PRED_COMPLEX);
test_instr_encode(dc, instr, 5);
    /* Test iretw, iretd, iretq (unlike most stack operations, iretd (and lretd on AMD)
     * exists and is the default in 64-bit mode. As such, it has a different size/type
     * than most other stack operations). Our instr_create routine should match the stack
* (iretq on 64-bit, iretd on 32-bit). See PR 191977. */
instr = INSTR_CREATE_iret(dc);
#ifdef X64
test_instr_encode_and_decode(dc, instr, 2, true /*src*/, 1, OPSZ_40, 40);
ASSERT(buf[0] == 0x48); /* check for rex.w prefix */
#else
test_instr_encode_and_decode(dc, instr, 1, true /*src*/, 1, OPSZ_12, 12);
#endif
instr = instr_create_1dst_2src(
dc, OP_iret, opnd_create_reg(REG_XSP), opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0, OPSZ_12));
test_instr_encode_and_decode(dc, instr, 1, true /*src*/, 1, OPSZ_12, 12);
instr = instr_create_1dst_2src(
dc, OP_iret, opnd_create_reg(REG_XSP), opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0, OPSZ_6));
test_instr_encode_and_decode(dc, instr, 2, true /*src*/, 1, OPSZ_6, 6);
ASSERT(buf[0] == 0x66); /* check for data prefix */
}
/* PR 332254: test xchg vs nop */
static void
test_nop_xchg(void *dc)
{
/* 0x0000000000671460 87 c0 xchg %eax %eax -> %eax %eax
* 0x0000000000671460 48 87 c0 xchg %rax %rax -> %rax %rax
* 0x0000000000671460 41 87 c0 xchg %r8d %eax -> %r8d %eax
* 0x0000000000671460 46 90 nop
* 0x0000000000671460 4e 90 nop
* 0x0000000000671460 41 90 xchg %r8d %eax -> %r8d %eax
*/
instr_t *instr;
instr = INSTR_CREATE_xchg(dc, opnd_create_reg(REG_EAX), opnd_create_reg(REG_EAX));
test_instr_encode(dc, instr, 2);
#ifdef X64
/* we don't do the optimal "48 90" instead of "48 87 c0" */
instr = INSTR_CREATE_xchg(dc, opnd_create_reg(REG_RAX), opnd_create_reg(REG_RAX));
test_instr_encode(dc, instr, 3);
/* we don't do the optimal "41 90" instead of "41 87 c0" */
instr = INSTR_CREATE_xchg(dc, opnd_create_reg(REG_R8D), opnd_create_reg(REG_EAX));
test_instr_encode(dc, instr, 3);
    /* ensure we treat as nop and NOT xchg if it doesn't have rex.b */
buf[0] = 0x46;
buf[1] = 0x90;
instr = instr_create(dc);
# if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
# endif
decode(dc, buf, instr);
ASSERT(instr_get_opcode(instr) == OP_nop);
instr_destroy(dc, instr);
buf[0] = 0x4e;
buf[1] = 0x90;
instr = instr_create(dc);
# if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
# endif
decode(dc, buf, instr);
ASSERT(instr_get_opcode(instr) == OP_nop);
instr_destroy(dc, instr);
buf[0] = 0x41;
buf[1] = 0x90;
instr = instr_create(dc);
# if VERBOSE
disassemble_with_info(dc, buf, STDOUT, true, true);
# endif
decode(dc, buf, instr);
ASSERT(instr_get_opcode(instr) == OP_xchg);
instr_destroy(dc, instr);
#endif
}
static void
test_hint_nops(void *dc)
{
byte *pc;
instr_t *instr;
instr = instr_create(dc);
/* ensure we treat as nop. */
buf[0] = 0x0f;
buf[1] = 0x18;
/* nop [eax], and then ecx, edx, ebx */
for (buf[2] = 0x38; buf[2] <= 0x3b; buf[2]++) {
pc = decode(dc, buf, instr);
ASSERT(instr_get_opcode(instr) == OP_nop_modrm);
instr_reset(dc, instr);
}
/* other types of hintable nop [eax] */
buf[2] = 0x00;
for (buf[1] = 0x19; buf[1] <= 0x1f; buf[1]++) {
pc = decode(dc, buf, instr);
ASSERT(instr_get_opcode(instr) == OP_nop_modrm);
instr_reset(dc, instr);
}
instr_destroy(dc, instr);
}
#ifdef X64
static void
test_x86_mode(void *dc)
{
byte *pc, *end;
instr_t *instr;
/* create instr that looks different in x86 vs x64 */
instr = INSTR_CREATE_add(dc, opnd_create_reg(REG_RAX), OPND_CREATE_INT32(42));
end = instr_encode(dc, instr, buf);
ASSERT(end - buf < BUFFER_SIZE_ELEMENTS(buf));
/* read back in */
set_x86_mode(dc, false /*64-bit*/);
instr_reset(dc, instr);
pc = decode(dc, buf, instr);
ASSERT(pc != NULL);
ASSERT(instr_get_opcode(instr) == OP_add);
/* now interpret as 32-bit where rex will be an inc */
set_x86_mode(dc, true /*32-bit*/);
instr_reset(dc, instr);
pc = decode(dc, buf, instr);
ASSERT(pc != NULL);
ASSERT(instr_get_opcode(instr) == OP_dec);
/* test i#352: in x86 mode, sysexit should have esp as dest, not rsp */
set_x86_mode(dc, true /*32-bit*/);
buf[0] = 0x0f;
buf[1] = 0x35;
instr_reset(dc, instr);
pc = decode(dc, buf, instr);
ASSERT(pc != NULL);
ASSERT(instr_get_opcode(instr) == OP_sysexit);
ASSERT(opnd_get_reg(instr_get_dst(instr, 0)) == DR_REG_ESP);
instr_free(dc, instr);
set_x86_mode(dc, false /*64-bit*/);
}
static void
test_x64_abs_addr(void *dc)
{
/* 48 a1 ef be ad de ef be ad de mov 0xdeadbeefdeadbeef -> %rax
* 48 a3 ef be ad de ef be ad de mov %rax -> 0xdeadbeefdeadbeef
*/
instr_t *instr;
opnd_t abs_addr = opnd_create_abs_addr((void *)0xdeadbeefdeadbeef, OPSZ_8);
/* movabs load */
instr = INSTR_CREATE_mov_ld(dc, opnd_create_reg(DR_REG_RAX), abs_addr);
test_instr_encode(dc, instr, 10); /* REX + op + 8 */
/* movabs store */
instr = INSTR_CREATE_mov_st(dc, abs_addr, opnd_create_reg(DR_REG_RAX));
test_instr_encode(dc, instr, 10); /* REX + op + 8 */
}
static void
test_x64_inc(void *dc)
{
/* i#842: inc/dec should not be encoded as 40-4f in x64 */
instr_t *instr;
instr = INSTR_CREATE_inc(dc, opnd_create_reg(REG_EAX));
test_instr_encode(dc, instr, 2);
}
#endif
static void
test_regs(void *dc)
{
reg_id_t reg;
/* Various subregs of xax to OPSZ_1. */
#ifdef X64
reg = reg_resize_to_opsz(DR_REG_RAX, OPSZ_1);
ASSERT(reg == DR_REG_AL);
#endif
reg = reg_resize_to_opsz(DR_REG_EAX, OPSZ_1);
ASSERT(reg == DR_REG_AL);
reg = reg_resize_to_opsz(DR_REG_AX, OPSZ_1);
ASSERT(reg == DR_REG_AL);
reg = reg_resize_to_opsz(DR_REG_AH, OPSZ_1);
ASSERT(reg == DR_REG_AL);
reg = reg_resize_to_opsz(DR_REG_AL, OPSZ_1);
ASSERT(reg == DR_REG_AL);
/* xax to OPSZ_2 */
#ifdef X64
reg = reg_resize_to_opsz(DR_REG_RAX, OPSZ_2);
ASSERT(reg == DR_REG_AX);
#endif
reg = reg_resize_to_opsz(DR_REG_EAX, OPSZ_2);
ASSERT(reg == DR_REG_AX);
reg = reg_resize_to_opsz(DR_REG_AX, OPSZ_2);
ASSERT(reg == DR_REG_AX);
reg = reg_resize_to_opsz(DR_REG_AH, OPSZ_2);
ASSERT(reg == DR_REG_AX);
reg = reg_resize_to_opsz(DR_REG_AL, OPSZ_2);
ASSERT(reg == DR_REG_AX);
/* xax to OPSZ_4 */
#ifdef X64
reg = reg_resize_to_opsz(DR_REG_RAX, OPSZ_4);
ASSERT(reg == DR_REG_EAX);
#endif
reg = reg_resize_to_opsz(DR_REG_EAX, OPSZ_4);
ASSERT(reg == DR_REG_EAX);
reg = reg_resize_to_opsz(DR_REG_AX, OPSZ_4);
ASSERT(reg == DR_REG_EAX);
reg = reg_resize_to_opsz(DR_REG_AH, OPSZ_4);
ASSERT(reg == DR_REG_EAX);
reg = reg_resize_to_opsz(DR_REG_AL, OPSZ_4);
ASSERT(reg == DR_REG_EAX);
#ifdef X64
/* xax to OPSZ_8 */
reg = reg_resize_to_opsz(DR_REG_RAX, OPSZ_8);
ASSERT(reg == DR_REG_RAX);
reg = reg_resize_to_opsz(DR_REG_EAX, OPSZ_8);
ASSERT(reg == DR_REG_RAX);
reg = reg_resize_to_opsz(DR_REG_AX, OPSZ_8);
ASSERT(reg == DR_REG_RAX);
reg = reg_resize_to_opsz(DR_REG_AH, OPSZ_8);
ASSERT(reg == DR_REG_RAX);
reg = reg_resize_to_opsz(DR_REG_AL, OPSZ_8);
ASSERT(reg == DR_REG_RAX);
#endif
/* Quick check of other regs. */
reg = reg_resize_to_opsz(DR_REG_XBX, OPSZ_1);
ASSERT(reg == DR_REG_BL);
reg = reg_resize_to_opsz(DR_REG_XCX, OPSZ_1);
ASSERT(reg == DR_REG_CL);
reg = reg_resize_to_opsz(DR_REG_XDX, OPSZ_1);
ASSERT(reg == DR_REG_DL);
/* X64 only subregs, OPSZ_1. */
reg = reg_resize_to_opsz(DR_REG_XDI, OPSZ_1);
ASSERT(reg == IF_X64_ELSE(DR_REG_DIL, DR_REG_NULL));
reg = reg_resize_to_opsz(DR_REG_XSI, OPSZ_1);
ASSERT(reg == IF_X64_ELSE(DR_REG_SIL, DR_REG_NULL));
reg = reg_resize_to_opsz(DR_REG_XSP, OPSZ_1);
ASSERT(reg == IF_X64_ELSE(DR_REG_SPL, DR_REG_NULL));
reg = reg_resize_to_opsz(DR_REG_XBP, OPSZ_1);
ASSERT(reg == IF_X64_ELSE(DR_REG_BPL, DR_REG_NULL));
/* X64 only subregs, OPSZ_2. */
reg = reg_resize_to_opsz(DR_REG_XDI, OPSZ_2);
ASSERT(reg == DR_REG_DI);
reg = reg_resize_to_opsz(DR_REG_XSI, OPSZ_2);
ASSERT(reg == DR_REG_SI);
reg = reg_resize_to_opsz(DR_REG_XSP, OPSZ_2);
ASSERT(reg == DR_REG_SP);
reg = reg_resize_to_opsz(DR_REG_XBP, OPSZ_2);
ASSERT(reg == DR_REG_BP);
}
static void
test_instr_opnds(void *dc)
{
/* Verbose disasm looks like this:
* 32-bit:
* 0x080f1ae0 ff 25 e7 1a 0f 08 jmp 0x080f1ae7
* 0x080f1ae6 b8 ef be ad de mov $0xdeadbeef -> %eax
* 0x080f1ae0 a0 e6 1a 0f 08 mov 0x080f1ae6 -> %al
* 0x080f1ae5 b8 ef be ad de mov $0xdeadbeef -> %eax
* 64-bit:
* 0x00000000006b8de0 ff 25 02 00 00 00 jmp <rel> 0x00000000006b8de8
* 0x00000000006b8de6 48 b8 ef be ad de 00 mov $0x00000000deadbeef -> %rax
* 00 00 00
* 0x00000000006b8de0 8a 05 02 00 00 00 mov <rel> 0x00000000006b8de8 -> %al
* 0x00000000006b8de6 48 b8 ef be ad de 00 mov $0x00000000deadbeef -> %rax
* 00 00 00
*/
instrlist_t *ilist;
instr_t *tgt, *instr;
byte *pc;
short disp;
ilist = instrlist_create(dc);
/* test mem instr as ind jmp target */
tgt = INSTR_CREATE_mov_imm(dc, opnd_create_reg(DR_REG_XAX),
opnd_create_immed_int(0xdeadbeef, OPSZ_PTR));
/* skip rex+opcode */
disp = IF_X64_ELSE(2, 1);
instrlist_append(
ilist, INSTR_CREATE_jmp_ind(dc, opnd_create_mem_instr(tgt, disp, OPSZ_PTR)));
instrlist_append(ilist, tgt);
pc = instrlist_encode(dc, ilist, buf, true /*instr targets*/);
ASSERT(pc != NULL);
instrlist_clear(dc, ilist);
#if VERBOSE
pc = disassemble_with_info(dc, buf, STDOUT, true, true);
pc = disassemble_with_info(dc, pc, STDOUT, true, true);
#endif
pc = buf;
instr = instr_create(dc);
pc = decode(dc, pc, instr);
ASSERT(pc != NULL);
ASSERT(instr_get_opcode(instr) == OP_jmp_ind);
#ifdef X64
ASSERT(opnd_is_rel_addr(instr_get_src(instr, 0)));
ASSERT(opnd_get_addr(instr_get_src(instr, 0)) == pc + disp);
#else
ASSERT(opnd_is_base_disp(instr_get_src(instr, 0)));
ASSERT(opnd_get_base(instr_get_src(instr, 0)) == REG_NULL);
ASSERT(opnd_get_index(instr_get_src(instr, 0)) == REG_NULL);
ASSERT(opnd_get_disp(instr_get_src(instr, 0)) == (ptr_int_t)pc + disp);
#endif
/* test mem instr as TYPE_O */
tgt = INSTR_CREATE_mov_imm(dc, opnd_create_reg(DR_REG_XAX),
opnd_create_immed_int(0xdeadbeef, OPSZ_PTR));
/* skip rex+opcode */
disp = IF_X64_ELSE(2, 1);
instrlist_append(ilist,
INSTR_CREATE_mov_ld(dc, opnd_create_reg(DR_REG_AL),
opnd_create_mem_instr(tgt, disp, OPSZ_1)));
instrlist_append(ilist, tgt);
pc = instrlist_encode(dc, ilist, buf, true /*instr targets*/);
ASSERT(pc != NULL);
instrlist_clear(dc, ilist);
#if VERBOSE
pc = disassemble_with_info(dc, buf, STDOUT, true, true);
pc = disassemble_with_info(dc, pc, STDOUT, true, true);
#endif
pc = buf;
instr_reset(dc, instr);
pc = decode(dc, pc, instr);
ASSERT(pc != NULL);
ASSERT(instr_get_opcode(instr) == OP_mov_ld);
#ifdef X64
ASSERT(opnd_is_rel_addr(instr_get_src(instr, 0)));
ASSERT(opnd_get_addr(instr_get_src(instr, 0)) == pc + disp);
#else
ASSERT(opnd_is_base_disp(instr_get_src(instr, 0)));
ASSERT(opnd_get_base(instr_get_src(instr, 0)) == REG_NULL);
ASSERT(opnd_get_index(instr_get_src(instr, 0)) == REG_NULL);
ASSERT(opnd_get_disp(instr_get_src(instr, 0)) == (ptr_int_t)pc + disp);
#endif
instr_free(dc, instr);
instrlist_destroy(dc, ilist);
}
static void
test_strict_invalid(void *dc)
{
instr_t instr;
byte *pc;
const byte buf[] = { 0xf2, 0x0f, 0xd8, 0xe9 }; /* psubusb w/ invalid prefix */
instr_init(dc, &instr);
/* The instr should be valid by default and invalid if decode_strict */
pc = decode(dc, (byte *)buf, &instr);
ASSERT(pc != NULL);
disassemble_set_syntax(DR_DISASM_STRICT_INVALID);
instr_reset(dc, &instr);
pc = decode(dc, (byte *)buf, &instr);
ASSERT(pc == NULL);
instr_free(dc, &instr);
}
static void
test_tsx(void *dc)
{
/* Test the xacquire and xrelease prefixes */
byte *pc;
const byte b1[] = { 0xf3, 0xa3, 0x9a, 0x7a, 0x21, 0x02, 0xfa, 0x8c, 0xec, 0xa3 };
const byte b2[] = { 0xf3, 0x89, 0x39 };
const byte b3[] = { 0xf2, 0x89, 0x39 };
const byte b4[] = { 0xf2, 0xf0, 0x00, 0x00 };
char buf[512];
int len;
pc = disassemble_to_buffer(dc, (byte *)b1, (byte *)b1, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf,
IF_X64_ELSE("mov %eax -> 0xa3ec8cfa02217a9a[4byte]\n",
"mov %eax -> 0x02217a9a[4byte]\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)b2, (byte *)b2, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf,
IF_X64_ELSE("mov %edi -> (%rcx)[4byte]\n",
"mov %edi -> (%ecx)[4byte]\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)b3, (byte *)b3, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf,
IF_X64_ELSE("xacquire mov %edi -> (%rcx)[4byte]\n",
"xacquire mov %edi -> (%ecx)[4byte]\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)b4, (byte *)b4, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf,
IF_X64_ELSE(
"xacquire lock add %al (%rax)[1byte] -> (%rax)[1byte]\n",
"xacquire lock add %al (%eax)[1byte] -> (%eax)[1byte]\n")) == 0);
}
static void
test_vsib_helper(void *dc, dr_mcontext_t *mc, instr_t *instr, reg_t base, int mask_idx,
int index_idx, int scale, int disp, int count, opnd_size_t index_sz)
{
uint memopidx, memoppos;
app_pc addr;
bool write;
for (memopidx = 0;
instr_compute_address_ex_pos(instr, mc, memopidx, &addr, &write, &memoppos);
memopidx++) {
/* should be a read from 1st source */
ptr_int_t index =
((index_sz == OPSZ_4) ?
/* this only works w/ the mask all enabled */
(mc->simd[index_idx].u32[memopidx])
:
#ifdef X64
(mc->simd[index_idx].u64[memopidx])
#else
((((int64)mc->simd[index_idx].u32[memopidx * 2 + 1])
<< 32) |
mc->simd[index_idx].u32[memopidx * 2])
#endif
);
ASSERT(!write);
ASSERT(memoppos == 0);
ASSERT((ptr_int_t)addr == base + disp + scale * index);
}
ASSERT(memopidx == count);
}
static void
test_vsib(void *dc)
{
dr_mcontext_t mc;
instr_t *instr;
/* Test VSIB addressing */
byte *pc;
const byte b1[] = { 0xc4, 0xe2, 0xe9, 0x90, 0x24, 0x42 };
/* Invalid b/c modrm doesn't ask for SIB */
const byte b2[] = { 0xc4, 0xe2, 0xe9, 0x90, 0x00 };
char buf[512];
int len;
pc = disassemble_to_buffer(dc, (byte *)b1, (byte *)b1, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf,
IF_X64_ELSE(
"vpgatherdq (%rdx,%xmm0,2)[8byte] %xmm2 -> %xmm4 %xmm2\n",
"vpgatherdq (%edx,%xmm0,2)[8byte] %xmm2 -> %xmm4 %xmm2\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)b2, (byte *)b2, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc == NULL);
/* Test mem addr emulation */
mc.size = sizeof(mc);
mc.flags = DR_MC_ALL;
mc.xcx = 0x42;
mc.simd[1].u32[0] = 0x11111111;
mc.simd[1].u32[1] = 0x22222222;
mc.simd[1].u32[2] = 0x33333333;
mc.simd[1].u32[3] = 0x44444444;
mc.simd[1].u32[4] = 0x12345678;
mc.simd[1].u32[5] = 0x87654321;
mc.simd[1].u32[6] = 0xabababab;
mc.simd[1].u32[7] = 0xcdcdcdcd;
/* mask */
mc.simd[2].u32[0] = 0xf1111111;
mc.simd[2].u32[1] = 0xf2222222;
mc.simd[2].u32[2] = 0xf3333333;
mc.simd[2].u32[3] = 0xf4444444;
mc.simd[2].u32[4] = 0xf5444444;
mc.simd[2].u32[5] = 0xf6444444;
mc.simd[2].u32[6] = 0xf7444444;
mc.simd[2].u32[7] = 0xf8444444;
/* test index size 4 and mem size 8 */
instr =
INSTR_CREATE_vgatherdpd(dc, opnd_create_reg(REG_XMM0),
opnd_create_base_disp(REG_XCX, REG_XMM1, 2, 0x12, OPSZ_8),
opnd_create_reg(REG_XMM2));
test_vsib_helper(dc, &mc, instr, mc.xcx, 2, 1, 2, 0x12, 2, OPSZ_4);
instr_destroy(dc, instr);
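    /* For reference (illustrative arithmetic, assuming the fully-set mask above): the
     * first address gathered by the call above is
     * xcx + disp + scale * simd[1].u32[0] = 0x42 + 0x12 + 2 * 0x11111111 = 0x22222276.
     */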
    /* test index size 8 and mem size 8 */
instr =
INSTR_CREATE_vgatherqpd(dc, opnd_create_reg(REG_XMM0),
opnd_create_base_disp(REG_XCX, REG_XMM1, 2, 0x12, OPSZ_8),
opnd_create_reg(REG_XMM2));
test_vsib_helper(dc, &mc, instr, mc.xcx, 2, 1, 2, 0x12, 2, OPSZ_8);
instr_destroy(dc, instr);
/* test index size 4 and mem size 4 */
instr =
INSTR_CREATE_vgatherdps(dc, opnd_create_reg(REG_XMM0),
opnd_create_base_disp(REG_XCX, REG_XMM1, 2, 0x12, OPSZ_4),
opnd_create_reg(REG_XMM2));
test_vsib_helper(dc, &mc, instr, mc.xcx, 2, 1, 2, 0x12, 4, OPSZ_4);
instr_destroy(dc, instr);
/* test index size 8 and mem size 4 */
instr =
INSTR_CREATE_vgatherqps(dc, opnd_create_reg(REG_XMM0),
opnd_create_base_disp(REG_XCX, REG_XMM1, 2, 0x12, OPSZ_4),
opnd_create_reg(REG_XMM2));
test_vsib_helper(dc, &mc, instr, mc.xcx, 2, 1, 2, 0x12, 2, OPSZ_8);
instr_destroy(dc, instr);
    /* test 256-bit (ymm) */
instr =
INSTR_CREATE_vgatherdps(dc, opnd_create_reg(REG_YMM0),
opnd_create_base_disp(REG_XCX, REG_YMM1, 2, 0x12, OPSZ_4),
opnd_create_reg(REG_YMM2));
test_vsib_helper(dc, &mc, instr, mc.xcx, 2, 1, 2, 0x12, 8, OPSZ_4);
instr_destroy(dc, instr);
    /* test the mask not selecting anything -- partially-selected masks (gaps in the
     * middle) would complicate our helper checks, so we only test the two ends:
     * everything selected (above) and nothing selected (here)
     */
mc.simd[2].u32[0] = 0x71111111;
mc.simd[2].u32[1] = 0x32222222;
mc.simd[2].u32[2] = 0x13333333;
mc.simd[2].u32[3] = 0x04444444;
mc.simd[2].u32[4] = 0x65444444;
mc.simd[2].u32[5] = 0x56444444;
mc.simd[2].u32[6] = 0x47444444;
mc.simd[2].u32[7] = 0x28444444;
instr =
INSTR_CREATE_vgatherdps(dc, opnd_create_reg(REG_YMM0),
opnd_create_base_disp(REG_XCX, REG_YMM1, 2, 0x12, OPSZ_4),
opnd_create_reg(REG_YMM2));
test_vsib_helper(dc, &mc, instr, mc.xcx, 2, 1, 2, 0x12, 0 /*nothing*/, OPSZ_4);
instr_destroy(dc, instr);
}
static void
test_disasm_sizes(void *dc)
{
byte *pc;
char buf[512];
int len;
{
const byte b1[] = { 0xac };
const byte b2[] = { 0xad };
pc = disassemble_to_buffer(dc, (byte *)b1, (byte *)b1, false, false, buf,
BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf,
IF_X64_ELSE("lods %ds:(%rsi)[1byte] %rsi -> %al %rsi\n",
"lods %ds:(%esi)[1byte] %esi -> %al %esi\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)b2, (byte *)b2, false, false, buf,
BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf,
IF_X64_ELSE("lods %ds:(%rsi)[4byte] %rsi -> %eax %rsi\n",
"lods %ds:(%esi)[4byte] %esi -> %eax %esi\n")) == 0);
}
#ifdef X64
{
const byte b3[] = { 0x48, 0xad };
pc = disassemble_to_buffer(dc, (byte *)b3, (byte *)b3, false, false, buf,
BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf, "lods %ds:(%rsi)[8byte] %rsi -> %rax %rsi\n") == 0);
}
#endif
#ifdef X64
{
const byte b1[] = { 0xc7, 0x80, 0x90, 0xe4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
const byte b2[] = { 0x48, 0xc7, 0x80, 0x90, 0xe4, 0xff,
0xff, 0x00, 0x00, 0x00, 0x00 };
pc = disassemble_to_buffer(dc, (byte *)b1, (byte *)b1, false, false, buf,
BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf, "mov $0x00000000 -> 0xffffe490(%rax)[4byte]\n") == 0);
pc = disassemble_to_buffer(dc, (byte *)b2, (byte *)b2, false, false, buf,
BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL);
ASSERT(strcmp(buf, "mov $0x0000000000000000 -> 0xffffe490(%rax)[8byte]\n") ==
0);
}
#endif
}
static void
test_predication(void *dc)
{
byte *pc;
uint usage;
instr_t *instr = INSTR_CREATE_vmaskmovps(dc, opnd_create_reg(REG_XMM0),
opnd_create_reg(REG_XMM1), MEMARG(OPSZ_16));
ASSERT(instr_reads_from_reg(instr, REG_XMM1, DR_QUERY_DEFAULT));
ASSERT(instr_reads_from_reg(instr, REG_XMM1, DR_QUERY_INCLUDE_ALL));
ASSERT(instr_reads_from_reg(instr, REG_XMM1, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(instr_reads_from_reg(instr, REG_XMM1, 0));
ASSERT(!instr_writes_to_reg(instr, REG_XMM0, DR_QUERY_DEFAULT));
ASSERT(instr_writes_to_reg(instr, REG_XMM0, DR_QUERY_INCLUDE_ALL));
ASSERT(instr_writes_to_reg(instr, REG_XMM0, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(!instr_writes_to_reg(instr, REG_XMM0, 0));
pc = instr_encode(dc, instr, buf);
ASSERT(pc != NULL);
instr_reset(dc, instr);
decode(dc, buf, instr);
ASSERT(instr_reads_from_reg(instr, REG_XMM1, DR_QUERY_DEFAULT));
ASSERT(instr_reads_from_reg(instr, REG_XMM1, DR_QUERY_INCLUDE_ALL));
ASSERT(instr_reads_from_reg(instr, REG_XMM1, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(instr_reads_from_reg(instr, REG_XMM1, 0));
ASSERT(!instr_writes_to_reg(instr, REG_XMM0, DR_QUERY_DEFAULT));
ASSERT(instr_writes_to_reg(instr, REG_XMM0, DR_QUERY_INCLUDE_ALL));
ASSERT(instr_writes_to_reg(instr, REG_XMM0, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(!instr_writes_to_reg(instr, REG_XMM0, 0));
instr_reset(dc, instr);
instr = INSTR_CREATE_cmovcc(dc, OP_cmovnle, opnd_create_reg(REG_EAX),
opnd_create_reg(REG_ECX));
ASSERT(instr_reads_from_reg(instr, REG_ECX, DR_QUERY_DEFAULT));
ASSERT(instr_reads_from_reg(instr, REG_ECX, DR_QUERY_INCLUDE_ALL));
ASSERT(!instr_reads_from_reg(instr, REG_ECX, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(!instr_reads_from_reg(instr, REG_ECX, 0));
ASSERT(!instr_writes_to_reg(instr, REG_EAX, DR_QUERY_DEFAULT));
ASSERT(instr_writes_to_reg(instr, REG_EAX, DR_QUERY_INCLUDE_ALL));
ASSERT(instr_writes_to_reg(instr, REG_EAX, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(!instr_writes_to_reg(instr, REG_EAX, 0));
pc = instr_encode(dc, instr, buf);
ASSERT(pc != NULL);
instr_reset(dc, instr);
decode(dc, buf, instr);
ASSERT(instr_reads_from_reg(instr, REG_ECX, DR_QUERY_DEFAULT));
ASSERT(instr_reads_from_reg(instr, REG_ECX, DR_QUERY_INCLUDE_ALL));
ASSERT(!instr_reads_from_reg(instr, REG_ECX, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(!instr_reads_from_reg(instr, REG_ECX, 0));
ASSERT(!instr_writes_to_reg(instr, REG_EAX, DR_QUERY_DEFAULT));
ASSERT(instr_writes_to_reg(instr, REG_EAX, DR_QUERY_INCLUDE_ALL));
ASSERT(instr_writes_to_reg(instr, REG_EAX, DR_QUERY_INCLUDE_COND_DSTS));
ASSERT(!instr_writes_to_reg(instr, REG_EAX, 0));
/* bsf always writes to eflags */
instr_reset(dc, instr);
instr = INSTR_CREATE_bsf(dc, opnd_create_reg(REG_EAX), opnd_create_reg(REG_ECX));
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, DR_QUERY_DEFAULT)));
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, DR_QUERY_INCLUDE_ALL)));
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, DR_QUERY_INCLUDE_COND_DSTS)));
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, 0)));
pc = instr_encode(dc, instr, buf);
ASSERT(pc != NULL);
ASSERT(decode_eflags_usage(dc, buf, &usage, DR_QUERY_DEFAULT) != NULL &&
TESTALL(EFLAGS_WRITE_6, usage));
ASSERT(decode_eflags_usage(dc, buf, &usage, DR_QUERY_INCLUDE_ALL) != NULL &&
TESTALL(EFLAGS_WRITE_6, usage));
ASSERT(decode_eflags_usage(dc, buf, &usage, DR_QUERY_INCLUDE_COND_DSTS) != NULL &&
TESTALL(EFLAGS_WRITE_6, usage));
ASSERT(decode_eflags_usage(dc, buf, &usage, 0) != NULL &&
TESTALL(EFLAGS_WRITE_6, usage));
instr_reset(dc, instr);
decode(dc, buf, instr);
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, DR_QUERY_DEFAULT)));
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, DR_QUERY_INCLUDE_ALL)));
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, DR_QUERY_INCLUDE_COND_DSTS)));
ASSERT(TESTALL(EFLAGS_WRITE_6, instr_get_eflags(instr, 0)));
instr_destroy(dc, instr);
}
static void
test_xinst_create(void *dc)
{
byte *pc;
reg_id_t reg = DR_REG_XDX;
instr_t *ins1, *ins2;
/* load 1 byte zextend */
ins1 = XINST_CREATE_load_1byte_zext4(
dc, opnd_create_reg(reg_resize_to_opsz(reg, OPSZ_4)), MEMARG(OPSZ_1));
pc = instr_encode(dc, ins1, buf);
ASSERT(pc != NULL);
ins2 = instr_create(dc);
decode(dc, buf, ins2);
ASSERT(instr_same(ins1, ins2));
instr_reset(dc, ins1);
instr_reset(dc, ins2);
/* load 1 byte */
ins1 = XINST_CREATE_load_1byte(dc, opnd_create_reg(reg_resize_to_opsz(reg, OPSZ_1)),
MEMARG(OPSZ_1));
pc = instr_encode(dc, ins1, buf);
ASSERT(pc != NULL);
ins2 = instr_create(dc);
decode(dc, buf, ins2);
ASSERT(instr_same(ins1, ins2));
instr_reset(dc, ins1);
instr_reset(dc, ins2);
/* load 2 bytes */
ins1 = XINST_CREATE_load_2bytes(dc, opnd_create_reg(reg_resize_to_opsz(reg, OPSZ_2)),
MEMARG(OPSZ_2));
pc = instr_encode(dc, ins1, buf);
ASSERT(pc != NULL);
ins2 = instr_create(dc);
decode(dc, buf, ins2);
ASSERT(instr_same(ins1, ins2));
instr_reset(dc, ins1);
instr_reset(dc, ins2);
/* store 1 byte */
ins1 = XINST_CREATE_store_1byte(dc, MEMARG(OPSZ_1),
opnd_create_reg(reg_resize_to_opsz(reg, OPSZ_1)));
pc = instr_encode(dc, ins1, buf);
ASSERT(pc != NULL);
ins2 = instr_create(dc);
decode(dc, buf, ins2);
ASSERT(instr_same(ins1, ins2));
instr_reset(dc, ins1);
instr_reset(dc, ins2);
    /* store 2 bytes */
ins1 = XINST_CREATE_store_2bytes(dc, MEMARG(OPSZ_2),
opnd_create_reg(reg_resize_to_opsz(reg, OPSZ_2)));
pc = instr_encode(dc, ins1, buf);
ASSERT(pc != NULL);
ins2 = instr_create(dc);
decode(dc, buf, ins2);
ASSERT(instr_same(ins1, ins2));
instr_reset(dc, ins1);
instr_reset(dc, ins2);
}
static void
test_stack_pointer_size(void *dc)
{
/* Test i#2281 where we had the stack pointer size incorrectly varying.
* We can't simply append these to dis-udis86-randtest.raw b/c our test
* there uses -syntax_intel. We could make a new raw DR-style test.
*/
byte *pc;
char buf[512];
int len;
const byte bytes_push[] = { 0x67, 0x51 };
const byte bytes_ret[] = { 0x67, 0xc3 };
const byte bytes_enter[] = { 0x67, 0xc8, 0xab, 0xcd, 0xef };
const byte bytes_leave[] = { 0x67, 0xc9 };
pc =
disassemble_to_buffer(dc, (byte *)bytes_push, (byte *)bytes_push, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL && pc - (byte *)bytes_push == sizeof(bytes_push));
ASSERT(strcmp(buf,
IF_X64_ELSE(
"addr32 push %rcx %rsp -> %rsp 0xfffffff8(%rsp)[8byte]\n",
"addr16 push %ecx %esp -> %esp 0xfffffffc(%esp)[4byte]\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)bytes_ret, (byte *)bytes_ret, false /*no pc*/,
false /*no bytes*/, buf, BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL && pc - (byte *)bytes_ret == sizeof(bytes_ret));
ASSERT(strcmp(buf,
IF_X64_ELSE("addr32 ret %rsp (%rsp)[8byte] -> %rsp\n",
"addr16 ret %esp (%esp)[4byte] -> %esp\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)bytes_enter, (byte *)bytes_enter,
false /*no pc*/, false /*no bytes*/, buf,
BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL && pc - (byte *)bytes_enter == sizeof(bytes_enter));
ASSERT(
strcmp(buf,
IF_X64_ELSE(
"addr32 enter $0xcdab $0xef %rsp %rbp -> %rsp 0xfffffff8(%rsp)[8byte]"
" %rbp\n",
"addr16 enter $0xcdab $0xef %esp %ebp -> %esp 0xfffffffc(%esp)[4byte]"
" %ebp\n")) == 0);
pc = disassemble_to_buffer(dc, (byte *)bytes_leave, (byte *)bytes_leave,
false /*no pc*/, false /*no bytes*/, buf,
BUFFER_SIZE_ELEMENTS(buf), &len);
ASSERT(pc != NULL && pc - (byte *)bytes_leave == sizeof(bytes_leave));
ASSERT(strcmp(buf,
IF_X64_ELSE("addr32 leave %rbp %rsp (%rbp)[8byte] -> %rsp %rbp\n",
"addr16 leave %ebp %esp (%ebp)[4byte] -> %esp %ebp\n")) ==
0);
}
int
main(int argc, char *argv[])
{
#ifdef STANDALONE_DECODER
void *dcontext = GLOBAL_DCONTEXT;
#else
void *dcontext = dr_standalone_init();
/* simple test of deadlock_avoidance, etc. being disabled in standalone */
void *x = dr_mutex_create();
dr_mutex_lock(x);
dr_mutex_unlock(x);
dr_mutex_destroy(x);
#endif
test_all_opcodes_0(dcontext);
#ifndef STANDALONE_DECODER /* speed up compilation */
test_all_opcodes_1(dcontext);
test_all_opcodes_2(dcontext);
test_all_opcodes_2_mm(dcontext);
test_all_opcodes_3(dcontext);
test_all_opcodes_3_avx(dcontext);
test_all_opcodes_4(dcontext);
#endif
test_disp_control(dcontext);
test_indirect_cti(dcontext);
test_cti_prefixes(dcontext);
#ifndef X64
test_modrm16(dcontext);
#endif
test_size_changes(dcontext);
test_nop_xchg(dcontext);
test_hint_nops(dcontext);
#ifdef X64
test_x86_mode(dcontext);
test_x64_abs_addr(dcontext);
test_x64_inc(dcontext);
#endif
test_regs(dcontext);
test_instr_opnds(dcontext);
test_strict_invalid(dcontext);
test_tsx(dcontext);
test_vsib(dcontext);
test_disasm_sizes(dcontext);
test_predication(dcontext);
test_xinst_create(dcontext);
test_stack_pointer_size(dcontext);
print("all done\n");
return 0;
}
| 1 | 16,527 | I would say just change REGARG: no need for a separate thing. | DynamoRIO-dynamorio | c |
@@ -0,0 +1,18 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>C2</title>
+ <meta name="viewport" content="width=device-width"/>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
+ <link rel="stylesheet" type="text/css" href="<%= stylesheet_url("mailer.css") %>">
+</head>
+<body class="c2-notification">
+ <%= yield %>
+ <hr>
+ <div id="footer" class="inset">
+ <p>
+ <%= t("mailer.feedback_footer_html", feedback_url: feedback_url) %>
+ </p>
+ </div>
+</body>
+</html> | 1 | 1 | 16,402 | we can remove this template, it is no longer in use | 18F-C2 | rb |
|
@@ -45,11 +45,9 @@ module Beaker
end
it "recursively dumps values" do
- options.merge({'k' => 'v', 'key' => {'subkey' => 'subvalue'}, 'array_key' => ['array1', 'array2'], 'array_key2' => [['subarray1', 'subarray2'], 'anotherval']})
- expect(options.dump).to be === "Options:\n\t\tk : v\n\t\tkey : \n\t\t\tsubkey : subvalue\n\t\tarray_key : \n\t\t\t[\n\t\t\tarray1,\n\t\t\tarray2,\n\t\t\t]\n\t\tarray_key2 : \n\t\t\t[\n\t\t\t[\n\t\t\tsubarray1,\n\t\t\tsubarray2,\n\t\t\t]\n\t\t\tanotherval,\n\t\t\t]\n"
-
+ options.merge({'k' => { 'key' => {'subkey' => 'subvalue'}}})
+ expect(options.dump).to be === "Options:\n\t\tk : \n\t\t\tkey : \n\t\t\t\tsubkey : subvalue\n"
end
-
end
end
end | 1 | require "spec_helper"
module Beaker
module Options
describe OptionsHash do
let(:options) { Beaker::Options::OptionsHash.new }
#test options hash methods
it "converts all string keys to symbols when doing direct assignment" do
options['key'] = 'value'
expect(options.has_key?(:key)) === true and expect(options.has_key?('key')) === false
end
it "can look up by string or symbol key" do
options.merge({'key' => 'value'})
expect(options['key']) === 'value' and expect(options[:key]) === 'value'
end
it "supports is_pe?, defaults to pe" do
expect(options.is_pe?) === true
end
it "supports is_pe?, respects :type == foss" do
options[:type] = 'foss'
expect(options.is_pe?) === false
end
it "can delete by string of symbol key" do
options['key'] = 'value'
expect(options.delete('key')) === 'value' and expect(options.delete(:key)) === 'value'
end
it "when merged with a Hash remains an OptionsHash" do
options.merge({'key' => 'value'})
expect(options.is_a?(OptionsHash)) === true
end
it "when merged with a hash that contains a hash, the sub-hash becomes an OptionsHash" do
options.merge({'key' => {'subkey' => 'subvalue'}})
expect(options[:key].is_a?(OptionsHash)) === true and expect(options[:key][:subkey]) === 'subvalue'
end
it "supports a dump function" do
expect{options.dump}.to_not raise_error
end
it "recursively dumps values" do
options.merge({'k' => 'v', 'key' => {'subkey' => 'subvalue'}, 'array_key' => ['array1', 'array2'], 'array_key2' => [['subarray1', 'subarray2'], 'anotherval']})
expect(options.dump).to be === "Options:\n\t\tk : v\n\t\tkey : \n\t\t\tsubkey : subvalue\n\t\tarray_key : \n\t\t\t[\n\t\t\tarray1,\n\t\t\tarray2,\n\t\t\t]\n\t\tarray_key2 : \n\t\t\t[\n\t\t\t[\n\t\t\tsubarray1,\n\t\t\tsubarray2,\n\t\t\t]\n\t\t\tanotherval,\n\t\t\t]\n"
end
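      # Illustrative sketch, not part of the original spec: a separate example
      # keeps the array-handling branch of dump covered on its own. The expected
      # string below is an assumption inferred from the full dump expectation above.
      it "recursively dumps array values" do
        options.merge({'array_key' => ['array1', 'array2']})
        expect(options.dump).to be === "Options:\n\t\tarray_key : \n\t\t\t[\n\t\t\tarray1,\n\t\t\tarray2,\n\t\t\t]\n"
      end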
end
end
end
| 1 | 4,772 | This doesn't look like it has as much coverage of the recursive code as it once did. | voxpupuli-beaker | rb |
@@ -107,6 +107,10 @@ type AWSLoadBalancerSpec struct {
// Defaults to false.
// +optional
CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"`
+
+ // SecurityGroups sets the security groups used by the load balancer
+ // +optional
+ SecurityGroups []SecurityGroup `json:"securityGroups,omitempty"`
}
// AWSClusterStatus defines the observed state of AWSCluster | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)
const (
// ClusterFinalizer allows ReconcileAWSCluster to clean up AWS resources associated with AWSCluster before
// removing it from the apiserver.
ClusterFinalizer = "awscluster.infrastructure.cluster.x-k8s.io"
)
// AWSClusterSpec defines the desired state of AWSCluster
type AWSClusterSpec struct {
// NetworkSpec encapsulates all things related to AWS network.
NetworkSpec NetworkSpec `json:"networkSpec,omitempty"`
// The AWS Region the cluster lives in.
Region string `json:"region,omitempty"`
// SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
// +optional
SSHKeyName *string `json:"sshKeyName,omitempty"`
// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
// +optional
ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
// ones added by default.
// +optional
AdditionalTags Tags `json:"additionalTags,omitempty"`
// ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior
// +optional
ControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"controlPlaneLoadBalancer,omitempty"`
// ImageLookupFormat is the AMI naming format to look up machine images when
// a machine does not specify an AMI. When set, this will be used for all
// cluster machines unless a machine specifies a different ImageLookupOrg.
// Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
// OS and kubernetes version, respectively. The BaseOS will be the value in
// ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
// defined by the packages produced by kubernetes/release without v as a
// prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
// image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up
// searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
// Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
// also: https://golang.org/pkg/text/template/
// +optional
ImageLookupFormat string `json:"imageLookupFormat,omitempty"`
// ImageLookupOrg is the AWS Organization ID to look up machine images when a
// machine does not specify an AMI. When set, this will be used for all
// cluster machines unless a machine specifies a different ImageLookupOrg.
// +optional
ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
// ImageLookupBaseOS is the name of the base operating system used to look
// up machine images when a machine does not specify an AMI. When set, this
// will be used for all cluster machines unless a machine specifies a
// different ImageLookupBaseOS.
ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`
// Bastion contains options to configure the bastion host.
// +optional
Bastion Bastion `json:"bastion"`
}
type Bastion struct {
// Enabled allows this provider to create a bastion host instance
// with a public ip to access the VPC private network.
// +optional
Enabled bool `json:"enabled"`
}
// AWSLoadBalancerSpec defines the desired state of an AWS load balancer
type AWSLoadBalancerSpec struct {
// Scheme sets the scheme of the load balancer (defaults to Internet-facing)
// +optional
Scheme *ClassicELBScheme `json:"scheme,omitempty"`
// CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing.
//
// With cross-zone load balancing, each load balancer node for your Classic Load Balancer
// distributes requests evenly across the registered instances in all enabled Availability Zones.
// If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across
// the registered instances in its Availability Zone only.
//
// Defaults to false.
// +optional
CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"`
}
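// Illustrative sketch, not part of the original file: the behaviour documented
// on CrossZoneLoadBalancing above would typically be enabled by constructing
// the spec with the field set, e.g.
//
//	lb := AWSLoadBalancerSpec{CrossZoneLoadBalancing: true}
//
// The variable name is only for illustration.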
// AWSClusterStatus defines the observed state of AWSCluster
type AWSClusterStatus struct {
// +kubebuilder:default=false
Ready bool `json:"ready"`
Network Network `json:"network,omitempty"`
FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
Bastion *Instance `json:"bastion,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclusters,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSCluster belongs"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready for EC2 instances"
// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.networkSpec.vpc.id",description="AWS VPC the cluster is using"
// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".status.apiEndpoints[0]",description="API Endpoint",priority=1
// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access"
// AWSCluster is the Schema for the awsclusters API
type AWSCluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSClusterSpec `json:"spec,omitempty"`
Status AWSClusterStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AWSClusterList contains a list of AWSCluster
type AWSClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSCluster `json:"items"`
}
func init() {
SchemeBuilder.Register(&AWSCluster{}, &AWSClusterList{})
}
| 1 | 14,360 | Do we actually expect users to provide more information here than possibly a SecurityGroup ID? If not, then this should probably use a new type, otherwise users might be confused by what options would be respected/ignored when used in this context. | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -1,6 +1,8 @@
package edu.harvard.iq.dataverse.util;
import com.ocpsoft.pretty.PrettyContext;
+import com.rometools.utils.Strings;
+
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.Dataverse; | 1 | package edu.harvard.iq.dataverse.util;
import com.ocpsoft.pretty.PrettyContext;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.Dataverse;
import edu.harvard.iq.dataverse.DataverseServiceBean;
import edu.harvard.iq.dataverse.DvObjectContainer;
import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.authorization.providers.builtin.BuiltinAuthenticationProvider;
import edu.harvard.iq.dataverse.authorization.providers.oauth2.AbstractOAuth2AuthenticationProvider;
import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException;
import static edu.harvard.iq.dataverse.datasetutility.FileSizeChecker.bytesToHumanReadable;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.validation.PasswordValidatorUtil;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.time.Year;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.MissingResourceException;
import java.util.Properties;
import java.util.ResourceBundle;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.ejb.EJB;
import javax.ejb.Stateless;
import javax.inject.Named;
import javax.json.Json;
import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.json.JsonReader;
import javax.json.JsonString;
import javax.json.JsonValue;
import org.passay.CharacterRule;
import org.apache.commons.io.IOUtils;
/**
* System-wide configuration
*/
@Stateless
@Named
public class SystemConfig {
private static final Logger logger = Logger.getLogger(SystemConfig.class.getCanonicalName());
@EJB
SettingsServiceBean settingsService;
@EJB
DataverseServiceBean dataverseService;
@EJB
AuthenticationServiceBean authenticationService;
public static final String DATAVERSE_PATH = "/dataverse/";
/**
* A JVM option for the advertised fully qualified domain name (hostname) of
* the Dataverse installation, such as "dataverse.example.com", which may
* differ from the hostname that the server knows itself as.
*
* The equivalent in DVN 3.x was "dvn.inetAddress".
*/
public static final String FQDN = "dataverse.fqdn";
/**
* A JVM option for specifying the "official" URL of the site.
* Unlike the FQDN option above, this would be a complete URL,
* with the protocol, port number etc.
*/
public static final String SITE_URL = "dataverse.siteUrl";
/**
* A JVM option for where files are stored on the file system.
*/
public static final String FILES_DIRECTORY = "dataverse.files.directory";
/**
* Some installations may not want download URLs to their files to be
* available in Schema.org JSON-LD output.
*/
public static final String FILES_HIDE_SCHEMA_DOT_ORG_DOWNLOAD_URLS = "dataverse.files.hide-schema-dot-org-download-urls";
/**
* A JVM option to override the number of minutes for which a password reset
* token is valid ({@link #minutesUntilPasswordResetTokenExpires}).
*/
private static final String PASSWORD_RESET_TIMEOUT_IN_MINUTES = "dataverse.auth.password-reset-timeout-in-minutes";
/**
* A common place to find the String for a sane Solr hostname:port
* combination.
*/
private String saneDefaultForSolrHostColonPort = "localhost:8983";
/**
* The default number of datafiles that we allow to be created through
* zip file upload.
*/
private static final int defaultZipUploadFilesLimit = 1000;
private static final long defaultZipDownloadLimit = 104857600L; // 100MB
private static final int defaultMultipleUploadFilesLimit = 1000;
private static final int defaultLoginSessionTimeout = 480; // = 8 hours
private static String appVersionString = null;
private static String buildNumberString = null;
private static final String JVM_TIMER_SERVER_OPTION = "dataverse.timerServer";
private static final long DEFAULT_GUESTBOOK_RESPONSES_DISPLAY_LIMIT = 5000L;
public final static String DEFAULTCURATIONLABELSET = "DEFAULT";
public final static String CURATIONLABELSDISABLED = "DISABLED";
public String getVersion() {
return getVersion(false);
}
public String getVersion(boolean withBuildNumber) {
if (appVersionString == null) {
// The Version Number is no longer supplied in a .properties file - so
// we can't just do
// return BundleUtil.getStringFromBundle("version.number", null, ResourceBundle.getBundle("VersionNumber", Locale.US));
//
// Instead, we'll rely on Maven placing the version number into the
// Manifest, and getting it from there:
// (this is considered a better practice, and will also allow us
// to maintain this number in only one place - the pom.xml file)
// -- L.A. 4.0.2
// One would assume, that once the version is in the MANIFEST.MF,
// as Implementation-Version:, it would be possible to obtain
// said version simply as
// appVersionString = getClass().getPackage().getImplementationVersion();
// alas - that's not working, for whatever reason. (perhaps that's
// only how it works with jar-ed packages; not with .war files).
// People on the interwebs suggest that one should instead
// open the Manifest as a resource, then extract its attributes.
// There were some complications with that too. Plus, relying solely
// on the MANIFEST.MF would NOT work for those of the developers who
// are using "in place deployment" (i.e., where
// Netbeans runs their builds directly from the local target
// directory, bypassing the war file deployment; and the Manifest
// is only available in the .war file). For that reason, I am
// going to rely on the pom.properties file, and use java.util.Properties
// to read it. We have to look for this file in 2 different places
// depending on whether this is a .war file deployment, or a
// developers build. (the app-level META-INF is only populated when
// a .war file is built; the "maven-archiver" directory, on the other
// hand, is only available when it's a local build deployment).
// So, long story short, I'm resorting to the convoluted steps below.
// It may look hacky, but it should actually be pretty solid and
// reliable.
// First, find the absolute path url of the application persistence file
// always supplied with the Dataverse app:
java.net.URL fileUrl = Thread.currentThread().getContextClassLoader().getResource("META-INF/persistence.xml");
String filePath = null;
if (fileUrl != null) {
filePath = fileUrl.getFile();
if (filePath != null) {
InputStream mavenPropertiesInputStream = null;
String mavenPropertiesFilePath;
Properties mavenProperties = new Properties();
filePath = filePath.replaceFirst("/[^/]*$", "/");
// Using a relative path, find the location of the maven pom.properties file.
// First, try to look for it in the app-level META-INF. This will only be
// available if it's a war file deployment:
mavenPropertiesFilePath = filePath.concat("../../../META-INF/maven/edu.harvard.iq/dataverse/pom.properties");
try {
mavenPropertiesInputStream = new FileInputStream(mavenPropertiesFilePath);
} catch (IOException ioex) {
// OK, let's hope this is a local dev. build.
// In that case the properties file should be available in
// the maven-archiver directory:
mavenPropertiesFilePath = filePath.concat("../../../../maven-archiver/pom.properties");
// try again:
try {
mavenPropertiesInputStream = new FileInputStream(mavenPropertiesFilePath);
} catch (IOException ioex2) {
logger.warning("Failed to find and/or open for reading the pom.properties file.");
mavenPropertiesInputStream = null;
}
}
if (mavenPropertiesInputStream != null) {
try {
mavenProperties.load(mavenPropertiesInputStream);
appVersionString = mavenProperties.getProperty("version");
} catch (IOException ioex) {
logger.warning("caught IOException trying to read and parse the pom properties file.");
} finally {
IOUtils.closeQuietly(mavenPropertiesInputStream);
}
}
} else {
logger.warning("Null file path representation of the location of persistence.xml in the webapp root directory!");
}
} else {
logger.warning("Could not find the location of persistence.xml in the webapp root directory!");
}
if (appVersionString == null) {
// still null? - defaulting to 4.0:
appVersionString = "4.0";
}
}
if (withBuildNumber) {
if (buildNumberString == null) {
// (build number is still in a .properties file in the source tree; it only
// contains a real build number if this war file was built by
// Jenkins)
try {
buildNumberString = ResourceBundle.getBundle("BuildNumber").getString("build.number");
} catch (MissingResourceException ex) {
buildNumberString = null;
}
}
if (buildNumberString != null && !buildNumberString.equals("")) {
return appVersionString + " build " + buildNumberString;
}
}
return appVersionString;
}
public String getSolrHostColonPort() {
String SolrHost;
        if (System.getenv("SOLR_SERVICE_HOST") != null && !System.getenv("SOLR_SERVICE_HOST").isEmpty()) {
            SolrHost = System.getenv("SOLR_SERVICE_HOST");
        } else {
            SolrHost = saneDefaultForSolrHostColonPort;
        }
String solrHostColonPort = settingsService.getValueForKey(SettingsServiceBean.Key.SolrHostColonPort, SolrHost);
return solrHostColonPort;
}
public boolean isProvCollectionEnabled() {
String provCollectionEnabled = settingsService.getValueForKey(SettingsServiceBean.Key.ProvCollectionEnabled, null);
if("true".equalsIgnoreCase(provCollectionEnabled)){
return true;
}
return false;
}
public int getMetricsCacheTimeoutMinutes() {
int defaultValue = 10080; //one week in minutes
SettingsServiceBean.Key key = SettingsServiceBean.Key.MetricsCacheTimeoutMinutes;
String metricsCacheTimeString = settingsService.getValueForKey(key);
if (metricsCacheTimeString != null) {
int returnInt = 0;
try {
returnInt = Integer.parseInt(metricsCacheTimeString);
if (returnInt >= 0) {
return returnInt;
} else {
logger.info("Returning " + defaultValue + " for " + key + " because value must be greater than zero, not \"" + metricsCacheTimeString + "\".");
}
} catch (NumberFormatException ex) {
logger.info("Returning " + defaultValue + " for " + key + " because value must be an integer greater than zero, not \"" + metricsCacheTimeString + "\".");
}
}
return defaultValue;
}
public int getMinutesUntilConfirmEmailTokenExpires() {
final int minutesInOneDay = 1440;
final int reasonableDefault = minutesInOneDay;
SettingsServiceBean.Key key = SettingsServiceBean.Key.MinutesUntilConfirmEmailTokenExpires;
String valueFromDatabase = settingsService.getValueForKey(key);
if (valueFromDatabase != null) {
try {
int intFromDatabase = Integer.parseInt(valueFromDatabase);
if (intFromDatabase > 0) {
return intFromDatabase;
} else {
logger.info("Returning " + reasonableDefault + " for " + key + " because value must be greater than zero, not \"" + intFromDatabase + "\".");
}
} catch (NumberFormatException ex) {
logger.info("Returning " + reasonableDefault + " for " + key + " because value must be an integer greater than zero, not \"" + valueFromDatabase + "\".");
}
}
logger.fine("Returning " + reasonableDefault + " for " + key);
return reasonableDefault;
}
/**
* The number of minutes for which a password reset token is valid. Can be
* overridden by {@link #PASSWORD_RESET_TIMEOUT_IN_MINUTES}.
*/
public static int getMinutesUntilPasswordResetTokenExpires() {
final int reasonableDefault = 60;
String configuredValueAsString = System.getProperty(PASSWORD_RESET_TIMEOUT_IN_MINUTES);
if (configuredValueAsString != null) {
int configuredValueAsInteger = 0;
try {
configuredValueAsInteger = Integer.parseInt(configuredValueAsString);
if (configuredValueAsInteger > 0) {
return configuredValueAsInteger;
} else {
logger.info(PASSWORD_RESET_TIMEOUT_IN_MINUTES + " is configured as a negative number \"" + configuredValueAsInteger + "\". Using default value instead: " + reasonableDefault);
return reasonableDefault;
}
} catch (NumberFormatException ex) {
logger.info("Unable to convert " + PASSWORD_RESET_TIMEOUT_IN_MINUTES + " from \"" + configuredValueAsString + "\" into an integer value: " + ex + ". Using default value " + reasonableDefault);
}
}
return reasonableDefault;
}
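    // Illustrative note, not part of the original source: the override described
    // above is a plain JVM system property, so starting the application server
    // with -Ddataverse.auth.password-reset-timeout-in-minutes=120 would make
    // reset tokens valid for two hours instead of the 60-minute default.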
/**
* The "official", designated URL of the site;
* can be defined as a complete URL; or derived from the
* "official" hostname. If none of these options is set,
* defaults to the InetAddress.getLocalHOst() and https;
* These are legacy JVM options. Will be eventualy replaced
* by the Settings Service configuration.
*/
public String getDataverseSiteUrl() {
return getDataverseSiteUrlStatic();
}
public static String getDataverseSiteUrlStatic() {
String hostUrl = System.getProperty(SITE_URL);
if (hostUrl != null && !"".equals(hostUrl)) {
return hostUrl;
}
String hostName = System.getProperty(FQDN);
if (hostName == null) {
try {
hostName = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
return null;
}
}
hostUrl = "https://" + hostName;
return hostUrl;
}
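    // Illustrative note, not part of the original source: given the precedence
    // above, launching with only -Ddataverse.fqdn=dataverse.example.com yields
    // "https://dataverse.example.com", while -Ddataverse.siteUrl=http://localhost:8080
    // is returned verbatim, protocol and port included. The host names are made up.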
/**
* URL Tracking:
*/
public String getPageURLWithQueryString() {
return PrettyContext.getCurrentInstance().getRequestURL().toURL() + PrettyContext.getCurrentInstance().getRequestQueryString().toQueryString();
}
/**
* The "official" server's fully-qualified domain name:
*/
public String getDataverseServer() {
        // still relies on a JVM option:
String fqdn = System.getProperty(FQDN);
if (fqdn == null) {
try {
fqdn = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
return null;
}
}
return fqdn;
}
public String getGuidesBaseUrl() {
String saneDefault = "https://guides.dataverse.org";
String guidesBaseUrl = settingsService.getValueForKey(SettingsServiceBean.Key.GuidesBaseUrl, saneDefault);
return guidesBaseUrl + "/" + getGuidesLanguage();
}
private String getGuidesLanguage() {
String saneDefault = "en";
return saneDefault;
}
public String getGuidesVersion() {
String saneDefault = getVersion();
String guidesVersion = settingsService.getValueForKey(SettingsServiceBean.Key.GuidesVersion, saneDefault);
if (guidesVersion != null) {
return guidesVersion;
}
return saneDefault;
}
public String getMetricsUrl() {
String saneDefault = null;
String metricsUrl = settingsService.getValueForKey(SettingsServiceBean.Key.MetricsUrl, saneDefault);
return metricsUrl;
}
static long getLongLimitFromStringOrDefault(String limitSetting, Long defaultValue) {
Long limit = null;
if (limitSetting != null && !limitSetting.equals("")) {
try {
limit = new Long(limitSetting);
} catch (NumberFormatException nfe) {
limit = null;
}
}
return limit != null ? limit : defaultValue;
}
static int getIntLimitFromStringOrDefault(String limitSetting, Integer defaultValue) {
Integer limit = null;
if (limitSetting != null && !limitSetting.equals("")) {
try {
limit = new Integer(limitSetting);
} catch (NumberFormatException nfe) {
limit = null;
}
}
return limit != null ? limit : defaultValue;
}
/**
* Download-as-zip size limit.
* returns defaultZipDownloadLimit if not specified;
* set to -1 to disable zip downloads.
*/
public long getZipDownloadLimit() {
String zipLimitOption = settingsService.getValueForKey(SettingsServiceBean.Key.ZipDownloadLimit);
return getLongLimitFromStringOrDefault(zipLimitOption, defaultZipDownloadLimit);
}
public int getZipUploadFilesLimit() {
String limitOption = settingsService.getValueForKey(SettingsServiceBean.Key.ZipUploadFilesLimit);
return getIntLimitFromStringOrDefault(limitOption, defaultZipUploadFilesLimit);
}
/**
* Session timeout, in minutes.
* (default value provided)
*/
public int getLoginSessionTimeout() {
return getIntLimitFromStringOrDefault(
settingsService.getValueForKey(SettingsServiceBean.Key.LoginSessionTimeout),
defaultLoginSessionTimeout);
}
/*
     * The number of files the GUI user is allowed to upload in one batch,
     * via drag-and-drop, or through the file select dialog.
*/
public int getMultipleUploadFilesLimit() {
String limitOption = settingsService.getValueForKey(SettingsServiceBean.Key.MultipleUploadFilesLimit);
return getIntLimitFromStringOrDefault(limitOption, defaultMultipleUploadFilesLimit);
}
public long getGuestbookResponsesPageDisplayLimit() {
String limitSetting = settingsService.getValueForKey(SettingsServiceBean.Key.GuestbookResponsesPageDisplayLimit);
return getLongLimitFromStringOrDefault(limitSetting, DEFAULT_GUESTBOOK_RESPONSES_DISPLAY_LIMIT);
}
public long getUploadLogoSizeLimit(){
return 500000;
}
// TODO: (?)
// create sensible defaults for these things? -- 4.2.2
public long getThumbnailSizeLimitImage() {
long limit = getThumbnailSizeLimit("Image");
return limit == 0 ? 500000 : limit;
}
public long getThumbnailSizeLimitPDF() {
long limit = getThumbnailSizeLimit("PDF");
return limit == 0 ? 500000 : limit;
}
public long getThumbnailSizeLimit(String type) {
String option = null;
//get options via jvm options
if ("Image".equals(type)) {
option = System.getProperty("dataverse.dataAccess.thumbnail.image.limit");
} else if ("PDF".equals(type)) {
option = System.getProperty("dataverse.dataAccess.thumbnail.pdf.limit");
}
return getLongLimitFromStringOrDefault(option, 0L);
}
public boolean isThumbnailGenerationDisabledForType(String type) {
return getThumbnailSizeLimit(type) == -1l;
}
public boolean isThumbnailGenerationDisabledForImages() {
return isThumbnailGenerationDisabledForType("Image");
}
public boolean isThumbnailGenerationDisabledForPDF() {
return isThumbnailGenerationDisabledForType("PDF");
}
public String getApplicationTermsOfUse() {
String language = BundleUtil.getCurrentLocale().getLanguage();
String saneDefaultForAppTermsOfUse = BundleUtil.getStringFromBundle("system.app.terms");
// Get the value for the defaultLocale. IT will either be used as the return
// value, or as a better default than the saneDefaultForAppTermsOfUse if there
// is no language-specific value
String appTermsOfUse = settingsService.getValueForKey(SettingsServiceBean.Key.ApplicationTermsOfUse, saneDefaultForAppTermsOfUse);
//Now get the language-specific value if it exists
if (!language.equalsIgnoreCase(BundleUtil.getDefaultLocale().getLanguage())) {
appTermsOfUse = settingsService.getValueForKey(SettingsServiceBean.Key.ApplicationTermsOfUse, language, appTermsOfUse);
}
return appTermsOfUse;
}
public String getApiTermsOfUse() {
String saneDefaultForApiTermsOfUse = BundleUtil.getStringFromBundle("system.api.terms");
String apiTermsOfUse = settingsService.getValueForKey(SettingsServiceBean.Key.ApiTermsOfUse, saneDefaultForApiTermsOfUse);
return apiTermsOfUse;
}
// TODO:
// remove this method!
// pages should be using settingsWrapper.get(":ApplicationPrivacyPolicyUrl") instead. -- 4.2.1
public String getApplicationPrivacyPolicyUrl() {
String saneDefaultForPrivacyPolicyUrl = null;
String appPrivacyPolicyUrl = settingsService.getValueForKey(SettingsServiceBean.Key.ApplicationPrivacyPolicyUrl, saneDefaultForPrivacyPolicyUrl);
return appPrivacyPolicyUrl;
}
public boolean myDataDoesNotUsePermissionDocs() {
boolean safeDefaultIfKeyNotFound = false;
return settingsService.isTrueForKey(SettingsServiceBean.Key.MyDataDoesNotUseSolrPermissionDocs, safeDefaultIfKeyNotFound);
}
public boolean isFilesOnDatasetPageFromSolr() {
boolean safeDefaultIfKeyNotFound = false;
return settingsService.isTrueForKey(SettingsServiceBean.Key.FilesOnDatasetPageFromSolr, safeDefaultIfKeyNotFound);
}
public Long getMaxFileUploadSizeForStore(String driverId){
return settingsService.getValueForCompoundKeyAsLong(SettingsServiceBean.Key.MaxFileUploadSizeInBytes, driverId);
}
public Integer getSearchHighlightFragmentSize() {
String fragSize = settingsService.getValueForKey(SettingsServiceBean.Key.SearchHighlightFragmentSize);
if (fragSize != null) {
try {
return new Integer(fragSize);
} catch (NumberFormatException nfe) {
logger.info("Could not convert " + SettingsServiceBean.Key.SearchHighlightFragmentSize + " to int: " + nfe);
}
}
return null;
}
public long getTabularIngestSizeLimit() {
// This method will return the blanket ingestable size limit, if
// set on the system. I.e., the universal limit that applies to all
        // tabular ingests, regardless of format:
String limitEntry = settingsService.getValueForKey(SettingsServiceBean.Key.TabularIngestSizeLimit);
if (limitEntry != null) {
try {
Long sizeOption = new Long(limitEntry);
return sizeOption;
} catch (NumberFormatException nfe) {
logger.warning("Invalid value for TabularIngestSizeLimit option? - " + limitEntry);
}
}
// -1 means no limit is set;
// 0 on the other hand would mean that ingest is fully disabled for
// tabular data.
return -1;
}
public long getTabularIngestSizeLimit(String formatName) {
// This method returns the size limit set specifically for this format name,
// if available, otherwise - the blanket limit that applies to all tabular
// ingests regardless of a format.
if (formatName == null || formatName.equals("")) {
return getTabularIngestSizeLimit();
}
String limitEntry = settingsService.get(SettingsServiceBean.Key.TabularIngestSizeLimit.toString() + ":" + formatName);
if (limitEntry != null) {
try {
Long sizeOption = new Long(limitEntry);
return sizeOption;
} catch (NumberFormatException nfe) {
logger.warning("Invalid value for TabularIngestSizeLimit:" + formatName + "? - " + limitEntry );
}
}
return getTabularIngestSizeLimit();
}
public boolean isOAIServerEnabled() {
boolean defaultResponse = false;
return settingsService.isTrueForKey(SettingsServiceBean.Key.OAIServerEnabled, defaultResponse);
}
public void enableOAIServer() {
settingsService.setValueForKey(SettingsServiceBean.Key.OAIServerEnabled, "true");
}
public void disableOAIServer() {
settingsService.deleteValueForKey(SettingsServiceBean.Key.OAIServerEnabled);
}
public boolean isTimerServer() {
String optionValue = System.getProperty(JVM_TIMER_SERVER_OPTION);
if ("true".equalsIgnoreCase(optionValue)) {
return true;
}
return false;
}
public String getFooterCopyrightAndYear() {
return BundleUtil.getStringFromBundle("footer.copyright", Arrays.asList(Year.now().getValue() + ""));
}
public DataFile.ChecksumType getFileFixityChecksumAlgorithm() {
DataFile.ChecksumType saneDefault = DataFile.ChecksumType.MD5;
String checksumStringFromDatabase = settingsService.getValueForKey(SettingsServiceBean.Key.FileFixityChecksumAlgorithm, saneDefault.toString());
try {
DataFile.ChecksumType checksumTypeFromDatabase = DataFile.ChecksumType.fromString(checksumStringFromDatabase);
return checksumTypeFromDatabase;
} catch (IllegalArgumentException ex) {
logger.info("The setting " + SettingsServiceBean.Key.FileFixityChecksumAlgorithm + " is misconfigured. " + ex.getMessage() + " Returning sane default: " + saneDefault + ".");
return saneDefault;
}
}
public String getDefaultAuthProvider() {
String saneDefault = BuiltinAuthenticationProvider.PROVIDER_ID;
String settingInDatabase = settingsService.getValueForKey(SettingsServiceBean.Key.DefaultAuthProvider, saneDefault);
if (settingInDatabase != null && !settingInDatabase.isEmpty()) {
/**
* @todo Add more sanity checking.
*/
return settingInDatabase;
}
return saneDefault;
}
public String getNameOfInstallation() {
return dataverseService.getRootDataverseName();
}
public AbstractOAuth2AuthenticationProvider.DevOAuthAccountType getDevOAuthAccountType() {
AbstractOAuth2AuthenticationProvider.DevOAuthAccountType saneDefault = AbstractOAuth2AuthenticationProvider.DevOAuthAccountType.PRODUCTION;
String settingReturned = settingsService.getValueForKey(SettingsServiceBean.Key.DebugOAuthAccountType);
logger.fine("setting returned: " + settingReturned);
if (settingReturned != null) {
try {
AbstractOAuth2AuthenticationProvider.DevOAuthAccountType parsedValue = AbstractOAuth2AuthenticationProvider.DevOAuthAccountType.valueOf(settingReturned);
return parsedValue;
} catch (IllegalArgumentException ex) {
logger.info("Couldn't parse value: " + ex + " - returning a sane default: " + saneDefault);
return saneDefault;
}
} else {
logger.fine("OAuth dev mode has not been configured. Returning a sane default: " + saneDefault);
return saneDefault;
}
}
public String getOAuth2CallbackUrl() {
String saneDefault = getDataverseSiteUrl() + "/oauth2/callback.xhtml";
String settingReturned = settingsService.getValueForKey(SettingsServiceBean.Key.OAuth2CallbackUrl);
logger.fine("getOAuth2CallbackUrl setting returned: " + settingReturned);
if (settingReturned != null) {
return settingReturned;
}
return saneDefault;
}
public boolean isShibPassiveLoginEnabled() {
boolean defaultResponse = false;
return settingsService.isTrueForKey(SettingsServiceBean.Key.ShibPassiveLoginEnabled, defaultResponse);
}
public boolean isShibAttributeCharacterSetConversionEnabled() {
boolean defaultResponse = true;
return settingsService.isTrueForKey(SettingsServiceBean.Key.ShibAttributeCharacterSetConversionEnabled, defaultResponse);
}
/**
* getPVDictionaries
*
* @return A string of one or more pipe (|) separated file paths.
*/
public String getPVDictionaries() {
return settingsService.get(SettingsServiceBean.Key.PVDictionaries.toString());
}
/**
* getPVGoodStrength
*
     * Get the password length at or above which a password is accepted as
     * "good strength" and the remaining validation rules are waived.
     * Defaults to 20.
*
* @return The length.
*/
public int getPVGoodStrength() {
// FIXME: Change this to 21 to match Harvard's requirements or implement a way to disable the rule (0 or -1) and have the default be disabled.
int goodStrengthLength = 20;
//String _goodStrengthLength = System.getProperty("pv.goodstrength", settingsService.get(SettingsServiceBean.Key.PVGoodStrength.toString()));
String _goodStrengthLength = settingsService.get(SettingsServiceBean.Key.PVGoodStrength.toString());
try {
goodStrengthLength = Integer.parseInt(_goodStrengthLength);
} catch (NumberFormatException nfe) {
logger.fine("Invalid value for PVGoodStrength: " + _goodStrengthLength);
}
return goodStrengthLength;
}
/**
* getPVMinLength
*
* Get the minimum length of a valid password. Defaults to 6.
*
* @return The length.
*/
public int getPVMinLength() {
int passportValidatorMinLength = 6;
String _passportValidatorMinLength = settingsService.get(SettingsServiceBean.Key.PVMinLength.toString());
try {
passportValidatorMinLength = Integer.parseInt(_passportValidatorMinLength);
} catch (NumberFormatException nfe) {
logger.fine("Invalid value for PwMinLength: " + _passportValidatorMinLength);
}
return passportValidatorMinLength;
}
/**
* getPVMaxLength
*
* Get the maximum length of a valid password. Defaults to 0 (disabled).
*
* @return The length.
*/
public int getPVMaxLength() {
int passportValidatorMaxLength = 0;
String _passportValidatorMaxLength = settingsService.get(SettingsServiceBean.Key.PVMaxLength.toString());
try {
passportValidatorMaxLength = Integer.parseInt(_passportValidatorMaxLength);
} catch (NumberFormatException nfe) {
logger.fine("Invalid value for PwMaxLength: " + _passportValidatorMaxLength);
}
return passportValidatorMaxLength;
}
/**
* One letter, 2 special characters, etc. Defaults to:
*
* - one uppercase
*
* - one lowercase
*
* - one digit
*
* - one special character
*
* TODO: This is more strict than what Dataverse 4.0 shipped with. Consider
* keeping the default the same.
*/
public List<CharacterRule> getPVCharacterRules() {
String characterRulesString = settingsService.get(SettingsServiceBean.Key.PVCharacterRules.toString());
return PasswordValidatorUtil.getCharacterRules(characterRulesString);
}
/**
* getPVNumberOfCharacteristics
*
     * Get the number of character rules (M) that a password must satisfy. Defaults to 2.
*
* @return The number.
*
* TODO: Consider changing the out-of-the-box rules to be the same as Dataverse 4.0, which was 2 (one letter, one number).
*/
public int getPVNumberOfCharacteristics() {
int numberOfCharacteristics = 2;
String _numberOfCharacteristics = settingsService.get(SettingsServiceBean.Key.PVNumberOfCharacteristics.toString());
try {
numberOfCharacteristics = Integer.parseInt(_numberOfCharacteristics);
} catch (NumberFormatException nfe) {
logger.fine("Invalid value for PVNumberOfCharacteristics: " + _numberOfCharacteristics);
}
return numberOfCharacteristics;
}
/**
* Get the number of consecutive digits allowed. Defaults to highest int
* possible.
*/
public int getPVNumberOfConsecutiveDigitsAllowed() {
int numConsecutiveDigitsAllowed = Integer.MAX_VALUE;
String _numberOfConsecutiveDigitsAllowed = settingsService.get(SettingsServiceBean.Key.PVNumberOfConsecutiveDigitsAllowed.toString());
try {
numConsecutiveDigitsAllowed = Integer.parseInt(_numberOfConsecutiveDigitsAllowed);
} catch (NumberFormatException nfe) {
logger.fine("Invalid value for " + SettingsServiceBean.Key.PVNumberOfConsecutiveDigitsAllowed + ": " + _numberOfConsecutiveDigitsAllowed);
}
return numConsecutiveDigitsAllowed;
}
/**
* Below are three related enums having to do with big data support:
*
* - FileUploadMethods
*
* - FileDownloadMethods
*
* - TransferProtocols
*
* There is a good chance these will be consolidated in the future.
*/
public enum FileUploadMethods {
/**
* DCM stands for Data Capture Module. Right now it supports upload over
* rsync+ssh but DCM may support additional methods in the future.
*/
RSYNC("dcm/rsync+ssh"),
/**
* Traditional Dataverse file handling, which tends to involve users
* uploading and downloading files using a browser or APIs.
*/
NATIVE("native/http");
private final String text;
private FileUploadMethods(final String text) {
this.text = text;
}
public static FileUploadMethods fromString(String text) {
if (text != null) {
for (FileUploadMethods fileUploadMethods : FileUploadMethods.values()) {
if (text.equals(fileUploadMethods.text)) {
return fileUploadMethods;
}
}
}
throw new IllegalArgumentException("FileUploadMethods must be one of these values: " + Arrays.asList(FileUploadMethods.values()) + ".");
}
@Override
public String toString() {
return text;
}
}
/**
* See FileUploadMethods.
*
* TODO: Consider if dataverse.files.<id>.download-redirect belongs here since
* it's a way to bypass Glassfish when downloading.
*/
public enum FileDownloadMethods {
/**
* RSAL stands for Repository Storage Abstraction Layer. Downloads don't
* go through Glassfish.
*/
RSYNC("rsal/rsync"),
NATIVE("native/http");
private final String text;
private FileDownloadMethods(final String text) {
this.text = text;
}
public static FileUploadMethods fromString(String text) {
if (text != null) {
for (FileUploadMethods fileUploadMethods : FileUploadMethods.values()) {
if (text.equals(fileUploadMethods.text)) {
return fileUploadMethods;
}
}
}
throw new IllegalArgumentException("FileDownloadMethods must be one of these values: " + Arrays.asList(FileDownloadMethods.values()) + ".");
}
@Override
public String toString() {
return text;
}
}
public enum DataFilePIDFormat {
DEPENDENT("DEPENDENT"),
INDEPENDENT("INDEPENDENT");
private final String text;
public String getText() {
return text;
}
private DataFilePIDFormat(final String text){
this.text = text;
}
@Override
public String toString() {
return text;
}
}
/**
* See FileUploadMethods.
*/
public enum TransferProtocols {
RSYNC("rsync"),
/**
* POSIX includes NFS. This is related to Key.LocalDataAccessPath in
* SettingsServiceBean.
*/
POSIX("posix"),
GLOBUS("globus");
private final String text;
private TransferProtocols(final String text) {
this.text = text;
}
public static TransferProtocols fromString(String text) {
if (text != null) {
for (TransferProtocols transferProtocols : TransferProtocols.values()) {
if (text.equals(transferProtocols.text)) {
return transferProtocols;
}
}
}
throw new IllegalArgumentException("TransferProtocols must be one of these values: " + Arrays.asList(TransferProtocols.values()) + ".");
}
@Override
public String toString() {
return text;
}
}
public boolean isPublicInstall(){
boolean saneDefault = false;
return settingsService.isTrueForKey(SettingsServiceBean.Key.PublicInstall, saneDefault);
}
public boolean isRsyncUpload(){
return getUploadMethodAvailable(SystemConfig.FileUploadMethods.RSYNC.toString());
}
// Controls if HTTP upload is enabled for both GUI and API.
public boolean isHTTPUpload(){
return getUploadMethodAvailable(SystemConfig.FileUploadMethods.NATIVE.toString());
}
public boolean isRsyncOnly(){
String downloadMethods = settingsService.getValueForKey(SettingsServiceBean.Key.DownloadMethods);
if(downloadMethods == null){
return false;
}
if (!downloadMethods.toLowerCase().equals(SystemConfig.FileDownloadMethods.RSYNC.toString())){
return false;
}
String uploadMethods = settingsService.getValueForKey(SettingsServiceBean.Key.UploadMethods);
if (uploadMethods==null){
return false;
} else {
return Arrays.asList(uploadMethods.toLowerCase().split("\\s*,\\s*")).size() == 1 && uploadMethods.toLowerCase().equals(SystemConfig.FileUploadMethods.RSYNC.toString());
}
}
public boolean isRsyncDownload() {
String downloadMethods = settingsService.getValueForKey(SettingsServiceBean.Key.DownloadMethods);
return downloadMethods !=null && downloadMethods.toLowerCase().contains(SystemConfig.FileDownloadMethods.RSYNC.toString());
}
public boolean isHTTPDownload() {
String downloadMethods = settingsService.getValueForKey(SettingsServiceBean.Key.DownloadMethods);
logger.warning("Download Methods:" + downloadMethods);
return downloadMethods !=null && downloadMethods.toLowerCase().contains(SystemConfig.FileDownloadMethods.NATIVE.toString());
}
private Boolean getUploadMethodAvailable(String method){
String uploadMethods = settingsService.getValueForKey(SettingsServiceBean.Key.UploadMethods);
if (uploadMethods==null){
return false;
} else {
return Arrays.asList(uploadMethods.toLowerCase().split("\\s*,\\s*")).contains(method);
}
}
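    // Illustrative note, not part of the original source: the :UploadMethods
    // setting checked above is a comma-separated list, so a value such as
    // "native/http, dcm/rsync+ssh" makes both isHTTPUpload() and isRsyncUpload()
    // return true.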
public Integer getUploadMethodCount(){
String uploadMethods = settingsService.getValueForKey(SettingsServiceBean.Key.UploadMethods);
if (uploadMethods==null){
return 0;
} else {
return Arrays.asList(uploadMethods.toLowerCase().split("\\s*,\\s*")).size();
}
}
public boolean isDataFilePIDSequentialDependent(){
String doiIdentifierType = settingsService.getValueForKey(SettingsServiceBean.Key.IdentifierGenerationStyle, "randomString");
String doiDataFileFormat = settingsService.getValueForKey(SettingsServiceBean.Key.DataFilePIDFormat, "DEPENDENT");
if (doiIdentifierType.equals("storedProcGenerated") && doiDataFileFormat.equals("DEPENDENT")){
return true;
}
return false;
}
public int getPIDAsynchRegFileCount() {
String fileCount = settingsService.getValueForKey(SettingsServiceBean.Key.PIDAsynchRegFileCount, "10");
int retVal = 10;
try {
retVal = Integer.parseInt(fileCount);
} catch (NumberFormatException e) {
//if no number in the setting we'll return 10
}
return retVal;
}
public boolean isFilePIDsEnabled() {
boolean safeDefaultIfKeyNotFound = true;
return settingsService.isTrueForKey(SettingsServiceBean.Key.FilePIDsEnabled, safeDefaultIfKeyNotFound);
}
public boolean isIndependentHandleService() {
boolean safeDefaultIfKeyNotFound = false;
return settingsService.isTrueForKey(SettingsServiceBean.Key.IndependentHandleService, safeDefaultIfKeyNotFound);
}
public String getHandleAuthHandle() {
String handleAuthHandle = settingsService.getValueForKey(SettingsServiceBean.Key.HandleAuthHandle, null);
return handleAuthHandle;
}
public String getMDCLogPath() {
String mDCLogPath = settingsService.getValueForKey(SettingsServiceBean.Key.MDCLogPath, null);
return mDCLogPath;
}
public boolean isDatafileValidationOnPublishEnabled() {
boolean safeDefaultIfKeyNotFound = true;
return settingsService.isTrueForKey(SettingsServiceBean.Key.FileValidationOnPublishEnabled, safeDefaultIfKeyNotFound);
}
public boolean directUploadEnabled(DvObjectContainer container) {
return Boolean.getBoolean("dataverse.files." + container.getEffectiveStorageDriverId() + ".upload-redirect");
}
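    // Illustrative note, not part of the original source: for a store whose
    // driver id is, say, "s3" (a made-up id), direct upload is switched on with
    // the JVM option -Ddataverse.files.s3.upload-redirect=true, which is what
    // Boolean.getBoolean() reads above.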
public String getDataCiteRestApiUrlString() {
//As of 5.0 the 'doi.dataciterestapiurlstring' is the documented jvm option. Prior versions used 'doi.mdcbaseurlstring' or were hardcoded to api.datacite.org, so the defaults are for backward compatibility.
return System.getProperty("doi.dataciterestapiurlstring", System.getProperty("doi.mdcbaseurlstring", "https://api.datacite.org"));
}
public long getDatasetValidationSizeLimit() {
String limitEntry = settingsService.getValueForKey(SettingsServiceBean.Key.DatasetChecksumValidationSizeLimit);
if (limitEntry != null) {
try {
Long sizeOption = new Long(limitEntry);
return sizeOption;
} catch (NumberFormatException nfe) {
logger.warning("Invalid value for DatasetValidationSizeLimit option? - " + limitEntry);
}
}
// -1 means no limit is set;
return -1;
}
public long getFileValidationSizeLimit() {
String limitEntry = settingsService.getValueForKey(SettingsServiceBean.Key.DataFileChecksumValidationSizeLimit);
if (limitEntry != null) {
try {
Long sizeOption = new Long(limitEntry);
return sizeOption;
} catch (NumberFormatException nfe) {
logger.warning("Invalid value for FileValidationSizeLimit option? - " + limitEntry);
}
}
// -1 means no limit is set;
return -1;
}
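    /**
     * Illustrative sketch, not part of the original source: the
     * :AllowedCurationLabels setting parsed below is expected to hold a JSON
     * object mapping a label-set name to an array of labels, for example
     * (names and labels here are made up):
     *
     *   {"StandardProcess": ["Author contacted", "Privacy review", "Awaiting paperwork"]}
     *
     * Any label that fails the alphanumeric/underscore/whitespace check causes
     * its whole set to be skipped.
     */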
public Map<String, String[]> getCurationLabels() {
Map<String, String[]> labelMap = new HashMap<String, String[]>();
try {
JsonReader jsonReader = Json.createReader(new StringReader(settingsService.getValueForKey(SettingsServiceBean.Key.AllowedCurationLabels, "")));
Pattern pattern = Pattern.compile("(^[\\w ]+$)"); // alphanumeric, underscore and whitespace allowed
JsonObject labelSets = jsonReader.readObject();
for (String key : labelSets.keySet()) {
JsonArray labels = (JsonArray) labelSets.getJsonArray(key);
String[] labelArray = new String[labels.size()];
boolean allLabelsOK = true;
Iterator<JsonValue> iter = labels.iterator();
int i=0;
while(iter.hasNext()) {
String label = ((JsonString)iter.next()).getString();
Matcher matcher = pattern.matcher(label);
if (!matcher.matches()) {
logger.warning("Label rejected: " + label + ", Label set " + key + " ignored.");
allLabelsOK = false;
break;
}
labelArray[i] = label;
i++;
}
if (allLabelsOK) {
labelMap.put(key, labelArray);
}
}
} catch (Exception e) {
logger.warning("Unable to parse " + SettingsServiceBean.Key.AllowedCurationLabels.name() + ": " + e.getLocalizedMessage());
e.printStackTrace();
}
return labelMap;
}
}
| 1 | 45,439 | This seems like a new dependency on rometools. Do we need it? | IQSS-dataverse | java |
@@ -93,7 +93,7 @@ public class Docker {
throw new WebDriverException("Unable to pull container: " + name);
}
- LOG.info(String.format("Pull of %s:%s complete", name, tag));
+ LOG.fine(String.format("Pull of %s:%s complete", name, tag));
return findImage(new ImageNamePredicate(name, tag))
.orElseThrow(() -> new DockerException( | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.docker;
import com.google.common.reflect.TypeToken;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonException;
import org.openqa.selenium.json.JsonOutput;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.logging.Logger;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.openqa.selenium.json.Json.MAP_TYPE;
import static org.openqa.selenium.remote.http.Contents.string;
import static org.openqa.selenium.remote.http.Contents.utf8String;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
public class Docker {
private static final Logger LOG = Logger.getLogger(Docker.class.getName());
private static final Json JSON = new Json();
private final HttpHandler client;
public Docker(HttpHandler client) {
Objects.requireNonNull(client, "Docker HTTP client must be set.");
this.client = req -> {
HttpResponse resp = client.execute(req);
      if (resp.getStatus() < 200 || resp.getStatus() > 299) { // any non-2xx response is treated as an error
String value = string(resp);
try {
Object obj = JSON.toType(value, Object.class);
if (obj instanceof Map) {
Map<?, ?> map = (Map<?, ?>) obj;
String message = map.get("message") instanceof String ?
(String) map.get("message") :
value;
throw new RuntimeException(message);
}
throw new RuntimeException(value);
} catch (JsonException e) {
throw new RuntimeException(value);
}
}
return resp;
};
}
public Image pull(String name, String tag) {
Objects.requireNonNull(name);
Objects.requireNonNull(tag);
findImage(new ImageNamePredicate(name, tag));
LOG.info(String.format("Pulling %s:%s", name, tag));
HttpRequest request = new HttpRequest(POST, "/images/create")
.addQueryParameter("fromImage", name)
.addQueryParameter("tag", tag);
HttpResponse res = client.execute(request);
if (res.getStatus() != HTTP_OK) {
throw new WebDriverException("Unable to pull container: " + name);
}
LOG.info(String.format("Pull of %s:%s complete", name, tag));
return findImage(new ImageNamePredicate(name, tag))
.orElseThrow(() -> new DockerException(
String.format("Cannot find image matching: %s:%s", name, tag)));
}
public List<Image> listImages() {
LOG.fine("Listing images");
HttpResponse response = client.execute(new HttpRequest(GET, "/images/json"));
List<ImageSummary> images =
JSON.toType(string(response), new TypeToken<List<ImageSummary>>() {}.getType());
return images.stream()
.map(Image::new)
.collect(toImmutableList());
}
public Optional<Image> findImage(Predicate<Image> filter) {
Objects.requireNonNull(filter);
LOG.fine("Finding image: " + filter);
return listImages().stream()
.filter(filter)
.findFirst();
}
public Container create(ContainerInfo info) {
StringBuilder json = new StringBuilder();
try (JsonOutput output = JSON.newOutput(json)) {
output.setPrettyPrint(false);
output.write(info);
}
LOG.info("Creating container: " + json);
HttpRequest request = new HttpRequest(POST, "/containers/create");
request.setContent(utf8String(json));
HttpResponse response = client.execute(request);
Map<String, Object> toRead = JSON.toType(string(response), MAP_TYPE);
return new Container(client, new ContainerId((String) toRead.get("Id")));
}
}
 | 1 | 17,123 | Waiting for the pull takes a long time. This message informs the user that at least one of the images being pulled is available. Please leave it at the info level. | SeleniumHQ-selenium | js
@@ -7,13 +7,12 @@ import (
"sync/atomic"
"testing"
+ "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/pkg/server/plugin/datastore"
"github.com/spiffe/spire/pkg/server/plugin/datastore/sql"
- spi "github.com/spiffe/spire/proto/spire/common/plugin"
- "github.com/spiffe/spire/test/spiretest"
)
var ( | 1 | package fakedatastore
import (
"context"
"fmt"
"sort"
"sync/atomic"
"testing"
"github.com/stretchr/testify/require"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/pkg/server/plugin/datastore"
"github.com/spiffe/spire/pkg/server/plugin/datastore/sql"
spi "github.com/spiffe/spire/proto/spire/common/plugin"
"github.com/spiffe/spire/test/spiretest"
)
var (
nextID uint32
)
type DataStore struct {
datastore.UnsafeDataStoreServer
ds datastore.DataStore
errs []error
}
var _ datastore.DataStore = (*DataStore)(nil)
func New(tb testing.TB) *DataStore {
var ds datastore.Plugin
spiretest.LoadPlugin(tb, sql.BuiltIn(), &ds)
_, err := ds.Configure(context.Background(), &spi.ConfigureRequest{
Configuration: fmt.Sprintf(`
database_type = "sqlite3"
connection_string = "file:memdb%d?mode=memory&cache=shared"
`, atomic.AddUint32(&nextID, 1)),
})
require.NoError(tb, err)
return &DataStore{
ds: ds,
}
}
func (s *DataStore) CreateBundle(ctx context.Context, req *datastore.CreateBundleRequest) (*datastore.CreateBundleResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.CreateBundle(ctx, req)
}
func (s *DataStore) UpdateBundle(ctx context.Context, req *datastore.UpdateBundleRequest) (*datastore.UpdateBundleResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.UpdateBundle(ctx, req)
}
func (s *DataStore) SetBundle(ctx context.Context, req *datastore.SetBundleRequest) (*datastore.SetBundleResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.SetBundle(ctx, req)
}
func (s *DataStore) AppendBundle(ctx context.Context, req *datastore.AppendBundleRequest) (*datastore.AppendBundleResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.AppendBundle(ctx, req)
}
func (s *DataStore) CountBundles(ctx context.Context, req *datastore.CountBundlesRequest) (*datastore.CountBundlesResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.CountBundles(ctx, req)
}
func (s *DataStore) DeleteBundle(ctx context.Context, req *datastore.DeleteBundleRequest) (*datastore.DeleteBundleResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.DeleteBundle(ctx, req)
}
func (s *DataStore) FetchBundle(ctx context.Context, req *datastore.FetchBundleRequest) (*datastore.FetchBundleResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.FetchBundle(ctx, req)
}
func (s *DataStore) ListBundles(ctx context.Context, req *datastore.ListBundlesRequest) (*datastore.ListBundlesResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
resp, err := s.ds.ListBundles(ctx, req)
if err == nil {
// Sorting helps unit-tests have deterministic assertions.
sort.Slice(resp.Bundles, func(i, j int) bool {
return resp.Bundles[i].TrustDomainId < resp.Bundles[j].TrustDomainId
})
}
return resp, err
}
func (s *DataStore) PruneBundle(ctx context.Context, req *datastore.PruneBundleRequest) (*datastore.PruneBundleResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.PruneBundle(ctx, req)
}
func (s *DataStore) CountAttestedNodes(ctx context.Context, req *datastore.CountAttestedNodesRequest) (*datastore.CountAttestedNodesResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.CountAttestedNodes(ctx, req)
}
func (s *DataStore) CreateAttestedNode(ctx context.Context, req *datastore.CreateAttestedNodeRequest) (*datastore.CreateAttestedNodeResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.CreateAttestedNode(ctx, req)
}
func (s *DataStore) FetchAttestedNode(ctx context.Context, req *datastore.FetchAttestedNodeRequest) (*datastore.FetchAttestedNodeResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.FetchAttestedNode(ctx, req)
}
func (s *DataStore) ListAttestedNodes(ctx context.Context, req *datastore.ListAttestedNodesRequest) (*datastore.ListAttestedNodesResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.ListAttestedNodes(ctx, req)
}
func (s *DataStore) UpdateAttestedNode(ctx context.Context, req *datastore.UpdateAttestedNodeRequest) (*datastore.UpdateAttestedNodeResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.UpdateAttestedNode(ctx, req)
}
func (s *DataStore) DeleteAttestedNode(ctx context.Context, req *datastore.DeleteAttestedNodeRequest) (*datastore.DeleteAttestedNodeResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.DeleteAttestedNode(ctx, req)
}
func (s *DataStore) SetNodeSelectors(ctx context.Context, req *datastore.SetNodeSelectorsRequest) (*datastore.SetNodeSelectorsResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.SetNodeSelectors(ctx, req)
}
func (s *DataStore) ListNodeSelectors(ctx context.Context, req *datastore.ListNodeSelectorsRequest) (*datastore.ListNodeSelectorsResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.ListNodeSelectors(ctx, req)
}
func (s *DataStore) GetNodeSelectors(ctx context.Context, req *datastore.GetNodeSelectorsRequest) (*datastore.GetNodeSelectorsResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
resp, err := s.ds.GetNodeSelectors(ctx, req)
if err == nil {
// Sorting helps unit-tests have deterministic assertions.
util.SortSelectors(resp.Selectors.Selectors)
}
return resp, err
}
func (s *DataStore) CountRegistrationEntries(ctx context.Context, req *datastore.CountRegistrationEntriesRequest) (*datastore.CountRegistrationEntriesResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.CountRegistrationEntries(ctx, req)
}
func (s *DataStore) CreateRegistrationEntry(ctx context.Context, req *datastore.CreateRegistrationEntryRequest) (*datastore.CreateRegistrationEntryResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.CreateRegistrationEntry(ctx, req)
}
func (s *DataStore) FetchRegistrationEntry(ctx context.Context, req *datastore.FetchRegistrationEntryRequest) (*datastore.FetchRegistrationEntryResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.FetchRegistrationEntry(ctx, req)
}
func (s *DataStore) ListRegistrationEntries(ctx context.Context, req *datastore.ListRegistrationEntriesRequest) (*datastore.ListRegistrationEntriesResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
resp, err := s.ds.ListRegistrationEntries(ctx, req)
if err == nil {
// Sorting helps unit-tests have deterministic assertions.
util.SortRegistrationEntries(resp.Entries)
}
return resp, err
}
func (s *DataStore) UpdateRegistrationEntry(ctx context.Context, req *datastore.UpdateRegistrationEntryRequest) (*datastore.UpdateRegistrationEntryResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.UpdateRegistrationEntry(ctx, req)
}
func (s *DataStore) DeleteRegistrationEntry(ctx context.Context, req *datastore.DeleteRegistrationEntryRequest) (*datastore.DeleteRegistrationEntryResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.DeleteRegistrationEntry(ctx, req)
}
func (s *DataStore) PruneRegistrationEntries(ctx context.Context, req *datastore.PruneRegistrationEntriesRequest) (*datastore.PruneRegistrationEntriesResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.PruneRegistrationEntries(ctx, req)
}
func (s *DataStore) CreateJoinToken(ctx context.Context, req *datastore.CreateJoinTokenRequest) (*datastore.CreateJoinTokenResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.CreateJoinToken(ctx, req)
}
func (s *DataStore) FetchJoinToken(ctx context.Context, req *datastore.FetchJoinTokenRequest) (*datastore.FetchJoinTokenResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.FetchJoinToken(ctx, req)
}
func (s *DataStore) DeleteJoinToken(ctx context.Context, req *datastore.DeleteJoinTokenRequest) (*datastore.DeleteJoinTokenResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.DeleteJoinToken(ctx, req)
}
func (s *DataStore) PruneJoinTokens(ctx context.Context, req *datastore.PruneJoinTokensRequest) (*datastore.PruneJoinTokensResponse, error) {
if err := s.getNextError(); err != nil {
return nil, err
}
return s.ds.PruneJoinTokens(ctx, req)
}
func (s *DataStore) SetNextError(err error) {
s.errs = []error{err}
}
func (s *DataStore) AppendNextError(err error) {
s.errs = append(s.errs, err)
}
func (s *DataStore) getNextError() error {
if len(s.errs) == 0 {
return nil
}
err := s.errs[0]
s.errs = s.errs[1:]
return err
}
func (s *DataStore) Configure(ctx context.Context, req *spi.ConfigureRequest) (*spi.ConfigureResponse, error) {
return &spi.ConfigureResponse{}, nil
}
func (s *DataStore) GetPluginInfo(context.Context, *spi.GetPluginInfoRequest) (*spi.GetPluginInfoResponse, error) {
return &spi.GetPluginInfoResponse{}, nil
}
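// Illustrative sketch only: how a test might drive the error-injection hooks above (assumes the
// usual imports -- context, errors, testing, require, datastore, fakedatastore).
//
//	func TestCreateBundleFailsOnce(t *testing.T) {
//		ds := fakedatastore.New(t)
//		ds.SetNextError(errors.New("boom")) // only the next datastore call fails
//		_, err := ds.CreateBundle(context.Background(), &datastore.CreateBundleRequest{})
//		require.Error(t, err)
//		// Subsequent calls reach the real in-memory sqlite3-backed plugin again.
//	}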
 | 1 | 16,304 | I can't think of a good use for it, but could we allow a way to set up this fake with a hook for logs? This fake is special, and we may need access to some of those logs. At the same time it may be overkill, since we don't want to test the "sql" implementation itself, only the results. But we could create some tests with end-to-end logs in some scenarios. What do you think? | spiffe-spire | go
@@ -149,6 +149,10 @@ void dag_close_over_environment(struct dag *d)
{
dag_variable_add_value(name, d->default_category->mf_variables, 0, value_env);
}
+
+ if(!value_env && !strcmp(name, RESOURCES_CORES)) {
+ dag_variable_add_value(name, d->default_category->mf_variables, 0, "1");
+ }
}
}
| 1 | /*
Copyright (C) 2008- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <signal.h>
#include <stdarg.h>
#include <fcntl.h>
#include <dirent.h>
#include <unistd.h>
#include "cctools.h"
#include "catalog_query.h"
#include "category.h"
#include "create_dir.h"
#include "copy_stream.h"
#include "datagram.h"
#include "host_disk_info.h"
#include "domain_name_cache.h"
#include "link.h"
#include "macros.h"
#include "hash_table.h"
#include "itable.h"
#include "debug.h"
#include "delete_dir.h"
#include "stringtools.h"
#include "load_average.h"
#include "get_line.h"
#include "int_sizes.h"
#include "list.h"
#include "xxmalloc.h"
#include "getopt_aux.h"
#include "rmonitor.h"
#include "path.h"
#include "dag.h"
#include "dag_visitors.h"
#include "dag_resources.h"
#include "lexer.h"
#include "buffer.h"
#include "parser.h"
static int dag_parse(struct dag *d, FILE * dag_stream);
static int dag_parse_variable(struct lexer *bk, struct dag_node *n);
static int dag_parse_node(struct lexer *bk);
static int dag_parse_syntax(struct lexer *bk);
static int dag_parse_node_filelist(struct lexer *bk, struct dag_node *n);
static int dag_parse_node_command(struct lexer *bk, struct dag_node *n);
static int dag_parse_node_regular_command(struct lexer *bk, struct dag_node *n);
static int dag_parse_node_nested_makeflow(struct lexer *bk, struct dag_node *n);
static int dag_parse_export(struct lexer *bk);
int verbose_parsing=0;
static const int parsing_rule_mod_counter = 250;
static int dag_parse_node_regular_command(struct lexer *bk, struct dag_node *n)
{
struct buffer b;
buffer_init(&b);
struct token *t;
while((t = lexer_next_token(bk)) && t->type != TOKEN_NEWLINE)
{
switch(t->type)
{
case TOKEN_SPACE:
buffer_printf(&b, " ");
break;
case TOKEN_LITERAL:
buffer_printf(&b, "%s", t->lexeme);
break;
case TOKEN_IO_REDIRECT:
buffer_printf(&b, "%s", t->lexeme);
break;
default:
lexer_report_error(bk, "Unexpected command token: %s.\n", lexer_print_token(t));
break;
}
lexer_free_token(t);
}
if(!t) {
lexer_report_error(bk, "Command does not end with newline.\n");
} else {
lexer_free_token(t);
}
n->command = xxstrdup(buffer_tostring(&b));
buffer_free(&b);
debug(D_MAKEFLOW_PARSER, "node command=%s", n->command);
return 1;
}
/* Returns a pointer to a new struct dag described by filename. Return NULL on
* failure. */
struct dag *dag_from_file(const char *filename)
{
FILE *dagfile;
struct dag *d = NULL;
dagfile = fopen(filename, "r");
if(dagfile == NULL)
debug(D_MAKEFLOW_PARSER, "makeflow: unable to open file %s: %s\n", filename, strerror(errno));
else {
d = dag_create();
d->filename = xxstrdup(filename);
if(!dag_parse(d, dagfile)) {
free(d);
d = NULL;
}
fclose(dagfile);
}
return d;
}
void dag_close_over_environment(struct dag *d)
{
//for each exported and special variable, if the variable does not have a
//value assigned yet, we look for its value in the running environment
char *name;
struct dag_variable_value *v;
set_first_element(d->special_vars);
while((name = set_next_element(d->special_vars)))
{
v = dag_variable_get_value(name, d->default_category->mf_variables, d->nodeid_counter);
if(!v)
{
char *value_env = getenv(name);
if(value_env)
{
dag_variable_add_value(name, d->default_category->mf_variables, 0, value_env);
}
}
}
set_first_element(d->export_vars);
while((name = set_next_element(d->export_vars)))
{
v = dag_variable_get_value(name, d->default_category->mf_variables, d->nodeid_counter);
if(!v)
{
char *value_env = getenv(name);
if(value_env)
{
dag_variable_add_value(name, d->default_category->mf_variables, 0, value_env);
}
}
}
}
void dag_close_over_categories(struct dag *d) {
/* per category, we assign the values found for resources. */
struct category *c;
char *name;
hash_table_firstkey(d->categories);
while(hash_table_nextkey(d->categories, &name, (void **) &c)) {
struct rmsummary *rs = rmsummary_create(-1);
struct dag_variable_lookup_set s = {d, c, NULL, NULL };
struct dag_variable_value *val;
val = dag_variable_lookup(RESOURCES_CORES, &s);
if(val) {
rs->cores = atoll(val->value);
}
val = dag_variable_lookup(RESOURCES_DISK, &s);
if(val) {
rs->disk = atoll(val->value);
}
val = dag_variable_lookup(RESOURCES_MEMORY, &s);
if(val) {
rs->memory = atoll(val->value);
}
val = dag_variable_lookup(RESOURCES_GPUS, &s);
if(val) {
rs->gpus = atoll(val->value);
}
c->max_allocation = rs;
}
}
static int dag_parse(struct dag *d, FILE *stream)
{
struct lexer *bk = lexer_create(STREAM, stream, 1, 1);
bk->d = d;
bk->stream = stream;
bk->category = d->default_category;
struct dag_variable_lookup_set s = { d, NULL, NULL, NULL };
bk->environment = &s;
struct token *t;
while((t = lexer_peek_next_token(bk)))
{
s.category = bk->category;
s.node = NULL;
s.table = NULL;
switch (t->type) {
case TOKEN_NEWLINE:
case TOKEN_SPACE:
/* Skip newlines, spaces at top level. */
lexer_free_token(lexer_next_token(bk));
break;
case TOKEN_SYNTAX:
dag_parse_syntax(bk);
break;
case TOKEN_FILES:
dag_parse_node(bk);
break;
case TOKEN_VARIABLE:
dag_parse_variable(bk, NULL);
break;
default:
lexer_report_error(bk, "Unexpected token. Expected one of NEWLINE, SPACE, SYNTAX, FILES, or VARIABLE, but got: %s\n:", lexer_print_token(t));
break;
}
}
dag_close_over_environment(d);
dag_close_over_categories(d);
dag_compile_ancestors(d);
lexer_delete(bk);
return 1;
}
//return 1 if name was processed as special variable, 0 otherwise
static int dag_parse_process_special_variable(struct lexer *bk, struct dag_node *n, int nodeid, char *name, const char *value)
{
struct dag *d = bk->d;
int special = 0;
if(strcmp("CATEGORY", name) == 0 || strcmp("SYMBOL", name) == 0) {
special = 1;
/* If we have never seen this label, then create
* a new category, otherwise retrieve the category. */
struct category *category = makeflow_category_lookup_or_create(d, value);
/* If we are parsing inside a node, make category
* the category of the node, but do not update
* the global task_category. Else, update the
* global task category. */
if(n) {
n->category = category;
debug(D_MAKEFLOW_PARSER, "Updating category '%s' for rule %d.\n", value, n->nodeid);
}
else
bk->category = category;
}
/* else if some other special variable .... */
/* ... */
return special;
}
void dag_parse_append_variable(struct lexer *bk, int nodeid, struct dag_node *n, const char *name, const char *value)
{
struct dag_variable_lookup_set sd = { bk->d, NULL, NULL, NULL };
struct dag_variable_value *vd = dag_variable_lookup(name, &sd);
struct dag_variable_value *v;
if(n)
{
v = dag_variable_get_value(name, n->variables, nodeid);
if(v)
{
dag_variable_value_append_or_create(v, value);
}
else
{
char *new_value;
if(vd)
{
new_value = string_format("%s %s", vd->value, value);
}
else
{
new_value = xxstrdup(value);
}
dag_variable_add_value(name, n->variables, nodeid, new_value);
free(new_value);
}
}
else
{
if(vd)
{
dag_variable_value_append_or_create(vd, value);
}
else
{
dag_variable_add_value(name, bk->d->default_category->mf_variables, nodeid, value);
}
}
}
static int dag_parse_syntax(struct lexer *bk)
{
struct token *t = lexer_next_token(bk);
if(strcmp(t->lexeme, "export") == 0) {
lexer_free_token(t);
dag_parse_export(bk);
} else {
lexer_report_error(bk, "Unknown syntax keyboard.\n");
}
return 1;
}
static int dag_parse_variable(struct lexer *bk, struct dag_node *n)
{
struct token *t = lexer_next_token(bk);
char mode = t->lexeme[0]; //=, or + (assign or append)
lexer_free_token(t);
t = lexer_next_token(bk);
if(t->type != TOKEN_LITERAL)
{
lexer_report_error(bk, "Literal variable name expected.");
}
char *name = xxstrdup(t->lexeme);
lexer_free_token(t);
t = lexer_next_token(bk);
if(t->type != TOKEN_LITERAL)
{
lexer_report_error(bk, "Expected LITERAL token, got: %s\n", lexer_print_token(t));
}
char *value = xxstrdup(t->lexeme);
lexer_free_token(t);
struct hash_table *current_table;
int nodeid;
if(n)
{
current_table = n->variables;
nodeid = n->nodeid;
}
else
{
current_table = bk->d->default_category->mf_variables;
nodeid = bk->d->nodeid_counter;
}
int result = 1;
switch(mode)
{
case '=':
dag_variable_add_value(name, current_table, nodeid, value);
debug(D_MAKEFLOW_PARSER, "%s appending to variable name=%s, value=%s", (n ? "node" : "dag"), name, value);
break;
case '+':
dag_parse_append_variable(bk, nodeid, n, name, value);
debug(D_MAKEFLOW_PARSER, "%s variable name=%s, value=%s", (n ? "node" : "dag"), name, value);
break;
default:
lexer_report_error(bk, "Unknown variable operator.");
result = 0;
}
dag_parse_process_special_variable(bk, n, nodeid, name, value);
free(name);
free(value);
return result;
}
static int dag_parse_node_filelist(struct lexer *bk, struct dag_node *n)
{
int before_colon = 1;
char *filename;
char *newname;
struct token *t, *arrow, *rename;
while((t = lexer_next_token(bk)))
{
filename = NULL;
newname = NULL;
switch (t->type) {
case TOKEN_COLON:
before_colon = 0;
lexer_free_token(t);
break;
case TOKEN_NEWLINE:
/* Finished reading file list */
lexer_free_token(t);
return 1;
break;
case TOKEN_LITERAL:
rename = NULL;
arrow = lexer_peek_next_token(bk);
if(!arrow)
{
lexer_report_error(bk, "Rule specification is incomplete.");
}
else if(arrow->type == TOKEN_REMOTE_RENAME) //Is the arrow really an arrow?
{
lexer_free_token(lexer_next_token(bk)); //Jump arrow.
rename = lexer_next_token(bk);
if(!rename)
{
lexer_report_error(bk, "Remote name specification is incomplete.");
}
}
filename = t->lexeme;
newname = rename ? rename->lexeme : NULL;
if(before_colon)
dag_node_add_target_file(n, filename, newname);
else
dag_node_add_source_file(n, filename, newname);
lexer_free_token(t);
if(rename)
{
lexer_free_token(rename);
}
break;
default:
lexer_report_error(bk, "Error reading file list. %s", lexer_print_token(t));
break;
}
}
return 0;
}
static int dag_parse_node(struct lexer *bk)
{
struct token *t = lexer_next_token(bk);
if(t->type != TOKEN_FILES)
{
lexer_report_error(bk, "Error reading rule.");
}
lexer_free_token(t);
struct dag_node *n;
n = dag_node_create(bk->d, bk->line_number);
if(verbose_parsing && bk->d->nodeid_counter % parsing_rule_mod_counter == 0)
{
fprintf(stdout, "\rRules parsed: %d", bk->d->nodeid_counter + 1);
fflush(stdout);
}
n->category = bk->category;
dag_parse_node_filelist(bk, n);
bk->environment->node = n;
/* Read variables, if any */
while((t = lexer_peek_next_token(bk)) && t->type != TOKEN_COMMAND)
{
switch (t->type) {
case TOKEN_VARIABLE:
dag_parse_variable(bk, n);
break;
default:
lexer_report_error(bk, "Expected COMMAND or VARIABLE, got: %s", lexer_print_token(t));
break;
}
}
if(!t)
{
lexer_report_error(bk, "Rule does not have a command.\n");
}
dag_parse_node_command(bk, n);
bk->environment->node = NULL;
n->next = bk->d->nodes;
bk->d->nodes = n;
itable_insert(bk->d->node_table, n->nodeid, n);
debug(D_MAKEFLOW_PARSER, "Setting resource category '%s' for rule %d.\n", n->category->name, n->nodeid);
dag_node_init_resources(n);
return 1;
}
static int dag_parse_node_command(struct lexer *bk, struct dag_node *n)
{
struct token *t;
//Jump COMMAND token.
t = lexer_next_token(bk);
lexer_free_token(t);
char *local = dag_variable_lookup_string("BATCH_LOCAL", bk->environment);
if(local) {
if(string_istrue(local))
n->local_job = 1;
free(local);
}
/* Read command modifiers. */
while((t = lexer_peek_next_token(bk)) && t->type != TOKEN_COMMAND_MOD_END)
{
t = lexer_next_token(bk);
if(strcmp(t->lexeme, "LOCAL") == 0)
{
n->local_job = 1;
}
else if(strcmp(t->lexeme, "MAKEFLOW") == 0)
{
n->nested_job = 1;
}
else
{
lexer_report_error(bk, "Parser does not know about modifier: %s.\n", t->lexeme);
}
lexer_free_token(t);
}
if(!t)
{
lexer_report_error(bk, "Malformed command.");
}
//Free COMMAND_MOD_END token.
t = lexer_next_token(bk);
lexer_free_token(t);
if(n->nested_job)
{
return dag_parse_node_nested_makeflow(bk, n);
}
else
{
return dag_parse_node_regular_command(bk, n);
}
}
void dag_parse_drop_spaces(struct lexer *bk)
{
struct token *t;
while((t = lexer_peek_next_token(bk)) && t->type == TOKEN_SPACE) {
t = lexer_next_token(bk);
lexer_free_token(t);
}
}
/* Support for recursive calls to makeflow. A recursive call is indicated in
* the makeflow file with the following syntax:
* \tMAKEFLOW some-makeflow-file [working-directory [wrapper]]
*
* If wrapper is not given, it defaults to an empty string.
* If working-directory is not given, it defaults to ".".
* If makeflow_exe is NULL, it defaults to makeflow
*
* The call is then as:
*
* cd working-directory && wrapper makeflow_exe some-makeflow-file
*
* */
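/* For example (file names illustrative only), the rule command
 *	MAKEFLOW sub.makeflow analysis_dir ./wrapper.sh
 * is rewritten into: cd analysis_dir && ./wrapper.sh makeflow sub.makeflow
 */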
static int dag_parse_node_nested_makeflow(struct lexer *bk, struct dag_node *n)
{
struct token *t, *start;
dag_parse_drop_spaces(bk);
//Get the dag's file name.
t = lexer_next_token(bk);
if(t->type == TOKEN_LITERAL) {
n->makeflow_dag = xxstrdup(t->lexeme);
start = t;
} else {
lexer_report_error(bk, "At least the name of the Makeflow file should be specified in a recursive call.\n");
return 0; // not reached, silences warning
}
dag_parse_drop_spaces(bk);
//Get dag's working directory.
t = lexer_peek_next_token(bk);
if(t->type == TOKEN_LITERAL) {
t = lexer_next_token(bk);
n->makeflow_cwd = xxstrdup(t->lexeme);
lexer_free_token(t);
} else {
n->makeflow_cwd = xxstrdup(".");
}
dag_parse_drop_spaces(bk);
//Get wrapper's name
char *wrapper = NULL;
t = lexer_peek_next_token(bk);
if(t->type == TOKEN_LITERAL) {
wrapper = xxstrdup(t->lexeme);
lexer_free_token(t);
} else {
wrapper = xxstrdup("");
}
free(start->lexeme);
start->lexeme = string_format("cd %s && %s %s %s",
n->makeflow_cwd,
wrapper,
"makeflow",
n->makeflow_dag);
free(wrapper);
dag_parse_drop_spaces(bk);
lexer_preppend_token(bk, start);
return dag_parse_node_regular_command(bk, n);
}
static int dag_parse_export(struct lexer *bk)
{
struct token *t, *vtoken, *vname;
const char *name;
int count = 0;
while((t = lexer_peek_next_token(bk)) && t->type != TOKEN_NEWLINE)
{
switch(t->type)
{
case TOKEN_VARIABLE:
vtoken = lexer_next_token(bk); //Save VARIABLE token.
vname = lexer_peek_next_token(bk);
if(vname->type == TOKEN_LITERAL) {
name = xxstrdup(vname->lexeme);
} else {
lexer_report_error(bk, "Variable definition has name missing.\n");
}
lexer_preppend_token(bk, vtoken); //Restore VARIABLE token.
dag_parse_variable(bk, NULL);
break;
case TOKEN_LITERAL:
t = lexer_next_token(bk);
name = xxstrdup(t->lexeme);
lexer_free_token(t);
break;
default:
lexer_report_error(bk, "Malformed export syntax.\n");
break;
}
set_insert(bk->d->export_vars, name);
count++;
debug(D_MAKEFLOW_PARSER, "export variable: %s", name);
}
if(t) {
//Free newline
t = lexer_next_token(bk);
lexer_free_token(t);
}
if(count < 1) {
lexer_report_error(bk, "The export syntax needs the explicit name of the variables to be exported.\n");
}
return 1;
}
/* vim: set noexpandtab tabstop=4: */
| 1 | 12,739 | I don't think you should be changing the dag unilaterally at parse time. If cores isn't specified, then it isn't specified. | cooperative-computing-lab-cctools | c |
@@ -26,6 +26,16 @@
// see URLOpener.
// See https://godoc.org/gocloud.dev#hdr-URLs for background information.
//
+// Message Delivery Semantics
+//
+// Azure ServiceBus supports at-least-once semantics in the default Peek-Lock
+// mode; applications must call Message.Ack/Nack after processing a message, or
+// it will be redelivered. However, it also supports a Receive-Delete mode,
+// which essentially auto-acks a message when it is delivered, resulting in
+// at-most-once semantics. See SubscriberOptions.AckFuncForReceiveAndDelete.
+// See https://godoc.org/gocloud.dev/pubsub#hdr-At_most_once_and_At_least_once_Delivery
+// for more background.
+//
// As
//
// azuresb exposes the following types for As: | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package azuresb provides an implementation of pubsub using Azure Service
// Bus Topic and Subscription.
// See https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-messaging-overview for an overview.
//
// URLs
//
// For pubsub.OpenTopic and pubsub.OpenSubscription, azuresb registers
// for the scheme "azuresb".
// The default URL opener will use a Service Bus Connection String based on
// the environment variable "SERVICEBUS_CONNECTION_STRING".
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://godoc.org/gocloud.dev#hdr-URLs for background information.
//
// As
//
// azuresb exposes the following types for As:
// - Topic: *servicebus.Topic
// - Subscription: *servicebus.Subscription
// - Message: *servicebus.Message
// - Error: common.Retryable
package azuresb // import "gocloud.dev/pubsub/azuresb"
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path"
"runtime"
"strings"
"sync"
"time"
common "github.com/Azure/azure-amqp-common-go"
"github.com/Azure/azure-amqp-common-go/cbs"
"github.com/Azure/azure-amqp-common-go/rpc"
"github.com/Azure/azure-amqp-common-go/uuid"
servicebus "github.com/Azure/azure-service-bus-go"
"github.com/google/wire"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/batcher"
"gocloud.dev/internal/useragent"
"gocloud.dev/pubsub"
"gocloud.dev/pubsub/driver"
"pack.ag/amqp"
)
const (
// https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-amqp-request-response#update-disposition-status
dispositionForAck = "completed"
dispositionForNack = "abandoned"
listenerTimeout = 1 * time.Second
)
var sendBatcherOpts = &batcher.Options{
MaxBatchSize: 1, // SendBatch only supports one message at a time
MaxHandlers: 100, // max concurrency for sends
}
func init() {
o := new(defaultOpener)
pubsub.DefaultURLMux().RegisterTopic(Scheme, o)
pubsub.DefaultURLMux().RegisterSubscription(Scheme, o)
}
// Set holds Wire providers for this package.
var Set = wire.NewSet(
SubscriptionOptions{},
TopicOptions{},
URLOpener{},
)
// defaultURLOpener creates an URLOpener with ConnectionString initialized from
// the environment variable SERVICEBUS_CONNECTION_STRING.
type defaultOpener struct {
init sync.Once
opener *URLOpener
err error
}
func (o *defaultOpener) defaultOpener() (*URLOpener, error) {
o.init.Do(func() {
cs := os.Getenv("SERVICEBUS_CONNECTION_STRING")
if cs == "" {
o.err = errors.New("SERVICEBUS_CONNECTION_STRING environment variable not set")
return
}
o.opener = &URLOpener{ConnectionString: cs}
})
return o.opener, o.err
}
func (o *defaultOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) {
opener, err := o.defaultOpener()
if err != nil {
return nil, fmt.Errorf("open topic %v: %v", u, err)
}
return opener.OpenTopicURL(ctx, u)
}
func (o *defaultOpener) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) {
opener, err := o.defaultOpener()
if err != nil {
return nil, fmt.Errorf("open subscription %v: %v", u, err)
}
return opener.OpenSubscriptionURL(ctx, u)
}
// Scheme is the URL scheme azuresb registers its URLOpeners under on pubsub.DefaultMux.
const Scheme = "azuresb"
// URLOpener opens Azure Service Bus URLs like "azuresb://mytopic" for
// topics or "azuresb://mytopic?subscription=mysubscription" for subscriptions.
//
// - The URL's host+path is used as the topic name.
// - For subscriptions, the subscription name must be provided in the
// "subscription" query parameter.
//
// No other query parameters are supported.
type URLOpener struct {
// ConnectionString is the Service Bus connection string (required).
// https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-dotnet-get-started-with-queues
ConnectionString string
// Options passed when creating the ServiceBus Topic/Subscription.
ServiceBusTopicOptions []servicebus.TopicOption
ServiceBusSubscriptionOptions []servicebus.SubscriptionOption
// TopicOptions specifies the options to pass to OpenTopic.
TopicOptions TopicOptions
// SubscriptionOptions specifies the options to pass to OpenSubscription.
SubscriptionOptions SubscriptionOptions
}
func (o *URLOpener) namespace(kind string, u *url.URL) (*servicebus.Namespace, error) {
if o.ConnectionString == "" {
return nil, fmt.Errorf("open %s %v: ConnectionString is required", kind, u)
}
ns, err := NewNamespaceFromConnectionString(o.ConnectionString)
if err != nil {
return nil, fmt.Errorf("open %s %v: invalid connection string %q: %v", kind, u, o.ConnectionString, err)
}
return ns, nil
}
// OpenTopicURL opens a pubsub.Topic based on u.
func (o *URLOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) {
ns, err := o.namespace("topic", u)
if err != nil {
return nil, err
}
for param := range u.Query() {
return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param)
}
topicName := path.Join(u.Host, u.Path)
t, err := NewTopic(ns, topicName, o.ServiceBusTopicOptions)
if err != nil {
return nil, fmt.Errorf("open topic %v: couldn't open topic %q: %v", u, topicName, err)
}
return OpenTopic(ctx, t, &o.TopicOptions)
}
// OpenSubscriptionURL opens a pubsub.Subscription based on u.
func (o *URLOpener) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) {
ns, err := o.namespace("subscription", u)
if err != nil {
return nil, err
}
topicName := path.Join(u.Host, u.Path)
t, err := NewTopic(ns, topicName, o.ServiceBusTopicOptions)
if err != nil {
return nil, fmt.Errorf("open subscription %v: couldn't open topic %q: %v", u, topicName, err)
}
q := u.Query()
subName := q.Get("subscription")
q.Del("subscription")
if subName == "" {
return nil, fmt.Errorf("open subscription %v: missing required query parameter subscription", u)
}
for param := range q {
return nil, fmt.Errorf("open subscription %v: invalid query parameter %q", u, param)
}
sub, err := NewSubscription(t, subName, o.ServiceBusSubscriptionOptions)
if err != nil {
return nil, fmt.Errorf("open subscription %v: couldn't open subscription %q: %v", u, subName, err)
}
return OpenSubscription(ctx, ns, t, sub, &o.SubscriptionOptions)
}
type topic struct {
sbTopic *servicebus.Topic
}
// TopicOptions provides configuration options for an Azure SB Topic.
type TopicOptions struct{}
// NewNamespaceFromConnectionString returns a *servicebus.Namespace from a Service Bus connection string.
// https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-dotnet-get-started-with-queues
func NewNamespaceFromConnectionString(connectionString string) (*servicebus.Namespace, error) {
nsOptions := servicebus.NamespaceWithConnectionString(connectionString)
return servicebus.NewNamespace(nsOptions)
}
// NewTopic returns a *servicebus.Topic associated with a Service Bus Namespace.
func NewTopic(ns *servicebus.Namespace, topicName string, opts []servicebus.TopicOption) (*servicebus.Topic, error) {
return ns.NewTopic(topicName, opts...)
}
// NewSubscription returns a *servicebus.Subscription associated with a Service Bus Topic.
func NewSubscription(parentTopic *servicebus.Topic, subscriptionName string, opts []servicebus.SubscriptionOption) (*servicebus.Subscription, error) {
return parentTopic.NewSubscription(subscriptionName, opts...)
}
// OpenTopic initializes a pubsub Topic on a given Service Bus Topic.
func OpenTopic(ctx context.Context, sbTopic *servicebus.Topic, opts *TopicOptions) (*pubsub.Topic, error) {
t, err := openTopic(ctx, sbTopic, opts)
if err != nil {
return nil, err
}
return pubsub.NewTopic(t, sendBatcherOpts), nil
}
// openTopic returns the driver for OpenTopic. This function exists so the test
// harness can get the driver interface implementation if it needs to.
func openTopic(ctx context.Context, sbTopic *servicebus.Topic, _ *TopicOptions) (driver.Topic, error) {
if sbTopic == nil {
return nil, errors.New("azuresb: OpenTopic requires a Service Bus Topic")
}
return &topic{sbTopic: sbTopic}, nil
}
// SendBatch implements driver.Topic.SendBatch.
func (t *topic) SendBatch(ctx context.Context, dms []*driver.Message) error {
if len(dms) != 1 {
panic("azuresb.SendBatch should only get one message at a time")
}
dm := dms[0]
sbms := servicebus.NewMessage(dm.Body)
for k, v := range dm.Metadata {
sbms.Set(k, v)
}
return t.sbTopic.Send(ctx, sbms)
}
func (t *topic) IsRetryable(err error) bool {
// Let the Service Bus SDK recover from any transient connectivity issue.
return false
}
func (t *topic) As(i interface{}) bool {
p, ok := i.(**servicebus.Topic)
if !ok {
return false
}
*p = t.sbTopic
return true
}
// ErrorAs implements driver.Topic.ErrorAs
func (*topic) ErrorAs(err error, i interface{}) bool {
return errorAs(err, i)
}
func errorAs(err error, i interface{}) bool {
switch v := err.(type) {
case *amqp.Error:
if p, ok := i.(**amqp.Error); ok {
*p = v
return true
}
case common.Retryable:
if p, ok := i.(*common.Retryable); ok {
*p = v
return true
}
}
return false
}
func (*topic) ErrorCode(err error) gcerrors.ErrorCode {
return errorCode(err)
}
type subscription struct {
sbSub *servicebus.Subscription
opts *SubscriptionOptions
linkErr error // saved error for initializing amqpLink
amqpLink *rpc.Link // nil if linkErr != nil
}
// SubscriptionOptions will contain configuration for subscriptions.
type SubscriptionOptions struct {
// If nil, the subscription MUST be in Peek-Lock mode. The Ack method must be called on each message
// to complete it, otherwise you run the risk of deadlettering messages.
// If non-nil, the subscription MUST be in Receive-and-Delete mode, and this function will be called
// whenever Ack is called on a message.
// See the "At-most-once vs. At-least-once Delivery" section in the pubsub package documentation.
AckFuncForReceiveAndDelete func()
}
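// exampleAtMostOnceSubscriptionOptions is an illustrative sketch of how a caller might opt in to
// the at-most-once behavior described above. It assumes the underlying servicebus.Subscription was
// created in Receive-and-Delete mode; that Azure-side setup is not shown here.
func exampleAtMostOnceSubscriptionOptions() *SubscriptionOptions {
	return &SubscriptionOptions{
		// A no-op suffices: in Receive-and-Delete mode the server has already removed the
		// message, so there is nothing left to complete when Ack is called.
		AckFuncForReceiveAndDelete: func() {},
	}
}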
// OpenSubscription initializes a pubsub Subscription on a given Service Bus Subscription and its parent Service Bus Topic.
func OpenSubscription(ctx context.Context, parentNamespace *servicebus.Namespace, parentTopic *servicebus.Topic, sbSubscription *servicebus.Subscription, opts *SubscriptionOptions) (*pubsub.Subscription, error) {
ds, err := openSubscription(ctx, parentNamespace, parentTopic, sbSubscription, opts)
if err != nil {
return nil, err
}
return pubsub.NewSubscription(ds, nil, nil), nil
}
// openSubscription returns a driver.Subscription.
func openSubscription(ctx context.Context, sbNs *servicebus.Namespace, sbTop *servicebus.Topic, sbSub *servicebus.Subscription, opts *SubscriptionOptions) (driver.Subscription, error) {
if sbNs == nil {
return nil, errors.New("azuresb: OpenSubscription requires a Service Bus Namespace")
}
if sbTop == nil {
return nil, errors.New("azuresb: OpenSubscription requires a Service Bus Topic")
}
if sbSub == nil {
return nil, errors.New("azuresb: OpenSubscription requires a Service Bus Subscription")
}
if opts == nil {
opts = &SubscriptionOptions{}
}
sub := &subscription{sbSub: sbSub, opts: opts}
// Initialize a link to the AMQP server, but save any errors to be
// returned in ReceiveBatch instead of returning them here, because we
// want "subscription not found" to be a Receive time error.
host := fmt.Sprintf("amqps://%s.%s/", sbNs.Name, sbNs.Environment.ServiceBusEndpointSuffix)
amqpClient, err := amqp.Dial(host,
amqp.ConnSASLAnonymous(),
amqp.ConnProperty("product", "Go-Cloud Client"),
amqp.ConnProperty("version", servicebus.Version),
amqp.ConnProperty("platform", runtime.GOOS),
amqp.ConnProperty("framework", runtime.Version()),
amqp.ConnProperty("user-agent", useragent.AzureUserAgentPrefix("pubsub")),
)
if err != nil {
sub.linkErr = fmt.Errorf("failed to dial AMQP: %v", err)
return sub, nil
}
entityPath := sbTop.Name + "/Subscriptions/" + sbSub.Name
audience := host + entityPath
if err = cbs.NegotiateClaim(ctx, audience, amqpClient, sbNs.TokenProvider); err != nil {
sub.linkErr = fmt.Errorf("failed to negotiate claim with AMQP: %v", err)
return sub, nil
}
link, err := rpc.NewLink(amqpClient, sbSub.ManagementPath())
if err != nil {
sub.linkErr = fmt.Errorf("failed to create link to AMQP %s: %v", sbSub.ManagementPath(), err)
return sub, nil
}
sub.amqpLink = link
return sub, nil
}
// IsRetryable implements driver.Subscription.IsRetryable.
func (s *subscription) IsRetryable(error) bool {
// Let the Service Bus SDK recover from any transient connectivity issue.
return false
}
// As implements driver.Subscription.As.
func (s *subscription) As(i interface{}) bool {
p, ok := i.(**servicebus.Subscription)
if !ok {
return false
}
*p = s.sbSub
return true
}
// ErrorAs implements driver.Subscription.ErrorAs
func (s *subscription) ErrorAs(err error, i interface{}) bool {
return errorAs(err, i)
}
func (s *subscription) ErrorCode(err error) gcerrors.ErrorCode {
return errorCode(err)
}
// AckFunc implements driver.Subscription.AckFunc.
func (s *subscription) AckFunc() func() {
if s == nil {
return nil
}
return s.opts.AckFuncForReceiveAndDelete
}
// ReceiveBatch implements driver.Subscription.ReceiveBatch.
func (s *subscription) ReceiveBatch(ctx context.Context, maxMessages int) ([]*driver.Message, error) {
if s.linkErr != nil {
return nil, s.linkErr
}
rctx, cancel := context.WithTimeout(ctx, listenerTimeout)
defer cancel()
var messages []*driver.Message
var wg sync.WaitGroup
wg.Add(1)
go func() {
s.sbSub.Receive(rctx, servicebus.HandlerFunc(func(innerctx context.Context, sbmsg *servicebus.Message) error {
metadata := map[string]string{}
sbmsg.ForeachKey(func(k, v string) error {
metadata[k] = v
return nil
})
messages = append(messages, &driver.Message{
Body: sbmsg.Data,
Metadata: metadata,
AckID: sbmsg.LockToken,
AsFunc: messageAsFunc(sbmsg),
})
if len(messages) >= maxMessages {
cancel()
}
return nil
}))
select {
case <-rctx.Done():
wg.Done()
}
}()
wg.Wait()
return messages, nil
}
func messageAsFunc(sbmsg *servicebus.Message) func(interface{}) bool {
return func(i interface{}) bool {
p, ok := i.(**servicebus.Message)
if !ok {
return false
}
*p = sbmsg
return true
}
}
// SendAcks implements driver.Subscription.SendAcks.
func (s *subscription) SendAcks(ctx context.Context, ids []driver.AckID) error {
return s.updateMessageDispositions(ctx, ids, dispositionForAck)
}
// SendNacks implements driver.Subscription.SendNacks.
func (s *subscription) SendNacks(ctx context.Context, ids []driver.AckID) error {
return s.updateMessageDispositions(ctx, ids, dispositionForNack)
}
// IMPORTANT: This is a workaround to issue message dispositions in bulk which is not supported in the Service Bus SDK.
func (s *subscription) updateMessageDispositions(ctx context.Context, ids []driver.AckID, disposition string) error {
if len(ids) == 0 {
return nil
}
lockIds := []amqp.UUID{}
for _, mid := range ids {
if id, ok := mid.(*uuid.UUID); ok {
lockTokenBytes := [16]byte(*id)
lockIds = append(lockIds, amqp.UUID(lockTokenBytes))
}
}
value := map[string]interface{}{
"disposition-status": disposition,
"lock-tokens": lockIds,
}
msg := &amqp.Message{
ApplicationProperties: map[string]interface{}{
"operation": "com.microsoft:update-disposition",
},
Value: value,
}
// We're not actually making use of link.Retryable since we're passing 1
// here. The portable type will retry as needed.
//
// We could just use link.RPC, but it returns a result with a status code
// in addition to err, and we'd have to check both.
_, err := s.amqpLink.RetryableRPC(ctx, 1, 0, msg)
if err == nil {
return nil
}
if !isNotFoundErr(err) {
return err
}
// It's a "not found" error, probably due to the message already being
// deleted on the server. If we're just acking 1 message, we can just
// swallow the error, but otherwise we'll need to retry one by one.
if len(ids) == 1 {
return nil
}
for _, lockID := range lockIds {
value["lock-tokens"] = []amqp.UUID{lockID}
if _, err := s.amqpLink.RetryableRPC(ctx, 1, 0, msg); err != nil && !isNotFoundErr(err) {
return err
}
}
return nil
}
// isNotFoundErr returns true if the error is status code 410, Gone.
// Azure returns this when trying to ack/nack a message that no longer exists.
func isNotFoundErr(err error) bool {
return strings.Contains(err.Error(), "status code 410")
}
func errorCode(err error) gcerrors.ErrorCode {
aerr, ok := err.(*amqp.Error)
if !ok {
return gcerrors.Unknown
}
switch aerr.Condition {
case amqp.ErrorCondition(servicebus.ErrorNotFound):
return gcerrors.NotFound
case amqp.ErrorCondition(servicebus.ErrorPreconditionFailed):
return gcerrors.FailedPrecondition
case amqp.ErrorCondition(servicebus.ErrorInternalError):
return gcerrors.Internal
case amqp.ErrorCondition(servicebus.ErrorNotImplemented):
return gcerrors.Unimplemented
case amqp.ErrorCondition(servicebus.ErrorUnauthorizedAccess), amqp.ErrorCondition(servicebus.ErrorNotAllowed):
return gcerrors.PermissionDenied
case amqp.ErrorCondition(servicebus.ErrorResourceLimitExceeded):
return gcerrors.ResourceExhausted
case amqp.ErrorCondition(servicebus.ErrorInvalidField):
return gcerrors.InvalidArgument
default:
return gcerrors.Unknown
}
}
| 1 | 16,505 | Instead of just "See SubscriberOptions....", say something like "Use ... to choose between the two." | google-go-cloud | go |
@@ -0,0 +1,10 @@
+import argparse
+from rdkit import Chem
+sdf = Chem.SDMolSupplier("cdk2.sdf")
+f = open("cdk2.smi","w")
+for mol in sdf:
+ name = mol.GetProp("_Name")
+ smi = Chem.MolToSmiles( mol )
+ f.write( "{}\t{}\n".format(name,smi))
+f.close()
+ | 1 | 1 | 17,810 | Use with statement for `f` | rdkit-rdkit | cpp |
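# Illustrative alternative to the open/close pair above (hedged sketch, not part of the patch):
# a with statement closes the file even if an exception is raised mid-loop.
#   with open("cdk2.smi", "w") as f:
#       for mol in sdf:
#           f.write("{}\t{}\n".format(mol.GetProp("_Name"), Chem.MolToSmiles(mol)))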
|
@@ -426,3 +426,16 @@ TEST_F(BadPonyTest, AnnotatedIfClause)
TEST_COMPILE(src);
}
+
+TEST_F(BadPonyTest, CapSubtypeInConstrainSubtyping)
+{
+ // From PR #1816
+ const char* src =
+ "trait T\n"
+ " fun alias[X: Any iso](x: X!) : X^\n"
+ "class C is T\n"
+ " fun alias[X: Any tag](x: X!) : X^ => x\n";
+
+ TEST_ERRORS_1(src,
+ "type parameter constraint Any tag is not a supertype of Any iso");
+} | 1 | #include <gtest/gtest.h>
#include <platform.h>
#include "util.h"
/** Pony code that parses, but is erroneous. Typically type check errors and
* things used in invalid contexts.
*
* We build all the way up to and including code gen and check that we do not
* assert, segfault, etc but that the build fails and at least one error is
* reported.
*
* There is definite potential for overlap with other tests but this is also a
* suitable location for tests which don't obviously belong anywhere else.
*/
#define TEST_COMPILE(src) DO(test_compile(src, "all"))
#define TEST_ERRORS_1(src, err1) \
{ const char* errs[] = {err1, NULL}; \
DO(test_expected_errors(src, "ir", errs)); }
#define TEST_ERRORS_2(src, err1, err2) \
{ const char* errs[] = {err1, err2, NULL}; \
DO(test_expected_errors(src, "ir", errs)); }
#define TEST_ERRORS_3(src, err1, err2, err3) \
{ const char* errs[] = {err1, err2, err3, NULL}; \
DO(test_expected_errors(src, "ir", errs)); }
class BadPonyTest : public PassTest
{};
// Cases from reported issues
TEST_F(BadPonyTest, ClassInOtherClassProvidesList)
{
// From issue #218
const char* src =
"class Named\n"
"class Dog is Named\n"
"actor Main\n"
" new create(env: Env) =>\n"
" None";
TEST_ERRORS_1(src, "can only provide traits and interfaces");
}
TEST_F(BadPonyTest, TypeParamMissingForTypeInProvidesList)
{
// From issue #219
const char* src =
"trait Bar[A]\n"
" fun bar(a: A) =>\n"
" None\n"
"trait Foo is Bar // here also should be a type argument, like Bar[U8]\n"
" fun foo() =>\n"
" None\n"
"actor Main\n"
" new create(env: Env) =>\n"
" None";
TEST_ERRORS_1(src, "not enough type arguments");
}
TEST_F(BadPonyTest, TupleIndexIsZero)
{
// From issue #397
const char* src =
"primitive Foo\n"
" fun bar(): None =>\n"
" (None, None)._0";
TEST_ERRORS_1(src, "Did you mean _1?");
}
TEST_F(BadPonyTest, TupleIndexIsOutOfRange)
{
// From issue #397
const char* src =
"primitive Foo\n"
" fun bar(): None =>\n"
" (None, None)._3";
TEST_ERRORS_1(src, "Valid range is [1, 2]");
}
TEST_F(BadPonyTest, InvalidLambdaReturnType)
{
// From issue #828
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" {(): tag => this }\n";
TEST_ERRORS_1(src, "lambda return type: tag");
}
TEST_F(BadPonyTest, InvalidMethodReturnType)
{
// From issue #828
const char* src =
"primitive Foo\n"
" fun bar(): iso =>\n"
" U32(1)\n";
TEST_ERRORS_1(src, "function return type: iso");
}
TEST_F(BadPonyTest, ObjectLiteralUninitializedField)
{
// From issue #879
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" object\n"
" let x: I32\n"
" end";
TEST_ERRORS_1(src, "object literal fields must be initialized");
}
TEST_F(BadPonyTest, LambdaCaptureVariableBeforeDeclarationWithTypeInferenceExpressionFail)
{
// From issue #1018
const char* src =
"class Foo\n"
" fun f() =>\n"
" {()(x) => None }\n"
" let x = 0";
TEST_ERRORS_1(src, "declaration of 'x' appears after use");
}
// TODO: This test is not correct because it does not fail without the fix.
// I do not know how to generate a test that calls genheader().
// Comments are welcomed.
TEST_F(BadPonyTest, ExportedActorWithVariadicReturnTypeContainingNone)
{
// From issue #891
const char* src =
"primitive T\n"
"\n"
"actor @A\n"
" fun f(a: T): (T | None) =>\n"
" a\n";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, TypeAliasRecursionThroughTypeParameterInTuple)
{
// From issue #901
const char* src =
"type Foo is (Map[Foo, Foo], None)\n"
"actor Main\n"
" new create(env: Env) =>\n"
" None";
TEST_ERRORS_1(src, "type aliases can't be recursive");
}
TEST_F(BadPonyTest, ParenthesisedReturn)
{
// From issue #1050
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" (return)";
TEST_ERRORS_1(src, "use return only to exit early from a method");
}
TEST_F(BadPonyTest, ParenthesisedReturn2)
{
// From issue #1050
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" foo()\n"
" fun foo(): U64 =>\n"
" (return 0)\n"
" 2";
TEST_ERRORS_1(src, "Unreachable code");
}
TEST_F(BadPonyTest, MatchUncalledMethod)
{
// From issue #903
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" match foo\n"
" | None => None\n"
" end\n"
" fun foo() =>\n"
" None";
TEST_ERRORS_2(src, "can't reference a method without calling it",
"this pattern can never match");
}
TEST_F(BadPonyTest, TupleFieldReassign)
{
// From issue #1101
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" var foo: (U64, String) = (42, \"foo\")\n"
" foo._2 = \"bar\"";
TEST_ERRORS_2(src, "can't assign to an element of a tuple",
"left side must be something that can be assigned to");
}
TEST_F(BadPonyTest, WithBlockTypeInference)
{
// From issue #1135
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" with x = 1 do None end";
TEST_ERRORS_3(src, "could not infer literal type, no valid types found",
"cannot infer type of $1$0",
"cannot infer type of x");
}
TEST_F(BadPonyTest, EmbedNestedTuple)
{
// From issue #1136
const char* src =
"class Foo\n"
" fun get_foo(): Foo => Foo\n"
"actor Main\n"
" embed foo: Foo\n"
" let x: U64\n"
" new create(env: Env) =>\n"
" (foo, x) = (Foo.get_foo(), 42)";
TEST_ERRORS_1(src, "an embedded field must be assigned using a constructor");
}
TEST_F(BadPonyTest, CircularTypeInfer)
{
// From issue #1334
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" let x = x.create()\n"
" let y = y.create()";
TEST_ERRORS_2(src, "cannot infer type of x",
"cannot infer type of y");
}
TEST_F(BadPonyTest, CallConstructorOnTypeIntersection)
{
// From issue #1398
const char* src =
"interface Foo\n"
"type Isect is (None & Foo)\n"
"actor Main\n"
" new create(env: Env) =>\n"
" Isect.create()";
TEST_ERRORS_1(src, "can't call a constructor on a type intersection");
}
TEST_F(BadPonyTest, AssignToFieldOfIso)
{
// From issue #1469
const char* src =
"class Foo\n"
" var x: String ref = String\n"
" fun iso bar(): String iso^ =>\n"
" let s = recover String end\n"
" x = s\n"
" consume s\n"
" fun ref foo(): String iso^ =>\n"
" let s = recover String end\n"
" let y: Foo iso = Foo\n"
" y.x = s\n"
" consume s";
TEST_ERRORS_2(src,
"right side must be a subtype of left side",
"right side must be a subtype of left side"
);
}
TEST_F(BadPonyTest, IndexArrayWithBrackets)
{
// From issue #1493
const char* src =
"actor Main\n"
"new create(env: Env) =>\n"
"let xs = [as I64: 1; 2; 3]\n"
"xs[1]";
TEST_ERRORS_1(src, "Value formal parameters not yet supported");
}
TEST_F(BadPonyTest, ShadowingBuiltinTypeParameter)
{
const char* src =
"class A[I8]\n"
"let b: U8 = 0";
TEST_ERRORS_1(src, "type parameter shadows existing type");
}
TEST_F(BadPonyTest, ShadowingTypeParameterInSameFile)
{
const char* src =
"trait B\n"
"class A[B]";
TEST_ERRORS_1(src, "can't reuse name 'B'");
}
TEST_F(BadPonyTest, TupleToUnionGentrace)
{
// From issue #1561
const char* src =
"primitive X\n"
"primitive Y\n"
"class iso T\n"
"actor Main\n"
" new create(env: Env) =>\n"
" this((T, Y))\n"
" be apply(m: (X | (T, Y))) => None";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, RefCapViolationViaCapReadTypeParameter)
{
// From issue #1328
const char* src =
"class Foo\n"
"var i: USize = 0\n"
"fun ref boom() => i = 3\n"
"actor Main\n"
"new create(env: Env) =>\n"
"let a: Foo val = Foo\n"
"call_boom[Foo val](a)\n"
"fun call_boom[A: Foo #read](x: A) =>\n"
"x.boom()";
TEST_ERRORS_1(src, "receiver type is not a subtype of target type");
}
TEST_F(BadPonyTest, RefCapViolationViaCapAnyTypeParameter)
{
// From issue #1328
const char* src =
"class Foo\n"
"var i: USize = 0\n"
"fun ref boom() => i = 3\n"
"actor Main\n"
"new create(env: Env) =>\n"
"let a: Foo val = Foo\n"
"call_boom[Foo val](a)\n"
"fun call_boom[A: Foo #any](x: A) =>\n"
"x.boom()";
TEST_ERRORS_1(src, "receiver type is not a subtype of target type");
}
TEST_F(BadPonyTest, TypeParamArrowClass)
{
// From issue #1687
const char* src =
"class C1\n"
"trait Test[A]\n"
"fun foo(a: A): A->C1";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, ArrowTypeParamInConstraint)
{
// From issue #1694
const char* src =
"trait T1[A: B->A, B]\n"
"trait T2[A: box->B, B]";
TEST_ERRORS_2(src,
"arrow types can't be used as type constraints",
"arrow types can't be used as type constraints");
}
TEST_F(BadPonyTest, AnnotatedIfClause)
{
// From issue #1751
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" if \\likely\\ U32(1) == 1 then\n"
" None\n"
" end\n";
TEST_COMPILE(src);
}
| 1 | 10,397 | It's a small style point, but could you remove the "extra" space before the colon that precedes the return type? This would make it more closely match the prevailing style in these tests and in the standard libraries. | ponylang-ponyc | c |
@@ -594,6 +594,13 @@ func (md *MDOpsStandard) PruneBranch(
return md.config.MDServer().PruneBranch(ctx, id, bid)
}
+// ResolveBranch implements the MDOps interface for MDOpsStandard.
+func (md *MDOpsStandard) ResolveBranch(
+ ctx context.Context, id TlfID, bid BranchID,
+ blocksToDelete []BlockID, rmd *RootMetadata) (MdID, error) {
+ return MdID{}, errors.New("ResolveBranch not supported by MDOpsStandard")
+}
+
// GetLatestHandleForTLF implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) GetLatestHandleForTLF(ctx context.Context, id TlfID) (
BareTlfHandle, error) { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"sync"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"golang.org/x/net/context"
)
// MDOpsStandard provides plaintext RootMetadata objects to upper
// layers, and processes RootMetadataSigned objects (encrypted and
// signed) suitable for passing to/from the MDServer backend.
type MDOpsStandard struct {
config Config
log logger.Logger
}
// NewMDOpsStandard returns a new MDOpsStandard
func NewMDOpsStandard(config Config) *MDOpsStandard {
return &MDOpsStandard{config, config.MakeLogger("")}
}
// convertVerifyingKeyError gives a better error when the TLF was
// signed by a key that is no longer associated with the last writer.
func (md *MDOpsStandard) convertVerifyingKeyError(ctx context.Context,
rmds *RootMetadataSigned, handle *TlfHandle, err error) error {
if _, ok := err.(KeyNotFoundError); !ok {
return err
}
tlf := handle.GetCanonicalPath()
writer, nameErr := md.config.KBPKI().GetNormalizedUsername(ctx,
rmds.MD.LastModifyingWriter())
if nameErr != nil {
writer = libkb.NormalizedUsername("uid: " +
rmds.MD.LastModifyingWriter().String())
}
md.log.CDebugf(ctx, "Unverifiable update for TLF %s", rmds.MD.TlfID())
return UnverifiableTlfUpdateError{tlf, writer, err}
}
func (md *MDOpsStandard) verifyWriterKey(ctx context.Context,
rmds *RootMetadataSigned, handle *TlfHandle,
getRangeLock *sync.Mutex) error {
if !rmds.MD.IsWriterMetadataCopiedSet() {
var err error
if handle.IsFinal() {
err = md.config.KBPKI().HasUnverifiedVerifyingKey(ctx,
rmds.MD.LastModifyingWriter(),
rmds.GetWriterMetadataSigInfo().VerifyingKey)
} else {
err = md.config.KBPKI().HasVerifyingKey(ctx,
rmds.MD.LastModifyingWriter(),
rmds.GetWriterMetadataSigInfo().VerifyingKey,
rmds.untrustedServerTimestamp)
}
if err != nil {
return md.convertVerifyingKeyError(ctx, rmds, handle, err)
}
return nil
}
// The writer metadata can be copied only for rekeys or
// finalizations, neither of which should happen while
// unmerged.
if rmds.MD.MergedStatus() != Merged {
return fmt.Errorf("Revision %d for %s has a copied writer "+
"metadata, but is unexpectedly not merged",
rmds.MD.RevisionNumber(), rmds.MD.TlfID())
}
if getRangeLock != nil {
// If there are multiple goroutines, we don't want to risk
// several concurrent requests to the MD server, just in case
// there are several revisions with copied writer MD in this
// range.
//
// TODO: bugs could result in thousands (or more) copied MD
// updates in a row (i.e., too many to fit in the cache). We
// could do something more sophisticated here where once one
// goroutine finds the copied MD, it stores it somewhere so
// the other goroutines don't have to also search through all
// the same MD updates (which may have been evicted from the
// cache in the meantime). Also, maybe copied writer MDs
// should include the original revision number so we don't
// have to search like this.
getRangeLock.Lock()
defer getRangeLock.Unlock()
}
// The server timestamp on rmds does not reflect when the
// writer MD was actually signed, since it was copied from a
// previous revision. Search backwards for the most recent
// uncopied writer MD to get the right timestamp.
prevHead := rmds.MD.RevisionNumber() - 1
for {
startRev := prevHead - maxMDsAtATime + 1
if startRev < MetadataRevisionInitial {
startRev = MetadataRevisionInitial
}
// Recursively call into MDOps. Note that in the case where
// we were already fetching a range of MDs, this could do
// extra work by downloading the same MDs twice (for those
// that aren't yet in the cache). That should be so rare that
// it's not worth optimizing.
prevMDs, err := getMDRange(ctx, md.config, rmds.MD.TlfID(), rmds.MD.BID(),
startRev, prevHead, rmds.MD.MergedStatus())
if err != nil {
return err
}
for i := len(prevMDs) - 1; i >= 0; i-- {
if !prevMDs[i].IsWriterMetadataCopiedSet() {
// We want to compare the writer signature of
// rmds with that of prevMDs[i]. However, we've
// already dropped prevMDs[i]'s writer
// signature. We can just verify prevMDs[i]'s
// writer metadata with rmds's signature,
// though.
buf, err := prevMDs[i].GetSerializedWriterMetadata(md.config.Codec())
if err != nil {
return err
}
err = md.config.Crypto().Verify(
buf, rmds.GetWriterMetadataSigInfo())
if err != nil {
return fmt.Errorf("Could not verify "+
"uncopied writer metadata "+
"from revision %d of folder "+
"%s with signature from "+
"revision %d: %v",
prevMDs[i].Revision(),
rmds.MD.TlfID(),
rmds.MD.RevisionNumber(), err)
}
// The fact the fact that we were able to process this
// MD correctly means that we already verified its key
// at the correct timestamp, so we're good.
return nil
}
}
// No more MDs left to process.
if len(prevMDs) < maxMDsAtATime {
return fmt.Errorf("Couldn't find uncopied MD previous to "+
"revision %d of folder %s for checking the writer "+
"timestamp", rmds.MD.RevisionNumber(), rmds.MD.TlfID())
}
prevHead = prevMDs[0].Revision() - 1
}
}
// processMetadata converts the given rmds to an
// ImmutableRootMetadata. After this function is called, rmds
// shouldn't be used.
func (md *MDOpsStandard) processMetadata(ctx context.Context,
handle *TlfHandle, rmds *RootMetadataSigned, extra ExtraMetadata,
getRangeLock *sync.Mutex) (ImmutableRootMetadata, error) {
// First, verify validity and signatures.
err := rmds.IsValidAndSigned(md.config.Codec(), md.config.Crypto(), extra)
if err != nil {
return ImmutableRootMetadata{}, MDMismatchError{
rmds.MD.RevisionNumber(), handle.GetCanonicalPath(),
rmds.MD.TlfID(), err,
}
}
// Then, verify the verifying keys.
if err := md.verifyWriterKey(ctx, rmds, handle, getRangeLock); err != nil {
return ImmutableRootMetadata{}, err
}
if handle.IsFinal() {
err = md.config.KBPKI().HasUnverifiedVerifyingKey(
ctx, rmds.MD.GetLastModifyingUser(),
rmds.SigInfo.VerifyingKey)
} else {
err = md.config.KBPKI().HasVerifyingKey(
ctx, rmds.MD.GetLastModifyingUser(),
rmds.SigInfo.VerifyingKey,
rmds.untrustedServerTimestamp)
}
if err != nil {
return ImmutableRootMetadata{}, md.convertVerifyingKeyError(ctx, rmds, handle, err)
}
_, uid, err := md.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
// If this is a public folder, it's ok to proceed if we have
// no current session.
if _, ok := err.(NoCurrentSessionError); ok && !handle.IsPublic() {
return ImmutableRootMetadata{}, err
} else if !ok {
return ImmutableRootMetadata{}, err
}
}
// TODO: Avoid having to do this type assertion.
brmd, ok := rmds.MD.(MutableBareRootMetadata)
if !ok {
return ImmutableRootMetadata{}, MutableBareRootMetadataNoImplError{}
}
rmd := MakeRootMetadata(brmd, extra, handle)
// Try to decrypt using the keys available in this md. If that
// doesn't work, a future MD may contain more keys and will be
// tried later.
pmd, err := decryptMDPrivateData(
ctx, md.config.Codec(), md.config.Crypto(),
md.config.BlockCache(), md.config.BlockOps(),
md.config.KeyManager(), uid, rmd.GetSerializedPrivateMetadata(),
rmd, rmd)
if err != nil {
return ImmutableRootMetadata{}, err
}
rmd.data = pmd
mdID, err := md.config.Crypto().MakeMdID(rmd.bareMd)
if err != nil {
return ImmutableRootMetadata{}, err
}
localTimestamp := rmds.untrustedServerTimestamp
if offset, ok := md.config.MDServer().OffsetFromServerTime(); ok {
localTimestamp = localTimestamp.Add(offset)
}
key := rmds.GetWriterMetadataSigInfo().VerifyingKey
*rmds = RootMetadataSigned{}
return MakeImmutableRootMetadata(rmd, key, mdID, localTimestamp), nil
}
// GetForHandle implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) GetForHandle(ctx context.Context, handle *TlfHandle,
mStatus MergeStatus) (TlfID, ImmutableRootMetadata, error) {
mdserv := md.config.MDServer()
bh, err := handle.ToBareHandle()
if err != nil {
return TlfID{}, ImmutableRootMetadata{}, err
}
id, rmds, err := mdserv.GetForHandle(ctx, bh, mStatus)
if err != nil {
return TlfID{}, ImmutableRootMetadata{}, err
}
if rmds == nil {
if mStatus == Unmerged {
// The caller ignores the id argument for
// mStatus == Unmerged.
return TlfID{}, ImmutableRootMetadata{}, nil
}
return id, ImmutableRootMetadata{}, nil
}
extra, err := md.getExtraMD(ctx, rmds.MD)
if err != nil {
return TlfID{}, ImmutableRootMetadata{}, err
}
bareMdHandle, err := rmds.MD.MakeBareTlfHandle(extra)
if err != nil {
return TlfID{}, ImmutableRootMetadata{}, err
}
mdHandle, err := MakeTlfHandle(ctx, bareMdHandle, md.config.KBPKI())
if err != nil {
return TlfID{}, ImmutableRootMetadata{}, err
}
// Check for mutual handle resolution.
if err := mdHandle.MutuallyResolvesTo(ctx, md.config.Codec(),
md.config.KBPKI(), *handle, rmds.MD.RevisionNumber(), rmds.MD.TlfID(),
md.log); err != nil {
return TlfID{}, ImmutableRootMetadata{}, err
}
// TODO: For now, use the mdHandle that came with rmds for
// consistency. In the future, we'd want to eventually notify
// the upper layers of the new name, either directly, or
// through a rekey.
rmd, err := md.processMetadata(ctx, mdHandle, rmds, extra, nil)
if err != nil {
return TlfID{}, ImmutableRootMetadata{}, err
}
return id, rmd, nil
}
func (md *MDOpsStandard) processMetadataWithID(ctx context.Context,
id TlfID, bid BranchID, handle *TlfHandle, rmds *RootMetadataSigned,
extra ExtraMetadata, getRangeLock *sync.Mutex) (ImmutableRootMetadata, error) {
// Make sure the signed-over ID matches
if id != rmds.MD.TlfID() {
return ImmutableRootMetadata{}, MDMismatchError{
rmds.MD.RevisionNumber(), id.String(), rmds.MD.TlfID(),
fmt.Errorf("MD contained unexpected folder id %s, expected %s",
rmds.MD.TlfID().String(), id.String()),
}
}
// Make sure the signed-over branch ID matches
if bid != NullBranchID && bid != rmds.MD.BID() {
return ImmutableRootMetadata{}, MDMismatchError{
rmds.MD.RevisionNumber(), id.String(), rmds.MD.TlfID(),
fmt.Errorf("MD contained unexpected branch id %s, expected %s, "+
"folder id %s", rmds.MD.BID().String(), bid.String(), id.String()),
}
}
return md.processMetadata(ctx, handle, rmds, extra, getRangeLock)
}
func (md *MDOpsStandard) getForTLF(ctx context.Context, id TlfID,
bid BranchID, mStatus MergeStatus) (ImmutableRootMetadata, error) {
rmds, err := md.config.MDServer().GetForTLF(ctx, id, bid, mStatus)
if err != nil {
return ImmutableRootMetadata{}, err
}
if rmds == nil {
// Possible if mStatus is Unmerged
return ImmutableRootMetadata{}, nil
}
extra, err := md.getExtraMD(ctx, rmds.MD)
if err != nil {
return ImmutableRootMetadata{}, err
}
bareHandle, err := rmds.MD.MakeBareTlfHandle(extra)
if err != nil {
return ImmutableRootMetadata{}, err
}
handle, err := MakeTlfHandle(ctx, bareHandle, md.config.KBPKI())
if err != nil {
return ImmutableRootMetadata{}, err
}
rmd, err := md.processMetadataWithID(ctx, id, bid, handle, rmds, extra, nil)
if err != nil {
return ImmutableRootMetadata{}, err
}
return rmd, nil
}
// GetForTLF implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) GetForTLF(ctx context.Context, id TlfID) (
ImmutableRootMetadata, error) {
return md.getForTLF(ctx, id, NullBranchID, Merged)
}
// GetUnmergedForTLF implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) GetUnmergedForTLF(
ctx context.Context, id TlfID, bid BranchID) (
ImmutableRootMetadata, error) {
return md.getForTLF(ctx, id, bid, Unmerged)
}
func (md *MDOpsStandard) processRange(ctx context.Context, id TlfID,
bid BranchID, rmdses []*RootMetadataSigned) (
[]ImmutableRootMetadata, error) {
if len(rmdses) == 0 {
return nil, nil
}
var wg sync.WaitGroup
numWorkers := len(rmdses)
if numWorkers > maxMDsAtATime {
numWorkers = maxMDsAtATime
}
wg.Add(numWorkers)
// Parallelize the MD decryption, because it could involve
// fetching blocks to get unembedded block changes.
rmdsChan := make(chan *RootMetadataSigned, len(rmdses))
irmdChan := make(chan ImmutableRootMetadata, len(rmdses))
errChan := make(chan error, 1)
var getRangeLock sync.Mutex
worker := func() {
defer wg.Done()
for rmds := range rmdsChan {
extra, err := md.getExtraMD(ctx, rmds.MD)
if err != nil {
select {
case errChan <- err:
default:
}
return
}
bareHandle, err := rmds.MD.MakeBareTlfHandle(extra)
if err != nil {
select {
case errChan <- err:
default:
}
return
}
handle, err := MakeTlfHandle(ctx, bareHandle, md.config.KBPKI())
if err != nil {
select {
case errChan <- err:
default:
}
return
}
irmd, err := md.processMetadataWithID(ctx, id, bid,
handle, rmds, extra, &getRangeLock)
if err != nil {
select {
case errChan <- err:
default:
}
return
}
irmdChan <- irmd
}
}
for i := 0; i < numWorkers; i++ {
go worker()
}
// Do this first, since processMetadataWithID consumes its
// rmds argument.
startRev := rmdses[0].MD.RevisionNumber()
rmdsCount := len(rmdses)
for _, rmds := range rmdses {
rmdsChan <- rmds
}
close(rmdsChan)
rmdses = nil
go func() {
wg.Wait()
close(errChan)
close(irmdChan)
}()
err := <-errChan
if err != nil {
return nil, err
}
// Sort into slice based on revision.
irmds := make([]ImmutableRootMetadata, rmdsCount)
numExpected := MetadataRevision(len(irmds))
for irmd := range irmdChan {
i := irmd.Revision() - startRev
if i < 0 || i >= numExpected {
return nil, fmt.Errorf("Unexpected revision %d; expected "+
"something between %d and %d inclusive", irmd.Revision(),
startRev, startRev+numExpected-1)
} else if irmds[i] != (ImmutableRootMetadata{}) {
return nil, fmt.Errorf("Got revision %d twice", irmd.Revision())
}
irmds[i] = irmd
}
// Now that we have all the immutable RootMetadatas, verify that
// the given MD objects form a valid sequence.
var prevIRMD ImmutableRootMetadata
for _, irmd := range irmds {
if prevIRMD != (ImmutableRootMetadata{}) {
// Ideally, we'd call
// ReadOnlyRootMetadata.CheckValidSuccessor()
// instead. However, we only convert r.MD to
// an ImmutableRootMetadata in
// processMetadataWithID below, and we want to
// do this check before then.
err = prevIRMD.bareMd.CheckValidSuccessor(
prevIRMD.mdID, irmd.bareMd)
if err != nil {
return nil, MDMismatchError{
prevIRMD.Revision(),
irmd.GetTlfHandle().GetCanonicalPath(),
prevIRMD.TlfID(), err,
}
}
}
prevIRMD = irmd
}
// TODO: in the case where lastRoot == MdID{}, should we verify
// that the starting PrevRoot points back to something that's
// actually a valid part of this history? If the MD signature is
// indeed valid, this probably isn't a huge deal, but it may let
// the server rollback or truncate unmerged history...
return irmds, nil
}
func (md *MDOpsStandard) getRange(ctx context.Context, id TlfID,
bid BranchID, mStatus MergeStatus, start, stop MetadataRevision) (
[]ImmutableRootMetadata, error) {
rmds, err := md.config.MDServer().GetRange(
ctx, id, bid, mStatus, start, stop)
if err != nil {
return nil, err
}
rmd, err := md.processRange(ctx, id, bid, rmds)
if err != nil {
return nil, err
}
return rmd, nil
}
// GetRange implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) GetRange(ctx context.Context, id TlfID,
start, stop MetadataRevision) ([]ImmutableRootMetadata, error) {
return md.getRange(ctx, id, NullBranchID, Merged, start, stop)
}
// GetUnmergedRange implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) GetUnmergedRange(ctx context.Context, id TlfID,
bid BranchID, start, stop MetadataRevision) ([]ImmutableRootMetadata, error) {
return md.getRange(ctx, id, bid, Unmerged, start, stop)
}
func (md *MDOpsStandard) put(
ctx context.Context, rmd *RootMetadata) (MdID, error) {
_, me, err := md.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return MdID{}, err
}
// Ensure that the block changes are properly unembedded.
if !rmd.IsWriterMetadataCopiedSet() &&
rmd.data.Changes.Info.BlockPointer == zeroPtr &&
!md.config.BlockSplitter().ShouldEmbedBlockChanges(&rmd.data.Changes) {
return MdID{},
errors.New("MD has embedded block changes, but shouldn't")
}
err = encryptMDPrivateData(
ctx, md.config.Codec(), md.config.Crypto(),
md.config.Crypto(), md.config.KeyManager(), me, rmd)
if err != nil {
return MdID{}, err
}
rmds, err := signMD(
ctx, md.config.Codec(), md.config.Crypto(),
rmd.bareMd, time.Time{})
if err != nil {
return MdID{}, err
}
err = md.config.MDServer().Put(ctx, rmds, rmd.NewExtra())
if err != nil {
return MdID{}, err
}
mdID, err := md.config.Crypto().MakeMdID(rmds.MD)
if err != nil {
return MdID{}, err
}
return mdID, nil
}
// Put implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) Put(
ctx context.Context, rmd *RootMetadata) (MdID, error) {
if rmd.MergedStatus() == Unmerged {
return MdID{}, UnexpectedUnmergedPutError{}
}
return md.put(ctx, rmd)
}
// PutUnmerged implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) PutUnmerged(
ctx context.Context, rmd *RootMetadata) (MdID, error) {
rmd.SetUnmerged()
if rmd.BID() == NullBranchID {
// new branch ID
bid, err := md.config.Crypto().MakeRandomBranchID()
if err != nil {
return MdID{}, err
}
rmd.SetBranchID(bid)
}
return md.put(ctx, rmd)
}
// PruneBranch implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) PruneBranch(
ctx context.Context, id TlfID, bid BranchID) error {
return md.config.MDServer().PruneBranch(ctx, id, bid)
}
// GetLatestHandleForTLF implements the MDOps interface for MDOpsStandard.
func (md *MDOpsStandard) GetLatestHandleForTLF(ctx context.Context, id TlfID) (
BareTlfHandle, error) {
// TODO: Verify this mapping using a Merkle tree.
return md.config.MDServer().GetLatestHandleForTLF(ctx, id)
}
func (md *MDOpsStandard) getExtraMD(ctx context.Context, brmd BareRootMetadata) (
extra ExtraMetadata, err error) {
wkbID, rkbID := brmd.GetTLFWriterKeyBundleID(), brmd.GetTLFReaderKeyBundleID()
if (wkbID == TLFWriterKeyBundleID{}) || (rkbID == TLFReaderKeyBundleID{}) {
// Pre-v3 metadata embed key bundles and as such won't set any IDs.
return nil, nil
}
mdserv := md.config.MDServer()
kbcache := md.config.KeyBundleCache()
tlf := brmd.TlfID()
// Check the cache.
wkb, err2 := kbcache.GetTLFWriterKeyBundle(tlf, wkbID)
if err2 != nil {
md.log.CDebugf(ctx, "Error fetching writer key bundle %s from cache for TLF %s: %s",
wkbID, tlf, err2)
}
rkb, err2 := kbcache.GetTLFReaderKeyBundle(tlf, rkbID)
if err2 != nil {
md.log.CDebugf(ctx, "Error fetching reader key bundle %s from cache for TLF %s: %s",
rkbID, tlf, err2)
}
if wkb != nil && rkb != nil {
return &ExtraMetadataV3{wkb: wkb, rkb: rkb}, nil
}
if wkb != nil {
// Don't need the writer bundle.
_, rkb, err = mdserv.GetKeyBundles(ctx, tlf, TLFWriterKeyBundleID{}, rkbID)
} else if rkb != nil {
// Don't need the reader bundle.
wkb, _, err = mdserv.GetKeyBundles(ctx, tlf, wkbID, TLFReaderKeyBundleID{})
} else {
// Need them both.
wkb, rkb, err = mdserv.GetKeyBundles(ctx, tlf, wkbID, rkbID)
}
if err != nil {
return nil, err
}
// Cache the results.
kbcache.PutTLFWriterKeyBundle(tlf, wkbID, wkb)
kbcache.PutTLFReaderKeyBundle(tlf, rkbID, rkb)
return &ExtraMetadataV3{wkb: wkb, rkb: rkb}, nil
}
| 1 | 14,021 | seems clunky to me to have an `MDOps` interface method that some implementations don't implement. Perhaps define a separate interface, like, `BranchResolver`, and then callers that have an `MDOps` object can check via type assertion? | keybase-kbfs | go |
@@ -105,7 +105,7 @@ public interface WebDriver extends SearchContext {
* @see org.openqa.selenium.WebDriver.Timeouts
*/
@Override
- List<WebElement> findElements(By by);
+ <T extends WebElement> List<T> findElements(By by);
/** | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium;
import org.openqa.selenium.logging.LoggingPreferences;
import org.openqa.selenium.logging.Logs;
import java.net.URL;
import java.time.Duration;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
* WebDriver is a remote control interface that enables introspection and control of user agents
* (browsers). The methods in this interface fall into three categories:
* <ul>
* <li>Control of the browser itself</li>
* <li>Selection of {@link WebElement}s</li>
* <li>Debugging aids</li>
* </ul>
* <p>
* Key methods are {@link WebDriver#get(String)}, which is used to load a new web page, and the
* various methods similar to {@link WebDriver#findElement(By)}, which is used to find
* {@link WebElement}s.
* <p>
* Currently, you will need to instantiate implementations of this interface directly. It is hoped
* that you write your tests against this interface so that you may "swap in" a more fully featured
* browser when there is a requirement for one.
* <p>
* Most implementations of this interface follow
* <a href="https://w3c.github.io/webdriver/">W3C WebDriver specification</a>
*/
public interface WebDriver extends SearchContext {
// Navigation
/**
* Load a new web page in the current browser window. This is done using an HTTP POST operation,
* and the method will block until the load is complete (with the default 'page load strategy'.
* This will follow redirects issued either by the server or as a meta-redirect from within the
* returned HTML. Should a meta-redirect "rest" for any duration of time, it is best to wait until
* this timeout is over, since should the underlying page change whilst your test is executing the
* results of future calls against this interface will be against the freshly loaded page. Synonym
* for {@link org.openqa.selenium.WebDriver.Navigation#to(String)}.
* <p>
* See <a href="https://w3c.github.io/webdriver/#navigate-to">W3C WebDriver specification</a>
* for more details.
*
* @param url The URL to load. Must be a fully qualified URL
* @see org.openqa.selenium.PageLoadStrategy
*/
void get(String url);
/**
* Get a string representing the current URL that the browser is looking at.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-current-url">W3C WebDriver specification</a>
* for more details.
*
* @return The URL of the page currently loaded in the browser
*/
String getCurrentUrl();
// General properties
/**
* Get the title of the current page.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-title">W3C WebDriver specification</a>
* for more details.
*
* @return The title of the current page, with leading and trailing whitespace stripped, or null
* if one is not already set
*/
String getTitle();
/**
* Find all elements within the current page using the given mechanism.
* This method is affected by the 'implicit wait' times in force at the time of execution. When
* implicitly waiting, this method will return as soon as there are more than 0 items in the
* found collection, or will return an empty list if the timeout is reached.
* <p>
* See <a href="https://w3c.github.io/webdriver/#find-elements">W3C WebDriver specification</a>
* for more details.
*
* @param by The locating mechanism to use
* @return A list of all matching {@link WebElement}s, or an empty list if nothing matches
* @see org.openqa.selenium.By
* @see org.openqa.selenium.WebDriver.Timeouts
*/
@Override
List<WebElement> findElements(By by);
/**
* Find the first {@link WebElement} using the given method.
* This method is affected by the 'implicit wait' times in force at the time of execution.
* The findElement(..) invocation will return a matching row, or try again repeatedly until
* the configured timeout is reached.
* <p>
* findElement should not be used to look for non-present elements, use {@link #findElements(By)}
* and assert zero length response instead.
* <p>
* See <a href="https://w3c.github.io/webdriver/#find-element">W3C WebDriver specification</a>
* for more details.
*
* @param by The locating mechanism to use
* @return The first matching element on the current page
* @throws NoSuchElementException If no matching elements are found
* @see org.openqa.selenium.By
* @see org.openqa.selenium.WebDriver.Timeouts
*/
@Override
WebElement findElement(By by);
// Misc
/**
* Get the source of the last loaded page. If the page has been modified after loading (for
* example, by Javascript) there is no guarantee that the returned text is that of the modified
* page. Please consult the documentation of the particular driver being used to determine whether
* the returned text reflects the current state of the page or the text last sent by the web
* server. The page source returned is a representation of the underlying DOM: do not expect it to
* be formatted or escaped in the same way as the response sent from the web server. Think of it
* as an artist's impression.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-page-source">W3C WebDriver specification</a>
* for more details.
*
* @return The source of the current page
*/
String getPageSource();
/**
* Close the current window, quitting the browser if it's the last window currently open.
* <p>
* See <a href="https://w3c.github.io/webdriver/#close-window">W3C WebDriver specification</a>
* for more details.
*/
void close();
/**
* Quits this driver, closing every associated window.
*/
void quit();
/**
* Return a set of window handles which can be used to iterate over all open windows of this
* WebDriver instance by passing them to {@link #switchTo()}.{@link Options#window()}
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-handles">W3C WebDriver specification</a>
* for more details.
*
* @return A set of window handles which can be used to iterate over all open windows.
*/
Set<String> getWindowHandles();
/**
* Return an opaque handle to this window that uniquely identifies it within this driver instance.
* This can be used to switch to this window at a later date
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-handle">W3C WebDriver specification</a>
* for more details.
*
* @return the current window handle
*/
String getWindowHandle();
/**
* Send future commands to a different frame or window.
*
* @return A TargetLocator which can be used to select a frame or window
* @see org.openqa.selenium.WebDriver.TargetLocator
*/
TargetLocator switchTo();
/**
* An abstraction allowing the driver to access the browser's history and to navigate to a given
* URL.
*
* @return A {@link org.openqa.selenium.WebDriver.Navigation} that allows the selection of what to
* do next
*/
Navigation navigate();
/**
* Gets the Option interface
*
* @return An option interface
* @see org.openqa.selenium.WebDriver.Options
*/
Options manage();
/**
* An interface for managing stuff you would do in a browser menu
*/
interface Options {
/**
* Add a specific cookie. If the cookie's domain name is left blank, it is assumed that the
* cookie is meant for the domain of the current document.
* <p>
* See <a href="https://w3c.github.io/webdriver/#add-cookie">W3C WebDriver specification</a>
* for more details.
*
* @param cookie The cookie to add.
*/
void addCookie(Cookie cookie);
/**
* Delete the named cookie from the current domain. This is equivalent to setting the named
* cookie's expiry date to some time in the past.
* <p>
* See <a href="https://w3c.github.io/webdriver/#delete-cookie">W3C WebDriver specification</a>
* for more details.
*
* @param name The name of the cookie to delete
*/
void deleteCookieNamed(String name);
/**
* Delete a cookie from the browser's "cookie jar". The domain of the cookie will be ignored.
*
* @param cookie nom nom nom
*/
void deleteCookie(Cookie cookie);
/**
* Delete all the cookies for the current domain.
* <p>
* See <a href="https://w3c.github.io/webdriver/#delete-all-cookies">W3C WebDriver specification</a>
* for more details.
*/
void deleteAllCookies();
/**
* Get all the cookies for the current domain.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-all-cookies">W3C WebDriver specification</a>
* for more details.
*
* @return A Set of cookies for the current domain.
*/
Set<Cookie> getCookies();
/**
* Get a cookie with a given name.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-named-cookie">W3C WebDriver specification</a>
* for more details.
*
* @param name the name of the cookie
* @return the cookie, or null if no cookie with the given name is present
*/
Cookie getCookieNamed(String name);
/**
* @return the interface for managing driver timeouts.
*/
Timeouts timeouts();
/**
* @return the interface for controlling IME engines to generate complex-script input.
*/
ImeHandler ime();
/**
* @return the interface for managing the current window.
*/
Window window();
/**
* Gets the {@link Logs} interface used to fetch different types of logs.
* <p>
* To set the logging preferences {@link LoggingPreferences}.
*
* @return A Logs interface.
*/
@Beta
Logs logs();
}
/**
* An interface for managing timeout behavior for WebDriver instances.
* <p>
* See <a href="https://w3c.github.io/webdriver/#set-timeouts">W3C WebDriver specification</a>
* for more details.
*/
interface Timeouts {
/**
* @deprecated Use {@link #implicitlyWait(Duration)}
*
* Specifies the amount of time the driver should wait when searching for an element if it is
* not immediately present.
* <p>
* When searching for a single element, the driver should poll the page until the element has
* been found, or this timeout expires before throwing a {@link NoSuchElementException}. When
* searching for multiple elements, the driver should poll the page until at least one element
* has been found or this timeout has expired.
* <p>
* Increasing the implicit wait timeout should be used judiciously as it will have an adverse
* effect on test run time, especially when used with slower location strategies like XPath.
* <p>
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with invalid
* argument will be returned.
*
* @param time The amount of time to wait.
* @param unit The unit of measure for {@code time}.
* @return A self reference.
*/
@Deprecated
Timeouts implicitlyWait(long time, TimeUnit unit);
/**
* Specifies the amount of time the driver should wait when searching for an element if it is
* not immediately present.
* <p>
* When searching for a single element, the driver should poll the page until the element has
* been found, or this timeout expires before throwing a {@link NoSuchElementException}. When
* searching for multiple elements, the driver should poll the page until at least one element
* has been found or this timeout has expired.
* <p>
* Increasing the implicit wait timeout should be used judiciously as it will have an adverse
* effect on test run time, especially when used with slower location strategies like XPath.
* <p>
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with invalid
* argument will be returned.
*
* @param duration The duration to wait.
* @return A self reference.
*/
default Timeouts implicitlyWait(Duration duration) {
return implicitlyWait(duration.toMillis(), TimeUnit.MILLISECONDS);
}
/**
* Gets the amount of time the driver should wait when searching for an element if it is
* not immediately present.
*
* @return The amount of time the driver should wait when searching for an element.
* @see <a href="https://www.w3.org/TR/webdriver/#get-timeouts">W3C WebDriver</a>
*/
default Duration getImplicitWaitTimeout() {
throw new UnsupportedCommandException();
}
/**
* @deprecated Use {@link #setScriptTimeout(Duration)}
*
* Sets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @param time The timeout value.
* @param unit The unit of time.
* @return A self reference.
* @see JavascriptExecutor#executeAsyncScript(String, Object...)
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
@Deprecated
Timeouts setScriptTimeout(long time, TimeUnit unit);
/**
* Sets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @param duration The timeout value.
* @deprecated Use {@link #scriptTimeout(Duration)}
* @return A self reference.
* @see JavascriptExecutor#executeAsyncScript(String, Object...)
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
@Deprecated
default Timeouts setScriptTimeout(Duration duration) {
return setScriptTimeout(duration.toMillis(), TimeUnit.MILLISECONDS);
}
/**
* Sets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @param duration The timeout value.
* @return A self reference.
* @see JavascriptExecutor#executeAsyncScript(String, Object...)
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Timeouts scriptTimeout(Duration duration) {
return setScriptTimeout(duration);
}
/**
* Gets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @return The amount of time to wait for an asynchronous script to finish execution.
* @see <a href="https://www.w3.org/TR/webdriver/#get-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Duration getScriptTimeout() {
throw new UnsupportedCommandException();
}
/**
* @param time The timeout value.
* @param unit The unit of time.
* @return A Timeouts interface.
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
* @deprecated Use {@link #pageLoadTimeout(Duration)}
*
* Sets the amount of time to wait for a page load to complete before throwing an error.
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with
* invalid argument will be returned.
*/
@Deprecated
Timeouts pageLoadTimeout(long time, TimeUnit unit);
/**
* Sets the amount of time to wait for a page load to complete before throwing an error.
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with
* invalid argument will be returned.
*
* @param duration The timeout value.
* @return A Timeouts interface.
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Timeouts pageLoadTimeout(Duration duration) {
return pageLoadTimeout(duration.toMillis(), TimeUnit.MILLISECONDS);
}
/**
* Gets the amount of time to wait for a page load to complete before throwing an error.
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with
* invalid argument will be returned.
*
* @return The amount of time to wait for a page load to complete.
* @see <a href="https://www.w3.org/TR/webdriver/#get-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Duration getPageLoadTimeout() {
throw new UnsupportedCommandException();
}
}
/**
* Used to locate a given frame or window.
*/
interface TargetLocator {
/**
* Select a frame by its (zero-based) index. Selecting a frame by index is equivalent to the
* JS expression window.frames[index] where "window" is the DOM window represented by the
* current context. Once the frame has been selected, all subsequent calls on the WebDriver
* interface are made to that frame.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-frame">W3C WebDriver specification</a>
* for more details.
*
* @param index (zero-based) index
* @return This driver focused on the given frame
* @throws NoSuchFrameException If the frame cannot be found
*/
WebDriver frame(int index);
/**
* Select a frame by its name or ID. Frames located by matching name attributes are always given
* precedence over those matched by ID.
*
* @param nameOrId the name of the frame window, the id of the <frame> or <iframe>
* element, or the (zero-based) index
* @return This driver focused on the given frame
* @throws NoSuchFrameException If the frame cannot be found
*/
WebDriver frame(String nameOrId);
/**
* Select a frame using its previously located {@link WebElement}.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-frame">W3C WebDriver specification</a>
* for more details.
*
* @param frameElement The frame element to switch to.
* @return This driver focused on the given frame.
* @throws NoSuchFrameException If the given element is neither an IFRAME nor a FRAME element.
* @throws StaleElementReferenceException If the WebElement has gone stale.
* @see WebDriver#findElement(By)
*/
WebDriver frame(WebElement frameElement);
/**
* Change focus to the parent context. If the current context is the top level browsing context,
* the context remains unchanged.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-parent-frame">W3C WebDriver specification</a>
* for more details.
*
* @return This driver focused on the parent frame
*/
WebDriver parentFrame();
/**
* Switch the focus of future commands for this driver to the window with the given name/handle.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-window">W3C WebDriver specification</a>
* for more details.
*
* @param nameOrHandle The name of the window or the handle as returned by
* {@link WebDriver#getWindowHandle()}
* @return This driver focused on the given window
* @throws NoSuchWindowException If the window cannot be found
*/
WebDriver window(String nameOrHandle);
/**
* Creates a new browser window and switches the focus for future commands of this driver
* to the new window.
* <p>
* See <a href="https://w3c.github.io/webdriver/#new-window">W3C WebDriver specification</a>
* for more details.
*
* @param typeHint The type of new browser window to be created. The created window is not
* guaranteed to be of the requested type; if the driver does not support
* the requested type, a new browser window will be created of whatever type
* the driver does support.
* @return This driver focused on the given window
*/
WebDriver newWindow(WindowType typeHint);
/**
* Selects either the first frame on the page, or the main document when a page contains
* iframes.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-frame">W3C WebDriver specification</a>
* for more details.
*
* @return This driver focused on the top window/first frame.
*/
WebDriver defaultContent();
/**
* Switches to the element that currently has focus within the document currently "switched to",
* or the body element if this cannot be detected. This matches the semantics of calling
* "document.activeElement" in Javascript.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-active-element">W3C WebDriver specification</a>
* for more details.
*
* @return The WebElement with focus, or the body element if no element with focus can be
* detected.
*/
WebElement activeElement();
/**
* Switches to the currently active modal dialog for this particular driver instance.
*
* @return A handle to the dialog.
* @throws NoAlertPresentException If the dialog cannot be found
*/
Alert alert();
}
interface Navigation {
/**
* Move back a single "item" in the browser's history.
* <p>
* See <a href="https://w3c.github.io/webdriver/#back">W3C WebDriver specification</a>
* for more details.
*/
void back();
/**
* Move a single "item" forward in the browser's history. Does nothing if we are on the latest
* page viewed.
* <p>
* See <a href="https://w3c.github.io/webdriver/#forward">W3C WebDriver specification</a>
* for more details.
*/
void forward();
/**
* Load a new web page in the current browser window. This is done using an HTTP POST operation,
* and the method will block until the load is complete. This will follow redirects issued
* either by the server or as a meta-redirect from within the returned HTML. Should a
* meta-redirect "rest" for any duration of time, it is best to wait until this timeout is over,
* since should the underlying page change whilst your test is executing the results of future
* calls against this interface will be against the freshly loaded page.
* <p>
* See <a href="https://w3c.github.io/webdriver/#navigate-to">W3C WebDriver specification</a>
* for more details.
*
* @param url The URL to load. Must be a fully qualified URL
*/
void to(String url);
/**
* Overloaded version of {@link #to(String)} that makes it easy to pass in a URL.
*
* @param url URL
*/
void to(URL url);
/**
* Refresh the current page
* <p>
* See <a href="https://w3c.github.io/webdriver/#refresh">W3C WebDriver specification</a>
* for more details.
*/
void refresh();
}
/**
* An interface for managing input methods.
*/
interface ImeHandler {
/**
* All available engines on the machine. To use an engine, it has to be activated.
*
* @return list of available IME engines.
* @throws ImeNotAvailableException if the host does not support IME.
*/
List<String> getAvailableEngines();
/**
* Get the name of the active IME engine. The name string is platform-specific.
*
* @return name of the active IME engine.
* @throws ImeNotAvailableException if the host does not support IME.
*/
String getActiveEngine();
/**
* Indicates whether IME input active at the moment (not if it's available).
*
* @return true if IME input is available and currently active, false otherwise.
* @throws ImeNotAvailableException if the host does not support IME.
*/
boolean isActivated();
/**
* De-activate IME input (turns off the currently activated engine). Note that getActiveEngine
* may still return the name of the engine but isActivated will return false.
*
* @throws ImeNotAvailableException if the host does not support IME.
*/
void deactivate();
/**
* Make an engines that is available (appears on the list returned by getAvailableEngines)
* active. After this call, the only loaded engine on the IME daemon will be this one and the
* input sent using sendKeys will be converted by the engine. Note that this is a
* platform-independent method of activating IME (the platform-specific way being using keyboard
* shortcuts).
*
*
* @param engine name of engine to activate.
* @throws ImeNotAvailableException if the host does not support IME.
* @throws ImeActivationFailedException if the engine is not available or if activation failed
* for other reasons.
*/
void activateEngine(String engine);
}
@Beta
interface Window {
/**
* Get the size of the current window. This will return the outer window dimension, not just
* the view port.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @return The current window size.
*/
Dimension getSize();
/**
* Set the size of the current window. This will change the outer window dimension,
* not just the view port, synonymous to window.resizeTo() in JS.
* <p>
* See <a href="https://w3c.github.io/webdriver/#set-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @param targetSize The target size.
*/
void setSize(Dimension targetSize);
/**
* Get the position of the current window, relative to the upper left corner of the screen.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @return The current window position.
*/
Point getPosition();
/**
* Set the position of the current window. This is relative to the upper left corner of the
* screen, synonymous to window.moveTo() in JS.
* <p>
* See <a href="https://w3c.github.io/webdriver/#set-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @param targetPosition The target position of the window.
*/
void setPosition(Point targetPosition);
/**
* Maximizes the current window if it is not already maximized
* <p>
* See <a href="https://w3c.github.io/webdriver/#maximize-window">W3C WebDriver specification</a>
* for more details.
*/
void maximize();
/**
* Minimizes the current window if it is not already minimized
* <p>
* See <a href="https://w3c.github.io/webdriver/#minimize-window">W3C WebDriver specification</a>
* for more details.
*/
void minimize();
/**
* Fullscreen the current window if it is not already fullscreen
* <p>
* See <a href="https://w3c.github.io/webdriver/#fullscreen-window">W3C WebDriver specification</a>
* for more details.
*/
void fullscreen();
}
}
| 1 | 19,274 | This change should also probably go into the corresponding method of the abstract By class? | SeleniumHQ-selenium | java |
@@ -42,6 +42,7 @@ import (
"github.com/iotexproject/iotex-core/test/mock/mock_dispatcher"
ta "github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
+ "github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
const ( | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"context"
"encoding/hex"
"math/big"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/test/mock/mock_dispatcher"
ta "github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
)
const (
testTriePath = "trie.test"
testDBPath = "db.test"
)
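// pre-signed test actions shared by the API handler tests below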
var (
testTransfer, _ = testutil.SignedTransfer(ta.Addrinfo["alfa"].String(),
ta.Keyinfo["alfa"].PriKey, 3, big.NewInt(10), []byte{}, testutil.TestGasLimit,
big.NewInt(testutil.TestGasPrice))
testTransferPb = testTransfer.Proto()
testExecution, _ = testutil.SignedExecution(ta.Addrinfo["bravo"].String(),
ta.Keyinfo["bravo"].PriKey, 1, big.NewInt(0), testutil.TestGasLimit,
big.NewInt(testutil.TestGasPrice), []byte{})
testExecutionPb = testExecution.Proto()
testTransfer1, _ = testutil.SignedTransfer(ta.Addrinfo["charlie"].String(), ta.Keyinfo["producer"].PriKey, 1,
big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
transferHash1 = testTransfer1.Hash()
testVote1, _ = testutil.SignedVote(ta.Addrinfo["charlie"].String(), ta.Keyinfo["charlie"].PriKey, 5,
testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
voteHash1 = testVote1.Hash()
testExecution1, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["producer"].PriKey, 5,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
executionHash1 = testExecution1.Hash()
testExecution2, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["charlie"].PriKey, 6,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
executionHash2 = testExecution2.Hash()
testExecution3, _ = testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["alfa"].PriKey, 2,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
executionHash3 = testExecution3.Hash()
)
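// table-driven test cases for the individual API endpoints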
var (
getAccountTests = []struct {
in string
address string
balance string
nonce uint64
pendingNonce uint64
}{
		{
			ta.Addrinfo["charlie"].String(),
"io1hw79kmqxlp33h7t83wrf9gkduy58th4vmkkue4",
"3",
8,
9,
},
{
ta.Addrinfo["producer"].String(),
"io14485vn8markfupgy86at5a0re78jll0pmq8fjv",
"9999999999999999999999999991",
1,
6,
},
}
getActionsTests = []struct {
start uint64
count uint64
numActions int
}{
{
0,
11,
11,
},
{
11,
21,
4,
},
}
getActionTests = []struct {
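		// checkPending looks up actions that may still be pending in the actpool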
checkPending bool
in string
nonce uint64
senderPubKey string
}{
{
false,
hex.EncodeToString(transferHash1[:]),
1,
keypair.EncodePublicKey(testTransfer1.SrcPubkey()),
},
{
false,
hex.EncodeToString(voteHash1[:]),
5,
keypair.EncodePublicKey(testVote1.SrcPubkey()),
},
{
true,
hex.EncodeToString(executionHash1[:]),
5,
keypair.EncodePublicKey(testExecution1.SrcPubkey()),
},
}
getActionsByAddressTests = []struct {
address string
start uint64
count uint64
numActions int
}{
{
ta.Addrinfo["producer"].String(),
0,
3,
2,
},
{
ta.Addrinfo["charlie"].String(),
1,
8,
8,
},
}
getUnconfirmedActionsByAddressTests = []struct {
address string
start uint64
count uint64
numActions int
}{
{
ta.Addrinfo["producer"].String(),
0,
4,
4,
},
}
getActionsByBlockTests = []struct {
blkHeight uint64
start uint64
count uint64
numActions int
}{
{
2,
0,
7,
7,
},
{
4,
0,
5,
5,
},
}
getBlockMetasTests = []struct {
start uint64
count uint64
numBlks int
}{
{
0,
4,
4,
},
{
1,
5,
4,
},
}
getBlockMetaTests = []struct {
blkHeight uint64
numActions int64
transferAmount string
}{
{
2,
7,
"4",
},
{
4,
5,
"0",
},
}
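	// expected chain-wide metadata once the test chain has been built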
getChainMetaTests = []struct {
height uint64
numActions int64
tps int64
epoch iotextypes.EpochData
}{
{
4,
15,
15,
iotextypes.EpochData{
Num: 1,
Height: 1,
},
},
}
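	// protobuf actions submitted through the SendAction endpoint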
sendActionTests = []struct {
actionPb *iotextypes.Action
}{
{
testTransferPb,
},
{
testExecutionPb,
},
}
getReceiptByActionTests = []struct {
in string
status uint64
}{
{
hex.EncodeToString(executionHash2[:]),
1,
},
{
hex.EncodeToString(executionHash3[:]),
1,
},
}
readContractTests = []struct {
execHash string
retValue string
}{
{
hex.EncodeToString(executionHash2[:]),
"",
},
}
suggestGasPriceTests = []struct {
defaultGasPrice uint64
suggestedGasPrice uint64
}{
{
1,
1,
},
}
estimateGasForActionTests = []struct {
actionHash string
estimatedGas uint64
}{
{
hex.EncodeToString(transferHash1[:]),
10000,
},
{
hex.EncodeToString(voteHash1[:]),
10000,
},
}
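	// ReadState queries against the rewarding protocol's unclaimed balance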
readUnclaimedBalanceTests = []struct {
// Arguments
protocolID string
methodName string
addr string
// Expected values
returnErr bool
balance *big.Int
}{
{
protocolID: rewarding.ProtocolID,
methodName: "UnclaimedBalance",
addr: identityset.Address(0).String(),
returnErr: false,
			balance:    unit.ConvertIotxToRau(144), // 4 blocks * 36 IOTX reward by default = 144 IOTX
},
{
protocolID: rewarding.ProtocolID,
methodName: "UnclaimedBalance",
addr: identityset.Address(1).String(),
returnErr: false,
			balance:    unit.ConvertIotxToRau(0), // this address produced none of the test blocks, so it has no unclaimed reward
},
{
protocolID: "Wrong ID",
methodName: "UnclaimedBalance",
addr: ta.Addrinfo["producer"].String(),
returnErr: true,
},
{
protocolID: rewarding.ProtocolID,
methodName: "Wrong Method",
addr: ta.Addrinfo["producer"].String(),
returnErr: true,
},
}
)
func TestServer_GetAccount(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, true)
require.NoError(err)
// success
for _, test := range getAccountTests {
request := &iotexapi.GetAccountRequest{Address: test.in}
res, err := svr.GetAccount(context.Background(), request)
require.NoError(err)
accountMeta := res.AccountMeta
require.Equal(test.address, accountMeta.Address)
require.Equal(test.balance, accountMeta.Balance)
require.Equal(test.nonce, accountMeta.Nonce)
require.Equal(test.pendingNonce, accountMeta.PendingNonce)
}
// failure
_, err = svr.GetAccount(context.Background(), &iotexapi.GetAccountRequest{})
require.Error(err)
}
func TestServer_GetActions(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getActionsTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByIndex{
ByIndex: &iotexapi.GetActionsByIndexRequest{
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetAction(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, true)
require.NoError(err)
for _, test := range getActionTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByHash{
ByHash: &iotexapi.GetActionByHashRequest{
ActionHash: test.in,
CheckPending: test.checkPending,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(1, len(res.Actions))
actPb := res.Actions[0]
require.Equal(test.nonce, actPb.GetCore().GetNonce())
require.Equal(test.senderPubKey, hex.EncodeToString(actPb.SenderPubKey))
}
}
func TestServer_GetActionsByAddress(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getActionsByAddressTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByAddr{
ByAddr: &iotexapi.GetActionsByAddressRequest{
Address: test.address,
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetUnconfirmedActionsByAddress(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, true)
require.NoError(err)
for _, test := range getUnconfirmedActionsByAddressTests {
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_UnconfirmedByAddr{
UnconfirmedByAddr: &iotexapi.GetUnconfirmedActionsByAddressRequest{
Address: test.address,
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetActionsByBlock(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getActionsByBlockTests {
blk, err := svr.bc.GetBlockByHeight(test.blkHeight)
require.NoError(err)
blkHash := blk.HashBlock()
request := &iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByBlk{
ByBlk: &iotexapi.GetActionsByBlockRequest{
BlkHash: hex.EncodeToString(blkHash[:]),
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetActions(context.Background(), request)
require.NoError(err)
require.Equal(test.numActions, len(res.Actions))
}
}
func TestServer_GetBlockMetas(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getBlockMetasTests {
request := &iotexapi.GetBlockMetasRequest{
Lookup: &iotexapi.GetBlockMetasRequest_ByIndex{
ByIndex: &iotexapi.GetBlockMetasByIndexRequest{
Start: test.start,
Count: test.count,
},
},
}
res, err := svr.GetBlockMetas(context.Background(), request)
require.NoError(err)
require.Equal(test.numBlks, len(res.BlkMetas))
var prevBlkPb *iotextypes.BlockMeta
for _, blkPb := range res.BlkMetas {
			if prevBlkPb != nil {
				require.True(blkPb.Timestamp < prevBlkPb.Timestamp)
				require.True(blkPb.Height < prevBlkPb.Height)
			}
			// track the previous block outside the nil check so the ordering assertions above actually run
			prevBlkPb = blkPb
		}
}
}
func TestServer_GetBlockMeta(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getBlockMetaTests {
blk, err := svr.bc.GetBlockByHeight(test.blkHeight)
require.NoError(err)
blkHash := blk.HashBlock()
request := &iotexapi.GetBlockMetasRequest{
Lookup: &iotexapi.GetBlockMetasRequest_ByHash{
ByHash: &iotexapi.GetBlockMetaByHashRequest{
BlkHash: hex.EncodeToString(blkHash[:]),
},
},
}
res, err := svr.GetBlockMetas(context.Background(), request)
require.NoError(err)
require.Equal(1, len(res.BlkMetas))
blkPb := res.BlkMetas[0]
require.Equal(test.numActions, blkPb.NumActions)
require.Equal(test.transferAmount, blkPb.TransferAmount)
}
}
func TestServer_GetChainMeta(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getChainMetaTests {
res, err := svr.GetChainMeta(context.Background(), &iotexapi.GetChainMetaRequest{})
require.NoError(err)
chainMetaPb := res.ChainMeta
require.Equal(test.height, chainMetaPb.Height)
require.Equal(test.numActions, chainMetaPb.NumActions)
require.Equal(test.tps, chainMetaPb.Tps)
require.Equal(test.epoch.Num, chainMetaPb.Epoch.Num)
require.Equal(test.epoch.Height, chainMetaPb.Epoch.Height)
}
}
func TestServer_SendAction(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
chain := mock_blockchain.NewMockBlockchain(ctrl)
mDp := mock_dispatcher.NewMockDispatcher(ctrl)
broadcastHandlerCount := 0
svr := Server{bc: chain, dp: mDp, broadcastHandler: func(_ context.Context, _ uint32, _ proto.Message) error {
broadcastHandlerCount++
return nil
}}
chain.EXPECT().ChainID().Return(uint32(1)).Times(4)
mDp.EXPECT().HandleBroadcast(gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
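	// note: the fixed Times() expectations above are sized to cover every iteration of the loop over sendActionTests below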
for i, test := range sendActionTests {
request := &iotexapi.SendActionRequest{Action: test.actionPb}
_, err := svr.SendAction(context.Background(), request)
require.NoError(err)
require.Equal(i+1, broadcastHandlerCount)
}
}
func TestServer_GetReceiptByAction(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range getReceiptByActionTests {
request := &iotexapi.GetReceiptByActionRequest{ActionHash: test.in}
res, err := svr.GetReceiptByAction(context.Background(), request)
require.NoError(err)
receiptPb := res.Receipt
require.Equal(test.status, receiptPb.Status)
}
}
func TestServer_ReadContract(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range readContractTests {
hash, err := toHash256(test.execHash)
require.NoError(err)
exec, err := svr.bc.GetActionByActionHash(hash)
require.NoError(err)
request := &iotexapi.ReadContractRequest{Action: exec.Proto()}
res, err := svr.ReadContract(context.Background(), request)
require.NoError(err)
require.Equal(test.retValue, res.Data)
}
}
func TestServer_SuggestGasPrice(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
for _, test := range suggestGasPriceTests {
cfg.API.GasStation.DefaultGas = test.defaultGasPrice
svr, err := createServer(cfg, false)
require.NoError(err)
res, err := svr.SuggestGasPrice(context.Background(), &iotexapi.SuggestGasPriceRequest{})
require.NoError(err)
require.Equal(test.suggestedGasPrice, res.GasPrice)
}
}
func TestServer_EstimateGasForAction(t *testing.T) {
require := require.New(t)
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(err)
for _, test := range estimateGasForActionTests {
hash, err := toHash256(test.actionHash)
require.NoError(err)
act, err := svr.bc.GetActionByActionHash(hash)
require.NoError(err)
request := &iotexapi.EstimateGasForActionRequest{Action: act.Proto()}
res, err := svr.EstimateGasForAction(context.Background(), request)
require.NoError(err)
require.Equal(test.estimatedGas, res.Gas)
}
}
func TestServer_ReadUnclaimedBalance(t *testing.T) {
cfg := newConfig()
testutil.CleanupPath(t, testTriePath)
defer testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
defer testutil.CleanupPath(t, testDBPath)
svr, err := createServer(cfg, false)
require.NoError(t, err)
for _, test := range readUnclaimedBalanceTests {
out, err := svr.ReadState(context.Background(), &iotexapi.ReadStateRequest{
ProtocolID: []byte(test.protocolID),
MethodName: []byte(test.methodName),
Arguments: [][]byte{[]byte(test.addr)},
})
if test.returnErr {
require.Error(t, err)
continue
}
require.NoError(t, err)
val, ok := big.NewInt(0).SetString(string(out.Data), 10)
require.True(t, ok)
assert.Equal(t, test.balance, val)
}
}
func addProducerToFactory(sf factory.Factory) error {
ws, err := sf.NewWorkingSet()
if err != nil {
return err
}
if _, err = accountutil.LoadOrCreateAccount(ws, ta.Addrinfo["producer"].String(),
blockchain.Gen.TotalSupply); err != nil {
return err
}
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: ta.Addrinfo["producer"],
GasLimit: &gasLimit,
})
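	// run an empty action batch at height 0 so the working set (with the newly created producer account) can be committed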
if _, err = ws.RunActions(ctx, 0, nil); err != nil {
return err
}
return sf.Commit(ws)
}
func addTestingBlocks(bc blockchain.Blockchain) error {
addr0 := ta.Addrinfo["producer"].String()
priKey0 := ta.Keyinfo["producer"].PriKey
addr1 := ta.Addrinfo["alfa"].String()
priKey1 := ta.Keyinfo["alfa"].PriKey
addr2 := ta.Addrinfo["bravo"].String()
addr3 := ta.Addrinfo["charlie"].String()
priKey3 := ta.Keyinfo["charlie"].PriKey
addr4 := ta.Addrinfo["delta"].String()
// Add block 1
// Producer transfer--> C
tsf, err := testutil.SignedTransfer(addr3, priKey0, 1, big.NewInt(10), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
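	// MintNewBlock expects the candidate actions grouped by sender address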
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[addr0] = []action.SealedEnvelope{tsf}
blk, err := bc.MintNewBlock(
actionMap,
time.Now().Unix(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 2
// Charlie transfer--> A, B, D, P
// Charlie vote--> C
// Charlie exec--> D
recipients := []string{addr1, addr2, addr4, addr0}
selps := make([]action.SealedEnvelope, 0)
for i, recipient := range recipients {
selp, err := testutil.SignedTransfer(recipient, priKey3, uint64(i+1), big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
selps = append(selps, selp)
}
vote1, err := testutil.SignedVote(addr3, priKey3, 5, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
execution1, err := testutil.SignedExecution(addr4, priKey3, 6,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
if err != nil {
return err
}
selps = append(selps, vote1)
selps = append(selps, execution1)
actionMap = make(map[string][]action.SealedEnvelope)
actionMap[addr3] = selps
if blk, err = bc.MintNewBlock(
actionMap,
time.Now().Unix(),
); err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 3
// Empty actions
if blk, err = bc.MintNewBlock(
nil,
time.Now().Unix(),
); err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 4
// Charlie vote--> C
// Charlie exec--> D
// Alfa vote--> A
// Alfa exec--> D
vote1, err = testutil.SignedVote(addr3, priKey3, 7, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
vote2, err := testutil.SignedVote(addr1, priKey1, 1, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
execution1, err = testutil.SignedExecution(addr4, priKey3, 8,
big.NewInt(2), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
if err != nil {
return err
}
execution2, err := testutil.SignedExecution(addr4, priKey1, 2,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice), []byte{1})
if err != nil {
return err
}
actionMap = make(map[string][]action.SealedEnvelope)
actionMap[addr3] = []action.SealedEnvelope{vote1, execution1}
actionMap[addr1] = []action.SealedEnvelope{vote2, execution2}
if blk, err = bc.MintNewBlock(
actionMap,
time.Now().Unix(),
); err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
return bc.CommitBlock(blk)
}
func addActsToActPool(ap actpool.ActPool) error {
// Producer transfer--> A
tsf1, err := testutil.SignedTransfer(ta.Addrinfo["alfa"].String(), ta.Keyinfo["producer"].PriKey, 2, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
// Producer vote--> P
vote1, err := testutil.SignedVote(ta.Addrinfo["producer"].String(), ta.Keyinfo["producer"].PriKey, 3, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
// Producer transfer--> B
tsf2, err := testutil.SignedTransfer(ta.Addrinfo["bravo"].String(), ta.Keyinfo["producer"].PriKey, 4, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPrice))
if err != nil {
return err
}
// Producer exec--> D
execution1, err := testutil.SignedExecution(ta.Addrinfo["delta"].String(), ta.Keyinfo["producer"].PriKey, 5,
big.NewInt(1), testutil.TestGasLimit, big.NewInt(10), []byte{1})
if err != nil {
return err
}
if err := ap.Add(tsf1); err != nil {
return err
}
if err := ap.Add(vote1); err != nil {
return err
}
if err := ap.Add(tsf2); err != nil {
return err
}
return ap.Add(execution1)
}
func setupChain(cfg config.Config) (blockchain.Blockchain, *protocol.Registry, error) {
cfg.Chain.ProducerPrivKey = hex.EncodeToString(keypair.PrivateKeyToBytes(identityset.PrivateKey(0)))
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
if err != nil {
return nil, nil, err
}
// create chain
registry := protocol.Registry{}
bc := blockchain.NewBlockchain(
cfg,
blockchain.PrecreatedStateFactoryOption(sf),
blockchain.InMemDaoOption(),
blockchain.RegistryOption(®istry),
)
if bc == nil {
return nil, nil, errors.New("failed to create blockchain")
}
acc := account.NewProtocol()
v := vote.NewProtocol(bc)
evm := execution.NewProtocol(bc)
rolldposProtocol := rolldpos.NewProtocol(
genesis.Default.NumCandidateDelegates,
genesis.Default.NumDelegates,
genesis.Default.NumSubEpochs,
)
r := rewarding.NewProtocol(bc, rolldposProtocol)
if err := registry.Register(rolldpos.ProtocolID, rolldposProtocol); err != nil {
return nil, nil, err
}
if err := registry.Register(account.ProtocolID, acc); err != nil {
return nil, nil, err
}
if err := registry.Register(vote.ProtocolID, v); err != nil {
return nil, nil, err
}
if err := registry.Register(execution.ProtocolID, evm); err != nil {
return nil, nil, err
}
if err := registry.Register(rewarding.ProtocolID, r); err != nil {
return nil, nil, err
}
sf.AddActionHandlers(acc, v, evm, r)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
bc.Validator().AddActionValidators(acc, v, evm, r)
return bc, ®istry, nil
}
func setupActPool(bc blockchain.Blockchain, cfg config.ActPool) (actpool.ActPool, error) {
ap, err := actpool.NewActPool(bc, cfg)
if err != nil {
return nil, err
}
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
ap.AddActionValidators(vote.NewProtocol(bc), execution.NewProtocol(bc))
return ap, nil
}
func newConfig() config.Config {
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.EnableIndex = true
return cfg
}
func createServer(cfg config.Config, needActPool bool) (*Server, error) {
bc, registry, err := setupChain(cfg)
if err != nil {
return nil, err
}
// Start blockchain
ctx := context.Background()
if err := bc.Start(ctx); err != nil {
return nil, err
}
// Create state for producer
if err := addProducerToFactory(bc.GetFactory()); err != nil {
return nil, err
}
// Add testing blocks
if err := addTestingBlocks(bc); err != nil {
return nil, err
}
var ap actpool.ActPool
if needActPool {
ap, err = setupActPool(bc, cfg.ActPool)
if err != nil {
return nil, err
}
// Add actions to actpool
if err := addActsToActPool(ap); err != nil {
return nil, err
}
}
apiCfg := config.API{TpsWindow: 10, MaxTransferPayloadBytes: 1024, GasStation: cfg.API.GasStation}
svr := &Server{
bc: bc,
ap: ap,
cfg: apiCfg,
gs: gasstation.NewGasStation(bc, apiCfg),
registry: registry,
}
return svr, nil
}
| 1 | 15,772 | File is not `gofmt`-ed with `-s` (from `gofmt`) | iotexproject-iotex-core | go |
@@ -82,9 +82,9 @@ func (fsrv *FileServer) serveBrowse(dirPath string, w http.ResponseWriter, r *ht
w.Header().Set("Content-Type", "text/html; charset=utf-8")
}
- buf.WriteTo(w)
+ _, err = buf.WriteTo(w)
- return nil
+ return err
}
func (fsrv *FileServer) loadDirectoryContents(dir *os.File, urlPath string, repl *caddy.Replacer) (browseListing, error) { | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileserver
import (
"bytes"
"encoding/json"
"html/template"
"net/http"
"os"
"path"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// Browse configures directory browsing.
type Browse struct {
// Use this template file instead of the default browse template.
TemplateFile string `json:"template_file,omitempty"`
template *template.Template
}
func (fsrv *FileServer) serveBrowse(dirPath string, w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
// navigation on the client-side gets messed up if the
// URL doesn't end in a trailing slash because hrefs like
// "/b/c" on a path like "/a" end up going to "/b/c" instead
// of "/a/b/c" - so we have to redirect in this case
if !strings.HasSuffix(r.URL.Path, "/") {
r.URL.Path += "/"
http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
return nil
}
dir, err := fsrv.openFile(dirPath, w)
if err != nil {
return err
}
defer dir.Close()
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// calling path.Clean here prevents weird breadcrumbs when URL paths are sketchy like /%2e%2e%2f
listing, err := fsrv.loadDirectoryContents(dir, path.Clean(r.URL.Path), repl)
switch {
case os.IsPermission(err):
return caddyhttp.Error(http.StatusForbidden, err)
case os.IsNotExist(err):
return fsrv.notFound(w, r, next)
case err != nil:
return caddyhttp.Error(http.StatusInternalServerError, err)
}
fsrv.browseApplyQueryParams(w, r, &listing)
// write response as either JSON or HTML
var buf *bytes.Buffer
acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ","))
if strings.Contains(acceptHeader, "application/json") {
if buf, err = fsrv.browseWriteJSON(listing); err != nil {
return caddyhttp.Error(http.StatusInternalServerError, err)
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
} else {
if buf, err = fsrv.browseWriteHTML(listing); err != nil {
return caddyhttp.Error(http.StatusInternalServerError, err)
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
}
buf.WriteTo(w)
return nil
}
func (fsrv *FileServer) loadDirectoryContents(dir *os.File, urlPath string, repl *caddy.Replacer) (browseListing, error) {
files, err := dir.Readdir(-1)
if err != nil {
return browseListing{}, err
}
// determine if user can browse up another folder
curPathDir := path.Dir(strings.TrimSuffix(urlPath, "/"))
canGoUp := strings.HasPrefix(curPathDir, fsrv.Root)
return fsrv.directoryListing(files, canGoUp, urlPath, repl), nil
}
// browseApplyQueryParams applies query parameters to the listing.
// It mutates the listing and may set cookies.
func (fsrv *FileServer) browseApplyQueryParams(w http.ResponseWriter, r *http.Request, listing *browseListing) {
sortParam := r.URL.Query().Get("sort")
orderParam := r.URL.Query().Get("order")
limitParam := r.URL.Query().Get("limit")
offsetParam := r.URL.Query().Get("offset")
// first figure out what to sort by
switch sortParam {
case "":
sortParam = sortByNameDirFirst
if sortCookie, sortErr := r.Cookie("sort"); sortErr == nil {
sortParam = sortCookie.Value
}
case sortByName, sortByNameDirFirst, sortBySize, sortByTime:
http.SetCookie(w, &http.Cookie{Name: "sort", Value: sortParam, Secure: r.TLS != nil})
}
// then figure out the order
switch orderParam {
case "":
orderParam = "asc"
if orderCookie, orderErr := r.Cookie("order"); orderErr == nil {
orderParam = orderCookie.Value
}
case "asc", "desc":
http.SetCookie(w, &http.Cookie{Name: "order", Value: orderParam, Secure: r.TLS != nil})
}
// finally, apply the sorting and limiting
listing.applySortAndLimit(sortParam, orderParam, limitParam, offsetParam)
}
func (fsrv *FileServer) browseWriteJSON(listing browseListing) (*bytes.Buffer, error) {
buf := bufPool.Get().(*bytes.Buffer)
err := json.NewEncoder(buf).Encode(listing.Items)
bufPool.Put(buf)
return buf, err
}
func (fsrv *FileServer) browseWriteHTML(listing browseListing) (*bytes.Buffer, error) {
buf := bufPool.Get().(*bytes.Buffer)
err := fsrv.Browse.template.Execute(buf, listing)
bufPool.Put(buf)
return buf, err
}
// isSymlink returns true if f is a symbolic link.
func isSymlink(f os.FileInfo) bool {
return f.Mode()&os.ModeSymlink != 0
}
// isSymlinkTargetDir returns true if f's symbolic link target
// is a directory.
func isSymlinkTargetDir(f os.FileInfo, root, urlPath string) bool {
if !isSymlink(f) {
return false
}
target := sanitizedPathJoin(root, path.Join(urlPath, f.Name()))
targetInfo, err := os.Stat(target)
if err != nil {
return false
}
return targetInfo.IsDir()
}
| 1 | 15,744 | This is likely to return an error value if the client fails to read the response we write, which is why I chose to ignore this error. | caddyserver-caddy | go |
@@ -649,6 +649,9 @@ class Channel(AbstractChannel):
def is_static_remotekey_enabled(self) -> bool:
return bool(self.storage.get('static_remotekey_enabled'))
+ def is_upfront_shutdown_script_enabled(self) -> bool:
+ return bool(self.storage.get('upfront_shutdown_script_enabled'))
+
def get_wallet_addresses_channel_might_want_reserved(self) -> Sequence[str]:
ret = []
if self.is_static_remotekey_enabled(): | 1 | # Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from collections import namedtuple, defaultdict
import binascii
import json
from enum import IntEnum
from typing import (Optional, Dict, List, Tuple, NamedTuple, Set, Callable,
Iterable, Sequence, TYPE_CHECKING, Iterator, Union)
import time
import threading
from abc import ABC, abstractmethod
import itertools
from aiorpcx import NetAddress
import attr
from . import ecc
from . import constants, util
from .util import bfh, bh2u, chunks, TxMinedInfo
from .invoices import PR_PAID
from .bitcoin import redeem_script_to_address
from .crypto import sha256, sha256d
from .transaction import Transaction, PartialTransaction, TxInput
from .logging import Logger
from .lnonion import decode_onion_error, OnionFailureCode, OnionRoutingFailureMessage
from . import lnutil
from .lnutil import (Outpoint, LocalConfig, RemoteConfig, Keypair, OnlyPubkeyKeypair, ChannelConstraints,
get_per_commitment_secret_from_seed, secret_to_pubkey, derive_privkey, make_closing_tx,
sign_and_get_sig_string, RevocationStore, derive_blinded_pubkey, Direction, derive_pubkey,
make_htlc_tx_with_open_channel, make_commitment, make_received_htlc, make_offered_htlc,
HTLC_TIMEOUT_WEIGHT, HTLC_SUCCESS_WEIGHT, extract_ctn_from_tx_and_chan, UpdateAddHtlc,
funding_output_script, SENT, RECEIVED, LOCAL, REMOTE, HTLCOwner, make_commitment_outputs,
ScriptHtlc, PaymentFailure, calc_fees_for_commitment_tx, RemoteMisbehaving, make_htlc_output_witness_script,
ShortChannelID, map_htlcs_to_ctx_output_idxs, LNPeerAddr,
LN_MAX_HTLC_VALUE_MSAT, fee_for_htlc_output, offered_htlc_trim_threshold_sat,
received_htlc_trim_threshold_sat, make_commitment_output_to_remote_address)
from .lnsweep import create_sweeptxs_for_our_ctx, create_sweeptxs_for_their_ctx
from .lnsweep import create_sweeptx_for_their_revoked_htlc, SweepInfo
from .lnhtlc import HTLCManager
from .lnmsg import encode_msg, decode_msg
from .address_synchronizer import TX_HEIGHT_LOCAL
from .lnutil import CHANNEL_OPENING_TIMEOUT
from .lnutil import ChannelBackupStorage
from .lnutil import format_short_channel_id
if TYPE_CHECKING:
from .lnworker import LNWallet
from .json_db import StoredDict
from .lnrouter import RouteEdge
# lightning channel states
# Note: these states are persisted by name (for a given channel) in the wallet file,
# so consider doing a wallet db upgrade when changing them.
class ChannelState(IntEnum):
PREOPENING = 0 # Initial negotiation. Channel will not be reestablished
OPENING = 1 # Channel will be reestablished. (per BOLT2)
# - Funding node: has received funding_signed (can broadcast the funding tx)
# - Non-funding node: has sent the funding_signed message.
FUNDED = 2 # Funding tx was mined (requires min_depth and tx verification)
OPEN = 3 # both parties have sent funding_locked
SHUTDOWN = 4 # shutdown has been sent.
CLOSING = 5 # closing negotiation done. we have a fully signed tx.
FORCE_CLOSING = 6 # we force-closed, and closing tx is unconfirmed. Note that if the
# remote force-closes then we remain OPEN until it gets mined -
# the server could be lying to us with a fake tx.
CLOSED = 7 # closing tx has been mined
REDEEMED = 8 # we can stop watching
class PeerState(IntEnum):
DISCONNECTED = 0
REESTABLISHING = 1
GOOD = 2
BAD = 3
cs = ChannelState
state_transitions = [
(cs.PREOPENING, cs.OPENING),
(cs.OPENING, cs.FUNDED),
(cs.FUNDED, cs.OPEN),
(cs.OPENING, cs.SHUTDOWN),
(cs.FUNDED, cs.SHUTDOWN),
(cs.OPEN, cs.SHUTDOWN),
(cs.SHUTDOWN, cs.SHUTDOWN), # if we reestablish
(cs.SHUTDOWN, cs.CLOSING),
(cs.CLOSING, cs.CLOSING),
# we can force close almost any time
(cs.OPENING, cs.FORCE_CLOSING),
(cs.FUNDED, cs.FORCE_CLOSING),
(cs.OPEN, cs.FORCE_CLOSING),
(cs.SHUTDOWN, cs.FORCE_CLOSING),
(cs.CLOSING, cs.FORCE_CLOSING),
# we can get force closed almost any time
(cs.OPENING, cs.CLOSED),
(cs.FUNDED, cs.CLOSED),
(cs.OPEN, cs.CLOSED),
(cs.SHUTDOWN, cs.CLOSED),
(cs.CLOSING, cs.CLOSED),
#
(cs.FORCE_CLOSING, cs.FORCE_CLOSING), # allow multiple attempts
(cs.FORCE_CLOSING, cs.CLOSED),
(cs.FORCE_CLOSING, cs.REDEEMED),
(cs.CLOSED, cs.REDEEMED),
(cs.OPENING, cs.REDEEMED), # channel never funded (dropped from mempool)
(cs.PREOPENING, cs.REDEEMED), # channel never funded
]
del cs # delete as name is ambiguous without context
class RevokeAndAck(NamedTuple):
per_commitment_secret: bytes
next_per_commitment_point: bytes
class RemoteCtnTooFarInFuture(Exception): pass
def htlcsum(htlcs: Iterable[UpdateAddHtlc]):
return sum([x.amount_msat for x in htlcs])
class AbstractChannel(Logger, ABC):
storage: Union['StoredDict', dict]
config: Dict[HTLCOwner, Union[LocalConfig, RemoteConfig]]
_sweep_info: Dict[str, Dict[str, 'SweepInfo']]
lnworker: Optional['LNWallet']
_fallback_sweep_address: str
channel_id: bytes
funding_outpoint: Outpoint
node_id: bytes
_state: ChannelState
def set_short_channel_id(self, short_id: ShortChannelID) -> None:
self.short_channel_id = short_id
self.storage["short_channel_id"] = short_id
def get_id_for_log(self) -> str:
scid = self.short_channel_id
if scid:
return str(scid)
return self.channel_id.hex()
def short_id_for_GUI(self) -> str:
return format_short_channel_id(self.short_channel_id)
def set_state(self, state: ChannelState) -> None:
""" set on-chain state """
old_state = self._state
if (old_state, state) not in state_transitions:
raise Exception(f"Transition not allowed: {old_state.name} -> {state.name}")
self.logger.debug(f'Setting channel state: {old_state.name} -> {state.name}')
self._state = state
self.storage['state'] = self._state.name
if self.lnworker:
self.lnworker.channel_state_changed(self)
def get_state(self) -> ChannelState:
return self._state
def is_funded(self):
return self.get_state() >= ChannelState.FUNDED
def is_open(self):
return self.get_state() == ChannelState.OPEN
def is_closing(self):
return ChannelState.SHUTDOWN <= self.get_state() <= ChannelState.FORCE_CLOSING
def is_closed(self):
# the closing txid has been saved
return self.get_state() >= ChannelState.CLOSING
def is_redeemed(self):
return self.get_state() == ChannelState.REDEEMED
def save_funding_height(self, *, txid: str, height: int, timestamp: Optional[int]) -> None:
self.storage['funding_height'] = txid, height, timestamp
def get_funding_height(self):
return self.storage.get('funding_height')
def delete_funding_height(self):
self.storage.pop('funding_height', None)
def save_closing_height(self, *, txid: str, height: int, timestamp: Optional[int]) -> None:
self.storage['closing_height'] = txid, height, timestamp
def get_closing_height(self):
return self.storage.get('closing_height')
def delete_closing_height(self):
self.storage.pop('closing_height', None)
def create_sweeptxs_for_our_ctx(self, ctx):
return create_sweeptxs_for_our_ctx(chan=self, ctx=ctx, sweep_address=self.sweep_address)
def create_sweeptxs_for_their_ctx(self, ctx):
return create_sweeptxs_for_their_ctx(chan=self, ctx=ctx, sweep_address=self.sweep_address)
def is_backup(self):
return False
def sweep_ctx(self, ctx: Transaction) -> Dict[str, SweepInfo]:
txid = ctx.txid()
if self._sweep_info.get(txid) is None:
our_sweep_info = self.create_sweeptxs_for_our_ctx(ctx)
their_sweep_info = self.create_sweeptxs_for_their_ctx(ctx)
            if our_sweep_info is not None:
                self._sweep_info[txid] = our_sweep_info
                self.logger.info('we force closed')
            elif their_sweep_info is not None:
                self._sweep_info[txid] = their_sweep_info
                self.logger.info('they force closed')
            else:
                self._sweep_info[txid] = {}
                self.logger.info('not sure who closed')
return self._sweep_info[txid]
def update_onchain_state(self, *, funding_txid: str, funding_height: TxMinedInfo,
closing_txid: str, closing_height: TxMinedInfo, keep_watching: bool) -> None:
# note: state transitions are irreversible, but
# save_funding_height, save_closing_height are reversible
if funding_height.height == TX_HEIGHT_LOCAL:
self.update_unfunded_state()
elif closing_height.height == TX_HEIGHT_LOCAL:
self.update_funded_state(funding_txid=funding_txid, funding_height=funding_height)
else:
self.update_closed_state(funding_txid=funding_txid,
funding_height=funding_height,
closing_txid=closing_txid,
closing_height=closing_height,
keep_watching=keep_watching)
def update_unfunded_state(self):
self.delete_funding_height()
self.delete_closing_height()
if self.get_state() in [ChannelState.PREOPENING, ChannelState.OPENING, ChannelState.FORCE_CLOSING] and self.lnworker:
if self.is_initiator():
                # set channel state to REDEEMED so that it can be removed manually.
                # To protect ourselves against a server lying by omission,
                # we check that the funding_inputs have been double spent and deeply mined.
inputs = self.storage.get('funding_inputs', [])
if not inputs:
                    self.logger.info('channel funding inputs are not provided')
self.set_state(ChannelState.REDEEMED)
for i in inputs:
spender_txid = self.lnworker.wallet.db.get_spent_outpoint(*i)
if spender_txid is None:
continue
if spender_txid != self.funding_outpoint.txid:
tx_mined_height = self.lnworker.wallet.get_tx_height(spender_txid)
if tx_mined_height.conf > lnutil.REDEEM_AFTER_DOUBLE_SPENT_DELAY:
self.logger.info(f'channel is double spent {inputs}')
self.set_state(ChannelState.REDEEMED)
break
else:
now = int(time.time())
if now - self.storage.get('init_timestamp', 0) > CHANNEL_OPENING_TIMEOUT:
self.lnworker.remove_channel(self.channel_id)
def update_funded_state(self, *, funding_txid: str, funding_height: TxMinedInfo) -> None:
self.save_funding_height(txid=funding_txid, height=funding_height.height, timestamp=funding_height.timestamp)
self.delete_closing_height()
if funding_height.conf>0:
self.set_short_channel_id(ShortChannelID.from_components(
funding_height.height, funding_height.txpos, self.funding_outpoint.output_index))
if self.get_state() == ChannelState.OPENING:
if self.is_funding_tx_mined(funding_height):
self.set_state(ChannelState.FUNDED)
def update_closed_state(self, *, funding_txid: str, funding_height: TxMinedInfo,
closing_txid: str, closing_height: TxMinedInfo, keep_watching: bool) -> None:
self.save_funding_height(txid=funding_txid, height=funding_height.height, timestamp=funding_height.timestamp)
self.save_closing_height(txid=closing_txid, height=closing_height.height, timestamp=closing_height.timestamp)
if funding_height.conf>0:
self.set_short_channel_id(ShortChannelID.from_components(
funding_height.height, funding_height.txpos, self.funding_outpoint.output_index))
if self.get_state() < ChannelState.CLOSED:
conf = closing_height.conf
if conf > 0:
self.set_state(ChannelState.CLOSED)
else:
# we must not trust the server with unconfirmed transactions
# if the remote force closed, we remain OPEN until the closing tx is confirmed
pass
if self.get_state() == ChannelState.CLOSED and not keep_watching:
self.set_state(ChannelState.REDEEMED)
@property
def sweep_address(self) -> str:
# TODO: in case of unilateral close with pending HTLCs, this address will be reused
addr = None
if self.is_static_remotekey_enabled():
our_payment_pubkey = self.config[LOCAL].payment_basepoint.pubkey
addr = make_commitment_output_to_remote_address(our_payment_pubkey)
if addr is None:
addr = self._fallback_sweep_address
assert addr
if self.lnworker:
assert self.lnworker.wallet.is_mine(addr)
return addr
@abstractmethod
def is_initiator(self) -> bool:
pass
@abstractmethod
def is_funding_tx_mined(self, funding_height: TxMinedInfo) -> bool:
pass
@abstractmethod
def get_funding_address(self) -> str:
pass
@abstractmethod
def get_state_for_GUI(self) -> str:
pass
@abstractmethod
def get_oldest_unrevoked_ctn(self, subject: HTLCOwner) -> int:
pass
@abstractmethod
def included_htlcs(self, subject: HTLCOwner, direction: Direction, ctn: int = None) -> Sequence[UpdateAddHtlc]:
pass
@abstractmethod
def funding_txn_minimum_depth(self) -> int:
pass
@abstractmethod
def balance(self, whose: HTLCOwner, *, ctx_owner=HTLCOwner.LOCAL, ctn: int = None) -> int:
"""This balance (in msat) only considers HTLCs that have been settled by ctn.
It disregards reserve, fees, and pending HTLCs (in both directions).
"""
pass
@abstractmethod
def balance_minus_outgoing_htlcs(self, whose: HTLCOwner, *,
ctx_owner: HTLCOwner = HTLCOwner.LOCAL,
ctn: int = None) -> int:
"""This balance (in msat), which includes the value of
pending outgoing HTLCs, is used in the UI.
"""
pass
@abstractmethod
def is_frozen_for_sending(self) -> bool:
"""Whether the user has marked this channel as frozen for sending.
Frozen channels are not supposed to be used for new outgoing payments.
(note that payment-forwarding ignores this option)
"""
pass
@abstractmethod
def is_frozen_for_receiving(self) -> bool:
"""Whether the user has marked this channel as frozen for receiving.
Frozen channels are not supposed to be used for new incoming payments.
(note that payment-forwarding ignores this option)
"""
pass
@abstractmethod
def is_static_remotekey_enabled(self) -> bool:
pass
class ChannelBackup(AbstractChannel):
"""
current capabilities:
- detect force close
- request force close
- sweep my ctx to_local
future:
- will need to sweep their ctx to_remote
"""
def __init__(self, cb: ChannelBackupStorage, *, sweep_address=None, lnworker=None):
self.name = None
Logger.__init__(self)
self.cb = cb
self._sweep_info = {}
self._fallback_sweep_address = sweep_address
self.storage = {} # dummy storage
self._state = ChannelState.OPENING
self.config = {}
self.config[LOCAL] = LocalConfig.from_seed(
channel_seed=cb.channel_seed,
to_self_delay=cb.local_delay,
# dummy values
static_remotekey=None,
dust_limit_sat=None,
max_htlc_value_in_flight_msat=None,
max_accepted_htlcs=None,
initial_msat=None,
reserve_sat=None,
funding_locked_received=False,
was_announced=False,
current_commitment_signature=None,
current_htlc_signatures=b'',
htlc_minimum_msat=1,
)
self.config[REMOTE] = RemoteConfig(
payment_basepoint=OnlyPubkeyKeypair(cb.remote_payment_pubkey),
revocation_basepoint=OnlyPubkeyKeypair(cb.remote_revocation_pubkey),
to_self_delay=cb.remote_delay,
# dummy values
multisig_key=OnlyPubkeyKeypair(None),
htlc_basepoint=OnlyPubkeyKeypair(None),
delayed_basepoint=OnlyPubkeyKeypair(None),
dust_limit_sat=None,
max_htlc_value_in_flight_msat=None,
max_accepted_htlcs=None,
initial_msat = None,
reserve_sat = None,
htlc_minimum_msat=None,
next_per_commitment_point=None,
current_per_commitment_point=None)
self.node_id = cb.node_id
self.channel_id = cb.channel_id()
self.funding_outpoint = cb.funding_outpoint()
self.lnworker = lnworker
self.short_channel_id = None
def is_backup(self):
return True
def create_sweeptxs_for_their_ctx(self, ctx):
return {}
def get_funding_address(self):
return self.cb.funding_address
def is_initiator(self):
return self.cb.is_initiator
def short_id_for_GUI(self) -> str:
if self.short_channel_id:
return 'BACKUP of ' + format_short_channel_id(self.short_channel_id)
else:
return 'BACKUP'
def get_state_for_GUI(self):
cs = self.get_state()
return cs.name
def get_oldest_unrevoked_ctn(self, who):
return -1
def included_htlcs(self, subject, direction, ctn=None):
return []
def funding_txn_minimum_depth(self):
return 1
def is_funding_tx_mined(self, funding_height):
return funding_height.conf > 1
def balance_minus_outgoing_htlcs(self, whose: HTLCOwner, *, ctx_owner: HTLCOwner = HTLCOwner.LOCAL, ctn: int = None):
return 0
def balance(self, whose: HTLCOwner, *, ctx_owner=HTLCOwner.LOCAL, ctn: int = None) -> int:
return 0
def is_frozen_for_sending(self) -> bool:
return False
def is_frozen_for_receiving(self) -> bool:
return False
def is_static_remotekey_enabled(self) -> bool:
        # Return False so that self.sweep_address will return self._fallback_sweep_address.
        # (Since channel backups do not save the static_remotekey, the payment_basepoint
        # in their local config is not static.)
return False
class Channel(AbstractChannel):
# note: try to avoid naming ctns/ctxs/etc as "current" and "pending".
# they are ambiguous. Use "oldest_unrevoked" or "latest" or "next".
# TODO enforce this ^
def __init__(self, state: 'StoredDict', *, sweep_address=None, name=None, lnworker=None, initial_feerate=None):
self.name = name
Logger.__init__(self)
self.lnworker = lnworker
self._fallback_sweep_address = sweep_address
self.storage = state
self.db_lock = self.storage.db.lock if self.storage.db else threading.RLock()
self.config = {}
self.config[LOCAL] = state["local_config"]
self.config[REMOTE] = state["remote_config"]
self.channel_id = bfh(state["channel_id"])
self.constraints = state["constraints"] # type: ChannelConstraints
self.funding_outpoint = state["funding_outpoint"]
self.node_id = bfh(state["node_id"])
self.short_channel_id = ShortChannelID.normalize(state["short_channel_id"])
self.onion_keys = state['onion_keys'] # type: Dict[int, bytes]
self.data_loss_protect_remote_pcp = state['data_loss_protect_remote_pcp']
self.hm = HTLCManager(log=state['log'], initial_feerate=initial_feerate)
self._state = ChannelState[state['state']]
self.peer_state = PeerState.DISCONNECTED
self._sweep_info = {}
self._outgoing_channel_update = None # type: Optional[bytes]
self._chan_ann_without_sigs = None # type: Optional[bytes]
self.revocation_store = RevocationStore(state["revocation_store"])
self._can_send_ctx_updates = True # type: bool
self._receive_fail_reasons = {} # type: Dict[int, (bytes, OnionRoutingFailureMessage)]
self._ignore_max_htlc_value = False # used in tests
def is_initiator(self):
return self.constraints.is_initiator
def is_active(self):
return self.get_state() == ChannelState.OPEN and self.peer_state == PeerState.GOOD
def funding_txn_minimum_depth(self):
return self.constraints.funding_txn_minimum_depth
def diagnostic_name(self):
if self.name:
return str(self.name)
try:
return f"lnchannel_{bh2u(self.channel_id[-4:])}"
except:
return super().diagnostic_name()
def set_onion_key(self, key: int, value: bytes):
self.onion_keys[key] = value
def get_onion_key(self, key: int) -> bytes:
return self.onion_keys.get(key)
def set_data_loss_protect_remote_pcp(self, key, value):
self.data_loss_protect_remote_pcp[key] = value
def get_data_loss_protect_remote_pcp(self, key):
return self.data_loss_protect_remote_pcp.get(key)
def get_local_pubkey(self) -> bytes:
if not self.lnworker:
raise Exception('lnworker not set for channel!')
return self.lnworker.node_keypair.pubkey
def set_remote_update(self, raw: bytes) -> None:
self.storage['remote_update'] = raw.hex()
def get_remote_update(self) -> Optional[bytes]:
return bfh(self.storage.get('remote_update')) if self.storage.get('remote_update') else None
def add_or_update_peer_addr(self, peer: LNPeerAddr) -> None:
if 'peer_network_addresses' not in self.storage:
self.storage['peer_network_addresses'] = {}
now = int(time.time())
self.storage['peer_network_addresses'][peer.net_addr_str()] = now
def get_peer_addresses(self) -> Iterator[LNPeerAddr]:
# sort by timestamp: most recent first
addrs = sorted(self.storage.get('peer_network_addresses', {}).items(),
key=lambda x: x[1], reverse=True)
for net_addr_str, ts in addrs:
net_addr = NetAddress.from_string(net_addr_str)
yield LNPeerAddr(host=str(net_addr.host), port=net_addr.port, pubkey=self.node_id)
def get_outgoing_gossip_channel_update(self) -> bytes:
if self._outgoing_channel_update is not None:
return self._outgoing_channel_update
if not self.lnworker:
raise Exception('lnworker not set for channel!')
sorted_node_ids = list(sorted([self.node_id, self.get_local_pubkey()]))
channel_flags = b'\x00' if sorted_node_ids[0] == self.get_local_pubkey() else b'\x01'
now = int(time.time())
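        # advertise at most what the remote allows in flight, capped by the channel capacity (converted from sat to msat)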
htlc_maximum_msat = min(self.config[REMOTE].max_htlc_value_in_flight_msat, 1000 * self.constraints.capacity)
chan_upd = encode_msg(
"channel_update",
short_channel_id=self.short_channel_id,
channel_flags=channel_flags,
message_flags=b'\x01',
cltv_expiry_delta=lnutil.NBLOCK_OUR_CLTV_EXPIRY_DELTA,
htlc_minimum_msat=self.config[REMOTE].htlc_minimum_msat,
htlc_maximum_msat=htlc_maximum_msat,
fee_base_msat=lnutil.OUR_FEE_BASE_MSAT,
fee_proportional_millionths=lnutil.OUR_FEE_PROPORTIONAL_MILLIONTHS,
chain_hash=constants.net.rev_genesis_bytes(),
timestamp=now,
)
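        # per BOLT-07, the signature covers the double-SHA256 of the message after the 2-byte type and the 64-byte signature field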
sighash = sha256d(chan_upd[2 + 64:])
sig = ecc.ECPrivkey(self.lnworker.node_keypair.privkey).sign(sighash, ecc.sig_string_from_r_and_s)
message_type, payload = decode_msg(chan_upd)
payload['signature'] = sig
chan_upd = encode_msg(message_type, **payload)
self._outgoing_channel_update = chan_upd
return chan_upd
def construct_channel_announcement_without_sigs(self) -> bytes:
if self._chan_ann_without_sigs is not None:
return self._chan_ann_without_sigs
if not self.lnworker:
raise Exception('lnworker not set for channel!')
bitcoin_keys = [self.config[REMOTE].multisig_key.pubkey,
self.config[LOCAL].multisig_key.pubkey]
node_ids = [self.node_id, self.get_local_pubkey()]
sorted_node_ids = list(sorted(node_ids))
if sorted_node_ids != node_ids:
node_ids = sorted_node_ids
bitcoin_keys.reverse()
chan_ann = encode_msg(
"channel_announcement",
len=0,
features=b'',
chain_hash=constants.net.rev_genesis_bytes(),
short_channel_id=self.short_channel_id,
node_id_1=node_ids[0],
node_id_2=node_ids[1],
bitcoin_key_1=bitcoin_keys[0],
bitcoin_key_2=bitcoin_keys[1],
)
self._chan_ann_without_sigs = chan_ann
return chan_ann
def is_static_remotekey_enabled(self) -> bool:
return bool(self.storage.get('static_remotekey_enabled'))
def get_wallet_addresses_channel_might_want_reserved(self) -> Sequence[str]:
ret = []
if self.is_static_remotekey_enabled():
our_payment_pubkey = self.config[LOCAL].payment_basepoint.pubkey
to_remote_address = make_commitment_output_to_remote_address(our_payment_pubkey)
ret.append(to_remote_address)
return ret
def get_feerate(self, subject: HTLCOwner, *, ctn: int) -> int:
# returns feerate in sat/kw
return self.hm.get_feerate(subject, ctn)
def get_oldest_unrevoked_feerate(self, subject: HTLCOwner) -> int:
return self.hm.get_feerate_in_oldest_unrevoked_ctx(subject)
def get_latest_feerate(self, subject: HTLCOwner) -> int:
return self.hm.get_feerate_in_latest_ctx(subject)
def get_next_feerate(self, subject: HTLCOwner) -> int:
return self.hm.get_feerate_in_next_ctx(subject)
def get_payments(self):
out = []
for direction, htlc in self.hm.all_htlcs_ever():
htlc_proposer = LOCAL if direction is SENT else REMOTE
if self.hm.was_htlc_failed(htlc_id=htlc.htlc_id, htlc_proposer=htlc_proposer):
status = 'failed'
elif self.hm.was_htlc_preimage_released(htlc_id=htlc.htlc_id, htlc_proposer=htlc_proposer):
status = 'settled'
else:
status = 'inflight'
rhash = htlc.payment_hash.hex()
out.append((rhash, self.channel_id, htlc, direction, status))
return out
def get_settled_payments(self):
out = defaultdict(list)
for direction, htlc in self.hm.all_htlcs_ever():
htlc_proposer = LOCAL if direction is SENT else REMOTE
if self.hm.was_htlc_preimage_released(htlc_id=htlc.htlc_id, htlc_proposer=htlc_proposer):
rhash = htlc.payment_hash.hex()
out[rhash].append((self.channel_id, htlc, direction))
return out
def open_with_first_pcp(self, remote_pcp: bytes, remote_sig: bytes) -> None:
with self.db_lock:
self.config[REMOTE].current_per_commitment_point = remote_pcp
self.config[REMOTE].next_per_commitment_point = None
self.config[LOCAL].current_commitment_signature = remote_sig
self.hm.channel_open_finished()
self.peer_state = PeerState.GOOD
def get_state_for_GUI(self):
# status displayed in the GUI
cs = self.get_state()
if self.is_closed():
return cs.name
ps = self.peer_state
if ps != PeerState.GOOD:
return ps.name
return cs.name
def set_can_send_ctx_updates(self, b: bool) -> None:
self._can_send_ctx_updates = b
def can_send_ctx_updates(self) -> bool:
"""Whether we can send update_fee, update_*_htlc changes to the remote."""
if not (self.is_open() or self.is_closing()):
return False
if self.peer_state != PeerState.GOOD:
return False
if not self._can_send_ctx_updates:
return False
return True
def can_send_update_add_htlc(self) -> bool:
return self.can_send_ctx_updates() and not self.is_closing()
def is_frozen_for_sending(self) -> bool:
return self.storage.get('frozen_for_sending', False)
def set_frozen_for_sending(self, b: bool) -> None:
self.storage['frozen_for_sending'] = bool(b)
util.trigger_callback('channel', self.lnworker.wallet, self)
def is_frozen_for_receiving(self) -> bool:
return self.storage.get('frozen_for_receiving', False)
def set_frozen_for_receiving(self, b: bool) -> None:
self.storage['frozen_for_receiving'] = bool(b)
util.trigger_callback('channel', self.lnworker.wallet, self)
def _assert_can_add_htlc(self, *, htlc_proposer: HTLCOwner, amount_msat: int,
ignore_min_htlc_value: bool = False) -> None:
"""Raises PaymentFailure if the htlc_proposer cannot add this new HTLC.
(this is relevant both for forwarding and endpoint)
"""
htlc_receiver = htlc_proposer.inverted()
# note: all these tests are about the *receiver's* *next* commitment transaction,
# and the constraints are the ones imposed by their config
ctn = self.get_next_ctn(htlc_receiver)
chan_config = self.config[htlc_receiver]
if self.get_state() != ChannelState.OPEN:
raise PaymentFailure('Channel not open', self.get_state())
if htlc_proposer == LOCAL:
if not self.can_send_ctx_updates():
raise PaymentFailure('Channel cannot send ctx updates')
if not self.can_send_update_add_htlc():
raise PaymentFailure('Channel cannot add htlc')
# If proposer is LOCAL we apply stricter checks as that is behaviour we can control.
# This should lead to fewer disagreements (i.e. channels failing).
strict = (htlc_proposer == LOCAL)
# check htlc raw value
if not ignore_min_htlc_value:
if amount_msat <= 0:
raise PaymentFailure("HTLC value must be positive")
if amount_msat < chan_config.htlc_minimum_msat:
raise PaymentFailure(f'HTLC value too small: {amount_msat} msat')
if amount_msat > LN_MAX_HTLC_VALUE_MSAT and not self._ignore_max_htlc_value:
raise PaymentFailure(f"HTLC value over protocol maximum: {amount_msat} > {LN_MAX_HTLC_VALUE_MSAT} msat")
# check proposer can afford htlc
max_can_send_msat = self.available_to_spend(htlc_proposer, strict=strict)
if max_can_send_msat < amount_msat:
raise PaymentFailure(f'Not enough balance. can send: {max_can_send_msat}, tried: {amount_msat}')
# check "max_accepted_htlcs"
# this is the loose check BOLT-02 specifies:
if len(self.hm.htlcs_by_direction(htlc_receiver, direction=RECEIVED, ctn=ctn)) + 1 > chan_config.max_accepted_htlcs:
raise PaymentFailure('Too many HTLCs already in channel')
# however, c-lightning is a lot stricter, so extra checks:
if strict:
max_concurrent_htlcs = min(self.config[htlc_proposer].max_accepted_htlcs,
self.config[htlc_receiver].max_accepted_htlcs)
if len(self.hm.htlcs(htlc_receiver, ctn=ctn)) + 1 > max_concurrent_htlcs:
raise PaymentFailure('Too many HTLCs already in channel')
# check "max_htlc_value_in_flight_msat"
current_htlc_sum = htlcsum(self.hm.htlcs_by_direction(htlc_receiver, direction=RECEIVED, ctn=ctn).values())
if current_htlc_sum + amount_msat > chan_config.max_htlc_value_in_flight_msat:
raise PaymentFailure(f'HTLC value sum (sum of pending htlcs: {current_htlc_sum/1000} sat '
f'plus new htlc: {amount_msat/1000} sat) '
f'would exceed max allowed: {chan_config.max_htlc_value_in_flight_msat/1000} sat')
def can_pay(self, amount_msat: int, *, check_frozen=False) -> bool:
"""Returns whether we can add an HTLC of given value."""
if check_frozen and self.is_frozen_for_sending():
return False
try:
self._assert_can_add_htlc(htlc_proposer=LOCAL, amount_msat=amount_msat)
except PaymentFailure:
return False
return True
def can_receive(self, amount_msat: int, *, check_frozen=False,
ignore_min_htlc_value: bool = False) -> bool:
"""Returns whether the remote can add an HTLC of given value."""
if check_frozen and self.is_frozen_for_receiving():
return False
try:
self._assert_can_add_htlc(htlc_proposer=REMOTE,
amount_msat=amount_msat,
ignore_min_htlc_value=ignore_min_htlc_value)
except PaymentFailure:
return False
return True
def should_try_to_reestablish_peer(self) -> bool:
return ChannelState.PREOPENING < self._state < ChannelState.CLOSING and self.peer_state == PeerState.DISCONNECTED
def get_funding_address(self):
script = funding_output_script(self.config[LOCAL], self.config[REMOTE])
return redeem_script_to_address('p2wsh', script)
def add_htlc(self, htlc: UpdateAddHtlc) -> UpdateAddHtlc:
"""Adds a new LOCAL HTLC to the channel.
Action must be initiated by LOCAL.
"""
if isinstance(htlc, dict): # legacy conversion # FIXME remove
htlc = UpdateAddHtlc(**htlc)
assert isinstance(htlc, UpdateAddHtlc)
self._assert_can_add_htlc(htlc_proposer=LOCAL, amount_msat=htlc.amount_msat)
if htlc.htlc_id is None:
htlc = attr.evolve(htlc, htlc_id=self.hm.get_next_htlc_id(LOCAL))
with self.db_lock:
self.hm.send_htlc(htlc)
self.logger.info("add_htlc")
return htlc
def receive_htlc(self, htlc: UpdateAddHtlc, onion_packet:bytes = None) -> UpdateAddHtlc:
"""Adds a new REMOTE HTLC to the channel.
Action must be initiated by REMOTE.
"""
if isinstance(htlc, dict): # legacy conversion # FIXME remove
htlc = UpdateAddHtlc(**htlc)
assert isinstance(htlc, UpdateAddHtlc)
try:
self._assert_can_add_htlc(htlc_proposer=REMOTE, amount_msat=htlc.amount_msat)
except PaymentFailure as e:
raise RemoteMisbehaving(e) from e
if htlc.htlc_id is None: # used in unit tests
htlc = attr.evolve(htlc, htlc_id=self.hm.get_next_htlc_id(REMOTE))
with self.db_lock:
self.hm.recv_htlc(htlc)
local_ctn = self.get_latest_ctn(LOCAL)
remote_ctn = self.get_latest_ctn(REMOTE)
if onion_packet:
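            # stash the onion packet (and the ctns at receive time) so lnworker can later fulfill, fail or forward this incoming htlc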
self.hm.log['unfulfilled_htlcs'][htlc.htlc_id] = local_ctn, remote_ctn, onion_packet.hex(), False
self.logger.info("receive_htlc")
return htlc
def sign_next_commitment(self) -> Tuple[bytes, Sequence[bytes]]:
"""Returns signatures for our next remote commitment tx.
Action must be initiated by LOCAL.
Finally, the next remote ctx becomes the latest remote ctx.
"""
next_remote_ctn = self.get_next_ctn(REMOTE)
self.logger.info(f"sign_next_commitment {next_remote_ctn}")
pending_remote_commitment = self.get_next_commitment(REMOTE)
sig_64 = sign_and_get_sig_string(pending_remote_commitment, self.config[LOCAL], self.config[REMOTE])
their_remote_htlc_privkey_number = derive_privkey(
int.from_bytes(self.config[LOCAL].htlc_basepoint.privkey, 'big'),
self.config[REMOTE].next_per_commitment_point)
their_remote_htlc_privkey = their_remote_htlc_privkey_number.to_bytes(32, 'big')
htlcsigs = []
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=self,
ctx=pending_remote_commitment,
pcp=self.config[REMOTE].next_per_commitment_point,
subject=REMOTE,
ctn=next_remote_ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
_script, htlc_tx = make_htlc_tx_with_open_channel(chan=self,
pcp=self.config[REMOTE].next_per_commitment_point,
subject=REMOTE,
ctn=next_remote_ctn,
htlc_direction=direction,
commit=pending_remote_commitment,
ctx_output_idx=ctx_output_idx,
htlc=htlc)
sig = bfh(htlc_tx.sign_txin(0, their_remote_htlc_privkey))
htlc_sig = ecc.sig_string_from_der_sig(sig[:-1])
htlcsigs.append((ctx_output_idx, htlc_sig))
htlcsigs.sort()
htlcsigs = [x[1] for x in htlcsigs]
with self.db_lock:
self.hm.send_ctx()
return sig_64, htlcsigs
def receive_new_commitment(self, sig: bytes, htlc_sigs: Sequence[bytes]) -> None:
"""Processes signatures for our next local commitment tx, sent by the REMOTE.
Action must be initiated by REMOTE.
If all checks pass, the next local ctx becomes the latest local ctx.
"""
# TODO in many failure cases below, we should "fail" the channel (force-close)
next_local_ctn = self.get_next_ctn(LOCAL)
self.logger.info(f"receive_new_commitment. ctn={next_local_ctn}, len(htlc_sigs)={len(htlc_sigs)}")
assert len(htlc_sigs) == 0 or type(htlc_sigs[0]) is bytes
pending_local_commitment = self.get_next_commitment(LOCAL)
preimage_hex = pending_local_commitment.serialize_preimage(0)
pre_hash = sha256d(bfh(preimage_hex))
if not ecc.verify_signature(self.config[REMOTE].multisig_key.pubkey, sig, pre_hash):
raise Exception(f'failed verifying signature of our updated commitment transaction: {bh2u(sig)} preimage is {preimage_hex}')
htlc_sigs_string = b''.join(htlc_sigs)
_secret, pcp = self.get_secret_and_point(subject=LOCAL, ctn=next_local_ctn)
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=self,
ctx=pending_local_commitment,
pcp=pcp,
subject=LOCAL,
ctn=next_local_ctn)
if len(htlc_to_ctx_output_idx_map) != len(htlc_sigs):
raise Exception(f'htlc sigs failure. recv {len(htlc_sigs)} sigs, expected {len(htlc_to_ctx_output_idx_map)}')
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
htlc_sig = htlc_sigs[htlc_relative_idx]
self._verify_htlc_sig(htlc=htlc,
htlc_sig=htlc_sig,
htlc_direction=direction,
pcp=pcp,
ctx=pending_local_commitment,
ctx_output_idx=ctx_output_idx,
ctn=next_local_ctn)
with self.db_lock:
self.hm.recv_ctx()
self.config[LOCAL].current_commitment_signature=sig
self.config[LOCAL].current_htlc_signatures=htlc_sigs_string
def _verify_htlc_sig(self, *, htlc: UpdateAddHtlc, htlc_sig: bytes, htlc_direction: Direction,
pcp: bytes, ctx: Transaction, ctx_output_idx: int, ctn: int) -> None:
_script, htlc_tx = make_htlc_tx_with_open_channel(chan=self,
pcp=pcp,
subject=LOCAL,
ctn=ctn,
htlc_direction=htlc_direction,
commit=ctx,
ctx_output_idx=ctx_output_idx,
htlc=htlc)
pre_hash = sha256d(bfh(htlc_tx.serialize_preimage(0)))
remote_htlc_pubkey = derive_pubkey(self.config[REMOTE].htlc_basepoint.pubkey, pcp)
if not ecc.verify_signature(remote_htlc_pubkey, htlc_sig, pre_hash):
raise Exception(f'failed verifying HTLC signatures: {htlc} {htlc_direction}')
def get_remote_htlc_sig_for_htlc(self, *, htlc_relative_idx: int) -> bytes:
data = self.config[LOCAL].current_htlc_signatures
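        # stored as a flat concatenation of 64-byte compact signatures, one per htlc output, indexed by htlc_relative_idx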
htlc_sigs = list(chunks(data, 64))
htlc_sig = htlc_sigs[htlc_relative_idx]
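        # convert the compact signature to DER and append the SIGHASH_ALL byte expected in the witness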
remote_htlc_sig = ecc.der_sig_from_sig_string(htlc_sig) + b'\x01'
return remote_htlc_sig
def revoke_current_commitment(self):
self.logger.info("revoke_current_commitment")
new_ctn = self.get_latest_ctn(LOCAL)
new_ctx = self.get_latest_commitment(LOCAL)
if not self.signature_fits(new_ctx):
            # this should never fail, as receive_new_commitment already did this test
raise Exception("refusing to revoke as remote sig does not fit")
with self.db_lock:
self.hm.send_rev()
if self.lnworker:
received = self.hm.received_in_ctn(new_ctn)
for htlc in received:
self.lnworker.payment_received(self, htlc.payment_hash)
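        # reveal the secret of the commitment being revoked (ctn - 1) and hand out the point for the commitment after the newly signed one (ctn + 1)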
last_secret, last_point = self.get_secret_and_point(LOCAL, new_ctn - 1)
next_secret, next_point = self.get_secret_and_point(LOCAL, new_ctn + 1)
return RevokeAndAck(last_secret, next_point)
def receive_revocation(self, revocation: RevokeAndAck):
self.logger.info("receive_revocation")
new_ctn = self.get_latest_ctn(REMOTE)
cur_point = self.config[REMOTE].current_per_commitment_point
derived_point = ecc.ECPrivkey(revocation.per_commitment_secret).get_public_key_bytes(compressed=True)
if cur_point != derived_point:
raise Exception('revoked secret not for current point')
with self.db_lock:
self.revocation_store.add_next_entry(revocation.per_commitment_secret)
##### start applying fee/htlc changes
self.hm.recv_rev()
self.config[REMOTE].current_per_commitment_point=self.config[REMOTE].next_per_commitment_point
self.config[REMOTE].next_per_commitment_point=revocation.next_per_commitment_point
# lnworker callbacks
if self.lnworker:
sent = self.hm.sent_in_ctn(new_ctn)
for htlc in sent:
self.lnworker.payment_sent(self, htlc.payment_hash)
failed = self.hm.failed_in_ctn(new_ctn)
for htlc in failed:
try:
error_bytes, failure_message = self._receive_fail_reasons.pop(htlc.htlc_id)
except KeyError:
error_bytes, failure_message = None, None
# if we are forwarding, save error message to disk
if self.lnworker.get_payment_info(htlc.payment_hash) is None:
self.save_fail_htlc_reason(htlc.htlc_id, error_bytes, failure_message)
else:
self.lnworker.payment_failed(self, htlc.payment_hash, error_bytes, failure_message)
def save_fail_htlc_reason(
self,
htlc_id: int,
error_bytes: Optional[bytes],
failure_message: Optional['OnionRoutingFailureMessage'],
):
error_hex = error_bytes.hex() if error_bytes else None
failure_hex = failure_message.to_bytes().hex() if failure_message else None
self.hm.log['fail_htlc_reasons'][htlc_id] = (error_hex, failure_hex)
def pop_fail_htlc_reason(self, htlc_id):
error_hex, failure_hex = self.hm.log['fail_htlc_reasons'].pop(htlc_id, (None, None))
error_bytes = bytes.fromhex(error_hex) if error_hex else None
failure_message = OnionRoutingFailureMessage.from_bytes(bytes.fromhex(failure_hex)) if failure_hex else None
return error_bytes, failure_message
def extract_preimage_from_htlc_txin(self, txin: TxInput) -> None:
witness = txin.witness_elements()
if len(witness) == 5: # HTLC success tx
preimage = witness[3]
elif len(witness) == 3: # spending offered HTLC directly from ctx
preimage = witness[1]
else:
return
payment_hash = sha256(preimage)
for direction, htlc in itertools.chain(self.hm.get_htlcs_in_oldest_unrevoked_ctx(REMOTE),
self.hm.get_htlcs_in_latest_ctx(REMOTE)):
if htlc.payment_hash == payment_hash:
is_sent = direction == RECEIVED
break
else:
for direction, htlc in itertools.chain(self.hm.get_htlcs_in_oldest_unrevoked_ctx(LOCAL),
self.hm.get_htlcs_in_latest_ctx(LOCAL)):
if htlc.payment_hash == payment_hash:
is_sent = direction == SENT
break
else:
return
if self.lnworker.get_preimage(payment_hash) is None:
self.logger.info(f'found preimage for {payment_hash.hex()} in witness of length {len(witness)}')
self.lnworker.save_preimage(payment_hash, preimage)
info = self.lnworker.get_payment_info(payment_hash)
if info is not None and info.status != PR_PAID:
if is_sent:
self.lnworker.payment_sent(self, payment_hash)
else:
self.lnworker.payment_received(self, payment_hash)
def balance(self, whose: HTLCOwner, *, ctx_owner=HTLCOwner.LOCAL, ctn: int = None) -> int:
assert type(whose) is HTLCOwner
initial = self.config[whose].initial_msat
return self.hm.get_balance_msat(whose=whose,
ctx_owner=ctx_owner,
ctn=ctn,
initial_balance_msat=initial)
def balance_minus_outgoing_htlcs(self, whose: HTLCOwner, *, ctx_owner: HTLCOwner = HTLCOwner.LOCAL,
ctn: int = None) -> int:
assert type(whose) is HTLCOwner
if ctn is None:
ctn = self.get_next_ctn(ctx_owner)
committed_balance = self.balance(whose, ctx_owner=ctx_owner, ctn=ctn)
direction = RECEIVED if whose != ctx_owner else SENT
balance_in_htlcs = self.balance_tied_up_in_htlcs_by_direction(ctx_owner, ctn=ctn, direction=direction)
return committed_balance - balance_in_htlcs
def balance_tied_up_in_htlcs_by_direction(self, ctx_owner: HTLCOwner = LOCAL, *, ctn: int = None,
direction: Direction):
# in msat
if ctn is None:
ctn = self.get_next_ctn(ctx_owner)
return htlcsum(self.hm.htlcs_by_direction(ctx_owner, direction, ctn).values())
def available_to_spend(self, subject: HTLCOwner, *, strict: bool = True) -> int:
"""The usable balance of 'subject' in msat, after taking reserve and fees into
consideration. Note that fees (and hence the result) fluctuate even without user interaction.
"""
assert type(subject) is HTLCOwner
sender = subject
receiver = subject.inverted()
initiator = LOCAL if self.constraints.is_initiator else REMOTE # the initiator/funder pays on-chain fees
is_frozen = self.is_frozen_for_sending() if subject == LOCAL else self.is_frozen_for_receiving()
if not self.is_active() or is_frozen:
return 0
def consider_ctx(*, ctx_owner: HTLCOwner, is_htlc_dust: bool) -> int:
ctn = self.get_next_ctn(ctx_owner)
sender_balance_msat = self.balance_minus_outgoing_htlcs(whose=sender, ctx_owner=ctx_owner, ctn=ctn)
receiver_balance_msat = self.balance_minus_outgoing_htlcs(whose=receiver, ctx_owner=ctx_owner, ctn=ctn)
sender_reserve_msat = self.config[receiver].reserve_sat * 1000
receiver_reserve_msat = self.config[sender].reserve_sat * 1000
num_htlcs_in_ctx = len(self.included_htlcs(ctx_owner, SENT, ctn=ctn) + self.included_htlcs(ctx_owner, RECEIVED, ctn=ctn))
feerate = self.get_feerate(ctx_owner, ctn=ctn)
ctx_fees_msat = calc_fees_for_commitment_tx(
num_htlcs=num_htlcs_in_ctx,
feerate=feerate,
is_local_initiator=self.constraints.is_initiator,
round_to_sat=False,
)
htlc_fee_msat = fee_for_htlc_output(feerate=feerate)
htlc_trim_func = received_htlc_trim_threshold_sat if ctx_owner == receiver else offered_htlc_trim_threshold_sat
htlc_trim_threshold_msat = htlc_trim_func(dust_limit_sat=self.config[ctx_owner].dust_limit_sat, feerate=feerate) * 1000
if sender == initiator == LOCAL: # see https://github.com/lightningnetwork/lightning-rfc/pull/740
fee_spike_buffer = calc_fees_for_commitment_tx(
num_htlcs=num_htlcs_in_ctx + int(not is_htlc_dust) + 1,
feerate=2 * feerate,
is_local_initiator=self.constraints.is_initiator,
round_to_sat=False,
)[sender]
max_send_msat = sender_balance_msat - sender_reserve_msat - fee_spike_buffer
else:
max_send_msat = sender_balance_msat - sender_reserve_msat - ctx_fees_msat[sender]
if is_htlc_dust:
return min(max_send_msat, htlc_trim_threshold_msat - 1)
else:
if sender == initiator:
return max_send_msat - htlc_fee_msat
else:
# the receiver is the initiator, so they need to be able to pay tx fees
if receiver_balance_msat - receiver_reserve_msat - ctx_fees_msat[receiver] - htlc_fee_msat < 0:
return 0
return max_send_msat
max_send_msat = min(
max(
consider_ctx(ctx_owner=receiver, is_htlc_dust=True),
consider_ctx(ctx_owner=receiver, is_htlc_dust=False),
),
max(
consider_ctx(ctx_owner=sender, is_htlc_dust=True),
consider_ctx(ctx_owner=sender, is_htlc_dust=False),
),
)
max_send_msat = max(max_send_msat, 0)
return max_send_msat
def included_htlcs(self, subject: HTLCOwner, direction: Direction, ctn: int = None, *,
feerate: int = None) -> Sequence[UpdateAddHtlc]:
"""Returns list of non-dust HTLCs for subject's commitment tx at ctn,
filtered by direction (of HTLCs).
"""
assert type(subject) is HTLCOwner
assert type(direction) is Direction
if ctn is None:
ctn = self.get_oldest_unrevoked_ctn(subject)
if feerate is None:
feerate = self.get_feerate(subject, ctn=ctn)
conf = self.config[subject]
if direction == RECEIVED:
threshold_sat = received_htlc_trim_threshold_sat(dust_limit_sat=conf.dust_limit_sat, feerate=feerate)
else:
threshold_sat = offered_htlc_trim_threshold_sat(dust_limit_sat=conf.dust_limit_sat, feerate=feerate)
htlcs = self.hm.htlcs_by_direction(subject, direction, ctn=ctn).values()
return list(filter(lambda htlc: htlc.amount_msat // 1000 >= threshold_sat, htlcs))
def get_secret_and_point(self, subject: HTLCOwner, ctn: int) -> Tuple[Optional[bytes], bytes]:
assert type(subject) is HTLCOwner
assert ctn >= 0, ctn
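        # For REMOTE we only learn a secret once the corresponding ctn has been
        # revoked (then it lives in the revocation store); for the current and next
        # ctn we only know the points. For LOCAL every secret can be derived from
        # our own per-commitment seed.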
offset = ctn - self.get_oldest_unrevoked_ctn(subject)
if subject == REMOTE:
if offset > 1:
raise RemoteCtnTooFarInFuture(f"offset: {offset}")
conf = self.config[REMOTE]
if offset == 1:
secret = None
point = conf.next_per_commitment_point
elif offset == 0:
secret = None
point = conf.current_per_commitment_point
else:
secret = self.revocation_store.retrieve_secret(RevocationStore.START_INDEX - ctn)
point = secret_to_pubkey(int.from_bytes(secret, 'big'))
else:
secret = get_per_commitment_secret_from_seed(self.config[LOCAL].per_commitment_secret_seed, RevocationStore.START_INDEX - ctn)
point = secret_to_pubkey(int.from_bytes(secret, 'big'))
return secret, point
def get_secret_and_commitment(self, subject: HTLCOwner, *, ctn: int) -> Tuple[Optional[bytes], PartialTransaction]:
secret, point = self.get_secret_and_point(subject, ctn)
ctx = self.make_commitment(subject, point, ctn)
return secret, ctx
def get_commitment(self, subject: HTLCOwner, *, ctn: int) -> PartialTransaction:
secret, ctx = self.get_secret_and_commitment(subject, ctn=ctn)
return ctx
def get_next_commitment(self, subject: HTLCOwner) -> PartialTransaction:
ctn = self.get_next_ctn(subject)
return self.get_commitment(subject, ctn=ctn)
def get_latest_commitment(self, subject: HTLCOwner) -> PartialTransaction:
ctn = self.get_latest_ctn(subject)
return self.get_commitment(subject, ctn=ctn)
def get_oldest_unrevoked_commitment(self, subject: HTLCOwner) -> PartialTransaction:
ctn = self.get_oldest_unrevoked_ctn(subject)
return self.get_commitment(subject, ctn=ctn)
def create_sweeptxs(self, ctn: int) -> List[Transaction]:
from .lnsweep import create_sweeptxs_for_watchtower
secret, ctx = self.get_secret_and_commitment(REMOTE, ctn=ctn)
return create_sweeptxs_for_watchtower(self, ctx, secret, self.sweep_address)
def get_oldest_unrevoked_ctn(self, subject: HTLCOwner) -> int:
return self.hm.ctn_oldest_unrevoked(subject)
def get_latest_ctn(self, subject: HTLCOwner) -> int:
return self.hm.ctn_latest(subject)
def get_next_ctn(self, subject: HTLCOwner) -> int:
return self.hm.ctn_latest(subject) + 1
def total_msat(self, direction: Direction) -> int:
"""Return the cumulative total msat amount received/sent so far."""
assert type(direction) is Direction
return htlcsum(self.hm.all_settled_htlcs_ever_by_direction(LOCAL, direction))
def settle_htlc(self, preimage: bytes, htlc_id: int) -> None:
"""Settle/fulfill a pending received HTLC.
Action must be initiated by LOCAL.
"""
self.logger.info("settle_htlc")
assert self.can_send_ctx_updates(), f"cannot update channel. {self.get_state()!r} {self.peer_state!r}"
htlc = self.hm.get_htlc_by_id(REMOTE, htlc_id)
assert htlc.payment_hash == sha256(preimage)
assert htlc_id not in self.hm.log[REMOTE]['settles']
self.hm.send_settle(htlc_id)
def get_payment_hash(self, htlc_id: int) -> bytes:
htlc = self.hm.get_htlc_by_id(LOCAL, htlc_id)
return htlc.payment_hash
def decode_onion_error(self, reason: bytes, route: Sequence['RouteEdge'],
htlc_id: int) -> Tuple[OnionRoutingFailureMessage, int]:
failure_msg, sender_idx = decode_onion_error(
reason,
[x.node_id for x in route],
self.onion_keys[htlc_id])
return failure_msg, sender_idx
def receive_htlc_settle(self, preimage: bytes, htlc_id: int) -> None:
"""Settle/fulfill a pending offered HTLC.
Action must be initiated by REMOTE.
"""
self.logger.info("receive_htlc_settle")
htlc = self.hm.get_htlc_by_id(LOCAL, htlc_id)
assert htlc.payment_hash == sha256(preimage)
assert htlc_id not in self.hm.log[LOCAL]['settles']
with self.db_lock:
self.hm.recv_settle(htlc_id)
def fail_htlc(self, htlc_id: int) -> None:
"""Fail a pending received HTLC.
Action must be initiated by LOCAL.
"""
self.logger.info("fail_htlc")
assert self.can_send_ctx_updates(), f"cannot update channel. {self.get_state()!r} {self.peer_state!r}"
with self.db_lock:
self.hm.send_fail(htlc_id)
def receive_fail_htlc(self, htlc_id: int, *,
error_bytes: Optional[bytes],
reason: Optional[OnionRoutingFailureMessage] = None) -> None:
"""Fail a pending offered HTLC.
Action must be initiated by REMOTE.
"""
self.logger.info("receive_fail_htlc")
with self.db_lock:
self.hm.recv_fail(htlc_id)
self._receive_fail_reasons[htlc_id] = (error_bytes, reason)
def get_next_fee(self, subject: HTLCOwner) -> int:
return self.constraints.capacity - sum(x.value for x in self.get_next_commitment(subject).outputs())
def get_latest_fee(self, subject: HTLCOwner) -> int:
return self.constraints.capacity - sum(x.value for x in self.get_latest_commitment(subject).outputs())
def update_fee(self, feerate: int, from_us: bool) -> None:
# feerate uses sat/kw
if self.constraints.is_initiator != from_us:
raise Exception(f"Cannot update_fee: wrong initiator. us: {from_us}")
sender = LOCAL if from_us else REMOTE
ctx_owner = -sender
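        # Affordability is checked on the counterparty's next commitment tx:
        # after paying the updated commitment fees, the fee payer must still be
        # above the reserve required of them.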
ctn = self.get_next_ctn(ctx_owner)
sender_balance_msat = self.balance_minus_outgoing_htlcs(whose=sender, ctx_owner=ctx_owner, ctn=ctn)
sender_reserve_msat = self.config[-sender].reserve_sat * 1000
num_htlcs_in_ctx = len(self.included_htlcs(ctx_owner, SENT, ctn=ctn, feerate=feerate) +
self.included_htlcs(ctx_owner, RECEIVED, ctn=ctn, feerate=feerate))
ctx_fees_msat = calc_fees_for_commitment_tx(
num_htlcs=num_htlcs_in_ctx,
feerate=feerate,
is_local_initiator=self.constraints.is_initiator,
)
remainder = sender_balance_msat - sender_reserve_msat - ctx_fees_msat[sender]
if remainder < 0:
raise Exception(f"Cannot update_fee. {sender} tried to update fee but they cannot afford it. "
f"Their balance would go below reserve: {remainder} msat missing.")
with self.db_lock:
if from_us:
assert self.can_send_ctx_updates(), f"cannot update channel. {self.get_state()!r} {self.peer_state!r}"
self.hm.send_update_fee(feerate)
else:
self.hm.recv_update_fee(feerate)
def make_commitment(self, subject: HTLCOwner, this_point: bytes, ctn: int) -> PartialTransaction:
assert type(subject) is HTLCOwner
feerate = self.get_feerate(subject, ctn=ctn)
other = subject.inverted()
local_msat = self.balance(subject, ctx_owner=subject, ctn=ctn)
remote_msat = self.balance(other, ctx_owner=subject, ctn=ctn)
received_htlcs = self.hm.htlcs_by_direction(subject, RECEIVED, ctn).values()
sent_htlcs = self.hm.htlcs_by_direction(subject, SENT, ctn).values()
remote_msat -= htlcsum(received_htlcs)
local_msat -= htlcsum(sent_htlcs)
assert remote_msat >= 0
assert local_msat >= 0
# same htlcs as before, but now without dust.
received_htlcs = self.included_htlcs(subject, RECEIVED, ctn)
sent_htlcs = self.included_htlcs(subject, SENT, ctn)
this_config = self.config[subject]
other_config = self.config[-subject]
other_htlc_pubkey = derive_pubkey(other_config.htlc_basepoint.pubkey, this_point)
this_htlc_pubkey = derive_pubkey(this_config.htlc_basepoint.pubkey, this_point)
other_revocation_pubkey = derive_blinded_pubkey(other_config.revocation_basepoint.pubkey, this_point)
htlcs = [] # type: List[ScriptHtlc]
for is_received_htlc, htlc_list in zip((True, False), (received_htlcs, sent_htlcs)):
for htlc in htlc_list:
htlcs.append(ScriptHtlc(make_htlc_output_witness_script(
is_received_htlc=is_received_htlc,
remote_revocation_pubkey=other_revocation_pubkey,
remote_htlc_pubkey=other_htlc_pubkey,
local_htlc_pubkey=this_htlc_pubkey,
payment_hash=htlc.payment_hash,
cltv_expiry=htlc.cltv_expiry), htlc))
# note: maybe flip initiator here for fee purposes, we want LOCAL and REMOTE
# in the resulting dict to correspond to the to_local and to_remote *outputs* of the ctx
onchain_fees = calc_fees_for_commitment_tx(
num_htlcs=len(htlcs),
feerate=feerate,
is_local_initiator=self.constraints.is_initiator == (subject == LOCAL),
)
if self.is_static_remotekey_enabled():
payment_pubkey = other_config.payment_basepoint.pubkey
else:
payment_pubkey = derive_pubkey(other_config.payment_basepoint.pubkey, this_point)
return make_commitment(
ctn=ctn,
local_funding_pubkey=this_config.multisig_key.pubkey,
remote_funding_pubkey=other_config.multisig_key.pubkey,
remote_payment_pubkey=payment_pubkey,
funder_payment_basepoint=self.config[LOCAL if self.constraints.is_initiator else REMOTE].payment_basepoint.pubkey,
fundee_payment_basepoint=self.config[LOCAL if not self.constraints.is_initiator else REMOTE].payment_basepoint.pubkey,
revocation_pubkey=other_revocation_pubkey,
delayed_pubkey=derive_pubkey(this_config.delayed_basepoint.pubkey, this_point),
to_self_delay=other_config.to_self_delay,
funding_txid=self.funding_outpoint.txid,
funding_pos=self.funding_outpoint.output_index,
funding_sat=self.constraints.capacity,
local_amount=local_msat,
remote_amount=remote_msat,
dust_limit_sat=this_config.dust_limit_sat,
fees_per_participant=onchain_fees,
htlcs=htlcs,
)
def make_closing_tx(self, local_script: bytes, remote_script: bytes,
fee_sat: int, *, drop_remote = False) -> Tuple[bytes, PartialTransaction]:
""" cooperative close """
_, outputs = make_commitment_outputs(
fees_per_participant={
LOCAL: fee_sat * 1000 if self.constraints.is_initiator else 0,
REMOTE: fee_sat * 1000 if not self.constraints.is_initiator else 0,
},
local_amount_msat=self.balance(LOCAL),
remote_amount_msat=self.balance(REMOTE) if not drop_remote else 0,
local_script=bh2u(local_script),
remote_script=bh2u(remote_script),
htlcs=[],
dust_limit_sat=self.config[LOCAL].dust_limit_sat)
closing_tx = make_closing_tx(self.config[LOCAL].multisig_key.pubkey,
self.config[REMOTE].multisig_key.pubkey,
funding_txid=self.funding_outpoint.txid,
funding_pos=self.funding_outpoint.output_index,
funding_sat=self.constraints.capacity,
outputs=outputs)
der_sig = bfh(closing_tx.sign_txin(0, self.config[LOCAL].multisig_key.privkey))
sig = ecc.sig_string_from_der_sig(der_sig[:-1])
return sig, closing_tx
def signature_fits(self, tx: PartialTransaction) -> bool:
remote_sig = self.config[LOCAL].current_commitment_signature
preimage_hex = tx.serialize_preimage(0)
msg_hash = sha256d(bfh(preimage_hex))
assert remote_sig
res = ecc.verify_signature(self.config[REMOTE].multisig_key.pubkey, remote_sig, msg_hash)
return res
def force_close_tx(self) -> PartialTransaction:
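        # Complete the 2-of-2 funding input so the latest local commitment becomes
        # broadcastable: sign with our multisig key and attach the remote signature
        # stored for this commitment.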
tx = self.get_latest_commitment(LOCAL)
assert self.signature_fits(tx)
tx.sign({bh2u(self.config[LOCAL].multisig_key.pubkey): (self.config[LOCAL].multisig_key.privkey, True)})
remote_sig = self.config[LOCAL].current_commitment_signature
remote_sig = ecc.der_sig_from_sig_string(remote_sig) + b"\x01"
tx.add_signature_to_txin(txin_idx=0,
signing_pubkey=self.config[REMOTE].multisig_key.pubkey.hex(),
sig=remote_sig.hex())
assert tx.is_complete()
return tx
def maybe_sweep_revoked_htlc(self, ctx: Transaction, htlc_tx: Transaction) -> Optional[SweepInfo]:
# look at the output address, check if it matches
return create_sweeptx_for_their_revoked_htlc(self, ctx, htlc_tx, self.sweep_address)
def has_pending_changes(self, subject: HTLCOwner) -> bool:
next_htlcs = self.hm.get_htlcs_in_next_ctx(subject)
latest_htlcs = self.hm.get_htlcs_in_latest_ctx(subject)
return not (next_htlcs == latest_htlcs and self.get_next_feerate(subject) == self.get_latest_feerate(subject))
def should_be_closed_due_to_expiring_htlcs(self, local_height) -> bool:
htlcs_we_could_reclaim = {} # type: Dict[Tuple[Direction, int], UpdateAddHtlc]
# If there is a received HTLC for which we already released the preimage
# but the remote did not revoke yet, and the CLTV of this HTLC is dangerously close
        # to the present, then unilaterally close the channel
recv_htlc_deadline = lnutil.NBLOCK_DEADLINE_BEFORE_EXPIRY_FOR_RECEIVED_HTLCS
for sub, dir, ctn in ((LOCAL, RECEIVED, self.get_latest_ctn(LOCAL)),
(REMOTE, SENT, self.get_oldest_unrevoked_ctn(LOCAL)),
(REMOTE, SENT, self.get_latest_ctn(LOCAL)),):
for htlc_id, htlc in self.hm.htlcs_by_direction(subject=sub, direction=dir, ctn=ctn).items():
if not self.hm.was_htlc_preimage_released(htlc_id=htlc_id, htlc_proposer=REMOTE):
continue
if htlc.cltv_expiry - recv_htlc_deadline > local_height:
continue
htlcs_we_could_reclaim[(RECEIVED, htlc_id)] = htlc
# If there is an offered HTLC which has already expired (+ some grace period after), we
# will unilaterally close the channel and time out the HTLC
offered_htlc_deadline = lnutil.NBLOCK_DEADLINE_AFTER_EXPIRY_FOR_OFFERED_HTLCS
for sub, dir, ctn in ((LOCAL, SENT, self.get_latest_ctn(LOCAL)),
(REMOTE, RECEIVED, self.get_oldest_unrevoked_ctn(LOCAL)),
(REMOTE, RECEIVED, self.get_latest_ctn(LOCAL)),):
for htlc_id, htlc in self.hm.htlcs_by_direction(subject=sub, direction=dir, ctn=ctn).items():
if htlc.cltv_expiry + offered_htlc_deadline > local_height:
continue
htlcs_we_could_reclaim[(SENT, htlc_id)] = htlc
total_value_sat = sum([htlc.amount_msat // 1000 for htlc in htlcs_we_could_reclaim.values()])
num_htlcs = len(htlcs_we_could_reclaim)
min_value_worth_closing_channel_over_sat = max(num_htlcs * 10 * self.config[REMOTE].dust_limit_sat,
500_000)
return total_value_sat > min_value_worth_closing_channel_over_sat
def is_funding_tx_mined(self, funding_height):
funding_txid = self.funding_outpoint.txid
funding_idx = self.funding_outpoint.output_index
conf = funding_height.conf
if conf < self.funding_txn_minimum_depth():
self.logger.info(f"funding tx is still not at sufficient depth. actual depth: {conf}")
return False
assert conf > 0
# check funding_tx amount and script
funding_tx = self.lnworker.lnwatcher.db.get_transaction(funding_txid)
if not funding_tx:
self.logger.info(f"no funding_tx {funding_txid}")
return False
outp = funding_tx.outputs()[funding_idx]
redeem_script = funding_output_script(self.config[REMOTE], self.config[LOCAL])
funding_address = redeem_script_to_address('p2wsh', redeem_script)
funding_sat = self.constraints.capacity
if not (outp.address == funding_address and outp.value == funding_sat):
self.logger.info('funding outpoint mismatch')
return False
return True
| 1 | 13,971 | this method is not used | spesmilo-electrum | py |
@@ -1016,7 +1016,15 @@ class AbstractTab(QWidget):
return
sess_manager.save_autosave()
+ self.load_finished.emit(ok)
+
+ if not self.title():
+ self.title_changed.emit(self.url().toDisplayString())
+ self.zoom.reapply()
+
+ def _update_load_status(self, ok: bool) -> None:
+ """Update the load status after a page finished loading."""
if ok and not self._has_ssl_errors:
if self.url().scheme() == 'https':
self._set_load_status(usertypes.LoadStatus.success_https) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2019 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Base class for a wrapper over QWebView/QWebEngineView."""
import enum
import itertools
import typing
import attr
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QUrl, QObject, QSizeF, Qt,
QEvent, QPoint)
from PyQt5.QtGui import QKeyEvent, QIcon
from PyQt5.QtWidgets import QWidget, QApplication, QDialog
from PyQt5.QtPrintSupport import QPrintDialog, QPrinter
from PyQt5.QtNetwork import QNetworkAccessManager
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.keyinput import modeman
from qutebrowser.config import config
from qutebrowser.utils import (utils, objreg, usertypes, log, qtutils,
urlutils, message)
from qutebrowser.misc import miscwidgets, objects
from qutebrowser.browser import mouse, hints
from qutebrowser.qt import sip
MYPY = False
if MYPY:
# pylint can't interpret type comments with Python 3.7
# pylint: disable=unused-import,useless-suppression
from qutebrowser.browser import webelem
from qutebrowser.browser.inspector import AbstractWebInspector
tab_id_gen = itertools.count(0)
def create(win_id: int,
private: bool,
parent: QWidget = None) -> 'AbstractTab':
"""Get a QtWebKit/QtWebEngine tab object.
Args:
win_id: The window ID where the tab will be shown.
private: Whether the tab is a private/off the record tab.
parent: The Qt parent to set.
"""
# Importing modules here so we don't depend on QtWebEngine without the
# argument and to avoid circular imports.
mode_manager = modeman.instance(win_id)
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
tab_class = webenginetab.WebEngineTab
else:
from qutebrowser.browser.webkit import webkittab
tab_class = webkittab.WebKitTab
return tab_class(win_id=win_id, mode_manager=mode_manager, private=private,
parent=parent)
def init() -> None:
"""Initialize backend-specific modules."""
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
webenginetab.init()
class WebTabError(Exception):
"""Base class for various errors."""
class UnsupportedOperationError(WebTabError):
"""Raised when an operation is not supported with the given backend."""
TerminationStatus = enum.Enum('TerminationStatus', [
'normal',
'abnormal', # non-zero exit status
'crashed', # e.g. segfault
'killed',
'unknown',
])
@attr.s
class TabData:
"""A simple namespace with a fixed set of attributes.
Attributes:
keep_icon: Whether the (e.g. cloned) icon should not be cleared on page
load.
inspector: The QWebInspector used for this webview.
viewing_source: Set if we're currently showing a source view.
Only used when sources are shown via pygments.
open_target: Where to open the next link.
Only used for QtWebKit.
override_target: Override for open_target for fake clicks (like hints).
Only used for QtWebKit.
pinned: Flag to pin the tab.
fullscreen: Whether the tab has a video shown fullscreen currently.
netrc_used: Whether netrc authentication was performed.
input_mode: current input mode for the tab.
"""
keep_icon = attr.ib(False) # type: bool
viewing_source = attr.ib(False) # type: bool
inspector = attr.ib(None) # type: typing.Optional[AbstractWebInspector]
open_target = attr.ib(
usertypes.ClickTarget.normal) # type: usertypes.ClickTarget
override_target = attr.ib(None) # type: usertypes.ClickTarget
pinned = attr.ib(False) # type: bool
fullscreen = attr.ib(False) # type: bool
netrc_used = attr.ib(False) # type: bool
input_mode = attr.ib(usertypes.KeyMode.normal) # type: usertypes.KeyMode
def should_show_icon(self) -> bool:
        return (config.val.tabs.favicons.show == 'always' or
                (config.val.tabs.favicons.show == 'pinned' and self.pinned))
class AbstractAction:
"""Attribute ``action`` of AbstractTab for Qt WebActions."""
# The class actions are defined on (QWeb{Engine,}Page)
action_class = None # type: type
# The type of the actions (QWeb{Engine,}Page.WebAction)
action_base = None # type: type
def __init__(self, tab: 'AbstractTab') -> None:
self._widget = typing.cast(QWidget, None)
self._tab = tab
def exit_fullscreen(self) -> None:
"""Exit the fullscreen mode."""
raise NotImplementedError
def save_page(self) -> None:
"""Save the current page."""
raise NotImplementedError
def run_string(self, name: str) -> None:
"""Run a webaction based on its name."""
member = getattr(self.action_class, name, None)
if not isinstance(member, self.action_base):
raise WebTabError("{} is not a valid web action!".format(name))
self._widget.triggerPageAction(member)
def show_source(
self,
pygments: bool = False # pylint: disable=redefined-outer-name
) -> None:
"""Show the source of the current page in a new tab."""
raise NotImplementedError
def _show_source_pygments(self) -> None:
def show_source_cb(source: str) -> None:
"""Show source as soon as it's ready."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/491
# pylint: disable=no-member
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table')
# pylint: enable=no-member
highlighted = pygments.highlight(source, lexer, formatter)
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
new_tab = tb.tabopen(background=False, related=True)
new_tab.set_html(highlighted, self._tab.url())
new_tab.data.viewing_source = True
self._tab.dump_async(show_source_cb)
class AbstractPrinting:
"""Attribute ``printing`` of AbstractTab for printing the page."""
def __init__(self, tab: 'AbstractTab') -> None:
self._widget = None
self._tab = tab
def check_pdf_support(self) -> None:
"""Check whether writing to PDFs is supported.
If it's not supported (by the current Qt version), a WebTabError is
raised.
"""
raise NotImplementedError
def check_printer_support(self) -> None:
"""Check whether writing to a printer is supported.
If it's not supported (by the current Qt version), a WebTabError is
raised.
"""
raise NotImplementedError
def check_preview_support(self) -> None:
"""Check whether showing a print preview is supported.
If it's not supported (by the current Qt version), a WebTabError is
raised.
"""
raise NotImplementedError
def to_pdf(self, filename: str) -> bool:
"""Print the tab to a PDF with the given filename."""
raise NotImplementedError
def to_printer(self, printer: QPrinter,
callback: typing.Callable[[bool], None] = None) -> None:
"""Print the tab.
Args:
printer: The QPrinter to print to.
callback: Called with a boolean
(True if printing succeeded, False otherwise)
"""
raise NotImplementedError
def show_dialog(self) -> None:
"""Print with a QPrintDialog."""
self.check_printer_support()
def print_callback(ok: bool) -> None:
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print() -> None:
"""Called when the dialog was closed."""
self.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(self._tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
class AbstractSearch(QObject):
"""Attribute ``search`` of AbstractTab for doing searches.
Attributes:
text: The last thing this view was searched for.
search_displayed: Whether we're currently displaying search results in
this view.
_flags: The flags of the last search (needs to be set by subclasses).
_widget: The underlying WebView widget.
"""
#: Signal emitted when a search was finished
#: (True if the text was found, False otherwise)
finished = pyqtSignal(bool)
#: Signal emitted when an existing search was cleared.
cleared = pyqtSignal()
_Callback = typing.Callable[[bool], None]
def __init__(self, tab: 'AbstractTab', parent: QWidget = None):
super().__init__(parent)
self._tab = tab
self._widget = None
self.text = None # type: typing.Optional[str]
self.search_displayed = False
def _is_case_sensitive(self, ignore_case: usertypes.IgnoreCase) -> bool:
"""Check if case-sensitivity should be used.
This assumes self.text is already set properly.
Arguments:
ignore_case: The ignore_case value from the config.
"""
assert self.text is not None
mapping = {
usertypes.IgnoreCase.smart: not self.text.islower(),
usertypes.IgnoreCase.never: True,
usertypes.IgnoreCase.always: False,
}
return mapping[ignore_case]
def search(self, text: str, *,
ignore_case: usertypes.IgnoreCase = usertypes.IgnoreCase.never,
reverse: bool = False,
result_cb: _Callback = None) -> None:
"""Find the given text on the page.
Args:
text: The text to search for.
ignore_case: Search case-insensitively.
reverse: Reverse search direction.
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
def clear(self) -> None:
"""Clear the current search."""
raise NotImplementedError
def prev_result(self, *, result_cb: _Callback = None) -> None:
"""Go to the previous result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
def next_result(self, *, result_cb: _Callback = None) -> None:
"""Go to the next result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
class AbstractZoom(QObject):
"""Attribute ``zoom`` of AbstractTab for controlling zoom."""
def __init__(self, tab: 'AbstractTab', parent: QWidget = None) -> None:
super().__init__(parent)
self._tab = tab
self._widget = None
# Whether zoom was changed from the default.
self._default_zoom_changed = False
self._init_neighborlist()
config.instance.changed.connect(self._on_config_changed)
self._zoom_factor = float(config.val.zoom.default) / 100
@pyqtSlot(str)
def _on_config_changed(self, option: str) -> None:
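        # Re-apply the configured default zoom when zoom settings change, but only
        # if the user has not zoomed this tab manually; the list of zoom levels is
        # rebuilt in any case.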
if option in ['zoom.levels', 'zoom.default']:
if not self._default_zoom_changed:
factor = float(config.val.zoom.default) / 100
self.set_factor(factor)
self._init_neighborlist()
def _init_neighborlist(self) -> None:
"""Initialize self._neighborlist.
It is a NeighborList with the zoom levels."""
levels = config.val.zoom.levels
self._neighborlist = usertypes.NeighborList(
levels, mode=usertypes.NeighborList.Modes.edge)
self._neighborlist.fuzzyval = config.val.zoom.default
    def apply_offset(self, offset: int) -> float:
"""Increase/Decrease the zoom level by the given offset.
Args:
offset: The offset in the zoom level list.
Return:
The new zoom percentage.
"""
level = self._neighborlist.getitem(offset)
self.set_factor(float(level) / 100, fuzzyval=False)
return level
def _set_factor_internal(self, factor: float) -> None:
raise NotImplementedError
def set_factor(self, factor: float, *, fuzzyval: bool = True) -> None:
"""Zoom to a given zoom factor.
Args:
factor: The zoom factor as float.
fuzzyval: Whether to set the NeighborLists fuzzyval.
"""
if fuzzyval:
self._neighborlist.fuzzyval = int(factor * 100)
if factor < 0:
raise ValueError("Can't zoom to factor {}!".format(factor))
default_zoom_factor = float(config.val.zoom.default) / 100
self._default_zoom_changed = (factor != default_zoom_factor)
self._zoom_factor = factor
self._set_factor_internal(factor)
def factor(self) -> float:
return self._zoom_factor
def apply_default(self) -> None:
self._set_factor_internal(float(config.val.zoom.default) / 100)
def reapply(self) -> None:
self._set_factor_internal(self._zoom_factor)
class AbstractCaret(QObject):
"""Attribute ``caret`` of AbstractTab for caret browsing."""
#: Signal emitted when the selection was toggled.
#: (argument - whether the selection is now active)
selection_toggled = pyqtSignal(bool)
#: Emitted when a ``follow_selection`` action is done.
follow_selected_done = pyqtSignal()
def __init__(self,
tab: 'AbstractTab',
mode_manager: modeman.ModeManager,
parent: QWidget = None) -> None:
super().__init__(parent)
self._tab = tab
self._widget = None
self.selection_enabled = False
self._mode_manager = mode_manager
mode_manager.entered.connect(self._on_mode_entered)
mode_manager.left.connect(self._on_mode_left)
def _on_mode_entered(self, mode: usertypes.KeyMode) -> None:
raise NotImplementedError
def _on_mode_left(self, mode: usertypes.KeyMode) -> None:
raise NotImplementedError
def move_to_next_line(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_prev_line(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_next_char(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_prev_char(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_end_of_word(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_next_word(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_prev_word(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_start_of_line(self) -> None:
raise NotImplementedError
def move_to_end_of_line(self) -> None:
raise NotImplementedError
def move_to_start_of_next_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_start_of_prev_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_end_of_next_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_end_of_prev_block(self, count: int = 1) -> None:
raise NotImplementedError
def move_to_start_of_document(self) -> None:
raise NotImplementedError
def move_to_end_of_document(self) -> None:
raise NotImplementedError
def toggle_selection(self) -> None:
raise NotImplementedError
def drop_selection(self) -> None:
raise NotImplementedError
def selection(self, callback: typing.Callable[[str], None]) -> None:
raise NotImplementedError
def reverse_selection(self) -> None:
raise NotImplementedError
def _follow_enter(self, tab: bool) -> None:
"""Follow a link by faking an enter press."""
if tab:
self._tab.fake_key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.fake_key_press(Qt.Key_Enter)
def follow_selected(self, *, tab: bool = False) -> None:
raise NotImplementedError
class AbstractScroller(QObject):
"""Attribute ``scroller`` of AbstractTab to manage scroll position."""
#: Signal emitted when the scroll position changed (int, int)
perc_changed = pyqtSignal(int, int)
#: Signal emitted before the user requested a jump.
#: Used to set the special ' mark so the user can return.
before_jump_requested = pyqtSignal()
def __init__(self, tab: 'AbstractTab', parent: QWidget = None):
super().__init__(parent)
self._tab = tab
self._widget = None # type: typing.Optional[QWidget]
self.perc_changed.connect(self._log_scroll_pos_change)
@pyqtSlot()
def _log_scroll_pos_change(self) -> None:
log.webview.vdebug( # type: ignore
"Scroll position changed to {}".format(self.pos_px()))
def _init_widget(self, widget: QWidget) -> None:
self._widget = widget
def pos_px(self) -> int:
raise NotImplementedError
def pos_perc(self) -> int:
raise NotImplementedError
def to_perc(self, x: int = None, y: int = None) -> None:
raise NotImplementedError
def to_point(self, point: QPoint) -> None:
raise NotImplementedError
def to_anchor(self, name: str) -> None:
raise NotImplementedError
def delta(self, x: int = 0, y: int = 0) -> None:
raise NotImplementedError
def delta_page(self, x: float = 0, y: float = 0) -> None:
raise NotImplementedError
def up(self, count: int = 1) -> None:
raise NotImplementedError
def down(self, count: int = 1) -> None:
raise NotImplementedError
def left(self, count: int = 1) -> None:
raise NotImplementedError
def right(self, count: int = 1) -> None:
raise NotImplementedError
def top(self) -> None:
raise NotImplementedError
def bottom(self) -> None:
raise NotImplementedError
def page_up(self, count: int = 1) -> None:
raise NotImplementedError
def page_down(self, count: int = 1) -> None:
raise NotImplementedError
def at_top(self) -> bool:
raise NotImplementedError
def at_bottom(self) -> bool:
raise NotImplementedError
class AbstractHistoryPrivate:
"""Private API related to the history."""
def __init__(self, tab: 'AbstractTab'):
self._tab = tab
self._history = None
def serialize(self) -> bytes:
"""Serialize into an opaque format understood by self.deserialize."""
raise NotImplementedError
def deserialize(self, data: bytes) -> None:
"""Deserialize from a format produced by self.serialize."""
raise NotImplementedError
def load_items(self, items: typing.Sequence) -> None:
"""Deserialize from a list of WebHistoryItems."""
raise NotImplementedError
class AbstractHistory:
"""The history attribute of a AbstractTab."""
def __init__(self, tab: 'AbstractTab') -> None:
self._tab = tab
self._history = None
self.private_api = AbstractHistoryPrivate(tab)
def __len__(self) -> int:
raise NotImplementedError
def __iter__(self) -> typing.Iterable:
raise NotImplementedError
def _check_count(self, count: int) -> None:
"""Check whether the count is positive."""
if count < 0:
raise WebTabError("count needs to be positive!")
def current_idx(self) -> int:
raise NotImplementedError
def back(self, count: int = 1) -> None:
"""Go back in the tab's history."""
self._check_count(count)
idx = self.current_idx() - count
if idx >= 0:
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(0))
raise WebTabError("At beginning of history.")
def forward(self, count: int = 1) -> None:
"""Go forward in the tab's history."""
self._check_count(count)
idx = self.current_idx() + count
if idx < len(self):
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(len(self) - 1))
raise WebTabError("At end of history.")
def can_go_back(self) -> bool:
raise NotImplementedError
def can_go_forward(self) -> bool:
raise NotImplementedError
def _item_at(self, i: int) -> typing.Any:
raise NotImplementedError
def _go_to_item(self, item: typing.Any) -> None:
raise NotImplementedError
class AbstractElements:
"""Finding and handling of elements on the page."""
_MultiCallback = typing.Callable[
[typing.Sequence['webelem.AbstractWebElement']], None]
_SingleCallback = typing.Callable[
[typing.Optional['webelem.AbstractWebElement']], None]
_ErrorCallback = typing.Callable[[Exception], None]
def __init__(self, tab: 'AbstractTab') -> None:
self._widget = None
self._tab = tab
def find_css(self, selector: str,
callback: _MultiCallback,
error_cb: _ErrorCallback, *,
only_visible: bool = False) -> None:
"""Find all HTML elements matching a given selector async.
If there's an error, the callback is called with a webelem.Error
instance.
Args:
callback: The callback to be called when the search finished.
error_cb: The callback to be called when an error occurred.
selector: The CSS selector to search for.
only_visible: Only show elements which are visible on screen.
"""
raise NotImplementedError
def find_id(self, elem_id: str, callback: _SingleCallback) -> None:
"""Find the HTML element with the given ID async.
Args:
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
elem_id: The ID to search for.
"""
raise NotImplementedError
def find_focused(self, callback: _SingleCallback) -> None:
"""Find the focused element on the page async.
Args:
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
def find_at_pos(self, pos: QPoint, callback: _SingleCallback) -> None:
"""Find the element at the given position async.
This is also called "hit test" elsewhere.
Args:
pos: The QPoint to get the element for.
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
class AbstractAudio(QObject):
"""Handling of audio/muting for this tab."""
muted_changed = pyqtSignal(bool)
recently_audible_changed = pyqtSignal(bool)
def __init__(self, tab: 'AbstractTab', parent: QWidget = None) -> None:
super().__init__(parent)
self._widget = None # type: typing.Optional[QWidget]
self._tab = tab
def set_muted(self, muted: bool, override: bool = False) -> None:
"""Set this tab as muted or not.
Arguments:
override: If set to True, muting/unmuting was done manually and
overrides future automatic mute/unmute changes based on
the URL.
"""
raise NotImplementedError
def is_muted(self) -> bool:
raise NotImplementedError
def is_recently_audible(self) -> bool:
"""Whether this tab has had audio playing recently."""
raise NotImplementedError
class AbstractTabPrivate:
"""Tab-related methods which are only needed in the core.
Those methods are not part of the API which is exposed to extensions, and
should ideally be removed at some point in the future.
"""
def __init__(self, mode_manager: modeman.ModeManager,
tab: 'AbstractTab') -> None:
self._widget = None # type: typing.Optional[QWidget]
self._tab = tab
self._mode_manager = mode_manager
def event_target(self) -> QWidget:
"""Return the widget events should be sent to."""
raise NotImplementedError
def handle_auto_insert_mode(self, ok: bool) -> None:
"""Handle `input.insert_mode.auto_load` after loading finished."""
if not config.val.input.insert_mode.auto_load or not ok:
return
cur_mode = self._mode_manager.mode
if cur_mode == usertypes.KeyMode.insert:
return
def _auto_insert_mode_cb(elem: 'webelem.AbstractWebElement') -> None:
"""Called from JS after finding the focused element."""
if elem is None:
log.webview.debug("No focused element!")
return
if elem.is_editable():
modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,
'load finished', only_if_normal=True)
self._tab.elements.find_focused(_auto_insert_mode_cb)
def clear_ssl_errors(self) -> None:
raise NotImplementedError
def networkaccessmanager(self) -> typing.Optional[QNetworkAccessManager]:
"""Get the QNetworkAccessManager for this tab.
This is only implemented for QtWebKit.
For QtWebEngine, always returns None.
"""
raise NotImplementedError
def user_agent(self) -> typing.Optional[str]:
"""Get the user agent for this tab.
This is only implemented for QtWebKit.
For QtWebEngine, always returns None.
"""
raise NotImplementedError
def shutdown(self) -> None:
raise NotImplementedError
class AbstractTab(QWidget):
"""An adapter for QWebView/QWebEngineView representing a single tab."""
#: Signal emitted when a website requests to close this tab.
window_close_requested = pyqtSignal()
#: Signal emitted when a link is hovered (the hover text)
link_hovered = pyqtSignal(str)
#: Signal emitted when a page started loading
load_started = pyqtSignal()
#: Signal emitted when a page is loading (progress percentage)
load_progress = pyqtSignal(int)
#: Signal emitted when a page finished loading (success as bool)
load_finished = pyqtSignal(bool)
#: Signal emitted when a page's favicon changed (icon as QIcon)
icon_changed = pyqtSignal(QIcon)
#: Signal emitted when a page's title changed (new title as str)
title_changed = pyqtSignal(str)
#: Signal emitted when a new tab should be opened (url as QUrl)
new_tab_requested = pyqtSignal(QUrl)
#: Signal emitted when a page's URL changed (url as QUrl)
url_changed = pyqtSignal(QUrl)
#: Signal emitted when a tab's content size changed
#: (new size as QSizeF)
contents_size_changed = pyqtSignal(QSizeF)
#: Signal emitted when a page requested full-screen (bool)
fullscreen_requested = pyqtSignal(bool)
#: Signal emitted before load starts (URL as QUrl)
before_load_started = pyqtSignal(QUrl)
# Signal emitted when a page's load status changed
# (argument: usertypes.LoadStatus)
load_status_changed = pyqtSignal(usertypes.LoadStatus)
# Signal emitted before shutting down
shutting_down = pyqtSignal()
# Signal emitted when a history item should be added
history_item_triggered = pyqtSignal(QUrl, QUrl, str)
# Signal emitted when the underlying renderer process terminated.
# arg 0: A TerminationStatus member.
# arg 1: The exit code.
renderer_process_terminated = pyqtSignal(TerminationStatus, int)
def __init__(self, *, win_id: int, private: bool,
parent: QWidget = None) -> None:
self.is_private = private
self.win_id = win_id
self.tab_id = next(tab_id_gen)
super().__init__(parent)
self.registry = objreg.ObjectRegistry()
tab_registry = objreg.get('tab-registry', scope='window',
window=win_id)
tab_registry[self.tab_id] = self
objreg.register('tab', self, registry=self.registry)
self.data = TabData()
self._layout = miscwidgets.WrapperLayout(self)
self._widget = None # type: typing.Optional[QWidget]
self._progress = 0
self._has_ssl_errors = False
self._load_status = usertypes.LoadStatus.none
self._mouse_event_filter = mouse.MouseEventFilter(
self, parent=self)
self.backend = None
# FIXME:qtwebengine Should this be public api via self.hints?
# Also, should we get it out of objreg?
hintmanager = hints.HintManager(win_id, self.tab_id, parent=self)
objreg.register('hintmanager', hintmanager, scope='tab',
window=self.win_id, tab=self.tab_id)
self.before_load_started.connect(self._on_before_load_started)
def _set_widget(self, widget: QWidget) -> None:
# pylint: disable=protected-access
self._widget = widget
self._layout.wrap(self, widget)
self.history._history = widget.history()
self.history.private_api._history = widget.history()
self.scroller._init_widget(widget)
self.caret._widget = widget
self.zoom._widget = widget
self.search._widget = widget
self.printing._widget = widget
self.action._widget = widget
self.elements._widget = widget
self.audio._widget = widget
self.private_api._widget = widget
self.settings._settings = widget.settings()
self._install_event_filter()
self.zoom.apply_default()
def _install_event_filter(self) -> None:
raise NotImplementedError
def _set_load_status(self, val: usertypes.LoadStatus) -> None:
"""Setter for load_status."""
if not isinstance(val, usertypes.LoadStatus):
raise TypeError("Type {} is no LoadStatus member!".format(val))
log.webview.debug("load status for {}: {}".format(repr(self), val))
self._load_status = val
self.load_status_changed.emit(val)
def send_event(self, evt: QEvent) -> None:
"""Send the given event to the underlying widget.
The event will be sent via QApplication.postEvent.
Note that a posted event must not be re-used in any way!
"""
# This only gives us some mild protection against re-using events, but
# it's certainly better than a segfault.
if getattr(evt, 'posted', False):
raise utils.Unreachable("Can't re-use an event which was already "
"posted!")
recipient = self.private_api.event_target()
if recipient is None:
# https://github.com/qutebrowser/qutebrowser/issues/3888
log.webview.warning("Unable to find event target!")
return
evt.posted = True
QApplication.postEvent(recipient, evt)
def navigation_blocked(self) -> bool:
"""Test if navigation is allowed on the current tab."""
return self.data.pinned and config.val.tabs.pinned.frozen
@pyqtSlot(QUrl)
def _on_before_load_started(self, url: QUrl) -> None:
"""Adjust the title if we are going to visit a URL soon."""
qtutils.ensure_valid(url)
url_string = url.toDisplayString()
log.webview.debug("Going to start loading: {}".format(url_string))
self.title_changed.emit(url_string)
@pyqtSlot(QUrl)
def _on_url_changed(self, url: QUrl) -> None:
"""Update title when URL has changed and no title is available."""
if url.isValid() and not self.title():
self.title_changed.emit(url.toDisplayString())
self.url_changed.emit(url)
@pyqtSlot()
def _on_load_started(self) -> None:
self._progress = 0
self._has_ssl_errors = False
self.data.viewing_source = False
self._set_load_status(usertypes.LoadStatus.loading)
self.load_started.emit()
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(
self,
navigation: usertypes.NavigationRequest
) -> None:
"""Handle common acceptNavigationRequest code."""
url = utils.elide(navigation.url.toDisplayString(), 100)
log.webview.debug("navigation request: url {}, type {}, is_main_frame "
"{}".format(url,
navigation.navigation_type,
navigation.is_main_frame))
if not navigation.url.isValid():
# Also a WORKAROUND for missing IDNA 2008 support in QUrl, see
# https://bugreports.qt.io/browse/QTBUG-60364
if navigation.navigation_type == navigation.Type.link_clicked:
msg = urlutils.get_errstring(navigation.url,
"Invalid link clicked")
message.error(msg)
self.data.open_target = usertypes.ClickTarget.normal
log.webview.debug("Ignoring invalid URL {} in "
"acceptNavigationRequest: {}".format(
navigation.url.toDisplayString(),
navigation.url.errorString()))
navigation.accepted = False
@pyqtSlot(bool)
def _on_load_finished(self, ok: bool) -> None:
assert self._widget is not None
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
try:
sess_manager = objreg.get('session-manager')
except KeyError:
# https://github.com/qutebrowser/qutebrowser/issues/4311
return
sess_manager.save_autosave()
if ok and not self._has_ssl_errors:
if self.url().scheme() == 'https':
self._set_load_status(usertypes.LoadStatus.success_https)
else:
self._set_load_status(usertypes.LoadStatus.success)
elif ok:
self._set_load_status(usertypes.LoadStatus.warn)
else:
self._set_load_status(usertypes.LoadStatus.error)
self.load_finished.emit(ok)
if not self.title():
self.title_changed.emit(self.url().toDisplayString())
self.zoom.reapply()
@pyqtSlot()
def _on_history_trigger(self) -> None:
"""Emit history_item_triggered based on backend-specific signal."""
raise NotImplementedError
@pyqtSlot(int)
def _on_load_progress(self, perc: int) -> None:
self._progress = perc
self.load_progress.emit(perc)
def url(self, *, requested: bool = False) -> QUrl:
raise NotImplementedError
def progress(self) -> int:
return self._progress
def load_status(self) -> usertypes.LoadStatus:
return self._load_status
def _load_url_prepare(self, url: QUrl, *,
emit_before_load_started: bool = True) -> None:
qtutils.ensure_valid(url)
if emit_before_load_started:
self.before_load_started.emit(url)
def load_url(self, url: QUrl, *,
emit_before_load_started: bool = True) -> None:
raise NotImplementedError
def reload(self, *, force: bool = False) -> None:
raise NotImplementedError
def stop(self) -> None:
raise NotImplementedError
def fake_key_press(self,
key: Qt.Key,
modifier: Qt.KeyboardModifier = Qt.NoModifier) -> None:
"""Send a fake key event to this tab."""
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
def dump_async(self,
callback: typing.Callable[[str], None], *,
plain: bool = False) -> None:
"""Dump the current page's html asynchronously.
The given callback will be called with the result when dumping is
complete.
"""
raise NotImplementedError
def run_js_async(
self,
code: str,
callback: typing.Callable[[typing.Any], None] = None, *,
world: typing.Union[usertypes.JsWorld, int] = None
) -> None:
"""Run javascript async.
The given callback will be called with the result when running JS is
complete.
Args:
code: The javascript code to run.
callback: The callback to call with the result, or None.
world: A world ID (int or usertypes.JsWorld member) to run the JS
in the main world or in another isolated world.
"""
raise NotImplementedError
def title(self) -> str:
raise NotImplementedError
def icon(self) -> None:
raise NotImplementedError
def set_html(self, html: str, base_url: QUrl = QUrl()) -> None:
raise NotImplementedError
def __repr__(self) -> str:
try:
qurl = self.url()
url = qurl.toDisplayString(QUrl.EncodeUnicode) # type: ignore
except (AttributeError, RuntimeError) as exc:
url = '<{}>'.format(exc.__class__.__name__)
else:
url = utils.elide(url, 100)
return utils.get_repr(self, tab_id=self.tab_id, url=url)
def is_deleted(self) -> bool:
assert self._widget is not None
return sip.isdeleted(self._widget)
| 1 | 23,480 | Right now it's a bit unclear that this needs to be called explicitly by the implementing class. If, for example, there's another backend, it won't get this update unless we add the same function as webkit. Could you either add a note to this docstring explaining that this needs to be called, or find some way to automatically trigger this so the webkit override isn't needed (and some override passed on webengine). | qutebrowser-qutebrowser | py |
@@ -162,7 +162,7 @@ var xdpTestCases = []xdpTest{
SrcPort: 54321,
},
Drop: false,
- Metadata: true,
+ Metadata: false,
},
{
Description: "6 - Match against a deny policy, must drop", | 1 | // Copyright (c) 2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ut_test
import (
"fmt"
"net"
"testing"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/failsafes"
"github.com/projectcalico/felix/bpf/polprog"
"github.com/projectcalico/felix/proto"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
. "github.com/onsi/gomega"
)
func MapForTest(mc *bpf.MapContext) bpf.Map {
return mc.NewPinnedMap(bpf.MapParameters{
Filename: "/sys/fs/bpf/cali_jump_xdp",
Type: "prog_array",
KeySize: 4,
ValueSize: 4,
MaxEntries: 8,
Name: "cali_jump",
})
}
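// Whether the XDP program marked a packet ("Metadata" in the test cases below) is
// read back from the IPv4 TOS field of the returned frame: offset 15 is the
// 14-byte Ethernet header plus 1, i.e. the second byte of the IPv4 header.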
const (
TOS_BYTE = 15
TOS_NOTSET = 0
TOS_SET = 128
)
var denyAllRulesXDP = polprog.Rules{
ForXDP: true,
ForHostInterface: true,
HostNormalTiers: []polprog.Tier{{
Policies: []polprog.Policy{{
Name: "deny all",
Rules: []polprog.Rule{{Rule: &proto.Rule{
Action: "Deny",
}}},
}},
}},
}
var allowAllRulesXDP = polprog.Rules{
ForXDP: true,
ForHostInterface: true,
HostNormalTiers: []polprog.Tier{{
Policies: []polprog.Policy{{
Name: "Allow all",
Rules: []polprog.Rule{{Rule: &proto.Rule{
Action: "allow",
}}},
}},
}},
}
var oneXDPRule = polprog.Rules{
ForXDP: true,
ForHostInterface: true,
HostNormalTiers: []polprog.Tier{{
EndAction: "pass",
Policies: []polprog.Policy{{
Name: "Allow some",
Rules: []polprog.Rule{{
Rule: &proto.Rule{
SrcNet: []string{"9.8.2.1/32"},
DstNet: []string{"1.2.8.9/32"},
Action: "Deny",
}}, {
Rule: &proto.Rule{
DstNet: []string{"1.2.0.0/16"},
Action: "Allow",
}}, {
Rule: &proto.Rule{
SrcNet: []string{"9.8.7.0/24"},
Action: "Deny",
}},
}}},
}},
}
type xdpTest struct {
Description string
Rules *polprog.Rules
IPv4Header *layers.IPv4
NextHeader gopacket.Layer
Drop bool
Metadata bool
}
var xdpTestCases = []xdpTest{
{
Description: "1 - A malformed packet, must drop",
Rules: &allowAllRulesXDP,
IPv4Header: &layers.IPv4{
Version: 4,
IHL: 4,
TTL: 64,
Flags: layers.IPv4DontFragment,
SrcIP: net.IPv4(4, 4, 4, 4),
DstIP: net.IPv4(1, 1, 1, 1),
},
NextHeader: &layers.UDP{
DstPort: 53,
SrcPort: 54321,
},
Drop: true,
Metadata: false,
},
{
Description: "2 - Packets not matched, must pass without metadata",
Rules: nil,
IPv4Header: ipv4Default,
Drop: false,
Metadata: false,
},
{
Description: "3 - Deny all rule, packet must drop",
Rules: &denyAllRulesXDP,
IPv4Header: ipv4Default,
Drop: true,
Metadata: false,
},
{
Description: "4 - Allow all rule, packet must pass with metada",
Rules: &allowAllRulesXDP,
IPv4Header: ipv4Default,
Drop: false,
Metadata: true,
},
{
Description: "5 - Match with failsafe, must pass with metadata",
Rules: nil,
IPv4Header: &layers.IPv4{
Version: 4,
IHL: 5,
TTL: 64,
Flags: layers.IPv4DontFragment,
SrcIP: net.IPv4(4, 4, 4, 4),
DstIP: net.IPv4(1, 1, 1, 1),
},
NextHeader: &layers.UDP{
DstPort: 53,
SrcPort: 54321,
},
Drop: false,
Metadata: true,
},
{
Description: "6 - Match against a deny policy, must drop",
Rules: &oneXDPRule,
IPv4Header: &layers.IPv4{
Version: 4,
IHL: 5,
TTL: 64,
Flags: layers.IPv4DontFragment,
SrcIP: net.IPv4(9, 8, 7, 6),
DstIP: net.IPv4(10, 0, 0, 10),
},
NextHeader: &layers.TCP{
DstPort: 80,
SrcPort: 55555,
},
Drop: true,
Metadata: false,
},
{
Description: "7 - Match against a deny policy, must drop",
Rules: &oneXDPRule,
IPv4Header: &layers.IPv4{
Version: 4,
IHL: 5,
TTL: 64,
Flags: layers.IPv4DontFragment,
SrcIP: net.IPv4(9, 8, 2, 1),
DstIP: net.IPv4(1, 2, 8, 9),
},
NextHeader: &layers.TCP{
DstPort: 80,
SrcPort: 55555,
},
Drop: true,
Metadata: false,
},
{
Description: "8 - Match against an allow policy, must pass with metadata",
Rules: &oneXDPRule,
IPv4Header: &layers.IPv4{
Version: 4,
IHL: 5,
TTL: 64,
Flags: layers.IPv4DontFragment,
SrcIP: net.IPv4(3, 3, 3, 3),
DstIP: net.IPv4(1, 2, 3, 4),
},
NextHeader: &layers.TCP{
DstPort: 80,
SrcPort: 55555,
},
Drop: false,
Metadata: true,
},
{
Description: "9 - Unmatched packet against failsafe and a policy",
Rules: &oneXDPRule,
IPv4Header: &layers.IPv4{
Version: 4,
IHL: 5,
TTL: 64,
Flags: layers.IPv4DontFragment,
SrcIP: net.IPv4(8, 8, 8, 8),
DstIP: net.IPv4(9, 9, 9, 9),
},
NextHeader: &layers.UDP{
DstPort: 8080,
SrcPort: 54321,
},
Drop: false,
Metadata: false,
},
}
func TestXDPPrograms(t *testing.T) {
RegisterTestingT(t)
defer resetBPFMaps()
err := fsafeMap.Update(
failsafes.MakeKey(17, 53, false, "4.4.4.4", 16).ToSlice(),
failsafes.Value(),
)
Expect(err).NotTo(HaveOccurred())
for _, tc := range xdpTestCases {
runBpfTest(t, "calico_entrypoint_xdp", true, tc.Rules, func(bpfrun bpfProgRunFn) {
_, _, _, _, pktBytes, err := testPacket(nil, tc.IPv4Header, tc.NextHeader, nil)
Expect(err).NotTo(HaveOccurred())
res, err := bpfrun(pktBytes)
Expect(err).NotTo(HaveOccurred())
result := "XDP_PASS"
if tc.Drop {
result = "XDP_DROP"
}
pktR := gopacket.NewPacket(res.dataOut, layers.LayerTypeEthernet, gopacket.Default)
fmt.Printf("pktR = %+v\n", pktR)
Expect(res.RetvalStrXDP()).To(Equal(result), fmt.Sprintf("expected the program to return %s", result))
Expect(res.dataOut).To(HaveLen(len(pktBytes)))
if tc.Metadata {
Expect(res.dataOut[TOS_BYTE]).To(Equal(uint8(TOS_SET)))
res.dataOut[TOS_BYTE] = TOS_NOTSET
} else {
Expect(res.dataOut[TOS_BYTE]).To(Equal(uint8(TOS_NOTSET)))
}
Expect(res.dataOut).To(Equal(pktBytes))
})
}
}
| 1 | 19,293 | Why this change (test name still says "must pass with metadata")? | projectcalico-felix | go |
@@ -45,7 +45,8 @@ module Travis
end
def ruby_version
- config[:rvm].to_s.gsub(/-(1[89]|2[01])mode$/, '-d\1')
+ vers = config[:rvm].to_s.gsub(/-(1[89]|2[01])mode$/, '-d\1')
+ force_187_p371 vers
end
def setup_rvm | 1 | module Travis
module Build
class Script
module RVM
include Chruby
MSGS = {
setup_ruby_head: 'Setting up latest %s'
}
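# Overrides written to $rvm_path/user/db so RVM fetches binary rubies from the
# travis-rubies S3 bucket.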
CONFIG = %w(
rvm_remote_server_url3=https://s3.amazonaws.com/travis-rubies/binaries
rvm_remote_server_type3=rubies
rvm_remote_server_verify_downloads3=1
)
def export
super
sh.export 'TRAVIS_RUBY_VERSION', config[:rvm], echo: false if rvm?
end
def setup
super
setup_rvm if rvm?
end
def announce
super
sh.cmd 'ruby --version'
sh.cmd 'rvm --version' if rvm?
end
def cache_slug
super.tap { |slug| slug << "--rvm-" << ruby_version.to_s if rvm? }
end
private
def version
config[:rvm].to_s
end
def rvm?
!!config[:rvm]
end
def ruby_version
config[:rvm].to_s.gsub(/-(1[89]|2[01])mode$/, '-d\1')
end
def setup_rvm
write_default_gems
sh.cmd('type rvm &>/dev/null || source ~/.rvm/scripts/rvm', echo: false, assert: false, timing: false)
sh.file '$rvm_path/user/db', CONFIG.join("\n")
send rvm_strategy
end
def rvm_strategy
return :use_ruby_head if ruby_version.include?('ruby-head')
return :use_default_ruby if ruby_version == 'default'
:use_ruby_version
end
def use_ruby_head
sh.fold('rvm') do
sh.echo MSGS[:setup_ruby_head] % ruby_version, ansi: :yellow
sh.cmd "rvm get stable", assert: false if ruby_version == 'jruby-head'
sh.export 'ruby_alias', "`rvm alias show #{ruby_version} 2>/dev/null`"
sh.cmd "rvm alias delete #{ruby_version}"
sh.cmd "rvm remove ${ruby_alias:-#{ruby_version}} --gems"
sh.cmd "rvm remove #{ruby_version} --gems --fuzzy"
sh.cmd "rvm install #{ruby_version} --binary"
sh.cmd "rvm use #{ruby_version}"
end
end
def use_default_ruby
sh.if '-f .ruby-version' do
use_ruby_version_file
end
sh.else do
use_rvm_default_ruby
end
end
def use_ruby_version_file
sh.fold('rvm') do
sh.cmd 'rvm use $(< .ruby-version) --install --binary --fuzzy'
end
end
def use_rvm_default_ruby
sh.fold('rvm') do
sh.cmd "rvm use default", timing: true
end
end
def use_ruby_version
skip_deps_install if rbx?
sh.fold('rvm') do
sh.cmd "rvm use #{ruby_version} --install --binary --fuzzy"
end
end
def rbx?
/^(rbx\S*)/.match(version)
end
def skip_deps_install
sh.cmd "rvm autolibs disable", echo: false, timing: false
end
def write_default_gems
sh.mkdir '$rvm_path/gemsets', recursive: true, echo: false
sh.cmd 'echo -e "gem-wrappers\nrubygems-bundler\nbundler\nrake\nrvm\n" > $rvm_path/gemsets/global.gems', echo: false, timing: false
end
end
end
end
end
| 1 | 14,667 | Another nitpick: parens around the arg pretty please | travis-ci-travis-build | rb |
@@ -54,7 +54,6 @@ module Selenium
it 'does not set the chrome.detach capability by default' do
Driver.new(http_client: http)
- expect(caps['goog:chromeOptions']).to eq({})
expect(caps['chrome.detach']).to be nil
end
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require File.expand_path('../../spec_helper', __FILE__)
module Selenium
module WebDriver
module Chrome
describe Driver do
let(:resp) { {'sessionId' => 'foo', 'value' => Remote::Capabilities.chrome.as_json} }
let(:service) { instance_double(Service, start: true, uri: 'http://example.com') }
let(:caps) { Remote::Capabilities.new }
let(:http) { instance_double(Remote::Http::Default, call: resp).as_null_object }
before do
allow(Remote::Capabilities).to receive(:chrome).and_return(caps)
allow(Service).to receive(:binary_path).and_return('/foo')
allow(Service).to receive(:new).and_return(service)
end
it 'sets the args capability' do
Driver.new(http_client: http, args: %w[--foo=bar])
expect(caps['goog:chromeOptions'][:args]).to eq(%w[--foo=bar])
end
it 'sets the args capability from switches' do
Driver.new(http_client: http, switches: %w[--foo=bar])
expect(caps['goog:chromeOptions'][:args]).to eq(%w[--foo=bar])
end
it 'sets the proxy capability' do
proxy = Proxy.new(http: 'localhost:1234')
Driver.new(http_client: http, proxy: proxy)
expect(caps[:proxy]).to eq(proxy)
end
it 'does not set the chrome.detach capability by default' do
Driver.new(http_client: http)
expect(caps['goog:chromeOptions']).to eq({})
expect(caps['chrome.detach']).to be nil
end
it 'sets the prefs capability' do
Driver.new(http_client: http, prefs: {foo: 'bar'})
expect(caps['goog:chromeOptions'][:prefs]).to eq(foo: 'bar')
end
it 'lets the user override chrome.detach' do
Driver.new(http_client: http, detach: true)
expect(caps['goog:chromeOptions'][:detach]).to be true
end
it 'raises an ArgumentError if args is not an Array' do
expect { Driver.new(args: '--foo=bar') }.to raise_error(ArgumentError)
end
it 'uses the given profile' do
profile = Profile.new
profile['some_pref'] = true
profile.add_extension(__FILE__)
Driver.new(http_client: http, profile: profile)
profile_data = profile.as_json
expect(caps['goog:chromeOptions'][:args].first).to include(profile_data[:directory])
expect(caps['goog:chromeOptions'][:extensions]).to eq(profile_data[:extensions])
end
it 'takes desired capabilities' do
custom_caps = Remote::Capabilities.new
custom_caps[:chrome_options] = {'foo' => 'bar'}
expect(http).to receive(:call) do |_, _, payload|
expect(payload[:desiredCapabilities][:chrome_options]).to include('foo' => 'bar')
resp
end
Driver.new(http_client: http, desired_capabilities: custom_caps)
end
it 'lets direct arguments take precedence over capabilities' do
custom_caps = Remote::Capabilities.new
custom_caps['goog:chromeOptions'] = {'args' => %w[foo bar]}
expect(http).to receive(:call) do |_, _, payload|
expect(payload[:desiredCapabilities]['goog:chromeOptions'][:args]).to eq(['baz'])
resp
end
Driver.new(http_client: http, desired_capabilities: custom_caps, args: %w[baz])
end
it 'handshakes protocol' do
expect(Remote::Bridge).to receive(:handshake)
Driver.new(http_client: http)
end
end
end # Chrome
end # WebDriver
end # Selenium
| 1 | 15,563 | This spec can be modified, giving you extra strength (Check this fetch key doesn't work and therefore returns `nil`) | SeleniumHQ-selenium | py |
@@ -877,3 +877,7 @@ type CtxKey string
// context.WithValue to access the original request URI that accompanied the
// server request. The associated value will be of type string.
const URLPathCtxKey CtxKey = "url_path"
+
+// URIxRewriteCtxKey is a context key used to store original unrewritten
+// URI in context.WithValue
+const URIxRewriteCtxKey CtxKey = "caddy_rewrite_original_uri" | 1 | // Package caddy implements the Caddy server manager.
//
// To use this package:
//
// 1. Set the AppName and AppVersion variables.
// 2. Call LoadCaddyfile() to get the Caddyfile.
// Pass in the name of the server type (like "http").
// Make sure the server type's package is imported
// (import _ "github.com/mholt/caddy/caddyhttp").
// 3. Call caddy.Start() to start Caddy. You get back
// an Instance, on which you can call Restart() to
// restart it or Stop() to stop it.
//
// You should call Wait() on your instance to wait for
// all servers to quit before your process exits.
package caddy
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/mholt/caddy/caddyfile"
)
// Configurable application parameters
var (
// AppName is the name of the application.
AppName string
// AppVersion is the version of the application.
AppVersion string
// Quiet mode will not show any informative output on initialization.
Quiet bool
// PidFile is the path to the pidfile to create.
PidFile string
// GracefulTimeout is the maximum duration of a graceful shutdown.
GracefulTimeout time.Duration
// isUpgrade will be set to true if this process
// was started as part of an upgrade, where a parent
// Caddy process started this one.
isUpgrade bool
// started will be set to true when the first
// instance is started; it never gets set to
// false after that.
started bool
// mu protects the variables 'isUpgrade' and 'started'.
mu sync.Mutex
)
// Instance contains the state of servers created as a result of
// calling Start and can be used to access or control those servers.
type Instance struct {
// serverType is the name of the instance's server type
serverType string
// caddyfileInput is the input configuration text used for this process
caddyfileInput Input
// wg is used to wait for all servers to shut down
wg *sync.WaitGroup
// context is the context created for this instance.
context Context
// servers is the list of servers with their listeners.
servers []ServerListener
// these callbacks execute when certain events occur
onFirstStartup []func() error // starting, not as part of a restart
onStartup []func() error // starting, even as part of a restart
onRestart []func() error // before restart commences
onShutdown []func() error // stopping, even as part of a restart
onFinalShutdown []func() error // stopping, not as part of a restart
}
// Servers returns the ServerListeners in i.
func (i *Instance) Servers() []ServerListener { return i.servers }
// Stop stops all servers contained in i. It does NOT
// execute shutdown callbacks.
func (i *Instance) Stop() error {
// stop the servers
for _, s := range i.servers {
if gs, ok := s.server.(GracefulServer); ok {
if err := gs.Stop(); err != nil {
log.Printf("[ERROR] Stopping %s: %v", gs.Address(), err)
}
}
}
// splice i out of instance list, causing it to be garbage-collected
instancesMu.Lock()
for j, other := range instances {
if other == i {
instances = append(instances[:j], instances[j+1:]...)
break
}
}
instancesMu.Unlock()
return nil
}
// ShutdownCallbacks executes all the shutdown callbacks of i,
// including ones that are scheduled only for the final shutdown
// of i. An error returned from one does not stop execution of
// the rest. All the non-nil errors will be returned.
func (i *Instance) ShutdownCallbacks() []error {
var errs []error
for _, shutdownFunc := range i.onShutdown {
err := shutdownFunc()
if err != nil {
errs = append(errs, err)
}
}
for _, finalShutdownFunc := range i.onFinalShutdown {
err := finalShutdownFunc()
if err != nil {
errs = append(errs, err)
}
}
return errs
}
// Restart replaces the servers in i with new servers created from
// executing the newCaddyfile. Upon success, it returns the new
// instance to replace i. Upon failure, i will not be replaced.
func (i *Instance) Restart(newCaddyfile Input) (*Instance, error) {
log.Println("[INFO] Reloading")
i.wg.Add(1)
defer i.wg.Done()
// run restart callbacks
for _, fn := range i.onRestart {
err := fn()
if err != nil {
return i, err
}
}
if newCaddyfile == nil {
newCaddyfile = i.caddyfileInput
}
// Add file descriptors of all the sockets that are capable of it
restartFds := make(map[string]restartTriple)
for _, s := range i.servers {
gs, srvOk := s.server.(GracefulServer)
ln, lnOk := s.listener.(Listener)
pc, pcOk := s.packet.(PacketConn)
if srvOk {
if lnOk && pcOk {
restartFds[gs.Address()] = restartTriple{server: gs, listener: ln, packet: pc}
continue
}
if lnOk {
restartFds[gs.Address()] = restartTriple{server: gs, listener: ln}
continue
}
if pcOk {
restartFds[gs.Address()] = restartTriple{server: gs, packet: pc}
continue
}
}
}
// create new instance; if the restart fails, it is simply discarded
newInst := &Instance{serverType: newCaddyfile.ServerType(), wg: i.wg}
// attempt to start new instance
err := startWithListenerFds(newCaddyfile, newInst, restartFds)
if err != nil {
return i, err
}
// success! stop the old instance
for _, shutdownFunc := range i.onShutdown {
err := shutdownFunc()
if err != nil {
return i, err
}
}
i.Stop()
log.Println("[INFO] Reloading complete")
return newInst, nil
}
// SaveServer adds s and its associated listener ln to the
// internally-kept list of servers that is running. For
// saved servers, graceful restarts will be provided.
func (i *Instance) SaveServer(s Server, ln net.Listener) {
i.servers = append(i.servers, ServerListener{server: s, listener: ln})
}
// HasListenerWithAddress returns whether this package is
// tracking a server using a listener with the address
// addr.
func HasListenerWithAddress(addr string) bool {
instancesMu.Lock()
defer instancesMu.Unlock()
for _, inst := range instances {
for _, sln := range inst.servers {
if listenerAddrEqual(sln.listener, addr) {
return true
}
}
}
return false
}
// listenerAddrEqual compares a listener's address with
// addr. Extra care is taken to match addresses with an
// empty hostname portion, as listeners tend to report
// [::]:80, for example, when the matching address that
// created the listener might be simply :80.
func listenerAddrEqual(ln net.Listener, addr string) bool {
lnAddr := ln.Addr().String()
hostname, port, err := net.SplitHostPort(addr)
if err != nil {
return lnAddr == addr
}
if lnAddr == net.JoinHostPort("::", port) {
return true
}
if lnAddr == net.JoinHostPort("0.0.0.0", port) {
return true
}
return hostname != "" && lnAddr == addr
}
// TCPServer is a type that can listen and serve connections.
// A TCPServer must associate with exactly zero or one net.Listeners.
type TCPServer interface {
// Listen starts listening by creating a new listener
// and returning it. It does not start accepting
// connections. For UDP-only servers, this method
// can be a no-op that returns (nil, nil).
Listen() (net.Listener, error)
// Serve starts serving using the provided listener.
// Serve must start the server loop nearly immediately,
// or at least not return any errors before the server
// loop begins. Serve blocks indefinitely, or in other
// words, until the server is stopped. For UDP-only
// servers, this method can be a no-op that returns nil.
Serve(net.Listener) error
}
// UDPServer is a type that can listen and serve packets.
// A UDPServer must associate with exactly zero or one net.PacketConns.
type UDPServer interface {
// ListenPacket starts listening by creating a new packetconn
// and returning it. It does not start accepting connections.
// TCP-only servers may leave this method blank and return
// (nil, nil).
ListenPacket() (net.PacketConn, error)
// ServePacket starts serving using the provided packetconn.
// ServePacket must start the server loop nearly immediately,
// or at least not return any errors before the server
// loop begins. ServePacket blocks indefinitely, or in other
// words, until the server is stopped. For TCP-only servers,
// this method can be a no-op that returns nil.
ServePacket(net.PacketConn) error
}
// Server is a type that can listen and serve. It supports both
// TCP and UDP, although the UDPServer interface can be used
// for more than just UDP.
//
// If the server uses TCP, it should implement TCPServer completely.
// If it uses UDP or some other protocol, it should implement
// UDPServer completely. If it uses both, both interfaces should be
// fully implemented. Any unimplemented methods should be made as
// no-ops that simply return nil values.
type Server interface {
TCPServer
UDPServer
}
// Stopper is a type that can stop serving. The stop
// does not necessarily have to be graceful.
type Stopper interface {
// Stop stops the server. It blocks until the
// server is completely stopped.
Stop() error
}
// GracefulServer is a Server and Stopper, the stopping
// of which is graceful (whatever that means for the kind
// of server being implemented). It must be able to return
// the address it is configured to listen on so that its
// listener can be paired with it upon graceful restarts.
// The net.Listener that a GracefulServer creates must
// implement the Listener interface for restarts to be
// graceful (assuming the listener is for TCP).
type GracefulServer interface {
Server
Stopper
// Address returns the address the server should
// listen on; it is used to pair the server to
// its listener during a graceful/zero-downtime
// restart. Thus when implementing this method,
// you must not access a listener to get the
// address; you must store the address the
// server is to serve on some other way.
Address() string
}
// Listener is a net.Listener with an underlying file descriptor.
// A server's listener should implement this interface if it is
// to support zero-downtime reloads.
type Listener interface {
net.Listener
File() (*os.File, error)
}
// PacketConn is a net.PacketConn with an underlying file descriptor.
// A server's packetconn should implement this interface if it is
// to support zero-downtime reloads (in sofar this holds true for datagram
// connections).
type PacketConn interface {
net.PacketConn
File() (*os.File, error)
}
// AfterStartup is an interface that can be implemented
// by a server type that wants to run some code after all
// servers for the same Instance have started.
type AfterStartup interface {
OnStartupComplete()
}
// LoadCaddyfile loads a Caddyfile by calling the plugged in
// Caddyfile loader methods. An error is returned if more than
// one loader returns a non-nil Caddyfile input. If no loaders
// load a Caddyfile, the default loader is used. If no default
// loader is registered or it returns nil, the server type's
// default Caddyfile is loaded. If the server type does not
// specify any default Caddyfile value, then an empty Caddyfile
// is returned. Consequently, this function never returns a nil
// value as long as there are no errors.
func LoadCaddyfile(serverType string) (Input, error) {
// Ask plugged-in loaders for a Caddyfile
cdyfile, err := loadCaddyfileInput(serverType)
if err != nil {
return nil, err
}
// Otherwise revert to default
if cdyfile == nil {
cdyfile = DefaultInput(serverType)
}
// Still nil? Geez.
if cdyfile == nil {
cdyfile = CaddyfileInput{ServerTypeName: serverType}
}
return cdyfile, nil
}
// Wait blocks until all of i's servers have stopped.
func (i *Instance) Wait() {
i.wg.Wait()
}
// CaddyfileFromPipe loads the Caddyfile input from f if f is
// not interactive input. f is assumed to be a pipe or stream,
// such as os.Stdin. If f is not a pipe, no error is returned
// but the Input value will be nil. An error is only returned
// if there was an error reading the pipe, even if the length
// of what was read is 0.
func CaddyfileFromPipe(f *os.File, serverType string) (Input, error) {
fi, err := f.Stat()
if err == nil && fi.Mode()&os.ModeCharDevice == 0 {
// Note that a non-nil error is not a problem. Windows
// will not create a stdin if there is no pipe, which
// produces an error when calling Stat(). But Unix will
// make one either way, which is why we also check that
// bitmask.
// NOTE: Reading from stdin after this fails (e.g. for the let's encrypt email address) (OS X)
confBody, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
return CaddyfileInput{
Contents: confBody,
Filepath: f.Name(),
ServerTypeName: serverType,
}, nil
}
// not having input from the pipe is not itself an error,
// just means no input to return.
return nil, nil
}
// Caddyfile returns the Caddyfile used to create i.
func (i *Instance) Caddyfile() Input {
return i.caddyfileInput
}
// Start starts Caddy with the given Caddyfile.
//
// This function blocks until all the servers are listening.
func Start(cdyfile Input) (*Instance, error) {
writePidFile()
inst := &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup)}
return inst, startWithListenerFds(cdyfile, inst, nil)
}
func startWithListenerFds(cdyfile Input, inst *Instance, restartFds map[string]restartTriple) error {
if cdyfile == nil {
cdyfile = CaddyfileInput{}
}
err := ValidateAndExecuteDirectives(cdyfile, inst, false)
if err != nil {
return err
}
slist, err := inst.context.MakeServers()
if err != nil {
return err
}
// run startup callbacks
if restartFds == nil {
for _, firstStartupFunc := range inst.onFirstStartup {
err := firstStartupFunc()
if err != nil {
return err
}
}
}
for _, startupFunc := range inst.onStartup {
err := startupFunc()
if err != nil {
return err
}
}
err = startServers(slist, inst, restartFds)
if err != nil {
return err
}
instancesMu.Lock()
instances = append(instances, inst)
instancesMu.Unlock()
// run any AfterStartup callbacks if this is not
// part of a restart; then show file descriptor notice
if restartFds == nil {
for _, srvln := range inst.servers {
if srv, ok := srvln.server.(AfterStartup); ok {
srv.OnStartupComplete()
}
}
if !Quiet {
for _, srvln := range inst.servers {
if !IsLoopback(srvln.listener.Addr().String()) {
checkFdlimit()
break
}
}
}
}
mu.Lock()
started = true
mu.Unlock()
return nil
}
// ValidateAndExecuteDirectives will load the server blocks from cdyfile
// by parsing it, then execute the directives configured by it and store
// the resulting server blocks into inst. If justValidate is true, parse
// callbacks will not be executed between directives, since the purpose
// is only to check the input for valid syntax.
func ValidateAndExecuteDirectives(cdyfile Input, inst *Instance, justValidate bool) error {
// If parsing only inst will be nil, create an instance for this function call only.
if justValidate {
inst = &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup)}
}
stypeName := cdyfile.ServerType()
stype, err := getServerType(stypeName)
if err != nil {
return err
}
inst.caddyfileInput = cdyfile
sblocks, err := loadServerBlocks(stypeName, cdyfile.Path(), bytes.NewReader(cdyfile.Body()))
if err != nil {
return err
}
inst.context = stype.NewContext()
if inst.context == nil {
return fmt.Errorf("server type %s produced a nil Context", stypeName)
}
sblocks, err = inst.context.InspectServerBlocks(cdyfile.Path(), sblocks)
if err != nil {
return err
}
err = executeDirectives(inst, cdyfile.Path(), stype.Directives(), sblocks, justValidate)
if err != nil {
return err
}
return nil
}
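// executeDirectives runs each directive's setup function against every server
// block, in directive order, persisting per-block directive storage and firing
// any registered parsing callbacks between directives (unless only validating).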
func executeDirectives(inst *Instance, filename string,
directives []string, sblocks []caddyfile.ServerBlock, justValidate bool) error {
// map of server block ID to map of directive name to whatever.
storages := make(map[int]map[string]interface{})
// It is crucial that directives are executed in the proper order.
// We loop with the directives on the outer loop so we execute
// a directive for all server blocks before going to the next directive.
// This is important mainly due to the parsing callbacks (below).
for _, dir := range directives {
for i, sb := range sblocks {
var once sync.Once
if _, ok := storages[i]; !ok {
storages[i] = make(map[string]interface{})
}
for j, key := range sb.Keys {
// Execute directive if it is in the server block
if tokens, ok := sb.Tokens[dir]; ok {
controller := &Controller{
instance: inst,
Key: key,
Dispenser: caddyfile.NewDispenserTokens(filename, tokens),
OncePerServerBlock: func(f func() error) error {
var err error
once.Do(func() {
err = f()
})
return err
},
ServerBlockIndex: i,
ServerBlockKeyIndex: j,
ServerBlockKeys: sb.Keys,
ServerBlockStorage: storages[i][dir],
}
setup, err := DirectiveAction(inst.serverType, dir)
if err != nil {
return err
}
err = setup(controller)
if err != nil {
return err
}
storages[i][dir] = controller.ServerBlockStorage // persist for this server block
}
}
}
if !justValidate {
// See if there are any callbacks to execute after this directive
if allCallbacks, ok := parsingCallbacks[inst.serverType]; ok {
callbacks := allCallbacks[dir]
for _, callback := range callbacks {
if err := callback(inst.context); err != nil {
return err
}
}
}
}
}
return nil
}
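// startServers creates (or, on a graceful restart, re-uses) the listener and
// packet conn for each server, starts its serve loops, and logs any errors
// reported on the shared error channel.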
func startServers(serverList []Server, inst *Instance, restartFds map[string]restartTriple) error {
errChan := make(chan error, len(serverList))
for _, s := range serverList {
var (
ln net.Listener
pc net.PacketConn
err error
)
// If this is a reload and s is a GracefulServer,
// reuse the listener for a graceful restart.
if gs, ok := s.(GracefulServer); ok && restartFds != nil {
addr := gs.Address()
if old, ok := restartFds[addr]; ok {
// listener
if old.listener != nil {
file, err := old.listener.File()
if err != nil {
return err
}
ln, err = net.FileListener(file)
if err != nil {
return err
}
file.Close()
}
// packetconn
if old.packet != nil {
file, err := old.packet.File()
if err != nil {
return err
}
pc, err = net.FilePacketConn(file)
if err != nil {
return err
}
file.Close()
}
}
}
if ln == nil {
ln, err = s.Listen()
if err != nil {
return err
}
}
if pc == nil {
pc, err = s.ListenPacket()
if err != nil {
return err
}
}
inst.wg.Add(2)
go func(s Server, ln net.Listener, pc net.PacketConn, inst *Instance) {
defer inst.wg.Done()
go func() {
errChan <- s.Serve(ln)
defer inst.wg.Done()
}()
errChan <- s.ServePacket(pc)
}(s, ln, pc, inst)
inst.servers = append(inst.servers, ServerListener{server: s, listener: ln, packet: pc})
}
// Log errors that may be returned from Serve() calls,
// these errors should only be occurring in the server loop.
go func() {
for err := range errChan {
if err == nil {
continue
}
if strings.Contains(err.Error(), "use of closed network connection") {
// this error is normal when closing the listener
continue
}
log.Println(err)
}
}()
return nil
}
func getServerType(serverType string) (ServerType, error) {
stype, ok := serverTypes[serverType]
if ok {
return stype, nil
}
if len(serverTypes) == 0 {
return ServerType{}, fmt.Errorf("no server types plugged in")
}
if serverType == "" {
if len(serverTypes) == 1 {
for _, stype := range serverTypes {
return stype, nil
}
}
return ServerType{}, fmt.Errorf("multiple server types available; must choose one")
}
return ServerType{}, fmt.Errorf("unknown server type '%s'", serverType)
}
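// loadServerBlocks parses the Caddyfile input into server blocks, falling back
// to the server type's default input if no blocks were found.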
func loadServerBlocks(serverType, filename string, input io.Reader) ([]caddyfile.ServerBlock, error) {
validDirectives := ValidDirectives(serverType)
serverBlocks, err := caddyfile.Parse(filename, input, validDirectives)
if err != nil {
return nil, err
}
if len(serverBlocks) == 0 && serverTypes[serverType].DefaultInput != nil {
newInput := serverTypes[serverType].DefaultInput()
serverBlocks, err = caddyfile.Parse(newInput.Path(),
bytes.NewReader(newInput.Body()), validDirectives)
if err != nil {
return nil, err
}
}
return serverBlocks, nil
}
// Stop stops ALL servers. It blocks until they are all stopped.
// It does NOT execute shutdown callbacks, and it deletes all
// instances after stopping is completed. Do not re-use any
// references to old instances after calling Stop.
func Stop() error {
// This awkward for loop is to avoid a deadlock since
// inst.Stop() also acquires the instancesMu lock.
for {
instancesMu.Lock()
if len(instances) == 0 {
break
}
inst := instances[0]
instancesMu.Unlock()
if err := inst.Stop(); err != nil {
log.Printf("[ERROR] Stopping %s: %v", inst.serverType, err)
}
}
return nil
}
// IsLoopback returns true if the hostname of addr looks
// explicitly like a common local hostname. addr must only
// be a host or a host:port combination.
func IsLoopback(addr string) bool {
host, _, err := net.SplitHostPort(addr)
if err != nil {
host = addr // happens if the addr is just a hostname
}
return host == "localhost" ||
strings.Trim(host, "[]") == "::1" ||
strings.HasPrefix(host, "127.")
}
// Upgrade re-launches the process, preserving the listeners
// for a graceful restart. It does NOT load new configuration;
// it only starts the process anew with a fresh binary.
//
// TODO: This is not yet implemented
func Upgrade() error {
return fmt.Errorf("not implemented")
// TODO: have child process set isUpgrade = true
}
// IsUpgrade returns true if this process is part of an upgrade
// where a parent caddy process spawned this one to upgrade
// the binary.
func IsUpgrade() bool {
mu.Lock()
defer mu.Unlock()
return isUpgrade
}
// Started returns true if at least one instance has been
// started by this package. It never gets reset to false
// once it is set to true.
func Started() bool {
mu.Lock()
defer mu.Unlock()
return started
}
// CaddyfileInput represents a Caddyfile as input
// and is simply a convenient way to implement
// the Input interface.
type CaddyfileInput struct {
Filepath string
Contents []byte
ServerTypeName string
}
// Body returns c.Contents.
func (c CaddyfileInput) Body() []byte { return c.Contents }
// Path returns c.Filepath.
func (c CaddyfileInput) Path() string { return c.Filepath }
// ServerType returns c.ServerTypeName.
func (c CaddyfileInput) ServerType() string { return c.ServerTypeName }
// Input represents a Caddyfile; its contents and file path
// (which should include the file name at the end of the path).
// If path does not apply (e.g. piped input) you may use
// any understandable value. The path is mainly used for logging,
// error messages, and debugging.
type Input interface {
// Gets the Caddyfile contents
Body() []byte
// Gets the path to the origin file
Path() string
// The type of server this input is intended for
ServerType() string
}
// DefaultInput returns the default Caddyfile input
// to use when it is otherwise empty or missing.
// It uses the default host and port (depends on
// host, e.g. localhost is 2015, otherwise 443) and
// root.
func DefaultInput(serverType string) Input {
if _, ok := serverTypes[serverType]; !ok {
return nil
}
if serverTypes[serverType].DefaultInput == nil {
return nil
}
return serverTypes[serverType].DefaultInput()
}
// writePidFile writes the process ID to the file at PidFile.
// It does nothing if PidFile is not set.
func writePidFile() error {
if PidFile == "" {
return nil
}
pid := []byte(strconv.Itoa(os.Getpid()) + "\n")
return ioutil.WriteFile(PidFile, pid, 0644)
}
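// restartTriple pairs a graceful server with its listener and packet conn so
// their file descriptors can be handed to the replacement instance during a
// graceful restart.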
type restartTriple struct {
server GracefulServer
listener Listener
packet PacketConn
}
var (
// instances is the list of running Instances.
instances []*Instance
// instancesMu protects instances.
instancesMu sync.Mutex
)
var (
// DefaultConfigFile is the name of the configuration file that is loaded
// by default if no other file is specified.
DefaultConfigFile = "Caddyfile"
)
// CtxKey is a value for use with context.WithValue.
type CtxKey string
// URLPathCtxKey is a context key. It can be used in HTTP handlers with
// context.WithValue to access the original request URI that accompanied the
// server request. The associated value will be of type string.
const URLPathCtxKey CtxKey = "url_path"
| 1 | 10,262 | Oh, I guess I mentioned/pressed this point in the other issue, that this should probably go into the httpserver package. In fact, so should the const above this (URLPathCtxKey). These are specific to the HTTP server. | caddyserver-caddy | go |
@@ -41,7 +41,7 @@ type ValidationFunc func(certificate *cmapi.Certificate, secret *corev1.Secret)
// ExpectValidKeysInSecret checks that the secret contains valid keys
func ExpectValidKeysInSecret(_ *cmapi.Certificate, secret *corev1.Secret) error {
- validKeys := [5]string{corev1.TLSPrivateKeyKey, corev1.TLSCertKey, cmmeta.TLSCAKey, cmapi.AdditionalOutputFormatDERKey, cmapi.AdditionalOutputFormatPEMKey}
+ validKeys := [5]string{corev1.TLSPrivateKeyKey, corev1.TLSCertKey, cmmeta.TLSCAKey, cmapi.CertificateOutputFormatDERKey, cmapi.CertificateOutputFormatCombinedPEMKey}
nbValidKeys := 0
for k := range secret.Data {
for _, k2 := range validKeys { | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"bytes"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"strings"
"github.com/kr/pretty"
corev1 "k8s.io/api/core/v1"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
"github.com/jetstack/cert-manager/pkg/util"
"github.com/jetstack/cert-manager/pkg/util/pki"
)
// ValidationFunc describes a Certificate validation helper function
type ValidationFunc func(certificate *cmapi.Certificate, secret *corev1.Secret) error
// ExpectValidKeysInSecret checks that the secret contains valid keys
func ExpectValidKeysInSecret(_ *cmapi.Certificate, secret *corev1.Secret) error {
validKeys := [5]string{corev1.TLSPrivateKeyKey, corev1.TLSCertKey, cmmeta.TLSCAKey, cmapi.AdditionalOutputFormatDERKey, cmapi.AdditionalOutputFormatPEMKey}
nbValidKeys := 0
for k := range secret.Data {
for _, k2 := range validKeys {
if k == k2 {
nbValidKeys++
break
}
}
}
if nbValidKeys < 2 {
return fmt.Errorf("Expected at least 2 valid keys in certificate secret, but there was %d", nbValidKeys)
}
return nil
}
// ExpectValidAnnotations checks if the correct annotations on the secret are present
func ExpectValidAnnotations(certificate *cmapi.Certificate, secret *corev1.Secret) error {
label, ok := secret.Annotations[cmapi.CertificateNameKey]
if !ok {
return fmt.Errorf("Expected secret to have certificate-name label, but had none")
}
if label != certificate.Name {
return fmt.Errorf("Expected secret to have certificate-name label with a value of %q, but got %q", certificate.Name, label)
}
return nil
}
// ExpectValidPrivateKeyData checks if the secret's private key matches the request
func ExpectValidPrivateKeyData(certificate *cmapi.Certificate, secret *corev1.Secret) error {
keyBytes, ok := secret.Data[corev1.TLSPrivateKeyKey]
if !ok {
return fmt.Errorf("No private key data found for Certificate %q (secret %q)", certificate.Name, certificate.Spec.SecretName)
}
key, err := pki.DecodePrivateKeyBytes(keyBytes)
if err != nil {
return err
}
// validate private key is of the correct type (rsa, ed25519 or ecdsa)
if certificate.Spec.PrivateKey != nil {
switch certificate.Spec.PrivateKey.Algorithm {
case cmapi.PrivateKeyAlgorithm(""),
cmapi.RSAKeyAlgorithm:
_, ok := key.(*rsa.PrivateKey)
if !ok {
return fmt.Errorf("Expected private key of type RSA, but it was: %T", key)
}
case cmapi.ECDSAKeyAlgorithm:
_, ok := key.(*ecdsa.PrivateKey)
if !ok {
return fmt.Errorf("Expected private key of type ECDSA, but it was: %T", key)
}
case cmapi.Ed25519KeyAlgorithm:
_, ok := key.(ed25519.PrivateKey)
if !ok {
return fmt.Errorf("Expected private key of type Ed25519, but it was: %T", key)
}
default:
return fmt.Errorf("unrecognised requested private key algorithm %q", certificate.Spec.PrivateKey.Algorithm)
}
}
// TODO: validate private key KeySize
return nil
}
// ExpectValidCertificate checks if the certificate is a valid x509 certificate
func ExpectValidCertificate(certificate *cmapi.Certificate, secret *corev1.Secret) error {
certBytes, ok := secret.Data[corev1.TLSCertKey]
if !ok {
return fmt.Errorf("No certificate data found for Certificate %q (secret %q)", certificate.Name, certificate.Spec.SecretName)
}
_, err := pki.DecodeX509CertificateBytes(certBytes)
if err != nil {
return err
}
return nil
}
// ExpectCertificateOrganizationToMatch checks if the issued certificate has the same Organization as the requested one
func ExpectCertificateOrganizationToMatch(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
expectedOrganization := pki.OrganizationForCertificate(certificate)
if !util.EqualUnsorted(cert.Subject.Organization, expectedOrganization) {
return fmt.Errorf("Expected certificate valid for O %v, but got a certificate valid for O %v", expectedOrganization, cert.Subject.Organization)
}
return nil
}
// ExpectCertificateDNSNamesToMatch checks if the issued certificate has all DNS names it requested
func ExpectCertificateDNSNamesToMatch(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
expectedDNSNames := certificate.Spec.DNSNames
if !util.Subset(cert.DNSNames, expectedDNSNames) {
return fmt.Errorf("Expected certificate valid for DNSNames %v, but got a certificate valid for DNSNames %v", expectedDNSNames, cert.DNSNames)
}
return nil
}
// ExpectCertificateURIsToMatch checks if the issued certificate has all URI SANs names it requested
func ExpectCertificateURIsToMatch(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
uris, err := pki.URIsForCertificate(certificate)
if err != nil {
return fmt.Errorf("failed to parse URIs: %s", err)
}
actualURIs := pki.URLsToString(cert.URIs)
expectedURIs := pki.URLsToString(uris)
if !util.EqualUnsorted(actualURIs, expectedURIs) {
return fmt.Errorf("Expected certificate valid for URIs %v, but got a certificate valid for URIs %v", expectedURIs, pki.URLsToString(cert.URIs))
}
return nil
}
// ExpectValidCommonName checks if the issued certificate has the requested CN or one of the DNS SANs
func ExpectValidCommonName(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
expectedCN := certificate.Spec.CommonName
if len(expectedCN) == 0 && len(cert.Subject.CommonName) > 0 {
// no CN is specified but our CA set one, checking if it is one of our DNS names or IP Addresses
if !util.Contains(cert.DNSNames, cert.Subject.CommonName) && !util.Contains(pki.IPAddressesToString(cert.IPAddresses), cert.Subject.CommonName) {
return fmt.Errorf("Expected a common name for one of our DNSNames %v or IP Addresses %v, but got a CN of %v", cert.DNSNames, pki.IPAddressesToString(cert.IPAddresses), cert.Subject.CommonName)
}
} else if expectedCN != cert.Subject.CommonName {
return fmt.Errorf("Expected a common name of %v, but got a CN of %v", expectedCN, cert.Subject.CommonName)
}
return nil
}
// ExpectValidNotAfterDate checks if the issued certificate matches the requested duration
func ExpectValidNotAfterDate(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
if certificate.Status.NotAfter == nil {
return fmt.Errorf("No certificate expiration found for Certificate %q", certificate.Name)
}
if !cert.NotAfter.Equal(certificate.Status.NotAfter.Time) {
return fmt.Errorf("Expected certificate expiry date to be %v, but got %v", certificate.Status.NotAfter, cert.NotAfter)
}
return nil
}
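// containsExtKeyUsage reports whether the extended key usage e is present in s.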
func containsExtKeyUsage(s []x509.ExtKeyUsage, e x509.ExtKeyUsage) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
// ExpectKeyUsageExtKeyUsageServerAuth checks if the issued certificate has the extended key usage of server auth
func ExpectKeyUsageExtKeyUsageServerAuth(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
if !containsExtKeyUsage(cert.ExtKeyUsage, x509.ExtKeyUsageServerAuth) {
return fmt.Errorf("Expected certificate to have ExtKeyUsageServerAuth, but got %v", cert.ExtKeyUsage)
}
return nil
}
// ExpectKeyUsageExtKeyUsageClientAuth checks if the issued certificate has the extended key usage of client auth
func ExpectKeyUsageExtKeyUsageClientAuth(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
if !containsExtKeyUsage(cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth) {
return fmt.Errorf("Expected certificate to have ExtKeyUsageClientAuth, but got %v", cert.ExtKeyUsage)
}
return nil
}
// ExpectKeyUsageUsageDigitalSignature checks if a cert has the KeyUsageDigitalSignature key usage set
func ExpectKeyUsageUsageDigitalSignature(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
// taking the key usage here and using a binary AND to flip all non-KeyUsageDigitalSignature bits to 0,
// so if KeyUsageDigitalSignature is set the value will be exactly x509.KeyUsageDigitalSignature
usage := cert.KeyUsage
usage &= x509.KeyUsageDigitalSignature
if usage != x509.KeyUsageDigitalSignature {
return fmt.Errorf("Expected certificate to have KeyUsageDigitalSignature %#b, but got %v %#b", x509.KeyUsageDigitalSignature, usage, usage)
}
return nil
}
// ExpectKeyUsageUsageDataEncipherment checks if a cert has the KeyUsageDataEncipherment key usage set
func ExpectKeyUsageUsageDataEncipherment(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
// taking the key usage here and using a binary AND to flip all non-KeyUsageDataEncipherment bits to 0,
// so if KeyUsageDataEncipherment is set the value will be exactly x509.KeyUsageDataEncipherment
usage := cert.KeyUsage
usage &= x509.KeyUsageDataEncipherment
if usage != x509.KeyUsageDataEncipherment {
return fmt.Errorf("Expected certificate to have KeyUsageDataEncipherment %#b, but got %v %#b", x509.KeyUsageDataEncipherment, usage, usage)
}
return nil
}
// ExpectEmailsToMatch check if the issued certificate has all requested email SANs
func ExpectEmailsToMatch(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
if !util.EqualUnsorted(cert.EmailAddresses, certificate.Spec.EmailAddresses) {
return fmt.Errorf("certificate doesn't contain Email SANs: exp=%v got=%v", certificate.Spec.EmailAddresses, cert.EmailAddresses)
}
return nil
}
// ExpectCorrectTrustChain checks if the cert is signed by the root CA if one is provided
func ExpectCorrectTrustChain(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
var dnsName string
if len(certificate.Spec.DNSNames) > 0 {
dnsName = certificate.Spec.DNSNames[0]
}
rootCertPool := x509.NewCertPool()
rootCertPool.AppendCertsFromPEM(secret.Data[cmmeta.TLSCAKey])
intermediateCertPool := x509.NewCertPool()
intermediateCertPool.AppendCertsFromPEM(secret.Data[corev1.TLSCertKey])
opts := x509.VerifyOptions{
DNSName: dnsName,
Intermediates: intermediateCertPool,
Roots: rootCertPool,
}
if _, err := cert.Verify(opts); err != nil {
return fmt.Errorf(
"verify error. CERT:\n%s\nROOTS\n%s\nINTERMEDIATES\n%v\nERROR\n%s\n",
pretty.Sprint(cert),
pretty.Sprint(rootCertPool),
pretty.Sprint(intermediateCertPool),
err,
)
}
return nil
}
// ExpectCARootCertificate checks if the CA cert is root CA if one is provided
func ExpectCARootCertificate(certificate *cmapi.Certificate, secret *corev1.Secret) error {
caCert, err := pki.DecodeX509CertificateBytes(secret.Data[cmmeta.TLSCAKey])
if err != nil {
return err
}
if !bytes.Equal(caCert.RawSubject, caCert.RawIssuer) {
return fmt.Errorf("expected CA certificate to be root CA; want Issuer %v, but got %v", caCert.Subject, caCert.Issuer)
}
return nil
}
// ExpectConditionReadyObservedGeneration checks that the ObservedGeneration
// field on the Ready condition which must be true, is set to the Generation of
// the Certificate.
func ExpectConditionReadyObservedGeneration(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cond := apiutil.GetCertificateCondition(certificate, cmapi.CertificateConditionReady)
if cond.Status != cmmeta.ConditionTrue || cond.ObservedGeneration != certificate.Generation {
return fmt.Errorf("expected Certificate to have ready condition true, observedGeneration matching the Certificate generation, got=%+v",
cond)
}
return nil
}
// ExpectValidBasicConstraints asserts that basicConstraints are set correctly on issued certificates
func ExpectValidBasicConstraints(certificate *cmapi.Certificate, secret *corev1.Secret) error {
cert, err := pki.DecodeX509CertificateBytes(secret.Data[corev1.TLSCertKey])
if err != nil {
return err
}
if certificate.Spec.IsCA != cert.IsCA {
return fmt.Errorf("Expected CA basicConstraint to be %v, but got %v", certificate.Spec.IsCA, cert.IsCA)
}
// TODO: also validate pathLen
return nil
}
// ExpectValidAdditionalOutputFormats asserts that, if additional output formats are requested,
// the secret contains the corresponding additional output format keys and that their content is valid.
func ExpectValidAdditionalOutputFormats(certificate *cmapi.Certificate, secret *corev1.Secret) error {
if len(certificate.Spec.AdditionalOutputFormats) > 0 {
for _, f := range certificate.Spec.AdditionalOutputFormats {
switch f.Type {
case cmapi.AdditionalOutputFormatDER:
if derKey, ok := secret.Data[cmapi.AdditionalOutputFormatDERKey]; ok {
privateKey := secret.Data[corev1.TLSPrivateKeyKey]
block, _ := pem.Decode(privateKey)
if !bytes.Equal(derKey, block.Bytes) {
return fmt.Errorf("expected additional output Format DER %s to contain the binary formated private Key", cmapi.AdditionalOutputFormatDERKey)
}
} else {
return fmt.Errorf("expected additional output format DER key %s to be present in secret", cmapi.AdditionalOutputFormatDERKey)
}
case cmapi.AdditionalOutputFormatCombinedPEM:
if combinedPem, ok := secret.Data[cmapi.AdditionalOutputFormatPEMKey]; ok {
privateKey := secret.Data[corev1.TLSPrivateKeyKey]
certificate := secret.Data[corev1.TLSCertKey]
expectedCombinedPem := []byte(strings.Join([]string{string(privateKey), string(certificate)}, "\n"))
if !bytes.Equal(combinedPem, expectedCombinedPem) {
return fmt.Errorf("expected additional output format CombinedPEM %s to contain the combination of privateKey and certificate", cmapi.AdditionalOutputFormatPEMKey)
}
} else {
return fmt.Errorf("expected additional output format CombinedPEM key %s to be present in secret", cmapi.AdditionalOutputFormatPEMKey)
}
default:
return fmt.Errorf("unknown additional output format %s", f.Type)
}
}
}
return nil
}
| 1 | 30,679 | Not added by you, but we don't really need the '5' here.. | jetstack-cert-manager | go |
@@ -284,7 +284,6 @@ function getDefaultService() {
Options.prototype.CAPABILITY_KEY = 'goog:chromeOptions'
Options.prototype.BROWSER_NAME_VALUE = Browser.CHROME
Driver.getDefaultService = getDefaultService
-Driver.prototype.VENDOR_COMMAND_PREFIX = 'goog'
// PUBLIC API
exports.Driver = Driver | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Defines a {@linkplain Driver WebDriver} client for the Chrome
* web browser. Before using this module, you must download the latest
* [ChromeDriver release] and ensure it can be found on your system [PATH].
*
* There are three primary classes exported by this module:
*
* 1. {@linkplain ServiceBuilder}: configures the
* {@link selenium-webdriver/remote.DriverService remote.DriverService}
* that manages the [ChromeDriver] child process.
*
* 2. {@linkplain Options}: defines configuration options for each new Chrome
* session, such as which {@linkplain Options#setProxy proxy} to use,
* what {@linkplain Options#addExtensions extensions} to install, or
* what {@linkplain Options#addArguments command-line switches} to use when
* starting the browser.
*
* 3. {@linkplain Driver}: the WebDriver client; each new instance will control
* a unique browser session with a clean user profile (unless otherwise
* configured through the {@link Options} class).
*
* __Headless Chrome__ <a id="headless"></a>
*
* To start Chrome in headless mode, simply call
* {@linkplain Options#headless Options.headless()}.
*
* let chrome = require('selenium-webdriver/chrome');
* let {Builder} = require('selenium-webdriver');
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options().headless())
* .build();
*
* __Customizing the ChromeDriver Server__ <a id="custom-server"></a>
*
* By default, every Chrome session will use a single driver service, which is
* started the first time a {@link Driver} instance is created and terminated
* when this process exits. The default service will inherit its environment
* from the current process and direct all output to /dev/null. You may obtain
* a handle to this default service using
* {@link #getDefaultService getDefaultService()} and change its configuration
* with {@link #setDefaultService setDefaultService()}.
*
* You may also create a {@link Driver} with its own driver service. This is
* useful if you need to capture the server's log output for a specific session:
*
* let chrome = require('selenium-webdriver/chrome');
*
* let service = new chrome.ServiceBuilder()
* .loggingTo('/my/log/file.txt')
* .enableVerboseLogging()
* .build();
*
* let options = new chrome.Options();
* // configure browser options ...
*
* let driver = chrome.Driver.createSession(options, service);
*
* Users should only instantiate the {@link Driver} class directly when they
* need a custom driver service configuration (as shown above). For normal
* operation, users should start Chrome using the
* {@link selenium-webdriver.Builder}.
*
* __Working with Android__ <a id="android"></a>
*
* The [ChromeDriver][android] supports running tests on the Chrome browser as
* well as [WebView apps][webview] starting in Android 4.4 (KitKat). In order to
* work with Android, you must first start the adb
*
* adb start-server
*
* By default, adb will start on port 5037. You may change this port, but this
* will require configuring a [custom server](#custom-server) that will connect
* to adb on the {@linkplain ServiceBuilder#setAdbPort correct port}:
*
* let service = new chrome.ServiceBuilder()
* .setAdbPort(1234)
* .build();
* // etc.
*
* The ChromeDriver may be configured to launch Chrome on Android using
* {@link Options#androidChrome()}:
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options().androidChrome())
* .build();
*
* Alternatively, you can configure the ChromeDriver to launch an app with a
* Chrome-WebView by setting the {@linkplain Options#androidActivity
* androidActivity} option:
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options()
* .androidPackage('com.example')
* .androidActivity('com.example.Activity'))
* .build();
*
* [Refer to the ChromeDriver site] for more information on using the
* [ChromeDriver with Android][android].
*
* [ChromeDriver]: https://chromedriver.chromium.org/
* [ChromeDriver release]: http://chromedriver.storage.googleapis.com/index.html
* [PATH]: http://en.wikipedia.org/wiki/PATH_%28variable%29
* [android]: https://chromedriver.chromium.org/getting-started/getting-started---android
* [webview]: https://developer.chrome.com/multidevice/webview/overview
*/
'use strict'
const io = require('./io')
const { Browser } = require('./lib/capabilities')
const chromium = require('./chromium')
/**
* Name of the ChromeDriver executable.
* @type {string}
* @const
*/
const CHROMEDRIVER_EXE =
process.platform === 'win32' ? 'chromedriver.exe' : 'chromedriver'
/** @type {remote.DriverService} */
let defaultService = null
/**
* Creates {@link selenium-webdriver/remote.DriverService} instances that manage
* a [ChromeDriver](https://chromedriver.chromium.org/)
* server in a child process.
*/
class ServiceBuilder extends chromium.ServiceBuilder {
/**
* @param {string=} opt_exe Path to the server executable to use. If omitted,
* the builder will attempt to locate the chromedriver on the current
* PATH.
* @throws {Error} If provided executable does not exist, or the chromedriver
* cannot be found on the PATH.
*/
constructor(opt_exe) {
let exe = opt_exe || locateSynchronously()
if (!exe) {
throw Error(
`The ChromeDriver could not be found on the current PATH. Please ` +
`download the latest version of the ChromeDriver from ` +
`http://chromedriver.storage.googleapis.com/index.html and ensure ` +
`it can be found on your PATH.`
)
}
super(exe)
}
}
/**
* Class for managing ChromeDriver specific options.
*/
class Options extends chromium.Options {
/**
* Sets the path to the Chrome binary to use. On Mac OS X, this path should
* reference the actual Chrome executable, not just the application binary
* (e.g. "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome").
*
* The binary path may be absolute or relative to the chromedriver server
* executable, but it must exist on the machine that will launch Chrome.
*
* @param {string} path The path to the Chrome binary to use.
* @return {!Options} A self reference.
*/
setChromeBinaryPath(path) {
return this.setBinaryPath(path)
}
/**
* Configures the ChromeDriver to launch Chrome on Android via adb. This
* function is shorthand for
* {@link #androidPackage options.androidPackage('com.android.chrome')}.
* @return {!Options} A self reference.
*/
androidChrome() {
return this.androidPackage('com.android.chrome')
}
/**
* Sets the path to Chrome's log file. This path should exist on the machine
* that will launch Chrome.
* @param {string} path Path to the log file to use.
* @return {!Options} A self reference.
*/
setChromeLogFile(path) {
return this.setBrowserLogFile(path)
}
/**
* Sets the directory to store Chrome minidumps in. This option is only
* supported when ChromeDriver is running on Linux.
* @param {string} path The directory path.
* @return {!Options} A self reference.
*/
setChromeMinidumpPath(path) {
return this.setBrowserMinidumpPath(path)
}
}
/**
* Creates a new WebDriver client for Chrome.
*/
class Driver extends chromium.Driver {
/**
* Creates a new session with the ChromeDriver.
*
* @param {(Capabilities|Options)=} opt_config The configuration options.
* @param {(remote.DriverService|http.Executor)=} opt_serviceExecutor Either
* a DriverService to use for the remote end, or a preconfigured executor
* for an externally managed endpoint. If neither is provided, the
 *     {@linkplain #getDefaultService default service} will be used by
* default.
* @return {!Driver} A new driver instance.
*/
static createSession(opt_config, opt_serviceExecutor) {
let caps = opt_config || new Options()
return /** @type {!Driver} */ (super.createSession(
caps,
opt_serviceExecutor
))
}
}
/**
* _Synchronously_ attempts to locate the chromedriver executable on the current
* system.
*
* @return {?string} the located executable, or `null`.
*/
function locateSynchronously() {
return io.findInPath(CHROMEDRIVER_EXE, true)
}
/**
* Sets the default service to use for new ChromeDriver instances.
* @param {!remote.DriverService} service The service to use.
* @throws {Error} If the default service is currently running.
*/
function setDefaultService(service) {
if (defaultService && defaultService.isRunning()) {
throw Error(
`The previously configured ChromeDriver service is still running. ` +
`You must shut it down before you may adjust its configuration.`
)
}
defaultService = service
}
/**
* Returns the default ChromeDriver service. If such a service has not been
* configured, one will be constructed using the default configuration for
* a ChromeDriver executable found on the system PATH.
* @return {!remote.DriverService} The default ChromeDriver service.
*/
function getDefaultService() {
if (!defaultService) {
defaultService = new ServiceBuilder().build()
}
return defaultService
}
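// Illustrative sketch (the executable path below is hypothetical): a custom
// default service can be registered before the first Driver is created, so that
// getDefaultService() hands it out:
//
//   setDefaultService(new ServiceBuilder('/path/to/chromedriver').build())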
Options.prototype.CAPABILITY_KEY = 'goog:chromeOptions'
Options.prototype.BROWSER_NAME_VALUE = Browser.CHROME
Driver.getDefaultService = getDefaultService
Driver.prototype.VENDOR_COMMAND_PREFIX = 'goog'
// PUBLIC API
exports.Driver = Driver
exports.Options = Options
exports.ServiceBuilder = ServiceBuilder
exports.getDefaultService = getDefaultService
exports.setDefaultService = setDefaultService
exports.locateSynchronously = locateSynchronously
| 1 | 18,744 | The vendor prefix is still being used on Chromium based browsers like Edge Chromium and Chrome. Did you mean to remove this? | SeleniumHQ-selenium | py |
@@ -36,6 +36,7 @@ def _makeKbEmulateScript(scriptName):
# __name__ must be str; i.e. can't be unicode.
scriptName = scriptName.encode("mbcs")
func.__name__ = "script_%s" % scriptName
+ func.emuGesture = emuGesture
func.__doc__ = _("Emulates pressing %s on the system keyboard") % emuGesture.displayName
return func
| 1 | #scriptHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2007-2017 NV Access Limited, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import time
import weakref
import inspect
import config
import speech
import sayAllHandler
import appModuleHandler
import api
import queueHandler
from logHandler import log
import inputCore
import globalPluginHandler
import braille
import keyLabels
_numScriptsQueued=0 #Number of scripts that are queued to be executed
#: Number of scripts that send their gestures on that are queued to be executed or are currently being executed.
_numIncompleteInterceptedCommandScripts=0
_lastScriptTime=0 #Time in MS of when the last script was executed
_lastScriptRef=None #Holds a weakref to the last script that was executed
_lastScriptCount=0 #The amount of times the last script was repeated
_isScriptRunning=False
def _makeKbEmulateScript(scriptName):
import keyboardHandler
keyName = scriptName[3:]
emuGesture = keyboardHandler.KeyboardInputGesture.fromName(keyName)
func = lambda gesture: inputCore.manager.emulateGesture(emuGesture)
if isinstance(scriptName, unicode):
# __name__ must be str; i.e. can't be unicode.
scriptName = scriptName.encode("mbcs")
func.__name__ = "script_%s" % scriptName
func.__doc__ = _("Emulates pressing %s on the system keyboard") % emuGesture.displayName
return func
def _getObjScript(obj, gesture, globalMapScripts):
# Search the scripts from the global gesture maps.
for cls, scriptName in globalMapScripts:
if isinstance(obj, cls):
if scriptName is None:
# The global map specified that no script should execute for this gesture and object.
return None
if scriptName.startswith("kb:"):
# Emulate a key press.
return _makeKbEmulateScript(scriptName)
try:
return getattr(obj, "script_%s" % scriptName)
except AttributeError:
pass
# Search the object itself for in-built bindings.
return obj.getScript(gesture)
def findScript(gesture):
focus = api.getFocusObject()
if not focus:
return None
# Import late to avoid circular import.
# We need to import this here because this might be the first import of this module
# and it might be needed by global maps.
import globalCommands
globalMapScripts = []
globalMaps = [inputCore.manager.userGestureMap, inputCore.manager.localeGestureMap]
globalMap = braille.handler.display.gestureMap
if globalMap:
globalMaps.append(globalMap)
for globalMap in globalMaps:
for identifier in gesture.normalizedIdentifiers:
globalMapScripts.extend(globalMap.getScriptsForGesture(identifier))
# Gesture specific scriptable object.
obj = gesture.scriptableObject
if obj:
func = _getObjScript(obj, gesture, globalMapScripts)
if func:
return func
# Global plugin level.
for plugin in globalPluginHandler.runningPlugins:
func = _getObjScript(plugin, gesture, globalMapScripts)
if func:
return func
# App module level.
app = focus.appModule
if app:
func = _getObjScript(app, gesture, globalMapScripts)
if func:
return func
# Tree interceptor level.
treeInterceptor = focus.treeInterceptor
if treeInterceptor and treeInterceptor.isReady:
func = _getObjScript(treeInterceptor, gesture, globalMapScripts)
from browseMode import BrowseModeTreeInterceptor
if isinstance(treeInterceptor,BrowseModeTreeInterceptor):
func=treeInterceptor.getAlternativeScript(gesture,func)
if func and (not treeInterceptor.passThrough or getattr(func,"ignoreTreeInterceptorPassThrough",False)):
return func
# NVDAObject level.
func = _getObjScript(focus, gesture, globalMapScripts)
if func:
return func
for obj in reversed(api.getFocusAncestors()):
func = _getObjScript(obj, gesture, globalMapScripts)
if func and getattr(func, 'canPropagate', False):
return func
# Global commands.
func = _getObjScript(globalCommands.commands, gesture, globalMapScripts)
if func:
return func
return None
def getScriptName(script):
return script.__name__[7:]
def getScriptLocation(script):
try:
instance = script.__self__
except AttributeError:
# Not an instance method, so this must be a fake script.
return None
name=script.__name__
for cls in instance.__class__.__mro__:
if name in cls.__dict__:
return "%s.%s"%(cls.__module__,cls.__name__)
def _isInterceptedCommandScript(script):
return not getattr(script,'__doc__',None)
def _queueScriptCallback(script,gesture):
global _numScriptsQueued, _numIncompleteInterceptedCommandScripts
_numScriptsQueued-=1
executeScript(script,gesture)
if _isInterceptedCommandScript(script):
_numIncompleteInterceptedCommandScripts-=1
def queueScript(script,gesture):
global _numScriptsQueued, _numIncompleteInterceptedCommandScripts
_numScriptsQueued+=1
if _isInterceptedCommandScript(script):
_numIncompleteInterceptedCommandScripts+=1
queueHandler.queueFunction(queueHandler.eventQueue,_queueScriptCallback,script,gesture)
def willSayAllResume(gesture):
return config.conf['keyboard']['allowSkimReadingInSayAll']and gesture.wasInSayAll and getattr(gesture.script,'resumeSayAllMode',None)==sayAllHandler.lastSayAllMode
def executeScript(script,gesture):
"""Executes a given script (function) passing it the given gesture.
It also keeps track of the execution of duplicate scripts within a certain amount of time, and counts how many times this happens.
Use L{getLastScriptRepeatCount} to find out this count value.
@param script: the function or method that should be executed. The function or method must take an argument of 'gesture'. This must be the same value as gesture.script, but it is passed in here purely for performance.
@type script: callable.
@param gesture: the input gesture that activated this script
@type gesture: L{inputCore.InputGesture}
"""
global _lastScriptTime, _lastScriptCount, _lastScriptRef, _isScriptRunning
lastScriptRef=_lastScriptRef() if _lastScriptRef else None
# We don't allow the same script to be executed from within itself, but we should still pass the key through
scriptFunc=getattr(script,"__func__",script)
if _isScriptRunning and lastScriptRef==scriptFunc:
return gesture.send()
_isScriptRunning=True
resumeSayAllMode=None
if willSayAllResume(gesture):
resumeSayAllMode=sayAllHandler.lastSayAllMode
try:
scriptTime=time.time()
scriptRef=weakref.ref(scriptFunc)
if (scriptTime-_lastScriptTime)<=0.5 and scriptFunc==lastScriptRef:
_lastScriptCount+=1
else:
_lastScriptCount=0
_lastScriptRef=scriptRef
_lastScriptTime=scriptTime
script(gesture)
except:
log.exception("error executing script: %s with gesture %r"%(script,gesture.displayName))
finally:
_isScriptRunning=False
if resumeSayAllMode is not None:
sayAllHandler.readText(resumeSayAllMode)
def getLastScriptRepeatCount():
"""The count of how many times the most recent script has been executed.
This should only be called from within a script.
@returns: a value greater than or equal to 0. If the script has not been repeated it is 0, if it has been repeated once it is 1, and so forth.
@rtype: integer
"""
if (time.time()-_lastScriptTime)>0.5:
return 0
else:
return _lastScriptCount
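# Illustrative sketch (hypothetical script method, not part of this module): a script
# can consult scriptHandler.getLastScriptRepeatCount() to vary its behaviour when the
# same gesture is repeated within 0.5 seconds:
#	def script_example(self, gesture):
#		if scriptHandler.getLastScriptRepeatCount() == 0:
#			...  # first press
#		else:
#			...  # repeated press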
def isScriptWaiting():
return bool(_numScriptsQueued)
def isCurrentScript(scriptFunc):
"""Finds out if the given script is equal to the script that L{isCurrentScript} is being called from.
@param scriptFunc: the script retrieved from ScriptableObject.getScript(gesture)
@type scriptFunc: Instance method
@returns: True if they are equal, False otherwise
@rtype: boolean
"""
try:
givenFunc=getattr(scriptFunc.im_self.__class__,scriptFunc.__name__)
except AttributeError:
log.debugWarning("Could not get unbound method from given script",exc_info=True)
return False
parentFrame=inspect.currentframe().f_back
try:
realObj=parentFrame.f_locals['self']
except KeyError:
log.debugWarning("Could not get self instance from parent frame instance method",exc_info=True)
return False
try:
realFunc=getattr(realObj.__class__,parentFrame.f_code.co_name)
except AttributeError:
log.debugWarning("Could not get unbound method from parent frame instance",exc_info=True)
return False
return givenFunc==realFunc
| 1 | 20,407 | I don't think this is needed anymore? | nvaccess-nvda | py |
@@ -91,8 +91,15 @@ func (s *Service) ListAgents(ctx context.Context, req *agentv1.ListAgentsRequest
// Parse proto filter into datastore request
if req.Filter != nil {
filter := req.Filter
+
+ // TODO should this be a helper function?
+ var byBanned *bool
+ if filter.ByBanned != nil {
+ byBanned = &filter.ByBanned.Value
+ }
+
listReq.ByAttestationType = filter.ByAttestationType
- listReq.ByBanned = filter.ByBanned
+ listReq.ByBanned = byBanned
if filter.BySelectorMatch != nil {
selectors, err := api.SelectorsFromProto(filter.BySelectorMatch.Selectors) | 1 | package agent
import (
"context"
"crypto/x509"
"errors"
"fmt"
"path"
"time"
"github.com/andres-erbsen/clock"
"github.com/gofrs/uuid"
"github.com/sirupsen/logrus"
"github.com/spiffe/go-spiffe/v2/spiffeid"
agentv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/agent/v1"
"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/nodeutil"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/pkg/common/x509util"
"github.com/spiffe/spire/pkg/server/api"
"github.com/spiffe/spire/pkg/server/api/rpccontext"
"github.com/spiffe/spire/pkg/server/ca"
"github.com/spiffe/spire/pkg/server/catalog"
"github.com/spiffe/spire/pkg/server/plugin/datastore"
"github.com/spiffe/spire/pkg/server/plugin/nodeattestor"
"github.com/spiffe/spire/proto/spire/common"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
// Config is the service configuration
type Config struct {
Catalog catalog.Catalog
Clock clock.Clock
DataStore datastore.DataStore
ServerCA ca.ServerCA
TrustDomain spiffeid.TrustDomain
}
// Service implements the v1 agent service
type Service struct {
agentv1.UnsafeAgentServer
cat catalog.Catalog
clk clock.Clock
ds datastore.DataStore
ca ca.ServerCA
td spiffeid.TrustDomain
}
// New creates a new agent service
func New(config Config) *Service {
return &Service{
cat: config.Catalog,
clk: config.Clock,
ds: config.DataStore,
ca: config.ServerCA,
td: config.TrustDomain,
}
}
// RegisterService registers the agent service on the gRPC server.
func RegisterService(s *grpc.Server, service *Service) {
agentv1.RegisterAgentServer(s, service)
}
// CountAgents returns the total number of agents.
func (s *Service) CountAgents(ctx context.Context, req *agentv1.CountAgentsRequest) (*agentv1.CountAgentsResponse, error) {
count, err := s.ds.CountAttestedNodes(ctx)
if err != nil {
log := rpccontext.Logger(ctx)
return nil, api.MakeErr(log, codes.Internal, "failed to count agents", err)
}
return &agentv1.CountAgentsResponse{Count: count}, nil
}
// ListAgents returns an optionally filtered and/or paginated list of agents.
func (s *Service) ListAgents(ctx context.Context, req *agentv1.ListAgentsRequest) (*agentv1.ListAgentsResponse, error) {
log := rpccontext.Logger(ctx)
listReq := &datastore.ListAttestedNodesRequest{}
if req.OutputMask == nil || req.OutputMask.Selectors {
listReq.FetchSelectors = true
}
// Parse proto filter into datastore request
if req.Filter != nil {
filter := req.Filter
listReq.ByAttestationType = filter.ByAttestationType
listReq.ByBanned = filter.ByBanned
if filter.BySelectorMatch != nil {
selectors, err := api.SelectorsFromProto(filter.BySelectorMatch.Selectors)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "failed to parse selectors", err)
}
listReq.BySelectorMatch = &datastore.BySelectors{
Match: datastore.MatchBehavior(filter.BySelectorMatch.Match),
Selectors: selectors,
}
}
}
// Set pagination parameters
if req.PageSize > 0 {
listReq.Pagination = &datastore.Pagination{
PageSize: req.PageSize,
Token: req.PageToken,
}
}
dsResp, err := s.ds.ListAttestedNodes(ctx, listReq)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to list agents", err)
}
resp := &agentv1.ListAgentsResponse{}
if dsResp.Pagination != nil {
resp.NextPageToken = dsResp.Pagination.Token
}
// Parse nodes into proto and apply output mask
for _, node := range dsResp.Nodes {
a, err := api.ProtoFromAttestedNode(node)
if err != nil {
log.WithError(err).WithField(telemetry.SPIFFEID, node.SpiffeId).Warn("Failed to parse agent")
continue
}
applyMask(a, req.OutputMask)
resp.Agents = append(resp.Agents, a)
}
return resp, nil
}
// GetAgent returns the agent associated with the given SpiffeID.
func (s *Service) GetAgent(ctx context.Context, req *agentv1.GetAgentRequest) (*types.Agent, error) {
log := rpccontext.Logger(ctx)
agentID, err := api.TrustDomainAgentIDFromProto(s.td, req.Id)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "invalid agent ID", err)
}
log = log.WithField(telemetry.SPIFFEID, agentID.String())
attestedNode, err := s.ds.FetchAttestedNode(ctx, agentID.String())
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to fetch agent", err)
}
if attestedNode == nil {
return nil, api.MakeErr(log, codes.NotFound, "agent not found", err)
}
selectors, err := s.getSelectorsFromAgentID(ctx, attestedNode.SpiffeId)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to get selectors from agent", err)
}
agent, err := api.AttestedNodeToProto(attestedNode, selectors)
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to convert attested node to agent", err)
}
applyMask(agent, req.OutputMask)
return agent, nil
}
// DeleteAgent removes the agent with the given SpiffeID.
func (s *Service) DeleteAgent(ctx context.Context, req *agentv1.DeleteAgentRequest) (*emptypb.Empty, error) {
log := rpccontext.Logger(ctx)
id, err := api.TrustDomainAgentIDFromProto(s.td, req.Id)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "invalid agent ID", err)
}
log = log.WithField(telemetry.SPIFFEID, id.String())
_, err = s.ds.DeleteAttestedNode(ctx, id.String())
switch status.Code(err) {
case codes.OK:
log.Info("Agent deleted")
return &emptypb.Empty{}, nil
case codes.NotFound:
return nil, api.MakeErr(log, codes.NotFound, "agent not found", err)
default:
return nil, api.MakeErr(log, codes.Internal, "failed to remove agent", err)
}
}
// BanAgent sets the agent with the given SpiffeID to the banned state.
func (s *Service) BanAgent(ctx context.Context, req *agentv1.BanAgentRequest) (*emptypb.Empty, error) {
log := rpccontext.Logger(ctx)
id, err := api.TrustDomainAgentIDFromProto(s.td, req.Id)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "invalid agent ID", err)
}
log = log.WithField(telemetry.SPIFFEID, id.String())
	// The agent "Banned" state is indicated by setting its
	// serial numbers (current and new) to empty strings.
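	// (This empty-serial state is what nodeutil.IsAgentBanned checks for in AttestAgent.)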
banned := &common.AttestedNode{SpiffeId: id.String()}
mask := &common.AttestedNodeMask{
CertSerialNumber: true,
NewCertSerialNumber: true,
}
_, err = s.ds.UpdateAttestedNode(ctx, banned, mask)
switch status.Code(err) {
case codes.OK:
log.Info("Agent banned")
return &emptypb.Empty{}, nil
case codes.NotFound:
return nil, api.MakeErr(log, codes.NotFound, "agent not found", err)
default:
return nil, api.MakeErr(log, codes.Internal, "failed to ban agent", err)
}
}
// AttestAgent attests the authenticity of the given agent.
func (s *Service) AttestAgent(stream agentv1.Agent_AttestAgentServer) error {
ctx := stream.Context()
log := rpccontext.Logger(ctx)
if err := rpccontext.RateLimit(ctx, 1); err != nil {
return api.MakeErr(log, status.Code(err), "rejecting request due to attest agent rate limiting", err)
}
req, err := stream.Recv()
if err != nil {
return api.MakeErr(log, codes.InvalidArgument, "failed to receive request from stream", err)
}
// validate
params := req.GetParams()
if err := validateAttestAgentParams(params); err != nil {
return api.MakeErr(log, codes.InvalidArgument, "malformed param", err)
}
log = log.WithField(telemetry.NodeAttestorType, params.Data.Type)
// attest
var attestResult *nodeattestor.AttestResult
if params.Data.Type == "join_token" {
attestResult, err = s.attestJoinToken(ctx, string(params.Data.Payload))
if err != nil {
return err
}
} else {
attestResult, err = s.attestChallengeResponse(ctx, stream, params)
if err != nil {
return err
}
}
agentID := attestResult.AgentID
log = log.WithField(telemetry.AgentID, agentID)
if err := idutil.CheckAgentIDStringNormalization(agentID); err != nil {
return api.MakeErr(log, codes.Internal, "agent ID is malformed", err)
}
agentSpiffeID, err := spiffeid.FromString(agentID)
if err != nil {
return api.MakeErr(log, codes.Internal, "invalid agent id", err)
}
// fetch the agent/node to check if it was already attested or banned
attestedNode, err := s.ds.FetchAttestedNode(ctx, agentID)
if err != nil {
return api.MakeErr(log, codes.Internal, "failed to fetch agent", err)
}
if attestedNode != nil && nodeutil.IsAgentBanned(attestedNode) {
return api.MakeErr(log, codes.PermissionDenied, "failed to attest: agent is banned", nil)
}
// parse and sign CSR
svid, err := s.signSvid(ctx, agentSpiffeID, params.Params.Csr, log)
if err != nil {
return err
}
// augment selectors with resolver
resolvedSelectors, err := s.resolveSelectors(ctx, agentID, params.Data.Type)
if err != nil {
return api.MakeErr(log, codes.Internal, "failed to resolve selectors", err)
}
// store augmented selectors
_, err = s.ds.SetNodeSelectors(ctx, &datastore.SetNodeSelectorsRequest{
Selectors: &datastore.NodeSelectors{
SpiffeId: agentID,
Selectors: append(attestResult.Selectors, resolvedSelectors...),
},
})
if err != nil {
return api.MakeErr(log, codes.Internal, "failed to update selectors", err)
}
// create or update attested entry
if attestedNode == nil {
node := &common.AttestedNode{
AttestationDataType: params.Data.Type,
SpiffeId: agentID,
CertNotAfter: svid[0].NotAfter.Unix(),
CertSerialNumber: svid[0].SerialNumber.String(),
}
if _, err := s.ds.CreateAttestedNode(ctx, node); err != nil {
return api.MakeErr(log, codes.Internal, "failed to create attested agent", err)
}
} else {
node := &common.AttestedNode{
SpiffeId: agentID,
CertNotAfter: svid[0].NotAfter.Unix(),
CertSerialNumber: svid[0].SerialNumber.String(),
}
if _, err := s.ds.UpdateAttestedNode(ctx, node, nil); err != nil {
return api.MakeErr(log, codes.Internal, "failed to update attested agent", err)
}
}
// build and send response
response := getAttestAgentResponse(agentSpiffeID, svid)
if p, ok := peer.FromContext(ctx); ok {
log = log.WithField(telemetry.Address, p.Addr.String())
}
log.Info("Agent attestation request completed")
if err := stream.Send(response); err != nil {
return api.MakeErr(log, codes.Internal, "failed to send response over stream", err)
}
return nil
}
// RenewAgent renews the SVID of the agent with the given SpiffeID.
func (s *Service) RenewAgent(ctx context.Context, req *agentv1.RenewAgentRequest) (*agentv1.RenewAgentResponse, error) {
log := rpccontext.Logger(ctx)
if err := rpccontext.RateLimit(ctx, 1); err != nil {
return nil, api.MakeErr(log, status.Code(err), "rejecting request due to renew agent rate limiting", err)
}
callerID, ok := rpccontext.CallerID(ctx)
if !ok {
return nil, api.MakeErr(log, codes.Internal, "caller ID missing from request context", nil)
}
log.Debug("Renewing agent SVID")
if req.Params == nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "params cannot be nil", nil)
}
if len(req.Params.Csr) == 0 {
return nil, api.MakeErr(log, codes.InvalidArgument, "missing CSR", nil)
}
agentSVID, err := s.signSvid(ctx, callerID, req.Params.Csr, log)
if err != nil {
return nil, err
}
update := &common.AttestedNode{
SpiffeId: callerID.String(),
NewCertNotAfter: agentSVID[0].NotAfter.Unix(),
NewCertSerialNumber: agentSVID[0].SerialNumber.String(),
}
mask := &common.AttestedNodeMask{
NewCertNotAfter: true,
NewCertSerialNumber: true,
}
if err := s.updateAttestedNode(ctx, update, mask, log); err != nil {
return nil, err
}
// Send response with new X509 SVID
return &agentv1.RenewAgentResponse{
Svid: &types.X509SVID{
Id: api.ProtoFromID(callerID),
ExpiresAt: agentSVID[0].NotAfter.Unix(),
CertChain: x509util.RawCertsFromCertificates(agentSVID),
},
}, nil
}
// CreateJoinToken returns a new JoinToken for an agent.
func (s *Service) CreateJoinToken(ctx context.Context, req *agentv1.CreateJoinTokenRequest) (*types.JoinToken, error) {
log := rpccontext.Logger(ctx)
if req.Ttl < 1 {
return nil, api.MakeErr(log, codes.InvalidArgument, "ttl is required, you must provide one", nil)
}
// If provided, check that the AgentID is valid BEFORE creating the join token so we can fail early
var agentID spiffeid.ID
var err error
if req.AgentId != nil {
agentID, err = api.TrustDomainWorkloadIDFromProto(s.td, req.AgentId)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "invalid agent ID", err)
}
if err := idutil.CheckIDProtoNormalization(req.AgentId); err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "agent ID is malformed", err)
}
	log = log.WithField(telemetry.SPIFFEID, agentID.String())
}
// Generate a token if one wasn't specified
if req.Token == "" {
u, err := uuid.NewV4()
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to generate token UUID", err)
}
req.Token = u.String()
}
expiry := s.clk.Now().Add(time.Second * time.Duration(req.Ttl))
err = s.ds.CreateJoinToken(ctx, &datastore.JoinToken{
Token: req.Token,
Expiry: expiry,
})
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to create token", err)
}
if req.AgentId != nil {
err := s.createJoinTokenRegistrationEntry(ctx, req.Token, agentID.String())
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to create join token registration entry", err)
}
}
return &types.JoinToken{Value: req.Token, ExpiresAt: expiry.Unix()}, nil
}
func (s *Service) createJoinTokenRegistrationEntry(ctx context.Context, token string, agentID string) error {
parentID := s.td.NewID(path.Join("spire", "agent", "join_token", token))
entry := &common.RegistrationEntry{
ParentId: parentID.String(),
SpiffeId: agentID,
Selectors: []*common.Selector{
{Type: "spiffe_id", Value: parentID.String()},
},
}
_, err := s.ds.CreateRegistrationEntry(ctx, entry)
if err != nil {
return err
}
return nil
}
func (s *Service) updateAttestedNode(ctx context.Context, node *common.AttestedNode, mask *common.AttestedNodeMask, log logrus.FieldLogger) error {
_, err := s.ds.UpdateAttestedNode(ctx, node, mask)
switch status.Code(err) {
case codes.OK:
return nil
case codes.NotFound:
return api.MakeErr(log, codes.NotFound, "agent not found", err)
default:
return api.MakeErr(log, codes.Internal, "failed to update agent", err)
}
}
func (s *Service) signSvid(ctx context.Context, agentID spiffeid.ID, csr []byte, log logrus.FieldLogger) ([]*x509.Certificate, error) {
parsedCsr, err := x509.ParseCertificateRequest(csr)
if err != nil {
return nil, api.MakeErr(log, codes.InvalidArgument, "failed to parse CSR", err)
}
// Sign a new X509 SVID
x509Svid, err := s.ca.SignX509SVID(ctx, ca.X509SVIDParams{
SpiffeID: agentID,
PublicKey: parsedCsr.PublicKey,
})
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to sign X509 SVID", err)
}
return x509Svid, nil
}
func (s *Service) getSelectorsFromAgentID(ctx context.Context, agentID string) ([]*types.Selector, error) {
resp, err := s.ds.GetNodeSelectors(ctx, &datastore.GetNodeSelectorsRequest{
SpiffeId: agentID,
})
if err != nil {
return nil, fmt.Errorf("failed to get node selectors: %v", err)
}
return api.NodeSelectorsToProto(resp.Selectors)
}
func (s *Service) attestJoinToken(ctx context.Context, token string) (*nodeattestor.AttestResult, error) {
log := rpccontext.Logger(ctx).WithField(telemetry.NodeAttestorType, "join_token")
joinToken, err := s.ds.FetchJoinToken(ctx, token)
switch {
case err != nil:
return nil, api.MakeErr(log, codes.Internal, "failed to fetch join token", err)
case joinToken == nil:
return nil, api.MakeErr(log, codes.InvalidArgument, "failed to attest: join token does not exist or has already been used", nil)
}
err = s.ds.DeleteJoinToken(ctx, token)
switch {
case err != nil:
return nil, api.MakeErr(log, codes.Internal, "failed to delete join token", err)
case joinToken.Expiry.Before(s.clk.Now()):
return nil, api.MakeErr(log, codes.InvalidArgument, "join token expired", nil)
}
tokenPath := path.Join("spire", "agent", "join_token", token)
return &nodeattestor.AttestResult{
AgentID: s.td.NewID(tokenPath).String(),
}, nil
}
func (s *Service) attestChallengeResponse(ctx context.Context, agentStream agentv1.Agent_AttestAgentServer, params *agentv1.AttestAgentRequest_Params) (*nodeattestor.AttestResult, error) {
attestorType := params.Data.Type
log := rpccontext.Logger(ctx).WithField(telemetry.NodeAttestorType, attestorType)
nodeAttestor, ok := s.cat.GetNodeAttestorNamed(attestorType)
if !ok {
return nil, api.MakeErr(log, codes.FailedPrecondition, "error getting node attestor", fmt.Errorf("could not find node attestor type %q", attestorType))
}
result, err := nodeAttestor.Attest(ctx, params.Data.Payload, func(ctx context.Context, challenge []byte) ([]byte, error) {
resp := &agentv1.AttestAgentResponse{
Step: &agentv1.AttestAgentResponse_Challenge{
Challenge: challenge,
},
}
if err := agentStream.Send(resp); err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to send challenge to agent", err)
}
req, err := agentStream.Recv()
if err != nil {
return nil, api.MakeErr(log, codes.Internal, "failed to receive challenge from agent", err)
}
return req.GetChallengeResponse(), nil
})
if err != nil {
st := status.Convert(err)
return nil, api.MakeErr(log, st.Code(), st.Message(), nil)
}
return result, nil
}
func (s *Service) resolveSelectors(ctx context.Context, agentID string, attestationType string) ([]*common.Selector, error) {
if nodeResolver, ok := s.cat.GetNodeResolverNamed(attestationType); ok {
return nodeResolver.Resolve(ctx, agentID)
}
return nil, nil
}
func applyMask(a *types.Agent, mask *types.AgentMask) {
if mask == nil {
return
}
if !mask.AttestationType {
a.AttestationType = ""
}
if !mask.X509SvidSerialNumber {
a.X509SvidSerialNumber = ""
}
if !mask.X509SvidExpiresAt {
a.X509SvidExpiresAt = 0
}
if !mask.Selectors {
a.Selectors = nil
}
if !mask.Banned {
a.Banned = false
}
}
func validateAttestAgentParams(params *agentv1.AttestAgentRequest_Params) error {
switch {
case params == nil:
return errors.New("missing params")
case params.Data == nil:
return errors.New("missing attestation data")
case params.Params == nil:
return errors.New("missing X509-SVID parameters")
case len(params.Params.Csr) == 0:
return errors.New("missing CSR")
case params.Data.Type == "":
return errors.New("missing attestation data type")
case len(params.Data.Payload) == 0:
return errors.New("missing attestation data payload")
default:
return nil
}
}
func getAttestAgentResponse(spiffeID spiffeid.ID, certificates []*x509.Certificate) *agentv1.AttestAgentResponse {
svid := &types.X509SVID{
Id: api.ProtoFromID(spiffeID),
CertChain: x509util.RawCertsFromCertificates(certificates),
ExpiresAt: certificates[0].NotAfter.Unix(),
}
return &agentv1.AttestAgentResponse{
Step: &agentv1.AttestAgentResponse_Result_{
Result: &agentv1.AttestAgentResponse_Result{
Svid: svid,
},
},
}
}
| 1 | 16,648 | This is the only occurrence I see in the code where we now need to convert from a boolean protobuf wrapper to a boolean pointer. This felt a little cumbersome here; should we consider moving it somewhere else as a helper function? | spiffe-spire | go |
@@ -46,9 +46,6 @@ void HBProcessor::process(const cpp2::HBReq& req) {
}
HostInfo info(time::WallClock::fastNowInMilliSec(), req.get_role(), req.get_git_info_sha());
- if (req.version_ref().has_value()) {
- info.version_ = *req.version_ref();
- }
if (req.leader_partIds_ref().has_value()) {
ret = ActiveHostsMan::updateHostInfo(kvstore_, host, info, &*req.leader_partIds_ref());
} else { | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "meta/processors/admin/HBProcessor.h"
#include "common/time/WallClock.h"
#include "meta/ActiveHostsMan.h"
#include "meta/KVBasedClusterIdMan.h"
#include "meta/MetaVersionMan.h"
namespace nebula {
namespace meta {
HBCounters kHBCounters;
std::atomic<int64_t> HBProcessor::metaVersion_ = -1;
void HBProcessor::onFinished() {
if (counters_) {
stats::StatsManager::addValue(counters_->numCalls_);
stats::StatsManager::addValue(counters_->latency_, this->duration_.elapsedInUSec());
}
Base::onFinished();
}
void HBProcessor::process(const cpp2::HBReq& req) {
HostAddr host((*req.host_ref()).host, (*req.host_ref()).port);
nebula::cpp2::ErrorCode ret;
LOG(INFO) << "Receive heartbeat from " << host
<< ", role = " << apache::thrift::util::enumNameSafe(req.get_role());
if (req.get_role() == cpp2::HostRole::STORAGE) {
ClusterID peerClusterId = req.get_cluster_id();
if (peerClusterId == 0) {
LOG(INFO) << "Set clusterId for new host " << host << "!";
resp_.set_cluster_id(clusterId_);
} else if (peerClusterId != clusterId_) {
LOG(ERROR) << "Reject wrong cluster host " << host << "!";
handleErrorCode(nebula::cpp2::ErrorCode::E_WRONGCLUSTER);
onFinished();
return;
}
}
HostInfo info(time::WallClock::fastNowInMilliSec(), req.get_role(), req.get_git_info_sha());
if (req.version_ref().has_value()) {
info.version_ = *req.version_ref();
}
if (req.leader_partIds_ref().has_value()) {
ret = ActiveHostsMan::updateHostInfo(kvstore_, host, info, &*req.leader_partIds_ref());
} else {
ret = ActiveHostsMan::updateHostInfo(kvstore_, host, info);
}
if (ret == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) {
auto leaderRet = kvstore_->partLeader(kDefaultSpaceId, kDefaultPartId);
if (nebula::ok(leaderRet)) {
resp_.set_leader(toThriftHost(nebula::value(leaderRet)));
}
}
auto lastUpdateTimeRet = LastUpdateTimeMan::get(kvstore_);
if (nebula::ok(lastUpdateTimeRet)) {
resp_.set_last_update_time_in_ms(nebula::value(lastUpdateTimeRet));
} else if (nebula::error(lastUpdateTimeRet) == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) {
resp_.set_last_update_time_in_ms(0);
}
auto version = metaVersion_.load();
if (version == -1) {
metaVersion_.store(static_cast<int64_t>(MetaVersionMan::getMetaVersionFromKV(kvstore_)));
}
resp_.set_meta_version(metaVersion_.load());
handleErrorCode(ret);
onFinished();
}
} // namespace meta
} // namespace nebula
| 1 | 32,309 | Need you to delete the `version_` from `HostInfo` struct ? | vesoft-inc-nebula | cpp |
@@ -0,0 +1,5 @@
+const { getBaseLang } = axe.commons.utils;
+const primaryLangValue = getBaseLang(node.getAttribute('lang') || '');
+const primaryXmlLangValue = getBaseLang(node.getAttribute('xml:lang') || '');
+
+return primaryLangValue === primaryXmlLangValue; | 1 | 1 | 12,935 | This could be the cleanest looking check we've got. Great job Jey. | dequelabs-axe-core | js |
|
@@ -148,7 +148,7 @@ class Controller
$url = Request::path();
}
- if (!strlen($url)) {
+ if ('' === $url) {
$url = '/';
}
| 1 | <?php namespace Cms\Classes;
use Cms;
use Url;
use Str;
use App;
use File;
use View;
use Lang;
use Flash;
use Config;
use Session;
use Request;
use Response;
use Exception;
use BackendAuth;
use Twig_Environment;
use Twig_Cache_Filesystem;
use Cms\Twig\Loader as TwigLoader;
use Cms\Twig\DebugExtension;
use Cms\Twig\Extension as CmsTwigExtension;
use Cms\Models\MaintenanceSetting;
use System\Models\RequestLog;
use System\Helpers\View as ViewHelper;
use System\Classes\ErrorHandler;
use System\Classes\CombineAssets;
use System\Twig\Extension as SystemTwigExtension;
use October\Rain\Exception\AjaxException;
use October\Rain\Exception\SystemException;
use October\Rain\Exception\ValidationException;
use October\Rain\Exception\ApplicationException;
use October\Rain\Parse\Bracket as TextParser;
use Illuminate\Http\RedirectResponse;
/**
* The CMS controller class.
* The controller finds and serves requested pages.
*
* @package october\cms
* @author Alexey Bobkov, Samuel Georges
*/
class Controller
{
use \System\Traits\AssetMaker;
use \System\Traits\EventEmitter;
/**
* @var \Cms\Classes\Theme A reference to the CMS theme processed by the controller.
*/
protected $theme;
/**
* @var \Cms\Classes\Router A reference to the Router object.
*/
protected $router;
/**
* @var \Cms\Twig\Loader A reference to the Twig template loader.
*/
protected $loader;
/**
* @var \Cms\Classes\Page A reference to the CMS page template being processed.
*/
protected $page;
/**
* @var \Cms\Classes\CodeBase A reference to the CMS page code section object.
*/
protected $pageObj;
/**
* @var \Cms\Classes\Layout A reference to the CMS layout template used by the page.
*/
protected $layout;
/**
* @var \Cms\Classes\CodeBase A reference to the CMS layout code section object.
*/
protected $layoutObj;
/**
* @var \Twig_Environment Keeps the Twig environment object.
*/
protected $twig;
/**
* @var string Contains the rendered page contents string.
*/
protected $pageContents;
/**
* @var array A list of variables to pass to the page.
*/
public $vars = [];
/**
* @var int Response status code
*/
protected $statusCode = 200;
/**
* @var self Cache of self
*/
protected static $instance = null;
/**
* @var \Cms\Classes\ComponentBase Object of the active component, used internally.
*/
protected $componentContext = null;
/**
* @var array Component partial stack, used internally.
*/
protected $partialStack = [];
/**
* Creates the controller.
* @param \Cms\Classes\Theme $theme Specifies the CMS theme.
* If the theme is not specified, the current active theme used.
*/
public function __construct($theme = null)
{
$this->theme = $theme ? $theme : Theme::getActiveTheme();
if (!$this->theme) {
throw new CmsException(Lang::get('cms::lang.theme.active.not_found'));
}
$this->assetPath = Config::get('cms.themesPath', '/themes').'/'.$this->theme->getDirName();
$this->router = new Router($this->theme);
$this->partialStack = new PartialStack;
$this->initTwigEnvironment();
self::$instance = $this;
}
/**
* Finds and serves the requested page.
* If the page cannot be found, returns the page with the URL /404.
* If the /404 page doesn't exist, returns the system 404 page.
* @param string $url Specifies the requested page URL.
* If the parameter is omitted, the current URL used.
* @return string Returns the processed page content.
*/
public function run($url = '/')
{
if ($url === null) {
$url = Request::path();
}
if (!strlen($url)) {
$url = '/';
}
/*
* Hidden page
*/
$page = $this->router->findByUrl($url);
if ($page && $page->is_hidden) {
if (!BackendAuth::getUser()) {
$page = null;
}
}
/*
* Maintenance mode
*/
if (
MaintenanceSetting::isConfigured() &&
MaintenanceSetting::get('is_enabled', false) &&
!BackendAuth::getUser()
) {
if (!Request::ajax()) {
$this->setStatusCode(503);
}
$page = Page::loadCached($this->theme, MaintenanceSetting::get('cms_page'));
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeDisplay', [$url, $page])) {
if ($event instanceof Page) {
$page = $event;
}
else {
return $event;
}
}
/*
* If the page was not found, render the 404 page - either provided by the theme or the built-in one.
*/
if (!$page || $url === '404') {
if (!Request::ajax()) {
$this->setStatusCode(404);
}
// Log the 404 request
if (!App::runningUnitTests()) {
RequestLog::add();
}
if (!$page = $this->router->findByUrl('/404')) {
return Response::make(View::make('cms::404'), $this->statusCode);
}
}
/*
* Run the page
*/
$result = $this->runPage($page);
/*
* Post-processing
*/
$result = $this->postProcessResult($page, $url, $result);
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.display', [$url, $page, $result])) {
return $event;
}
if (!is_string($result)) {
return $result;
}
return Response::make($result, $this->statusCode);
}
/**
* Renders a page in its entirety, including component initialization.
* AJAX will be disabled for this process.
* @param string $pageFile Specifies the CMS page file name to run.
* @param array $parameters Routing parameters.
* @param \Cms\Classes\Theme $theme Theme object
*/
public static function render($pageFile, $parameters = [], $theme = null)
{
if (!$theme && (!$theme = Theme::getActiveTheme())) {
throw new CmsException(Lang::get('cms::lang.theme.active.not_found'));
}
$controller = new static($theme);
$controller->getRouter()->setParameters($parameters);
if (($page = Page::load($theme, $pageFile)) === null) {
throw new CmsException(Lang::get('cms::lang.page.not_found_name', ['name'=>$pageFile]));
}
return $controller->runPage($page, false);
}
/**
* Runs a page directly from its object and supplied parameters.
* @param \Cms\Classes\Page $page Specifies the CMS page to run.
* @return string
*/
public function runPage($page, $useAjax = true)
{
$this->page = $page;
/*
 * If the page doesn't refer to any layout, create the fallback layout.
* Otherwise load the layout specified in the page.
*/
if (!$page->layout) {
$layout = Layout::initFallback($this->theme);
}
elseif (($layout = Layout::loadCached($this->theme, $page->layout)) === null) {
throw new CmsException(Lang::get('cms::lang.layout.not_found_name', ['name'=>$page->layout]));
}
$this->layout = $layout;
/*
* The 'this' variable is reserved for default variables.
*/
$this->vars['this'] = [
'page' => $this->page,
'layout' => $this->layout,
'theme' => $this->theme,
'param' => $this->router->getParameters(),
'controller' => $this,
'environment' => App::environment(),
'session' => App::make('session'),
];
/*
* Check for the presence of validation errors in the session.
*/
$this->vars['errors'] = (Config::get('session.driver') && Session::has('errors'))
? Session::get('errors')
: new \Illuminate\Support\ViewErrorBag;
/*
* Handle AJAX requests and execute the life cycle functions
*/
$this->initCustomObjects();
$this->initComponents();
/*
* Give the layout and page an opportunity to participate
* after components are initialized and before AJAX is handled.
*/
if ($this->layoutObj) {
CmsException::mask($this->layout, 300);
$this->layoutObj->onInit();
CmsException::unmask();
}
CmsException::mask($this->page, 300);
$this->pageObj->onInit();
CmsException::unmask();
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.init', [$page])) {
return $event;
}
/*
* Execute AJAX event
*/
if ($useAjax && $ajaxResponse = $this->execAjaxHandlers()) {
return $ajaxResponse;
}
/*
* Execute postback handler
*/
if (
$useAjax &&
($handler = post('_handler')) &&
($this->verifyCsrfToken()) &&
($handlerResponse = $this->runAjaxHandler($handler)) &&
$handlerResponse !== true
) {
return $handlerResponse;
}
/*
* Execute page lifecycle
*/
if ($cycleResponse = $this->execPageCycle()) {
return $cycleResponse;
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeRenderPage', [$page])) {
$this->pageContents = $event;
}
else {
/*
* Render the page
*/
CmsException::mask($this->page, 400);
$this->loader->setObject($this->page);
$template = $this->twig->loadTemplate($this->page->getFilePath());
$this->pageContents = $template->render($this->vars);
CmsException::unmask();
}
/*
* Render the layout
*/
CmsException::mask($this->layout, 400);
$this->loader->setObject($this->layout);
$template = $this->twig->loadTemplate($this->layout->getFilePath());
$result = $template->render($this->vars);
CmsException::unmask();
return $result;
}
/**
* Invokes the current page cycle without rendering the page,
 * used by AJAX handlers that may rely on the logic inside the action.
*/
public function pageCycle()
{
return $this->execPageCycle();
}
/**
* Executes the page life cycle.
* Creates an object from the PHP sections of the page and
 * its layout, then executes their life cycle functions.
*/
protected function execPageCycle()
{
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.start')) {
return $event;
}
/*
* Run layout functions
*/
if ($this->layoutObj) {
CmsException::mask($this->layout, 300);
$response = (
($result = $this->layoutObj->onStart()) ||
($result = $this->layout->runComponents()) ||
($result = $this->layoutObj->onBeforePageStart())
) ? $result : null;
CmsException::unmask();
if ($response) {
return $response;
}
}
/*
* Run page functions
*/
CmsException::mask($this->page, 300);
$response = (
($result = $this->pageObj->onStart()) ||
($result = $this->page->runComponents()) ||
($result = $this->pageObj->onEnd())
) ? $result : null;
CmsException::unmask();
if ($response) {
return $response;
}
/*
* Run remaining layout functions
*/
if ($this->layoutObj) {
CmsException::mask($this->layout, 300);
$response = ($result = $this->layoutObj->onEnd()) ? $result : null;
CmsException::unmask();
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.end')) {
return $event;
}
return $response;
}
/**
* Post-processes page HTML code before it's sent to the client.
 * Note: for pre-processing, see the cms.template.processTwigContent event.
* @param \Cms\Classes\Page $page Specifies the current CMS page.
* @param string $url Specifies the current URL.
 * @param string $content The page markup to post-process.
* @return string Returns the updated result string.
*/
protected function postProcessResult($page, $url, $content)
{
$content = MediaViewHelper::instance()->processHtml($content);
$dataHolder = (object) ['content' => $content];
$this->fireSystemEvent('cms.page.postprocess', [$url, $page, $dataHolder]);
return $dataHolder->content;
}
//
// Initialization
//
/**
* Initializes the Twig environment and loader.
* Registers the \Cms\Twig\Extension object with Twig.
* @return void
*/
protected function initTwigEnvironment()
{
$this->loader = new TwigLoader;
$useCache = !Config::get('cms.twigNoCache');
$isDebugMode = Config::get('app.debug', false);
$forceBytecode = Config::get('cms.forceBytecodeInvalidation', false);
$options = [
'auto_reload' => true,
'debug' => $isDebugMode,
];
if ($useCache) {
$options['cache'] = new Twig_Cache_Filesystem(
storage_path().'/cms/twig',
$forceBytecode ? Twig_Cache_Filesystem::FORCE_BYTECODE_INVALIDATION : 0
);
}
$this->twig = new Twig_Environment($this->loader, $options);
$this->twig->addExtension(new CmsTwigExtension($this));
$this->twig->addExtension(new SystemTwigExtension);
if ($isDebugMode) {
$this->twig->addExtension(new DebugExtension($this));
}
}
/**
* Initializes the custom layout and page objects.
* @return void
*/
protected function initCustomObjects()
{
$this->layoutObj = null;
if (!$this->layout->isFallBack()) {
CmsException::mask($this->layout, 300);
$parser = new CodeParser($this->layout);
$this->layoutObj = $parser->source($this->page, $this->layout, $this);
CmsException::unmask();
}
CmsException::mask($this->page, 300);
$parser = new CodeParser($this->page);
$this->pageObj = $parser->source($this->page, $this->layout, $this);
CmsException::unmask();
}
/**
* Initializes the components for the layout and page.
* @return void
*/
protected function initComponents()
{
if (!$this->layout->isFallBack()) {
foreach ($this->layout->settings['components'] as $component => $properties) {
list($name, $alias) = strpos($component, ' ')
? explode(' ', $component)
: [$component, $component];
$this->addComponent($name, $alias, $properties, true);
}
}
foreach ($this->page->settings['components'] as $component => $properties) {
list($name, $alias) = strpos($component, ' ')
? explode(' ', $component)
: [$component, $component];
$this->addComponent($name, $alias, $properties);
}
/*
* Extensibility
*/
$this->fireSystemEvent('cms.page.initComponents', [$this->page, $this->layout]);
}
//
// AJAX
//
/**
* Returns the AJAX handler for the current request, if available.
* @return string
*/
public function getAjaxHandler()
{
if (!Request::ajax() || Request::method() != 'POST') {
return null;
}
if ($handler = Request::header('X_OCTOBER_REQUEST_HANDLER')) {
return trim($handler);
}
return null;
}
/**
* Executes the page, layout, component and plugin AJAX handlers.
* @return mixed Returns the AJAX Response object or null.
*/
protected function execAjaxHandlers()
{
if ($handler = $this->getAjaxHandler()) {
try {
/*
* Validate the handler name
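 * (illustrative examples of valid names: "onSubmit" or "myComponent::onSubmit")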
*/
if (!preg_match('/^(?:\w+\:{2})?on[A-Z]{1}[\w+]*$/', $handler)) {
throw new CmsException(Lang::get('cms::lang.ajax_handler.invalid_name', ['name'=>$handler]));
}
/*
* Validate the handler partial list
*/
if ($partialList = trim(Request::header('X_OCTOBER_REQUEST_PARTIALS'))) {
$partialList = explode('&', $partialList);
foreach ($partialList as $partial) {
if (!preg_match('/^(?:\w+\:{2}|@)?[a-z0-9\_\-\.\/]+$/i', $partial)) {
throw new CmsException(Lang::get('cms::lang.partial.invalid_name', ['name'=>$partial]));
}
}
}
else {
$partialList = [];
}
$responseContents = [];
/*
* Execute the handler
*/
if (!$result = $this->runAjaxHandler($handler)) {
throw new CmsException(Lang::get('cms::lang.ajax_handler.not_found', ['name'=>$handler]));
}
/*
 * Render partials and return the response as an array that will be converted to JSON automatically.
*/
foreach ($partialList as $partial) {
$responseContents[$partial] = $this->renderPartial($partial);
}
/*
* If the handler returned a redirect, process the URL and dispose of it so
* framework.js knows to redirect the browser and not the request!
*/
if ($result instanceof RedirectResponse) {
$responseContents['X_OCTOBER_REDIRECT'] = $result->getTargetUrl();
$result = null;
}
/*
* No redirect is used, look for any flash messages
*/
elseif (Request::header('X_OCTOBER_REQUEST_FLASH') && Flash::check()) {
$responseContents['X_OCTOBER_FLASH_MESSAGES'] = Flash::all();
}
/*
* If the handler returned an array, we should add it to output for rendering.
* If it is a string, add it to the array with the key "result".
* If an object, pass it to Laravel as a response object.
*/
if (is_array($result)) {
$responseContents = array_merge($responseContents, $result);
}
elseif (is_string($result)) {
$responseContents['result'] = $result;
}
elseif (is_object($result)) {
return $result;
}
return Response::make($responseContents, $this->statusCode);
}
catch (ValidationException $ex) {
/*
* Handle validation errors
*/
$responseContents['X_OCTOBER_ERROR_FIELDS'] = $ex->getFields();
$responseContents['X_OCTOBER_ERROR_MESSAGE'] = $ex->getMessage();
throw new AjaxException($responseContents);
}
catch (Exception $ex) {
throw $ex;
}
}
return null;
}
/**
* Tries to find and run an AJAX handler in the page, layout, components and plugins.
* The method stops as soon as the handler is found.
* @param string $handler name of the ajax handler
* @return boolean Returns true if the handler was found. Returns false otherwise.
*/
protected function runAjaxHandler($handler)
{
/*
* Process Component handler
*/
if (strpos($handler, '::')) {
list($componentName, $handlerName) = explode('::', $handler);
$componentObj = $this->findComponentByName($componentName);
if ($componentObj && $componentObj->methodExists($handlerName)) {
$this->componentContext = $componentObj;
$result = $componentObj->runAjaxHandler($handlerName);
return ($result) ?: true;
}
}
/*
* Process code section handler
*/
else {
if (method_exists($this->pageObj, $handler)) {
$result = $this->pageObj->$handler();
return ($result) ?: true;
}
if (!$this->layout->isFallBack() && method_exists($this->layoutObj, $handler)) {
$result = $this->layoutObj->$handler();
return ($result) ?: true;
}
/*
* Cycle each component to locate a usable handler
*/
if (($componentObj = $this->findComponentByHandler($handler)) !== null) {
$this->componentContext = $componentObj;
$result = $componentObj->runAjaxHandler($handler);
return ($result) ?: true;
}
}
/*
* Generic handler that does nothing
*/
if ($handler == 'onAjax') {
return true;
}
return false;
}
//
// Rendering
//
/**
* Renders a requested page.
* The framework uses this method internally.
*/
public function renderPage()
{
$contents = $this->pageContents;
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.render', [$contents])) {
return $event;
}
return $contents;
}
/**
* Renders a requested partial.
* The framework uses this method internally.
* @param string $name The view to load.
* @param array $parameters Parameter variables to pass to the view.
* @param bool $throwException Throw an exception if the partial is not found.
* @return mixed Partial contents or false if not throwing an exception.
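 *
 * For example (the partial names here are hypothetical), `$this->renderPartial('sidebar')`
 * renders a theme partial, while `$this->renderPartial('blog::items')` renders a partial
 * belonging to the component aliased `blog`.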
*/
public function renderPartial($name, $parameters = [], $throwException = true)
{
$vars = $this->vars;
$this->vars = array_merge($this->vars, $parameters);
/*
* Alias @ symbol for ::
*/
if (substr($name, 0, 1) == '@') {
$name = '::' . substr($name, 1);
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeRenderPartial', [$name])) {
$partial = $event;
}
/*
* Process Component partial
*/
elseif (strpos($name, '::') !== false) {
list($componentAlias, $partialName) = explode('::', $name);
/*
* Component alias not supplied
*/
if (!strlen($componentAlias)) {
if ($this->componentContext !== null) {
$componentObj = $this->componentContext;
}
elseif (($componentObj = $this->findComponentByPartial($partialName)) === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.partial.not_found_name', ['name'=>$partialName]));
}
else {
return false;
}
}
}
/*
* Component alias is supplied
*/
else {
if (($componentObj = $this->findComponentByName($componentAlias)) === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$componentAlias]));
}
else {
return false;
}
}
}
$partial = null;
$this->componentContext = $componentObj;
/*
* Check if the theme has an override
*/
if (strpos($partialName, '/') === false) {
$partial = ComponentPartial::loadOverrideCached($this->theme, $componentObj, $partialName);
}
/*
* Check the component partial
*/
if ($partial === null) {
$partial = ComponentPartial::loadCached($componentObj, $partialName);
}
if ($partial === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.partial.not_found_name', ['name'=>$name]));
}
else {
return false;
}
}
/*
* Set context for self access
*/
$this->vars['__SELF__'] = $componentObj;
}
else {
/*
* Process theme partial
*/
if (($partial = Partial::loadCached($this->theme, $name)) === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.partial.not_found_name', ['name'=>$name]));
}
else {
return false;
}
}
}
/*
* Run functions for CMS partials only (Cms\Classes\Partial)
*/
if ($partial instanceof Partial) {
$this->partialStack->stackPartial();
$manager = ComponentManager::instance();
foreach ($partial->settings['components'] as $component => $properties) {
// Do not inject the viewBag component to the environment.
// Not sure if they're needed there by the requirements,
// but there were problems with array-typed properties used by Static Pages
// snippets and setComponentPropertiesFromParams(). --ab
if ($component == 'viewBag') {
continue;
}
list($name, $alias) = strpos($component, ' ')
? explode(' ', $component)
: [$component, $component];
if (!$componentObj = $manager->makeComponent($name, $this->pageObj, $properties)) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$name]));
}
$componentObj->alias = $alias;
$parameters[$alias] = $partial->components[$alias] = $componentObj;
$this->partialStack->addComponent($alias, $componentObj);
$this->setComponentPropertiesFromParams($componentObj, $parameters);
$componentObj->init();
}
CmsException::mask($this->page, 300);
$parser = new CodeParser($partial);
$partialObj = $parser->source($this->page, $this->layout, $this);
CmsException::unmask();
CmsException::mask($partial, 300);
$partialObj->onStart();
$partial->runComponents();
$partialObj->onEnd();
CmsException::unmask();
}
/*
* Render the partial
*/
CmsException::mask($partial, 400);
$this->loader->setObject($partial);
$template = $this->twig->loadTemplate($partial->getFilePath());
$partialContent = $template->render(array_merge($this->vars, $parameters));
CmsException::unmask();
if ($partial instanceof Partial) {
$this->partialStack->unstackPartial();
}
$this->vars = $vars;
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.renderPartial', [$name, &$partialContent])) {
return $event;
}
return $partialContent;
}
/**
* Renders a requested content file.
* The framework uses this method internally.
* @param string $name The content view to load.
* @param array $parameters Parameter variables to pass to the view.
* @return string
*/
public function renderContent($name, $parameters = [])
{
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeRenderContent', [$name])) {
$content = $event;
}
/*
* Load content from theme
*/
elseif (($content = Content::loadCached($this->theme, $name)) === null) {
throw new CmsException(Lang::get('cms::lang.content.not_found_name', ['name'=>$name]));
}
$fileContent = $content->parsedMarkup;
/*
* Inject global view variables
*/
$globalVars = ViewHelper::getGlobalVars();
if (!empty($globalVars)) {
$parameters = (array) $parameters + $globalVars;
}
/*
* Parse basic template variables
*/
if (!empty($parameters)) {
$fileContent = TextParser::parse($fileContent, $parameters);
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.renderContent', [$name, &$fileContent])) {
return $event;
}
return $fileContent;
}
/**
 * Renders a component's default content, preserving the previous component context.
* @param $name
* @param array $parameters
* @return string Returns the component default contents.
*/
public function renderComponent($name, $parameters = [])
{
$result = null;
$previousContext = $this->componentContext;
if ($componentObj = $this->findComponentByName($name)) {
$componentObj->id = uniqid($name);
$componentObj->setProperties(array_merge($componentObj->getProperties(), $parameters));
$this->componentContext = $componentObj;
$result = $componentObj->onRender();
}
if (!$result) {
$result = $this->renderPartial($name.'::default', [], false);
}
$this->componentContext = $previousContext;
return $result;
}
//
// Setters
//
/**
* Sets the status code for the current web response.
* @param int $code Status code
* @return self
*/
public function setStatusCode($code)
{
$this->statusCode = (int) $code;
return $this;
}
//
// Getters
//
/**
* Returns the status code for the current web response.
* @return int Status code
*/
public function getStatusCode()
{
return $this->statusCode;
}
/**
* Returns an existing instance of the controller.
* If the controller doesn't exist, returns null.
* @return mixed Returns the controller object or null.
*/
public static function getController()
{
return self::$instance;
}
/**
* Returns the current CMS theme.
* @return \Cms\Classes\Theme
*/
public function getTheme()
{
return $this->theme;
}
/**
* Returns the Twig environment.
* @return Twig_Environment
*/
public function getTwig()
{
return $this->twig;
}
/**
* Returns the Twig loader.
* @return \Cms\Twig\Loader
*/
public function getLoader()
{
return $this->loader;
}
/**
* Returns the routing object.
* @return \Cms\Classes\Router
*/
public function getRouter()
{
return $this->router;
}
/**
* Intended to be called from the layout, returns the page code base object.
* @return \Cms\Classes\CodeBase
*/
public function getPageObject()
{
return $this->pageObj;
}
/**
* Returns the CMS page object being processed by the controller.
* The object is not available during the early stages of the controller
* initialization.
* @return \Cms\Classes\Page Returns the Page object or null.
*/
public function getPage()
{
return $this->page;
}
/**
* Intended to be called from the page, returns the layout code base object.
* @return \Cms\Classes\CodeBase
*/
public function getLayoutObject()
{
return $this->layoutObj;
}
//
// Page helpers
//
/**
* Looks up the URL for a supplied page and returns it relative to the website root.
*
* @param mixed $name Specifies the Cms Page file name.
* @param array $parameters Route parameters to consider in the URL.
* @param bool $routePersistence By default the existing routing parameters will be included
* @return string
*/
public function pageUrl($name, $parameters = [], $routePersistence = true)
{
if (!$name) {
return $this->currentPageUrl($parameters, $routePersistence);
}
/*
* Second parameter can act as third
*/
if (is_bool($parameters)) {
$routePersistence = $parameters;
}
if (!is_array($parameters)) {
$parameters = [];
}
if ($routePersistence) {
$parameters = array_merge($this->router->getParameters(), $parameters);
}
if (!$url = $this->router->findByFile($name, $parameters)) {
return null;
}
return Cms::url($url);
}
/**
* Looks up the current page URL with supplied parameters and route persistence.
* @param array $parameters
* @param bool $routePersistence
* @return null|string
*/
public function currentPageUrl($parameters = [], $routePersistence = true)
{
if (!$currentFile = $this->page->getFileName()) {
return null;
}
return $this->pageUrl($currentFile, $parameters, $routePersistence);
}
/**
* Converts supplied URL to a theme URL relative to the website root. If the URL provided is an
* array then the files will be combined.
* @param mixed $url Specifies the theme-relative URL. If null, the theme path is returned.
* @return string
*/
public function themeUrl($url = null)
{
$themeDir = $this->getTheme()->getDirName();
if (is_array($url)) {
$_url = Url::to(CombineAssets::combine($url, themes_path().'/'.$themeDir));
}
else {
$_url = Config::get('cms.themesPath', '/themes').'/'.$themeDir;
if ($url !== null) {
$_url .= '/'.$url;
}
$_url = Url::asset($_url);
}
return $_url;
}
/**
* Returns a routing parameter.
* @param string $name Routing parameter name.
* @param string $default Default to use if none is found.
* @return string
*/
public function param($name, $default = null)
{
return $this->router->getParameter($name, $default);
}
//
// Component helpers
//
/**
* Adds a component to the page object
* @param mixed $name Component class name or short name
* @param string $alias Alias to give the component
* @param array $properties Component properties
* @param bool $addToLayout Add to layout, instead of page
* @return ComponentBase Component object
*/
public function addComponent($name, $alias, $properties, $addToLayout = false)
{
$manager = ComponentManager::instance();
if ($addToLayout) {
if (!$componentObj = $manager->makeComponent($name, $this->layoutObj, $properties)) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$name]));
}
$componentObj->alias = $alias;
$this->vars[$alias] = $this->layout->components[$alias] = $componentObj;
}
else {
if (!$componentObj = $manager->makeComponent($name, $this->pageObj, $properties)) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$name]));
}
$componentObj->alias = $alias;
$this->vars[$alias] = $this->page->components[$alias] = $componentObj;
}
$this->setComponentPropertiesFromParams($componentObj);
$componentObj->init();
return $componentObj;
}
/**
* Searches the layout and page components by an alias
* @param $name
* @return ComponentBase The component object, if found
*/
public function findComponentByName($name)
{
if (isset($this->page->components[$name])) {
return $this->page->components[$name];
}
if (isset($this->layout->components[$name])) {
return $this->layout->components[$name];
}
$partialComponent = $this->partialStack->getComponent($name);
if ($partialComponent !== null) {
return $partialComponent;
}
return null;
}
/**
* Searches the layout and page components by an AJAX handler
* @param string $handler
* @return ComponentBase The component object, if found
*/
public function findComponentByHandler($handler)
{
foreach ($this->page->components as $component) {
if ($component->methodExists($handler)) {
return $component;
}
}
foreach ($this->layout->components as $component) {
if ($component->methodExists($handler)) {
return $component;
}
}
return null;
}
/**
* Searches the layout and page components by a partial file
* @param string $partial
* @return ComponentBase The component object, if found
*/
public function findComponentByPartial($partial)
{
foreach ($this->page->components as $component) {
if (ComponentPartial::check($component, $partial)) {
return $component;
}
}
foreach ($this->layout->components as $component) {
if (ComponentPartial::check($component, $partial)) {
return $component;
}
}
return null;
}
/**
* Set the component context manually, used by Components when calling renderPartial.
* @param ComponentBase $component
* @return void
*/
public function setComponentContext(ComponentBase $component = null)
{
$this->componentContext = $component;
}
/**
* Sets component property values from partial parameters.
* The property values should be defined as {{ param }}.
* @param ComponentBase $component The component object.
* @param array $parameters Specifies the partial parameters.
*/
protected function setComponentPropertiesFromParams($component, $parameters = [])
{
$properties = $component->getProperties();
$routerParameters = $this->router->getParameters();
foreach ($properties as $propertyName => $propertyValue) {
if (is_array($propertyValue)) {
continue;
}
$matches = [];
if (preg_match('/^\{\{([^\}]+)\}\}$/', $propertyValue, $matches)) {
$paramName = trim($matches[1]);
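// A leading colon ("{{ :param }}") resolves the value from the current route parameters;
// a plain name ("{{ param }}") is taken from the parameters passed to the partial.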
if (substr($paramName, 0, 1) == ':') {
$routeParamName = substr($paramName, 1);
$newPropertyValue = array_key_exists($routeParamName, $routerParameters)
? $routerParameters[$routeParamName]
: null;
}
else {
$newPropertyValue = array_key_exists($paramName, $parameters)
? $parameters[$paramName]
: null;
}
$component->setProperty($propertyName, $newPropertyValue);
$component->setExternalPropertyName($propertyName, $paramName);
}
}
}
//
// Security
//
/**
* Checks the request data / headers for a valid CSRF token.
* Returns false if a valid token is not found. Override this
* method to disable the check.
* @return bool
*/
protected function verifyCsrfToken()
{
if (!Config::get('cms.enableCsrfProtection')) {
return true;
}
if (in_array(Request::method(), ['HEAD', 'GET', 'OPTIONS'])) {
return true;
}
$token = Request::input('_token') ?: Request::header('X-CSRF-TOKEN');
if (!strlen($token)) {
return false;
}
return hash_equals(
Session::token(),
$token
);
}
}
| 1 | 13,002 | Who invited yoda? In all seriousness though, wouldn't an `if (empty())` be better here? | octobercms-october | php |
@@ -1341,7 +1341,7 @@ define(['playbackManager', 'dom', 'inputManager', 'datetime', 'itemHelper', 'med
});
} catch (e) {
require(['appRouter'], function(appRouter) {
- appRouter.showDirect('/');
+ window.location.href = 'index.html';
});
}
}); | 1 | define(['playbackManager', 'dom', 'inputManager', 'datetime', 'itemHelper', 'mediaInfo', 'focusManager', 'imageLoader', 'scrollHelper', 'events', 'connectionManager', 'browser', 'globalize', 'apphost', 'layoutManager', 'userSettings', 'keyboardnavigation', 'scrollStyles', 'emby-slider', 'paper-icon-button-light', 'css!assets/css/videoosd'], function (playbackManager, dom, inputManager, datetime, itemHelper, mediaInfo, focusManager, imageLoader, scrollHelper, events, connectionManager, browser, globalize, appHost, layoutManager, userSettings, keyboardnavigation) {
'use strict';
function seriesImageUrl(item, options) {
if ('Episode' !== item.Type) {
return null;
}
options = options || {};
options.type = options.type || 'Primary';
if ('Primary' === options.type && item.SeriesPrimaryImageTag) {
options.tag = item.SeriesPrimaryImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options);
}
if ('Thumb' === options.type) {
if (item.SeriesThumbImageTag) {
options.tag = item.SeriesThumbImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options);
}
if (item.ParentThumbImageTag) {
options.tag = item.ParentThumbImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.ParentThumbItemId, options);
}
}
return null;
}
function imageUrl(item, options) {
options = options || {};
options.type = options.type || 'Primary';
if (item.ImageTags && item.ImageTags[options.type]) {
options.tag = item.ImageTags[options.type];
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.PrimaryImageItemId || item.Id, options);
}
if ('Primary' === options.type && item.AlbumId && item.AlbumPrimaryImageTag) {
options.tag = item.AlbumPrimaryImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.AlbumId, options);
}
return null;
}
return function (view, params) {
function onVerticalSwipe(e, elem, data) {
var player = currentPlayer;
if (player) {
var deltaY = data.currentDeltaY;
var windowSize = dom.getWindowSize();
if (supportsBrightnessChange && data.clientX < windowSize.innerWidth / 2) {
return void doBrightnessTouch(deltaY, player, windowSize.innerHeight);
}
doVolumeTouch(deltaY, player, windowSize.innerHeight);
}
}
function doBrightnessTouch(deltaY, player, viewHeight) {
var delta = -deltaY / viewHeight * 100;
var newValue = playbackManager.getBrightness(player) + delta;
newValue = Math.min(newValue, 100);
newValue = Math.max(newValue, 0);
playbackManager.setBrightness(newValue, player);
}
function doVolumeTouch(deltaY, player, viewHeight) {
var delta = -deltaY / viewHeight * 100;
var newValue = playbackManager.getVolume(player) + delta;
newValue = Math.min(newValue, 100);
newValue = Math.max(newValue, 0);
playbackManager.setVolume(newValue, player);
}
function onDoubleClick(e) {
var clientX = e.clientX;
if (null != clientX) {
if (clientX < dom.getWindowSize().innerWidth / 2) {
playbackManager.rewind(currentPlayer);
} else {
playbackManager.fastForward(currentPlayer);
}
e.preventDefault();
e.stopPropagation();
}
}
function getDisplayItem(item) {
if ('TvChannel' === item.Type) {
var apiClient = connectionManager.getApiClient(item.ServerId);
return apiClient.getItem(apiClient.getCurrentUserId(), item.Id).then(function (refreshedItem) {
return {
originalItem: refreshedItem,
displayItem: refreshedItem.CurrentProgram
};
});
}
return Promise.resolve({
originalItem: item
});
}
function updateRecordingButton(item) {
if (!item || 'Program' !== item.Type) {
if (recordingButtonManager) {
recordingButtonManager.destroy();
recordingButtonManager = null;
}
return void view.querySelector('.btnRecord').classList.add('hide');
}
connectionManager.getApiClient(item.ServerId).getCurrentUser().then(function (user) {
if (user.Policy.EnableLiveTvManagement) {
require(['recordingButton'], function (RecordingButton) {
if (recordingButtonManager) {
return void recordingButtonManager.refreshItem(item);
}
recordingButtonManager = new RecordingButton({
item: item,
button: view.querySelector('.btnRecord')
});
view.querySelector('.btnRecord').classList.remove('hide');
});
}
});
}
function updateDisplayItem(itemInfo) {
var item = itemInfo.originalItem;
currentItem = item;
var displayItem = itemInfo.displayItem || item;
updateRecordingButton(displayItem);
setPoster(displayItem, item);
var parentName = displayItem.SeriesName || displayItem.Album;
if (displayItem.EpisodeTitle || displayItem.IsSeries) {
parentName = displayItem.Name;
}
setTitle(displayItem, parentName);
var titleElement;
var osdTitle = view.querySelector('.osdTitle');
titleElement = osdTitle;
var displayName = itemHelper.getDisplayName(displayItem, {
includeParentInfo: 'Program' !== displayItem.Type,
includeIndexNumber: 'Program' !== displayItem.Type
});
if (!displayName) {
displayName = displayItem.Type;
}
titleElement.innerHTML = displayName;
if (displayName) {
titleElement.classList.remove('hide');
} else {
titleElement.classList.add('hide');
}
var mediaInfoHtml = mediaInfo.getPrimaryMediaInfoHtml(displayItem, {
runtime: false,
subtitles: false,
tomatoes: false,
endsAt: false,
episodeTitle: false,
originalAirDate: 'Program' !== displayItem.Type,
episodeTitleIndexNumber: 'Program' !== displayItem.Type,
programIndicator: false
});
var osdMediaInfo = view.querySelector('.osdMediaInfo');
osdMediaInfo.innerHTML = mediaInfoHtml;
if (mediaInfoHtml) {
osdMediaInfo.classList.remove('hide');
} else {
osdMediaInfo.classList.add('hide');
}
var secondaryMediaInfo = view.querySelector('.osdSecondaryMediaInfo');
var secondaryMediaInfoHtml = mediaInfo.getSecondaryMediaInfoHtml(displayItem, {
startDate: false,
programTime: false
});
secondaryMediaInfo.innerHTML = secondaryMediaInfoHtml;
if (secondaryMediaInfoHtml) {
secondaryMediaInfo.classList.remove('hide');
} else {
secondaryMediaInfo.classList.add('hide');
}
if (displayName) {
view.querySelector('.osdMainTextContainer').classList.remove('hide');
} else {
view.querySelector('.osdMainTextContainer').classList.add('hide');
}
if (enableProgressByTimeOfDay) {
setDisplayTime(startTimeText, displayItem.StartDate);
setDisplayTime(endTimeText, displayItem.EndDate);
startTimeText.classList.remove('hide');
endTimeText.classList.remove('hide');
programStartDateMs = displayItem.StartDate ? datetime.parseISO8601Date(displayItem.StartDate).getTime() : 0;
programEndDateMs = displayItem.EndDate ? datetime.parseISO8601Date(displayItem.EndDate).getTime() : 0;
} else {
startTimeText.classList.add('hide');
endTimeText.classList.add('hide');
startTimeText.innerHTML = '';
endTimeText.innerHTML = '';
programStartDateMs = 0;
programEndDateMs = 0;
}
}
function getDisplayTimeWithoutAmPm(date, showSeconds) {
if (showSeconds) {
return datetime.toLocaleTimeString(date, {
hour: 'numeric',
minute: '2-digit',
second: '2-digit'
}).toLowerCase().replace('am', '').replace('pm', '').trim();
}
return datetime.getDisplayTime(date).toLowerCase().replace('am', '').replace('pm', '').trim();
}
function setDisplayTime(elem, date) {
var html;
if (date) {
date = datetime.parseISO8601Date(date);
html = getDisplayTimeWithoutAmPm(date);
}
elem.innerHTML = html || '';
}
function shouldEnableProgressByTimeOfDay(item) {
return !('TvChannel' !== item.Type || !item.CurrentProgram);
}
function updateNowPlayingInfo(player, state) {
var item = state.NowPlayingItem;
currentItem = item;
if (!item) {
setPoster(null);
updateRecordingButton(null);
Emby.Page.setTitle('');
nowPlayingVolumeSlider.disabled = true;
nowPlayingPositionSlider.disabled = true;
btnFastForward.disabled = true;
btnRewind.disabled = true;
view.querySelector('.btnSubtitles').classList.add('hide');
view.querySelector('.btnAudio').classList.add('hide');
view.querySelector('.osdTitle').innerHTML = '';
view.querySelector('.osdMediaInfo').innerHTML = '';
return;
}
enableProgressByTimeOfDay = shouldEnableProgressByTimeOfDay(item);
getDisplayItem(item).then(updateDisplayItem);
nowPlayingVolumeSlider.disabled = false;
nowPlayingPositionSlider.disabled = false;
btnFastForward.disabled = false;
btnRewind.disabled = false;
if (playbackManager.subtitleTracks(player).length) {
view.querySelector('.btnSubtitles').classList.remove('hide');
toggleSubtitleSync();
} else {
view.querySelector('.btnSubtitles').classList.add('hide');
toggleSubtitleSync('forceToHide');
}
if (playbackManager.audioTracks(player).length > 1) {
view.querySelector('.btnAudio').classList.remove('hide');
} else {
view.querySelector('.btnAudio').classList.add('hide');
}
}
function setTitle(item, parentName) {
Emby.Page.setTitle(parentName || '');
var documentTitle = parentName || (item ? item.Name : null);
if (documentTitle) {
document.title = documentTitle;
}
}
function setPoster(item, secondaryItem) {
var osdPoster = view.querySelector('.osdPoster');
if (item) {
var imgUrl = seriesImageUrl(item, {
maxWidth: osdPoster.clientWidth * 2,
type: 'Primary'
}) || seriesImageUrl(item, {
maxWidth: osdPoster.clientWidth * 2,
type: 'Thumb'
}) || imageUrl(item, {
maxWidth: osdPoster.clientWidth * 2,
type: 'Primary'
});
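            // The primary item has no usable artwork: fall back to the secondary item's
            // series, thumb or primary image (note the assignment to imgUrl inside the condition).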
if (!imgUrl && secondaryItem && (imgUrl = seriesImageUrl(secondaryItem, {
maxWidth: osdPoster.clientWidth * 2,
type: 'Primary'
}) || seriesImageUrl(secondaryItem, {
maxWidth: osdPoster.clientWidth * 2,
type: 'Thumb'
}) || imageUrl(secondaryItem, {
maxWidth: osdPoster.clientWidth * 2,
type: 'Primary'
})), imgUrl) {
return void (osdPoster.innerHTML = '<img src="' + imgUrl + '" />');
}
}
osdPoster.innerHTML = '';
}
function showOsd() {
slideDownToShow(headerElement);
showMainOsdControls();
startOsdHideTimer();
}
function hideOsd() {
slideUpToHide(headerElement);
hideMainOsdControls();
}
function toggleOsd() {
if ('osd' === currentVisibleMenu) {
hideOsd();
} else if (!currentVisibleMenu) {
showOsd();
}
}
function startOsdHideTimer() {
stopOsdHideTimer();
osdHideTimeout = setTimeout(hideOsd, 3e3);
}
function stopOsdHideTimer() {
if (osdHideTimeout) {
clearTimeout(osdHideTimeout);
osdHideTimeout = null;
}
}
function slideDownToShow(elem) {
elem.classList.remove('osdHeader-hidden');
}
function slideUpToHide(elem) {
elem.classList.add('osdHeader-hidden');
}
function clearHideAnimationEventListeners(elem) {
dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, {
once: true
});
}
function onHideAnimationComplete(e) {
var elem = e.target;
if (elem != osdBottomElement)
return;
elem.classList.add('hide');
dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, {
once: true
});
}
function showMainOsdControls() {
if (!currentVisibleMenu) {
var elem = osdBottomElement;
currentVisibleMenu = 'osd';
clearHideAnimationEventListeners(elem);
elem.classList.remove('hide');
elem.classList.remove('videoOsdBottom-hidden');
if (!layoutManager.mobile) {
setTimeout(function () {
focusManager.focus(elem.querySelector('.btnPause'));
}, 50);
}
toggleSubtitleSync();
}
}
function hideMainOsdControls() {
if ('osd' === currentVisibleMenu) {
var elem = osdBottomElement;
clearHideAnimationEventListeners(elem);
elem.classList.add('videoOsdBottom-hidden');
dom.addEventListener(elem, transitionEndEventName, onHideAnimationComplete, {
once: true
});
currentVisibleMenu = null;
toggleSubtitleSync('hide');
// Firefox does not blur by itself
if (document.activeElement) {
document.activeElement.blur();
}
}
}
function onPointerMove(e) {
if ('mouse' === (e.pointerType || (layoutManager.mobile ? 'touch' : 'mouse'))) {
var eventX = e.screenX || 0;
var eventY = e.screenY || 0;
var obj = lastPointerMoveData;
if (!obj) {
lastPointerMoveData = {
x: eventX,
y: eventY
};
return;
}
if (Math.abs(eventX - obj.x) < 10 && Math.abs(eventY - obj.y) < 10) {
return;
}
obj.x = eventX;
obj.y = eventY;
showOsd();
}
}
function onInputCommand(e) {
var player = currentPlayer;
switch (e.detail.command) {
case 'left':
if ('osd' === currentVisibleMenu) {
showOsd();
} else {
if (!currentVisibleMenu) {
e.preventDefault();
playbackManager.rewind(player);
}
}
break;
case 'right':
if ('osd' === currentVisibleMenu) {
showOsd();
} else if (!currentVisibleMenu) {
e.preventDefault();
playbackManager.fastForward(player);
}
break;
case 'pageup':
playbackManager.nextChapter(player);
break;
case 'pagedown':
playbackManager.previousChapter(player);
break;
case 'up':
case 'down':
case 'select':
case 'menu':
case 'info':
case 'play':
case 'playpause':
case 'pause':
case 'fastforward':
case 'rewind':
case 'next':
case 'previous':
showOsd();
break;
case 'record':
onRecordingCommand();
showOsd();
break;
case 'togglestats':
toggleStats();
}
}
function onRecordingCommand() {
var btnRecord = view.querySelector('.btnRecord');
if (!btnRecord.classList.contains('hide')) {
btnRecord.click();
}
}
function updateFullscreenIcon() {
const button = view.querySelector('.btnFullscreen');
const icon = button.querySelector('.material-icons');
icon.classList.remove('fullscreen_exit', 'fullscreen');
if (playbackManager.isFullscreen(currentPlayer)) {
button.setAttribute('title', globalize.translate('ExitFullscreen') + ' (f)');
icon.classList.add('fullscreen_exit');
} else {
button.setAttribute('title', globalize.translate('Fullscreen') + ' (f)');
icon.classList.add('fullscreen');
}
}
function onPlayerChange() {
bindToPlayer(playbackManager.getCurrentPlayer());
}
function onStateChanged(event, state) {
var player = this;
if (state.NowPlayingItem) {
isEnabled = true;
updatePlayerStateInternal(event, player, state);
updatePlaylist(player);
enableStopOnBack(true);
}
}
function onPlayPauseStateChanged(e) {
if (isEnabled) {
updatePlayPauseState(this.paused());
}
}
function onVolumeChanged(e) {
if (isEnabled) {
var player = this;
updatePlayerVolumeState(player, player.isMuted(), player.getVolume());
}
}
function onPlaybackStart(e, state) {
console.debug('nowplaying event: ' + e.type);
var player = this;
onStateChanged.call(player, e, state);
resetUpNextDialog();
}
function resetUpNextDialog() {
comingUpNextDisplayed = false;
var dlg = currentUpNextDialog;
if (dlg) {
dlg.destroy();
currentUpNextDialog = null;
}
}
function onPlaybackStopped(e, state) {
currentRuntimeTicks = null;
resetUpNextDialog();
console.debug('nowplaying event: ' + e.type);
if ('Video' !== state.NextMediaType) {
view.removeEventListener('viewbeforehide', onViewHideStopPlayback);
Emby.Page.back();
}
}
function onMediaStreamsChanged(e) {
var player = this;
var state = playbackManager.getPlayerState(player);
onStateChanged.call(player, {
type: 'init'
}, state);
}
function onBeginFetch() {
document.querySelector('.osdMediaStatus').classList.remove('hide');
}
function onEndFetch() {
document.querySelector('.osdMediaStatus').classList.add('hide');
}
function bindToPlayer(player) {
if (player !== currentPlayer) {
releaseCurrentPlayer();
currentPlayer = player;
if (!player) return;
}
var state = playbackManager.getPlayerState(player);
onStateChanged.call(player, {
type: 'init'
}, state);
events.on(player, 'playbackstart', onPlaybackStart);
events.on(player, 'playbackstop', onPlaybackStopped);
events.on(player, 'volumechange', onVolumeChanged);
events.on(player, 'pause', onPlayPauseStateChanged);
events.on(player, 'unpause', onPlayPauseStateChanged);
events.on(player, 'timeupdate', onTimeUpdate);
events.on(player, 'fullscreenchange', updateFullscreenIcon);
events.on(player, 'mediastreamschange', onMediaStreamsChanged);
events.on(player, 'beginFetch', onBeginFetch);
events.on(player, 'endFetch', onEndFetch);
resetUpNextDialog();
if (player.isFetching) {
onBeginFetch();
}
}
function releaseCurrentPlayer() {
destroyStats();
destroySubtitleSync();
resetUpNextDialog();
var player = currentPlayer;
if (player) {
events.off(player, 'playbackstart', onPlaybackStart);
events.off(player, 'playbackstop', onPlaybackStopped);
events.off(player, 'volumechange', onVolumeChanged);
events.off(player, 'pause', onPlayPauseStateChanged);
events.off(player, 'unpause', onPlayPauseStateChanged);
events.off(player, 'timeupdate', onTimeUpdate);
events.off(player, 'fullscreenchange', updateFullscreenIcon);
events.off(player, 'mediastreamschange', onMediaStreamsChanged);
currentPlayer = null;
}
}
function onTimeUpdate(e) {
            // Testing for 'currentItem' is required for Firefox, since its player keeps firing 'timeupdate' events even while stopped at a breakpoint
if (isEnabled && currentItem) {
var now = new Date().getTime();
if (!(now - lastUpdateTime < 700)) {
lastUpdateTime = now;
var player = this;
currentRuntimeTicks = playbackManager.duration(player);
var currentTime = playbackManager.currentTime(player);
updateTimeDisplay(currentTime, currentRuntimeTicks, playbackManager.playbackStartTime(player), playbackManager.getBufferedRanges(player));
var item = currentItem;
refreshProgramInfoIfNeeded(player, item);
showComingUpNextIfNeeded(player, item, currentTime, currentRuntimeTicks);
}
}
}
function showComingUpNextIfNeeded(player, currentItem, currentTimeTicks, runtimeTicks) {
if (runtimeTicks && currentTimeTicks && !comingUpNextDisplayed && !currentVisibleMenu && 'Episode' === currentItem.Type && userSettings.enableNextVideoInfoOverlay()) {
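                // Ticks are 100-nanosecond units: 3e10 = 50 min, 24e9 = 40 min, 6e9 = 10 min, 2e8 = 20 s.
                // The overlay is shown 40/35/30 seconds before the end, but only for items at least
                // 10 minutes long and with at least 20 seconds left to play.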
var showAtSecondsLeft = runtimeTicks >= 3e10 ? 40 : runtimeTicks >= 24e9 ? 35 : 30;
var showAtTicks = runtimeTicks - 1e3 * showAtSecondsLeft * 1e4;
var timeRemainingTicks = runtimeTicks - currentTimeTicks;
if (currentTimeTicks >= showAtTicks && runtimeTicks >= 6e9 && timeRemainingTicks >= 2e8) {
showComingUpNext(player);
}
}
}
function onUpNextHidden() {
if ('upnext' === currentVisibleMenu) {
currentVisibleMenu = null;
}
}
function showComingUpNext(player) {
require(['upNextDialog'], function (UpNextDialog) {
if (!(currentVisibleMenu || currentUpNextDialog)) {
currentVisibleMenu = 'upnext';
comingUpNextDisplayed = true;
playbackManager.nextItem(player).then(function (nextItem) {
currentUpNextDialog = new UpNextDialog({
parent: view.querySelector('.upNextContainer'),
player: player,
nextItem: nextItem
});
events.on(currentUpNextDialog, 'hide', onUpNextHidden);
}, onUpNextHidden);
}
});
}
function refreshProgramInfoIfNeeded(player, item) {
if ('TvChannel' === item.Type) {
var program = item.CurrentProgram;
if (program && program.EndDate) {
try {
var endDate = datetime.parseISO8601Date(program.EndDate);
if (new Date().getTime() >= endDate.getTime()) {
console.debug('program info needs to be refreshed');
var state = playbackManager.getPlayerState(player);
onStateChanged.call(player, {
type: 'init'
}, state);
}
} catch (e) {
console.error('error parsing date: ' + program.EndDate);
}
}
}
}
function updatePlayPauseState(isPaused) {
const btnPlayPause = view.querySelector('.btnPause');
const btnPlayPauseIcon = btnPlayPause.querySelector('.material-icons');
btnPlayPauseIcon.classList.remove('play_arrow', 'pause');
if (isPaused) {
btnPlayPauseIcon.classList.add('play_arrow');
btnPlayPause.setAttribute('title', globalize.translate('ButtonPlay') + ' (k)');
} else {
btnPlayPauseIcon.classList.add('pause');
btnPlayPause.setAttribute('title', globalize.translate('ButtonPause') + ' (k)');
}
}
function updatePlayerStateInternal(event, player, state) {
var playState = state.PlayState || {};
updatePlayPauseState(playState.IsPaused);
var supportedCommands = playbackManager.getSupportedCommands(player);
currentPlayerSupportedCommands = supportedCommands;
supportsBrightnessChange = -1 !== supportedCommands.indexOf('SetBrightness');
updatePlayerVolumeState(player, playState.IsMuted, playState.VolumeLevel);
if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) {
nowPlayingPositionSlider.disabled = !playState.CanSeek;
}
btnFastForward.disabled = !playState.CanSeek;
btnRewind.disabled = !playState.CanSeek;
var nowPlayingItem = state.NowPlayingItem || {};
playbackStartTimeTicks = playState.PlaybackStartTimeTicks;
updateTimeDisplay(playState.PositionTicks, nowPlayingItem.RunTimeTicks, playState.PlaybackStartTimeTicks, playState.BufferedRanges || []);
updateNowPlayingInfo(player, state);
if (state.MediaSource && state.MediaSource.SupportsTranscoding && -1 !== supportedCommands.indexOf('SetMaxStreamingBitrate')) {
view.querySelector('.btnVideoOsdSettings').classList.remove('hide');
} else {
view.querySelector('.btnVideoOsdSettings').classList.add('hide');
}
var isProgressClear = state.MediaSource && null == state.MediaSource.RunTimeTicks;
nowPlayingPositionSlider.setIsClear(isProgressClear);
if (nowPlayingItem.RunTimeTicks) {
nowPlayingPositionSlider.setKeyboardSteps(userSettings.skipBackLength() * 1000000 / nowPlayingItem.RunTimeTicks,
userSettings.skipForwardLength() * 1000000 / nowPlayingItem.RunTimeTicks);
}
if (-1 === supportedCommands.indexOf('ToggleFullscreen') || player.isLocalPlayer && layoutManager.tv && playbackManager.isFullscreen(player)) {
view.querySelector('.btnFullscreen').classList.add('hide');
} else {
view.querySelector('.btnFullscreen').classList.remove('hide');
}
if (-1 === supportedCommands.indexOf('PictureInPicture')) {
view.querySelector('.btnPip').classList.add('hide');
} else {
view.querySelector('.btnPip').classList.remove('hide');
}
if (-1 === supportedCommands.indexOf('AirPlay')) {
view.querySelector('.btnAirPlay').classList.add('hide');
} else {
view.querySelector('.btnAirPlay').classList.remove('hide');
}
updateFullscreenIcon();
}
function getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, currentTimeMs) {
return (currentTimeMs - programStartDateMs) / programRuntimeMs * 100;
}
function updateTimeDisplay(positionTicks, runtimeTicks, playbackStartTimeTicks, bufferedRanges) {
if (enableProgressByTimeOfDay) {
if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) {
if (programStartDateMs && programEndDateMs) {
var currentTimeMs = (playbackStartTimeTicks + (positionTicks || 0)) / 1e4;
var programRuntimeMs = programEndDateMs - programStartDateMs;
if (nowPlayingPositionSlider.value = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, currentTimeMs), bufferedRanges.length) {
var rangeStart = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].start || 0)) / 1e4);
var rangeEnd = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].end || 0)) / 1e4);
nowPlayingPositionSlider.setBufferedRanges([{
start: rangeStart,
end: rangeEnd
}]);
} else {
nowPlayingPositionSlider.setBufferedRanges([]);
}
} else {
nowPlayingPositionSlider.value = 0;
nowPlayingPositionSlider.setBufferedRanges([]);
}
}
nowPlayingPositionText.innerHTML = '';
nowPlayingDurationText.innerHTML = '';
} else {
if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) {
if (runtimeTicks) {
var pct = positionTicks / runtimeTicks;
pct *= 100;
nowPlayingPositionSlider.value = pct;
} else {
nowPlayingPositionSlider.value = 0;
}
if (runtimeTicks && null != positionTicks && currentRuntimeTicks && !enableProgressByTimeOfDay && currentItem.RunTimeTicks && 'Recording' !== currentItem.Type) {
endsAtText.innerHTML = ' - ' + mediaInfo.getEndsAtFromPosition(runtimeTicks, positionTicks, true);
} else {
endsAtText.innerHTML = '';
}
}
if (nowPlayingPositionSlider) {
nowPlayingPositionSlider.setBufferedRanges(bufferedRanges, runtimeTicks, positionTicks);
}
updateTimeText(nowPlayingPositionText, positionTicks);
updateTimeText(nowPlayingDurationText, runtimeTicks, true);
}
}
function updatePlayerVolumeState(player, isMuted, volumeLevel) {
var supportedCommands = currentPlayerSupportedCommands;
var showMuteButton = true;
var showVolumeSlider = true;
if (-1 === supportedCommands.indexOf('Mute')) {
showMuteButton = false;
}
if (-1 === supportedCommands.indexOf('SetVolume')) {
showVolumeSlider = false;
}
if (player.isLocalPlayer && appHost.supports('physicalvolumecontrol')) {
showMuteButton = false;
showVolumeSlider = false;
}
const buttonMute = view.querySelector('.buttonMute');
const buttonMuteIcon = buttonMute.querySelector('.material-icons');
buttonMuteIcon.classList.remove('volume_off', 'volume_up');
if (isMuted) {
buttonMute.setAttribute('title', globalize.translate('Unmute') + ' (m)');
buttonMuteIcon.classList.add('volume_off');
} else {
buttonMute.setAttribute('title', globalize.translate('Mute') + ' (m)');
buttonMuteIcon.classList.add('volume_up');
}
if (showMuteButton) {
buttonMute.classList.remove('hide');
} else {
buttonMute.classList.add('hide');
}
if (nowPlayingVolumeSlider) {
if (showVolumeSlider) {
nowPlayingVolumeSliderContainer.classList.remove('hide');
} else {
nowPlayingVolumeSliderContainer.classList.add('hide');
}
if (!nowPlayingVolumeSlider.dragging) {
nowPlayingVolumeSlider.value = volumeLevel || 0;
}
}
}
function updatePlaylist(player) {
var btnPreviousTrack = view.querySelector('.btnPreviousTrack');
var btnNextTrack = view.querySelector('.btnNextTrack');
btnPreviousTrack.classList.remove('hide');
btnNextTrack.classList.remove('hide');
btnNextTrack.disabled = false;
btnPreviousTrack.disabled = false;
}
function updateTimeText(elem, ticks, divider) {
if (null == ticks) {
elem.innerHTML = '';
return;
}
var html = datetime.getDisplayRunningTime(ticks);
if (divider) {
html = ' / ' + html;
}
elem.innerHTML = html;
}
function onSettingsButtonClick(e) {
var btn = this;
require(['playerSettingsMenu'], function (playerSettingsMenu) {
var player = currentPlayer;
if (player) {
// show subtitle offset feature only if player and media support it
var showSubOffset = playbackManager.supportSubtitleOffset(player) &&
playbackManager.canHandleOffsetOnCurrentSubtitle(player);
playerSettingsMenu.show({
mediaType: 'Video',
player: player,
positionTo: btn,
stats: true,
suboffset: showSubOffset,
onOption: onSettingsOption
});
}
});
}
function onSettingsOption(selectedOption) {
if ('stats' === selectedOption) {
toggleStats();
} else if ('suboffset' === selectedOption) {
var player = currentPlayer;
if (player) {
playbackManager.enableShowingSubtitleOffset(player);
toggleSubtitleSync();
}
}
}
function toggleStats() {
require(['playerStats'], function (PlayerStats) {
var player = currentPlayer;
if (player) {
if (statsOverlay) {
statsOverlay.toggle();
} else {
statsOverlay = new PlayerStats({
player: player
});
}
}
});
}
function destroyStats() {
if (statsOverlay) {
statsOverlay.destroy();
statsOverlay = null;
}
}
function showAudioTrackSelection() {
var player = currentPlayer;
var audioTracks = playbackManager.audioTracks(player);
var currentIndex = playbackManager.getAudioStreamIndex(player);
var menuItems = audioTracks.map(function (stream) {
var opt = {
name: stream.DisplayTitle,
id: stream.Index
};
if (stream.Index === currentIndex) {
opt.selected = true;
}
return opt;
});
var positionTo = this;
require(['actionsheet'], function (actionsheet) {
actionsheet.show({
items: menuItems,
title: globalize.translate('Audio'),
positionTo: positionTo
}).then(function (id) {
var index = parseInt(id);
if (index !== currentIndex) {
playbackManager.setAudioStreamIndex(index, player);
}
});
});
}
function showSubtitleTrackSelection() {
var player = currentPlayer;
var streams = playbackManager.subtitleTracks(player);
var currentIndex = playbackManager.getSubtitleStreamIndex(player);
if (null == currentIndex) {
currentIndex = -1;
}
streams.unshift({
Index: -1,
DisplayTitle: globalize.translate('Off')
});
var menuItems = streams.map(function (stream) {
var opt = {
name: stream.DisplayTitle,
id: stream.Index
};
if (stream.Index === currentIndex) {
opt.selected = true;
}
return opt;
});
var positionTo = this;
require(['actionsheet'], function (actionsheet) {
actionsheet.show({
title: globalize.translate('Subtitles'),
items: menuItems,
positionTo: positionTo
}).then(function (id) {
var index = parseInt(id);
if (index !== currentIndex) {
playbackManager.setSubtitleStreamIndex(index, player);
}
toggleSubtitleSync();
});
});
}
function toggleSubtitleSync(action) {
require(['subtitleSync'], function (SubtitleSync) {
var player = currentPlayer;
if (subtitleSyncOverlay) {
subtitleSyncOverlay.toggle(action);
} else if (player) {
subtitleSyncOverlay = new SubtitleSync(player);
}
});
}
function destroySubtitleSync() {
if (subtitleSyncOverlay) {
subtitleSyncOverlay.destroy();
subtitleSyncOverlay = null;
}
}
/**
* Clicked element.
     * Used to skip 'click' handling on Firefox/Edge.
*/
var clickedElement;
function onWindowKeyDown(e) {
clickedElement = e.srcElement;
var key = keyboardnavigation.getKeyName(e);
if (!currentVisibleMenu && 32 === e.keyCode) {
playbackManager.playPause(currentPlayer);
showOsd();
return;
}
if (layoutManager.tv && keyboardnavigation.isNavigationKey(key)) {
showOsd();
return;
}
switch (key) {
case 'Enter':
showOsd();
break;
case 'Escape':
case 'Back':
// Ignore key when some dialog is opened
if (currentVisibleMenu === 'osd' && !document.querySelector('.dialogContainer')) {
hideOsd();
e.stopPropagation();
}
break;
case 'k':
playbackManager.playPause(currentPlayer);
showOsd();
break;
case 'ArrowUp':
case 'Up':
playbackManager.volumeUp(currentPlayer);
break;
case 'ArrowDown':
case 'Down':
playbackManager.volumeDown(currentPlayer);
break;
case 'l':
case 'ArrowRight':
case 'Right':
playbackManager.fastForward(currentPlayer);
showOsd();
break;
case 'j':
case 'ArrowLeft':
case 'Left':
playbackManager.rewind(currentPlayer);
showOsd();
break;
case 'f':
if (!e.ctrlKey && !e.metaKey) {
playbackManager.toggleFullscreen(currentPlayer);
showOsd();
}
break;
case 'm':
playbackManager.toggleMute(currentPlayer);
showOsd();
break;
case 'p':
case 'P':
if (e.shiftKey) {
playbackManager.previousTrack(currentPlayer);
}
break;
case 'n':
case 'N':
if (e.shiftKey) {
playbackManager.nextTrack(currentPlayer);
}
break;
case 'NavigationLeft':
case 'GamepadDPadLeft':
case 'GamepadLeftThumbstickLeft':
// Ignores gamepad events that are always triggered, even when not focused.
if (document.hasFocus()) { /* eslint-disable-line compat/compat */
playbackManager.rewind(currentPlayer);
showOsd();
}
break;
case 'NavigationRight':
case 'GamepadDPadRight':
case 'GamepadLeftThumbstickRight':
// Ignores gamepad events that are always triggered, even when not focused.
if (document.hasFocus()) { /* eslint-disable-line compat/compat */
playbackManager.fastForward(currentPlayer);
showOsd();
}
break;
case 'Home':
playbackManager.seekPercent(0, currentPlayer);
break;
case 'End':
playbackManager.seekPercent(100, currentPlayer);
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
var percent = parseInt(key, 10) * 10;
playbackManager.seekPercent(percent, currentPlayer);
break;
}
}
function onWindowMouseDown(e) {
clickedElement = e.srcElement;
}
function onWindowTouchStart(e) {
clickedElement = e.srcElement;
}
function getImgUrl(item, chapter, index, maxWidth, apiClient) {
if (chapter.ImageTag) {
return apiClient.getScaledImageUrl(item.Id, {
maxWidth: maxWidth,
tag: chapter.ImageTag,
type: 'Chapter',
index: index
});
}
return null;
}
function getChapterBubbleHtml(apiClient, item, chapters, positionTicks) {
var chapter;
var index = -1;
for (var i = 0, length = chapters.length; i < length; i++) {
var currentChapter = chapters[i];
if (positionTicks >= currentChapter.StartPositionTicks) {
chapter = currentChapter;
index = i;
}
}
if (!chapter) {
return null;
}
var src = getImgUrl(item, chapter, index, 400, apiClient);
if (src) {
var html = '<div class="chapterThumbContainer">';
html += '<img class="chapterThumb" src="' + src + '" />';
html += '<div class="chapterThumbTextContainer">';
html += '<div class="chapterThumbText chapterThumbText-dim">';
html += chapter.Name;
html += '</div>';
html += '<h2 class="chapterThumbText">';
html += datetime.getDisplayRunningTime(positionTicks);
html += '</h2>';
html += '</div>';
return html + '</div>';
}
return null;
}
var playPauseClickTimeout;
function onViewHideStopPlayback() {
if (playbackManager.isPlayingVideo()) {
require(['shell'], function (shell) {
shell.disableFullscreen();
});
clearTimeout(playPauseClickTimeout);
var player = currentPlayer;
view.removeEventListener('viewbeforehide', onViewHideStopPlayback);
releaseCurrentPlayer();
playbackManager.stop(player);
}
}
function enableStopOnBack(enabled) {
view.removeEventListener('viewbeforehide', onViewHideStopPlayback);
if (enabled && playbackManager.isPlayingVideo(currentPlayer)) {
view.addEventListener('viewbeforehide', onViewHideStopPlayback);
}
}
require(['shell'], function (shell) {
shell.enableFullscreen();
});
var currentPlayer;
var comingUpNextDisplayed;
var currentUpNextDialog;
var isEnabled;
var currentItem;
var recordingButtonManager;
var enableProgressByTimeOfDay;
var supportsBrightnessChange;
var currentVisibleMenu;
var statsOverlay;
var osdHideTimeout;
var lastPointerMoveData;
var self = this;
var currentPlayerSupportedCommands = [];
var currentRuntimeTicks = 0;
var lastUpdateTime = 0;
var programStartDateMs = 0;
var programEndDateMs = 0;
var playbackStartTimeTicks = 0;
var subtitleSyncOverlay;
var nowPlayingVolumeSlider = view.querySelector('.osdVolumeSlider');
var nowPlayingVolumeSliderContainer = view.querySelector('.osdVolumeSliderContainer');
var nowPlayingPositionSlider = view.querySelector('.osdPositionSlider');
var nowPlayingPositionText = view.querySelector('.osdPositionText');
var nowPlayingDurationText = view.querySelector('.osdDurationText');
var startTimeText = view.querySelector('.startTimeText');
var endTimeText = view.querySelector('.endTimeText');
var endsAtText = view.querySelector('.endsAtText');
var btnRewind = view.querySelector('.btnRewind');
var btnFastForward = view.querySelector('.btnFastForward');
var transitionEndEventName = dom.whichTransitionEvent();
var headerElement = document.querySelector('.skinHeader');
var osdBottomElement = document.querySelector('.videoOsdBottom-maincontrols');
if (layoutManager.tv) {
nowPlayingPositionSlider.classList.add('focusable');
nowPlayingPositionSlider.enableKeyboardDragging();
}
view.addEventListener('viewbeforeshow', function (e) {
headerElement.classList.add('osdHeader');
Emby.Page.setTransparency('full');
});
view.addEventListener('viewshow', function (e) {
try {
events.on(playbackManager, 'playerchange', onPlayerChange);
bindToPlayer(playbackManager.getCurrentPlayer());
dom.addEventListener(document, window.PointerEvent ? 'pointermove' : 'mousemove', onPointerMove, {
passive: true
});
showOsd();
inputManager.on(window, onInputCommand);
dom.addEventListener(window, 'keydown', onWindowKeyDown, {
capture: true
});
dom.addEventListener(window, window.PointerEvent ? 'pointerdown' : 'mousedown', onWindowMouseDown, {
passive: true
});
dom.addEventListener(window, 'touchstart', onWindowTouchStart, {
passive: true
});
} catch (e) {
require(['appRouter'], function(appRouter) {
appRouter.showDirect('/');
});
}
});
view.addEventListener('viewbeforehide', function () {
if (statsOverlay) {
statsOverlay.enabled(false);
}
dom.removeEventListener(window, 'keydown', onWindowKeyDown, {
capture: true
});
dom.removeEventListener(window, window.PointerEvent ? 'pointerdown' : 'mousedown', onWindowMouseDown, {
passive: true
});
dom.removeEventListener(window, 'touchstart', onWindowTouchStart, {
passive: true
});
stopOsdHideTimer();
headerElement.classList.remove('osdHeader');
headerElement.classList.remove('osdHeader-hidden');
dom.removeEventListener(document, window.PointerEvent ? 'pointermove' : 'mousemove', onPointerMove, {
passive: true
});
inputManager.off(window, onInputCommand);
events.off(playbackManager, 'playerchange', onPlayerChange);
releaseCurrentPlayer();
});
view.querySelector('.btnFullscreen').addEventListener('click', function () {
playbackManager.toggleFullscreen(currentPlayer);
});
view.querySelector('.btnPip').addEventListener('click', function () {
playbackManager.togglePictureInPicture(currentPlayer);
});
view.querySelector('.btnAirPlay').addEventListener('click', function () {
playbackManager.toggleAirPlay(currentPlayer);
});
view.querySelector('.btnVideoOsdSettings').addEventListener('click', onSettingsButtonClick);
view.addEventListener('viewhide', function () {
headerElement.classList.remove('hide');
});
view.addEventListener('viewdestroy', function () {
if (self.touchHelper) {
self.touchHelper.destroy();
self.touchHelper = null;
}
if (recordingButtonManager) {
recordingButtonManager.destroy();
recordingButtonManager = null;
}
destroyStats();
destroySubtitleSync();
});
var lastPointerDown = 0;
dom.addEventListener(view, window.PointerEvent ? 'pointerdown' : 'click', function (e) {
if (dom.parentWithClass(e.target, ['videoOsdBottom', 'upNextContainer'])) {
return void showOsd();
}
var pointerType = e.pointerType || (layoutManager.mobile ? 'touch' : 'mouse');
var now = new Date().getTime();
switch (pointerType) {
case 'touch':
if (now - lastPointerDown > 300) {
lastPointerDown = now;
toggleOsd();
}
break;
case 'mouse':
if (!e.button) {
if (playPauseClickTimeout) {
clearTimeout(playPauseClickTimeout);
playPauseClickTimeout = 0;
} else {
playPauseClickTimeout = setTimeout(function() {
playbackManager.playPause(currentPlayer);
showOsd();
playPauseClickTimeout = 0;
}, 300);
}
}
break;
default:
playbackManager.playPause(currentPlayer);
showOsd();
}
}, {
passive: true
});
if (browser.touch) {
dom.addEventListener(view, 'dblclick', onDoubleClick, {});
} else {
var options = { passive: true };
dom.addEventListener(view, 'dblclick', function () {
playbackManager.toggleFullscreen(currentPlayer);
}, options);
}
function setVolume() {
playbackManager.setVolume(this.value, currentPlayer);
}
view.querySelector('.buttonMute').addEventListener('click', function () {
playbackManager.toggleMute(currentPlayer);
});
nowPlayingVolumeSlider.addEventListener('change', setVolume);
nowPlayingVolumeSlider.addEventListener('mousemove', setVolume);
nowPlayingVolumeSlider.addEventListener('touchmove', setVolume);
nowPlayingPositionSlider.addEventListener('change', function () {
var player = currentPlayer;
if (player) {
var newPercent = parseFloat(this.value);
if (enableProgressByTimeOfDay) {
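                    // Convert the slider percentage of the programme's air time (in ms) to ticks
                    // (1 ms = 1e4 ticks of 100 ns), then subtract the playback start time so the
                    // seek position is relative to when playback actually began.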
var seekAirTimeTicks = newPercent / 100 * (programEndDateMs - programStartDateMs) * 1e4;
seekAirTimeTicks += 1e4 * programStartDateMs;
seekAirTimeTicks -= playbackStartTimeTicks;
playbackManager.seek(seekAirTimeTicks, player);
} else {
playbackManager.seekPercent(newPercent, player);
}
}
});
nowPlayingPositionSlider.getBubbleHtml = function (value) {
showOsd();
if (enableProgressByTimeOfDay) {
if (programStartDateMs && programEndDateMs) {
var ms = programEndDateMs - programStartDateMs;
ms /= 100;
ms *= value;
ms += programStartDateMs;
return '<h1 class="sliderBubbleText">' + getDisplayTimeWithoutAmPm(new Date(parseInt(ms)), true) + '</h1>';
}
return '--:--';
}
if (!currentRuntimeTicks) {
return '--:--';
}
var ticks = currentRuntimeTicks;
ticks /= 100;
ticks *= value;
var item = currentItem;
if (item && item.Chapters && item.Chapters.length && item.Chapters[0].ImageTag) {
var html = getChapterBubbleHtml(connectionManager.getApiClient(item.ServerId), item, item.Chapters, ticks);
if (html) {
return html;
}
}
return '<h1 class="sliderBubbleText">' + datetime.getDisplayRunningTime(ticks) + '</h1>';
};
view.querySelector('.btnPreviousTrack').addEventListener('click', function () {
playbackManager.previousTrack(currentPlayer);
});
view.querySelector('.btnPause').addEventListener('click', function () {
// Ignore 'click' if another element was originally clicked (Firefox/Edge issue)
if (this.contains(clickedElement)) {
playbackManager.playPause(currentPlayer);
}
});
view.querySelector('.btnNextTrack').addEventListener('click', function () {
playbackManager.nextTrack(currentPlayer);
});
btnRewind.addEventListener('click', function () {
playbackManager.rewind(currentPlayer);
});
btnFastForward.addEventListener('click', function () {
playbackManager.fastForward(currentPlayer);
});
view.querySelector('.btnAudio').addEventListener('click', showAudioTrackSelection);
view.querySelector('.btnSubtitles').addEventListener('click', showSubtitleTrackSelection);
if (browser.touch) {
(function () {
require(['touchHelper'], function (TouchHelper) {
self.touchHelper = new TouchHelper(view, {
swipeYThreshold: 30,
triggerOnMove: true,
preventDefaultOnMove: true,
ignoreTagNames: ['BUTTON', 'INPUT', 'TEXTAREA']
});
events.on(self.touchHelper, 'swipeup', onVerticalSwipe);
events.on(self.touchHelper, 'swipedown', onVerticalSwipe);
});
})();
}
};
});
| 1 | 16,333 | You can use `appRouter.goHome` to do this (It's defined in site.js), it's less hacky than overriding the href. | jellyfin-jellyfin-web | js |
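Editor's note: a minimal sketch of the reviewer's suggestion for the jellyfin patch above, assuming `appRouter` exposes a `goHome()` helper as the comment states (defined in site.js). It is illustrative only, not the project's actual change.

        } catch (e) {
            require(['appRouter'], function (appRouter) {
                // goHome() is assumed to be available via site.js, per the review comment.
                // It routes back to the app's home view instead of hard-coding a location change.
                appRouter.goHome();
            });
        }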
@@ -101,7 +101,14 @@ bool NebulaStore::init() {
enginePtr->removePart(partId);
continue;
} else {
- partIds.emplace_back(partId);
+ auto it = std::find(partIds.begin(), partIds.end(), partId);
+ if (it != partIds.end()) {
+ LOG(INFO) << "Part " << partId
+ << " has been loaded, skip current one, remove it!";
+ enginePtr->removePart(partId);
+ } else {
+ partIds.emplace_back(partId);
+ }
}
}
if (partIds.empty()) { | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/NebulaStore.h"
#include <folly/Likely.h>
#include <algorithm>
#include <cstdint>
#include "network/NetworkUtils.h"
#include "fs/FileUtils.h"
#include "kvstore/RocksEngine.h"
#include "kvstore/SnapshotManagerImpl.h"
DEFINE_string(engine_type, "rocksdb", "rocksdb, memory...");
DEFINE_int32(custom_filter_interval_secs, 24 * 3600, "interval to trigger custom compaction");
DEFINE_int32(num_workers, 4, "Number of worker threads");
DEFINE_bool(check_leader, true, "Check leader or not");
namespace nebula {
namespace kvstore {
NebulaStore::~NebulaStore() {
LOG(INFO) << "Cut off the relationship with meta client";
options_.partMan_.reset();
LOG(INFO) << "Stop the raft service...";
raftService_->stop();
LOG(INFO) << "Waiting for the raft service stop...";
raftService_->waitUntilStop();
spaces_.clear();
bgWorkers_->stop();
bgWorkers_->wait();
LOG(INFO) << "~NebulaStore()";
}
bool NebulaStore::init() {
LOG(INFO) << "Start the raft service...";
bgWorkers_ = std::make_shared<thread::GenericThreadPool>();
bgWorkers_->start(FLAGS_num_workers, "nebula-bgworkers");
snapshot_.reset(new SnapshotManagerImpl(this));
raftService_ = raftex::RaftexService::createService(ioPool_,
workers_,
raftAddr_.second);
if (!raftService_->start()) {
LOG(ERROR) << "Start the raft service failed";
return false;
}
CHECK(!!options_.partMan_);
LOG(INFO) << "Scan the local path, and init the spaces_";
{
for (auto& path : options_.dataPaths_) {
auto rootPath = folly::stringPrintf("%s/nebula", path.c_str());
auto dirs = fs::FileUtils::listAllDirsInDir(rootPath.c_str());
for (auto& dir : dirs) {
LOG(INFO) << "Scan path \"" << path << "/" << dir << "\"";
try {
GraphSpaceID spaceId;
try {
spaceId = folly::to<GraphSpaceID>(dir);
} catch (const std::exception& ex) {
LOG(ERROR) << "Data path invalid: " << ex.what();
return false;
}
if (!options_.partMan_->spaceExist(storeSvcAddr_, spaceId).ok()) {
// TODO We might want to have a second thought here.
// Removing the data directly feels a little strong
LOG(INFO) << "Space " << spaceId
<< " does not exist any more, remove the data!";
auto dataPath = folly::stringPrintf("%s/%s",
rootPath.c_str(),
dir.c_str());
CHECK(fs::FileUtils::remove(dataPath.c_str(), true));
continue;
}
KVEngine* enginePtr = nullptr;
{
folly::RWSpinLock::WriteHolder wh(&lock_);
auto engine = newEngine(spaceId, path);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt == this->spaces_.end()) {
LOG(INFO) << "Load space " << spaceId << " from disk";
spaceIt = this->spaces_.emplace(
spaceId,
std::make_unique<SpacePartInfo>()).first;
}
spaceIt->second->engines_.emplace_back(std::move(engine));
enginePtr = spaceIt->second->engines_.back().get();
}
                    // partIds holds the partitions on this host that are waiting to be opened
std::vector<PartitionID> partIds;
for (auto& partId : enginePtr->allParts()) {
if (!options_.partMan_->partExist(storeSvcAddr_, spaceId, partId).ok()) {
LOG(INFO) << "Part " << partId
<< " does not exist any more, remove it!";
enginePtr->removePart(partId);
continue;
} else {
partIds.emplace_back(partId);
}
}
if (partIds.empty()) {
continue;
}
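                    // Open the surviving parts concurrently on the background workers; the
                    // counter/baton pair below blocks this thread until every part is loaded.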
std::atomic<size_t> counter(partIds.size());
folly::Baton<true, std::atomic> baton;
LOG(INFO) << "Need to open " << partIds.size() << " parts of space " << spaceId;
for (auto& partId : partIds) {
bgWorkers_->addTask([
spaceId, partId, enginePtr, &counter, &baton, this] () mutable {
auto part = std::make_shared<Part>(spaceId,
partId,
raftAddr_,
folly::stringPrintf("%s/wal/%d",
enginePtr->getDataRoot(),
partId),
enginePtr,
ioPool_,
bgWorkers_,
workers_,
snapshot_);
auto status = options_.partMan_->partMeta(spaceId, partId);
if (!status.ok()) {
LOG(WARNING) << status.status().toString();
return;
}
auto partMeta = status.value();
std::vector<HostAddr> peers;
for (auto& h : partMeta.peers_) {
if (h != storeSvcAddr_) {
peers.emplace_back(getRaftAddr(h));
VLOG(1) << "Add peer " << peers.back();
}
}
raftService_->addPartition(part);
part->start(std::move(peers), false);
LOG(INFO) << "Load part " << spaceId << ", " << partId << " from disk";
{
folly::RWSpinLock::WriteHolder holder(&lock_);
auto iter = spaces_.find(spaceId);
CHECK(iter != spaces_.end());
iter->second->parts_.emplace(partId, part);
}
counter.fetch_sub(1);
if (counter.load() == 0) {
baton.post();
}
});
}
baton.wait();
LOG(INFO) << "Load space " << spaceId << " complete";
} catch (std::exception& e) {
LOG(FATAL) << "Invalid data directory \"" << dir << "\"";
}
}
}
}
LOG(INFO) << "Init data from partManager for " << storeSvcAddr_;
auto partsMap = options_.partMan_->parts(storeSvcAddr_);
for (auto& entry : partsMap) {
auto spaceId = entry.first;
addSpace(spaceId);
std::vector<PartitionID> partIds;
for (auto it = entry.second.begin(); it != entry.second.end(); it++) {
partIds.emplace_back(it->first);
}
std::sort(partIds.begin(), partIds.end());
for (auto& partId : partIds) {
addPart(spaceId, partId, false);
}
}
LOG(INFO) << "Register handler...";
options_.partMan_->registerHandler(this);
return true;
}
std::unique_ptr<KVEngine> NebulaStore::newEngine(GraphSpaceID spaceId,
const std::string& path) {
if (FLAGS_engine_type == "rocksdb") {
std::shared_ptr<KVCompactionFilterFactory> cfFactory = nullptr;
if (options_.cffBuilder_ != nullptr) {
cfFactory = options_.cffBuilder_->buildCfFactory(spaceId,
FLAGS_custom_filter_interval_secs);
}
return std::make_unique<RocksEngine>(spaceId,
path,
options_.mergeOp_,
cfFactory);
} else {
LOG(FATAL) << "Unknown engine type " << FLAGS_engine_type;
return nullptr;
}
}
ErrorOr<ResultCode, HostAddr> NebulaStore::partLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return getStoreAddr(partIt->second->leader());
}
void NebulaStore::addSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
if (this->spaces_.find(spaceId) != this->spaces_.end()) {
LOG(INFO) << "Space " << spaceId << " has existed!";
return;
}
LOG(INFO) << "Create space " << spaceId;
this->spaces_[spaceId] = std::make_unique<SpacePartInfo>();
for (auto& path : options_.dataPaths_) {
this->spaces_[spaceId]->engines_.emplace_back(newEngine(spaceId, path));
}
}
void NebulaStore::addPart(GraphSpaceID spaceId, PartitionID partId, bool asLearner) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
CHECK(spaceIt != this->spaces_.end()) << "Space should exist!";
if (spaceIt->second->parts_.find(partId) != spaceIt->second->parts_.end()) {
LOG(INFO) << "[" << spaceId << "," << partId << "] has existed!";
return;
}
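    // Place the new part on the engine (data path) that currently holds the fewest parts,
    // so parts stay balanced across the configured data paths.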
int32_t minIndex = -1;
int32_t index = 0;
int32_t minPartsNum = 0x7FFFFFFF;
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
if (engine->totalPartsNum() < minPartsNum) {
minPartsNum = engine->totalPartsNum();
minIndex = index;
}
index++;
}
CHECK_GE(minIndex, 0) << "engines number:" << engines.size();
const auto& targetEngine = engines[minIndex];
// Write the information into related engine.
targetEngine->addPart(partId);
spaceIt->second->parts_.emplace(
partId,
newPart(spaceId, partId, targetEngine.get(), asLearner));
LOG(INFO) << "Space " << spaceId << ", part " << partId
<< " has been added, asLearner " << asLearner;
}
std::shared_ptr<Part> NebulaStore::newPart(GraphSpaceID spaceId,
PartitionID partId,
KVEngine* engine,
bool asLearner) {
auto part = std::make_shared<Part>(spaceId,
partId,
raftAddr_,
folly::stringPrintf("%s/wal/%d",
engine->getDataRoot(),
partId),
engine,
ioPool_,
bgWorkers_,
workers_,
snapshot_);
auto metaStatus = options_.partMan_->partMeta(spaceId, partId);
if (!metaStatus.ok()) {
return nullptr;
}
auto partMeta = metaStatus.value();
std::vector<HostAddr> peers;
for (auto& h : partMeta.peers_) {
if (h != storeSvcAddr_) {
peers.emplace_back(getRaftAddr(h));
VLOG(1) << "Add peer " << peers.back();
}
}
raftService_->addPartition(part);
part->start(std::move(peers), asLearner);
return part;
}
void NebulaStore::removeSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
auto parts = engine->allParts();
for (auto& partId : parts) {
engine->removePart(partId);
}
CHECK_EQ(0, engine->totalPartsNum());
}
this->spaces_.erase(spaceIt);
// TODO(dangleptr): Should we delete the data?
LOG(INFO) << "Space " << spaceId << " has been removed!";
}
void NebulaStore::removePart(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
auto* e = partIt->second->engine();
CHECK_NOTNULL(e);
raftService_->removePartition(partIt->second);
partIt->second->reset();
spaceIt->second->parts_.erase(partId);
e->removePart(partId);
}
}
LOG(INFO) << "Space " << spaceId << ", part " << partId << " has been removed!";
}
void NebulaStore::updateSpaceOption(GraphSpaceID spaceId,
const std::unordered_map<std::string, std::string>& options,
bool isDbOption) {
if (isDbOption) {
for (const auto& kv : options) {
setDBOption(spaceId, kv.first, kv.second);
}
} else {
for (const auto& kv : options) {
setOption(spaceId, kv.first, kv.second);
}
}
}
ResultCode NebulaStore::get(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
std::string* value) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->get(key, value);
}
ResultCode NebulaStore::multiGet(GraphSpaceID spaceId,
PartitionID partId,
const std::vector<std::string>& keys,
std::vector<std::string>* values) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->multiGet(keys, values);
}
ResultCode NebulaStore::range(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->range(start, end, iter);
}
ResultCode NebulaStore::prefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& prefix,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->prefix(prefix, iter);
}
ResultCode NebulaStore::rangeWithPrefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& prefix,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->rangeWithPrefix(start, prefix, iter);
}
ResultCode NebulaStore::sync(GraphSpaceID spaceId,
PartitionID partId) {
auto partRet = part(spaceId, partId);
if (!ok(partRet)) {
return error(partRet);
}
auto part = nebula::value(partRet);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
auto ret = ResultCode::SUCCEEDED;
folly::Baton<true, std::atomic> baton;
part->sync([&] (kvstore::ResultCode code) {
ret = code;
baton.post();
});
baton.wait();
return ret;
}
void NebulaStore::asyncMultiPut(GraphSpaceID spaceId,
PartitionID partId,
std::vector<KV> keyValues,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncMultiPut(std::move(keyValues), std::move(cb));
}
void NebulaStore::asyncRemove(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemove(key, std::move(cb));
}
void NebulaStore::asyncMultiRemove(GraphSpaceID spaceId,
PartitionID partId,
std::vector<std::string> keys,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncMultiRemove(std::move(keys), std::move(cb));
}
void NebulaStore::asyncRemoveRange(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemoveRange(start, end, std::move(cb));
}
void NebulaStore::asyncRemovePrefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& prefix,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemovePrefix(prefix, std::move(cb));
}
void NebulaStore::asyncAtomicOp(GraphSpaceID spaceId,
PartitionID partId,
raftex::AtomicOp op,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncAtomicOp(std::move(op), std::move(cb));
}
ErrorOr<ResultCode, std::shared_ptr<Part>> NebulaStore::part(GraphSpaceID spaceId,
PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return partIt->second;
}
ResultCode NebulaStore::ingest(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto parts = engine->allParts();
for (auto part : parts) {
auto ret = this->engine(spaceId, part);
if (!ok(ret)) {
return error(ret);
}
auto path = folly::stringPrintf("%s/download/%d", value(ret)->getDataRoot(), part);
if (!fs::FileUtils::exist(path)) {
                LOG(INFO) << path << " does not exist";
continue;
}
auto files = nebula::fs::FileUtils::listAllFilesInDir(path.c_str(), true, "*.sst");
for (auto file : files) {
LOG(INFO) << "Ingesting extra file: " << file;
auto code = engine->ingest(std::vector<std::string>({file}));
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->setOption(configKey, configValue);
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setDBOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->setDBOption(configKey, configValue);
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::compact(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
auto code = ResultCode::SUCCEEDED;
std::vector<std::thread> threads;
for (auto& engine : space->engines_) {
threads.emplace_back(std::thread([&engine, &code] {
auto ret = engine->compact();
if (ret != ResultCode::SUCCEEDED) {
code = ret;
}
}));
}
// Wait for all threads to finish
for (auto& t : threads) {
t.join();
}
return code;
}
ResultCode NebulaStore::flush(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->flush();
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::createCheckpoint(GraphSpaceID spaceId, const std::string& name) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->createCheckpoint(name);
if (code != ResultCode::SUCCEEDED) {
return code;
}
// create wal hard link for all parts
auto parts = engine->allParts();
for (auto& part : parts) {
auto ret = this->part(spaceId, part);
if (!ok(ret)) {
LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part;
return error(ret);
}
auto walPath = folly::stringPrintf("%s/checkpoints/%s/wal/%d",
engine->getDataRoot(), name.c_str(), part);
auto p = nebula::value(ret);
if (!p->linkCurrentWAL(walPath.data())) {
return ResultCode::ERR_CHECKPOINT_ERROR;
}
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::dropCheckpoint(GraphSpaceID spaceId, const std::string& name) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
/**
* Drop checkpoint and wal together
**/
auto checkpointPath = folly::stringPrintf("%s/checkpoints/%s",
engine->getDataRoot(),
name.c_str());
LOG(INFO) << "Drop checkpoint : " << checkpointPath;
if (!fs::FileUtils::exist(checkpointPath)) {
continue;
}
if (!fs::FileUtils::remove(checkpointPath.data(), true)) {
LOG(ERROR) << "Drop checkpoint dir failed : " << checkpointPath;
return ResultCode::ERR_IO_ERROR;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setWriteBlocking(GraphSpaceID spaceId, bool sign) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto parts = engine->allParts();
for (auto& part : parts) {
auto partRet = this->part(spaceId, part);
if (!ok(partRet)) {
LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part;
return error(partRet);
}
auto p = nebula::value(partRet);
if (p->isLeader()) {
auto ret = ResultCode::SUCCEEDED;
p->setBlocking(sign);
if (sign) {
folly::Baton<true, std::atomic> baton;
p->sync([&ret, &baton] (kvstore::ResultCode code) {
if (kvstore::ResultCode::SUCCEEDED != code) {
ret = code;
}
baton.post();
});
baton.wait();
}
if (ret != ResultCode::SUCCEEDED) {
LOG(ERROR) << "Part sync failed. space : " << spaceId << " Part : " << part;
return ret;
}
}
}
}
return ResultCode::SUCCEEDED;
}
bool NebulaStore::isLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto spaceIt = spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
return partIt->second->isLeader();
} else {
return false;
}
}
return false;
}
ErrorOr<ResultCode, KVEngine*> NebulaStore::engine(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return partIt->second->engine();
}
ErrorOr<ResultCode, std::shared_ptr<SpacePartInfo>> NebulaStore::space(GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
return it->second;
}
int32_t NebulaStore::allLeader(std::unordered_map<GraphSpaceID,
std::vector<PartitionID>>& leaderIds) {
folly::RWSpinLock::ReadHolder rh(&lock_);
int32_t count = 0;
for (const auto& spaceIt : spaces_) {
auto spaceId = spaceIt.first;
for (const auto& partIt : spaceIt.second->parts_) {
auto partId = partIt.first;
if (partIt.second->isLeader()) {
leaderIds[spaceId].emplace_back(partId);
++count;
}
}
}
return count;
}
bool NebulaStore::checkLeader(std::shared_ptr<Part> part) const {
return !FLAGS_check_leader || part->isLeader();
}
} // namespace kvstore
} // namespace nebula
| 1 | 28,208 | When could this happen? | vesoft-inc-nebula | cpp |
@@ -55,8 +55,7 @@ namespace Datadog.Trace.Agent.Transports
await memoryStream.FlushAsync().ConfigureAwait(false);
memoryStream.Seek(0, SeekOrigin.Begin);
var buffer = memoryStream.GetBuffer();
- _headers.Add("Content-Type", "application/json");
- var result = await PostSegmentAsync(new ArraySegment<byte>(buffer, 0, (int)memoryStream.Length)).ConfigureAwait(false);
+ var result = await PostSegmentAsync(new ArraySegment<byte>(buffer, 0, (int)memoryStream.Length), "application/json").ConfigureAwait(false);
var response = result.Item1;
var request = result.Item2;
if (response.StatusCode != 200 && response.StatusCode != 202) | 1 | // <copyright file="HttpStreamRequest.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using Datadog.Trace.AppSec;
using Datadog.Trace.HttpOverStreams;
using Datadog.Trace.HttpOverStreams.HttpContent;
using Datadog.Trace.Logging;
using Datadog.Trace.Util;
using Datadog.Trace.Vendors.Newtonsoft.Json;
namespace Datadog.Trace.Agent.Transports
{
internal class HttpStreamRequest : IApiRequest
{
/// <summary>
/// This value is greater than any reasonable response we would receive from the agent.
/// It is smaller than the internal default of 81920
/// https://source.dot.net/#System.Private.CoreLib/Stream.cs,122
/// It is a multiple of 4096.
/// </summary>
private const int ResponseReadBufferSize = 12_228;
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<HttpStreamRequest>();
private readonly Uri _uri;
private readonly DatadogHttpClient _client;
private readonly IStreamFactory _streamFactory;
private readonly HttpHeaders _headers = new HttpHeaders();
public HttpStreamRequest(DatadogHttpClient client, Uri uri, IStreamFactory streamFactory)
{
_uri = uri;
_client = client;
_streamFactory = streamFactory;
}
public void AddHeader(string name, string value)
{
_headers.Add(name, value);
}
public async Task<IApiResponse> PostAsJsonAsync(IEvent events, JsonSerializer serializer)
{
var memoryStream = new MemoryStream();
var sw = new StreamWriter(memoryStream);
using (JsonWriter writer = new JsonTextWriter(sw))
{
serializer.Serialize(writer, events);
await writer.FlushAsync().ConfigureAwait(false);
await memoryStream.FlushAsync().ConfigureAwait(false);
memoryStream.Seek(0, SeekOrigin.Begin);
var buffer = memoryStream.GetBuffer();
_headers.Add("Content-Type", "application/json");
var result = await PostSegmentAsync(new ArraySegment<byte>(buffer, 0, (int)memoryStream.Length)).ConfigureAwait(false);
var response = result.Item1;
var request = result.Item2;
if (response.StatusCode != 200 && response.StatusCode != 202)
{
memoryStream.Seek(0, SeekOrigin.Begin);
using var sr = new StreamReader(memoryStream);
var headers = string.Join(", ", request.Headers.Select(h => $"{h.Name}: {h.Value}"));
var payload = await sr.ReadToEndAsync().ConfigureAwait(false);
Log.Warning("AppSec event not correctly sent to backend {statusCode} by class {className} with response {responseText}, request headers: were {headers}, payload was: {payload}", new object[] { response.StatusCode, nameof(HttpStreamRequest), await response.ReadAsStringAsync().ConfigureAwait(false), headers, payload });
}
return response;
}
}
public async Task<IApiResponse> PostAsync(ArraySegment<byte> traces) => (await PostSegmentAsync(traces).ConfigureAwait(false)).Item1;
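        // Editor's note (illustrative sketch only): the diff at the top of this record stops
        // PostAsJsonAsync from mutating _headers directly and instead passes the content type
        // into PostSegmentAsync. The actual overload added by that change is not shown in this
        // record; the helper below merely sketches how such an argument could be threaded
        // through to the existing implementation, and its name is hypothetical.
        private Task<Tuple<IApiResponse, HttpRequest>> PostSegmentWithContentTypeAsync(ArraySegment<byte> segment, string contentType)
        {
            if (!string.IsNullOrEmpty(contentType))
            {
                // Attach the content type as a request header before delegating.
                _headers.Add("Content-Type", contentType);
            }

            return PostSegmentAsync(segment);
        }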
private async Task<Tuple<IApiResponse, HttpRequest>> PostSegmentAsync(ArraySegment<byte> segment)
{
using (var bidirectionalStream = _streamFactory.GetBidirectionalStream())
{
var content = new BufferContent(segment);
var request = new HttpRequest("POST", _uri.Host, _uri.PathAndQuery, _headers, content);
// send request, get response
var response = await _client.SendAsync(request, bidirectionalStream, bidirectionalStream).ConfigureAwait(false);
// Content-Length is required as we don't support chunked transfer
var contentLength = response.Content.Length;
if (!contentLength.HasValue)
{
ThrowHelper.ThrowException("Content-Length is required but was not provided");
}
// buffer the entire contents for now
var buffer = new byte[contentLength.Value];
var responseContentStream = new MemoryStream(buffer);
await response.Content.CopyToAsync(buffer).ConfigureAwait(false);
responseContentStream.Position = 0;
return new Tuple<IApiResponse, HttpRequest>(new HttpStreamResponse(response.StatusCode, responseContentStream.Length, response.GetContentEncoding(), responseContentStream, response.Headers), request);
}
}
}
}
| 1 | 23,237 | I assume this was removed because it was redundant? and conflicted with the now dynamic contentType? Edit: Oh I see it being set was moved to the `PostSegmentAsync` call. | DataDog-dd-trace-dotnet | .cs |
@@ -21,7 +21,9 @@ public class ProxyConfig {
public static ProxyConfig http(String host, int port, String username, String password) {
return new ProxyConfig(Proxy.Type.HTTP, host, port, username, password);
}
-
+ public static ProxyConfig socks(String host, int port, String username, String password) {
+ return new ProxyConfig(Proxy.Type.SOCKS, host, port, username, password);
+ }
public ProxyConfig(Proxy.Type type, String host, int port, String username, String password) {
this.type = type;
this.host = host; | 1 | package de.danoeh.antennapod.core.service.download;
import android.support.annotation.Nullable;
import java.net.Proxy;
public class ProxyConfig {
public final Proxy.Type type;
@Nullable public final String host;
public final int port;
@Nullable public final String username;
@Nullable public final String password;
public static final int DEFAULT_PORT = 8080;
public static ProxyConfig direct() {
return new ProxyConfig(Proxy.Type.DIRECT, null, 0, null, null);
}
public static ProxyConfig http(String host, int port, String username, String password) {
return new ProxyConfig(Proxy.Type.HTTP, host, port, username, password);
}
public ProxyConfig(Proxy.Type type, String host, int port, String username, String password) {
this.type = type;
this.host = host;
this.port = port;
this.username = username;
this.password = password;
}
}
| 1 | 14,933 | Here is a newline missing | AntennaPod-AntennaPod | java |
@@ -126,7 +126,7 @@ module Travis
# R-devel builds available at mac.r-project.org
if r_version == 'devel'
- r_url = "https://mac.r-project.org/el-capitan/R-devel/R-devel-el-capitan.pkg"
+ r_url = "http://mac.r-project.org/high-sierra/R-4.0-branch/R-4.0-branch.pkg"
# The latest release is the only one available in /bin/macosx
elsif r_version == r_latest | 1 | # Maintained by:
# Jim Hester @jimhester james.hester@rstudio.com
# Jeroen Ooms @jeroen jeroen@berkeley.edu
#
module Travis
module Build
class Script
class R < Script
DEFAULTS = {
# Basic config options
cran: 'https://cloud.r-project.org',
repos: {},
warnings_are_errors: true,
# Dependencies (installed in this order)
apt_packages: [],
brew_packages: [],
r_binary_packages: [],
r_packages: [],
bioc_packages: [],
r_github_packages: [],
# Build/test options
r_build_args: '',
r_check_args: '--as-cran',
# Heavy dependencies
pandoc: true,
latex: true,
fortran: true,
pandoc_version: '2.2',
# Bioconductor
bioc: 'https://bioconductor.org/biocLite.R',
bioc_required: false,
bioc_check: false,
bioc_use_devel: false,
disable_homebrew: false,
use_devtools: false,
r: 'release'
}
def initialize(data)
# TODO: Is there a way to avoid explicitly naming arguments here?
super
@remotes_installed = false
@devtools_installed = false
@bioc_installed = false
end
def export
super
sh.export 'TRAVIS_R_VERSION', r_version, echo: false
sh.export 'TRAVIS_R_VERSION_STRING', config[:r].to_s, echo: false
sh.export 'R_LIBS_USER', '~/R/Library', echo: false
sh.export 'R_LIBS_SITE', '/usr/local/lib/R/site-library:/usr/lib/R/site-library', echo: false
sh.export '_R_CHECK_CRAN_INCOMING_', 'false', echo: false
sh.export 'NOT_CRAN', 'true', echo: false
end
def configure
super
sh.echo 'R for Travis-CI is not officially supported, '\
'but is community maintained.', ansi: :green
sh.echo 'Please file any issues at https://travis-ci.community/c/languages/r'
sh.echo 'and mention @jeroen and @jimhester in the issue'
sh.fold 'R-install' do
sh.with_options({ assert: true, echo: true, timing: true }) do
sh.echo 'Installing R', ansi: :yellow
case config[:os]
when 'linux'
# This key is added implicitly by the marutter PPA below
#sh.cmd 'apt-key adv --keyserver ha.pool.sks-keyservers.net '\
#'--recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9', sudo: true
# Add marutter's c2d4u plus ppa dependencies as listed on launchpad
if r_version_less_than('3.5.0')
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/rrutter"'
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/c2d4u"'
else
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/rrutter3.5"'
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/c2d4u3.5"'
sh.cmd 'sudo add-apt-repository -y "ppa:ubuntugis/ppa"'
sh.cmd 'sudo add-apt-repository -y "ppa:cran/travis"'
end
# Both c2d4u and c2d4u3.5 depend on this ppa for ffmpeg
sh.if "$(lsb_release -cs) = 'trusty'" do
sh.cmd 'sudo add-apt-repository -y "ppa:kirillshkrogalev/ffmpeg-next"'
end
# Update after adding all repositories. Retry several
# times to work around flaky connection to Launchpad PPAs.
sh.cmd 'travis_apt_get_update', retry: true
# Install precompiled R
# Install only the dependencies for an R development environment except for
# libpcre3-dev or r-base-core because they will be included in
# the R binary tarball.
# Dependencies queried with `apt-cache depends -i r-base-dev`.
# qpdf and texinfo are also needed for --as-cran # checks:
# https://stat.ethz.ch/pipermail/r-help//2012-September/335676.html
optional_apt_pkgs = ""
optional_apt_pkgs << "gfortran" if config[:fortran]
sh.cmd 'sudo apt-get install -y --no-install-recommends '\
'build-essential gcc g++ libblas-dev liblapack-dev '\
'libncurses5-dev libreadline-dev libjpeg-dev '\
'libpcre3-dev libpng-dev zlib1g-dev libbz2-dev liblzma-dev libicu-dev '\
'cdbs qpdf texinfo libssh2-1-dev devscripts '\
"#{optional_apt_pkgs}", retry: true
r_filename = "R-#{r_version}-$(lsb_release -cs).xz"
r_url = "https://travis-ci.rstudio.org/#{r_filename}"
sh.cmd "curl -fLo /tmp/#{r_filename} #{r_url}", retry: true
sh.cmd "tar xJf /tmp/#{r_filename} -C ~"
sh.export 'PATH', "${TRAVIS_HOME}/R-bin/bin:$PATH", echo: false
sh.export 'LD_LIBRARY_PATH', "${TRAVIS_HOME}/R-bin/lib:$LD_LIBRARY_PATH", echo: false
sh.rm "/tmp/#{r_filename}"
sh.cmd "sudo mkdir -p /usr/local/lib/R/site-library $R_LIBS_USER"
sh.cmd 'sudo chmod 2777 /usr/local/lib/R /usr/local/lib/R/site-library $R_LIBS_USER'
when 'osx'
# We want to update, but we don't need the 800+ lines of
# output.
unless config[:disable_homebrew]
sh.cmd 'brew update >/dev/null', retry: true
end
# R-devel builds available at mac.r-project.org
if r_version == 'devel'
r_url = "https://mac.r-project.org/el-capitan/R-devel/R-devel-el-capitan.pkg"
# The latest release is the only one available in /bin/macosx
elsif r_version == r_latest
r_url = "#{repos[:CRAN]}/bin/macosx/R-latest.pkg"
# 3.2.5 was never built for OS X so
# we need to use 3.2.4-revised, which is the same codebase
# https://stat.ethz.ch/pipermail/r-devel/2016-May/072642.html
elsif r_version == '3.2.5'
r_url = "#{repos[:CRAN]}/bin/macosx/old/R-3.2.4-revised.pkg"
# the old archive has moved after 3.4.0
elsif r_version_less_than('3.4.0')
r_url = "#{repos[:CRAN]}/bin/macosx/old/R-#{r_version}.pkg"
else
r_url = "#{repos[:CRAN]}/bin/macosx/el-capitan/base/R-#{r_version}.pkg"
end
# Install from latest CRAN binary build for OS X
sh.cmd "curl -fLo /tmp/R.pkg #{r_url}", retry: true
sh.echo 'Installing OS X binary package for R'
sh.cmd 'sudo installer -pkg "/tmp/R.pkg" -target /'
sh.rm '/tmp/R.pkg'
setup_fortran_osx if config[:fortran]
else
sh.failure "Operating system not supported: #{config[:os]}"
end
# Set repos in ~/.Rprofile
repos_str = repos.collect {|k,v| "#{k} = \"#{v}\""}.join(", ")
options_repos = "options(repos = c(#{repos_str}))"
sh.cmd %Q{echo '#{options_repos}' > ~/.Rprofile.site}
sh.export 'R_PROFILE', "~/.Rprofile.site", echo: false
# PDF manual requires latex
if config[:latex]
setup_latex
else
config[:r_check_args] = config[:r_check_args] + " --no-manual"
config[:r_build_args] = config[:r_build_args] + " --no-manual"
end
setup_bioc if needs_bioc?
setup_pandoc if config[:pandoc]
# Removes preinstalled homebrew
disable_homebrew if config[:disable_homebrew]
end
end
end
def announce
super
sh.fold 'R-session-info' do
sh.echo 'R session information', ansi: :yellow
sh.cmd 'Rscript -e \'sessionInfo()\''
end
end
def install
super
sh.if '! -e DESCRIPTION' do
sh.failure "No DESCRIPTION file found, user must supply their own install and script steps"
end
sh.fold "R-dependencies" do
sh.echo 'Installing package dependencies', ansi: :yellow
# Install any declared packages
apt_install config[:apt_packages]
brew_install config[:brew_packages]
r_binary_install config[:r_binary_packages]
r_install config[:r_packages]
r_install config[:bioc_packages]
r_github_install config[:r_github_packages]
# Install dependencies for the package we're testing.
install_deps
end
if @devtools_installed
sh.fold 'R-installed-versions' do
sh.echo 'Installed package versions', ansi: :yellow
sh.cmd 'Rscript -e \'devtools::session_info(installed.packages()[, "Package"])\''
end
end
end
def script
# Build the package
sh.if '! -e DESCRIPTION' do
sh.failure "No DESCRIPTION file found, user must supply their own install and script steps"
end
tarball_script =
'$version = $1 if (/^Version:\s(\S+)/);'\
'$package = $1 if (/^Package:\s*(\S+)/);'\
'END { print "${package}_$version.tar.gz" }'\
sh.export 'PKG_TARBALL', "$(perl -ne '#{tarball_script}' DESCRIPTION)", echo: false
sh.fold 'R-build' do
sh.echo 'Building package', ansi: :yellow
sh.echo "Building with: R CMD build ${R_BUILD_ARGS}"
sh.cmd "R CMD build #{config[:r_build_args]} .",
assert: true
end
# Check the package
sh.fold 'R-check' do
sh.echo 'Checking package', ansi: :yellow
# Test the package
sh.echo 'Checking with: R CMD check "${PKG_TARBALL}" '\
"#{config[:r_check_args]}"
sh.cmd "R CMD check \"${PKG_TARBALL}\" #{config[:r_check_args]}; "\
"CHECK_RET=$?", assert: false
end
export_rcheck_dir
if config[:bioc_check]
# BiocCheck the package
sh.fold 'Bioc-check' do
sh.echo 'Checking with: BiocCheck( "${PKG_TARBALL}" ) '
sh.cmd 'Rscript -e "BiocCheck::BiocCheck(\"${PKG_TARBALL}\", \'quit-with-status\'=TRUE)"'
end
end
if @devtools_installed
# Output check summary
sh.cmd 'Rscript -e "message(devtools::check_failures(path = \"${RCHECK_DIR}\"))"', echo: false
end
# Build fails if R CMD check fails
sh.if '$CHECK_RET -ne 0' do
dump_error_logs
sh.failure 'R CMD check failed'
end
# Turn warnings into errors, if requested.
if config[:warnings_are_errors]
sh.cmd 'grep -q -R "WARNING" "${RCHECK_DIR}/00check.log"', echo: false, assert: false
sh.if '$? -eq 0' do
dump_error_logs
sh.failure "Found warnings, treating as errors (as requested)."
end
end
end
def setup_cache
if data.cache?(:packages)
sh.fold 'package cache' do
sh.echo 'Setting up package cache', ansi: :yellow
directory_cache.add '$R_LIBS_USER'
end
end
end
def cache_slug
super << '--R-' << r_version
end
def use_directory_cache?
super || data.cache?(:packages)
end
private
def needs_bioc?
config[:bioc_required] || !config[:bioc_packages].empty?
end
def packages_as_arg(packages)
packages = Array(packages)
quoted_pkgs = packages.collect{|p| "\"#{p}\""}
"c(#{quoted_pkgs.join(', ')})"
end
def as_r_boolean(bool)
bool ? "TRUE" : "FALSE"
end
def r_install(packages)
return if packages.empty?
packages = Array(packages)
sh.echo "Installing R packages: #{packages.join(', ')}"
pkg_arg = packages_as_arg(packages)
install_script =
"install.packages(#{pkg_arg});"\
"if (!all(#{pkg_arg} %in% installed.packages())) {"\
' q(status = 1, save = "no")'\
'}'
sh.cmd "Rscript -e '#{install_script}'"
end
def r_github_install(packages)
return if packages.empty?
packages = Array(packages)
setup_remotes
setup_devtools if config[:use_devtools]
sh.echo "Installing R packages from GitHub: #{packages.join(', ')}"
pkg_arg = packages_as_arg(packages)
install_script = "remotes::install_github(#{pkg_arg})"
sh.cmd "Rscript -e '#{install_script}'"
end
def r_binary_install(packages)
return if packages.empty?
packages = Array(packages)
if config[:os] == 'linux'
if config[:dist] == 'precise'
sh.echo "R binary packages not supported for 'dist: precise', "\
' falling back to source install'
return r_install packages
end
sh.echo "Installing *binary* R packages: #{packages.join(', ')}"
apt_install packages.collect{|p| "r-cran-#{p.downcase}"}
else
sh.echo "R binary packages not supported on #{config[:os]}, "\
'falling back to source install'
r_install packages
end
end
def apt_install(packages)
return if packages.empty?
packages = Array(packages)
return unless (config[:os] == 'linux')
pkg_arg = packages.join(' ')
sh.echo "Installing apt packages: #{packages.join(', ')}"
sh.cmd "sudo apt-get install -y #{pkg_arg}", retry: true
end
def brew_install(packages)
return if packages.empty?
packages = Array(packages)
return unless (config[:os] == 'osx')
pkg_arg = packages.join(' ')
sh.echo "Installing brew packages: #{packages.join(', ')}"
sh.cmd "brew install #{pkg_arg}", retry: true
end
def install_deps
setup_remotes
setup_devtools if config[:use_devtools]
install_script =
'deps <- remotes::dev_package_deps(dependencies = NA);'\
'remotes::install_deps(dependencies = TRUE);'\
'if (!all(deps$package %in% installed.packages())) {'\
' message("missing: ", paste(setdiff(deps$package, installed.packages()), collapse=", "));'\
' q(status = 1, save = "no")'\
'}'
sh.cmd "Rscript -e '#{install_script}'"
end
def export_rcheck_dir
# Simply strip the tarball name until the last _ and add '.Rcheck',
# relevant R code # https://github.com/wch/r-source/blob/840a972338042b14aa5855cc431b2d0decf68234/src/library/tools/R/check.R#L4608-L4615
sh.export 'RCHECK_DIR', "$(expr \"$PKG_TARBALL\" : '\\(.*\\)_').Rcheck", echo: false
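      # Editor's note: with the illustrative value PKG_TARBALL=mypkg_1.2.3.tar.gz, the expr above
      # strips everything from the last "_" onwards, so RCHECK_DIR becomes mypkg.Rcheck, the
      # directory R CMD check writes its output to.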
end
def dump_error_logs
dump_log("fail")
dump_log("log")
dump_log("out")
end
def dump_log(type)
sh.fold "#{type} logs" do
sh.echo "R CMD check #{type} logs", ansi: :yellow
cmd =
'for name in '\
"$(find \"${RCHECK_DIR}\" -type f -name \"*#{type}\");"\
'do '\
'echo ">>> Filename: ${name} <<<";'\
'cat ${name};'\
'done'
sh.cmd cmd
end
end
def setup_bioc
unless @bioc_installed
sh.fold 'Bioconductor' do
sh.echo 'Installing Bioconductor', ansi: :yellow
bioc_install_script =
if r_version_less_than("3.5.0")
"source(\"#{config[:bioc]}\");"\
'tryCatch('\
" useDevel(#{as_r_boolean(config[:bioc_use_devel])}),"\
' error=function(e) {if (!grepl("already in use", e$message)) {e}}'\
' );'\
'cat(append = TRUE, file = "~/.Rprofile.site", "options(repos = BiocInstaller::biocinstallRepos());")'
else
'if (!requireNamespace("BiocManager", quietly=TRUE))'\
' install.packages("BiocManager");'\
"if (#{as_r_boolean(config[:bioc_use_devel])})"\
' BiocManager::install(version = "devel", ask = FALSE);'\
'cat(append = TRUE, file = "~/.Rprofile.site", "options(repos = BiocManager::repositories());")'
end
sh.cmd "Rscript -e '#{bioc_install_script}'", retry: true
bioc_install_bioccheck =
if r_version_less_than("3.5.0")
'BiocInstaller::biocLite("BiocCheck")'
else
'BiocManager::install("BiocCheck")'
end
if config[:bioc_check]
sh.cmd "Rscript -e '#{bioc_install_bioccheck}'"
end
end
end
@bioc_installed = true
end
def setup_remotes
unless @remotes_installed
case config[:os]
when 'linux'
# We can't use remotes binaries because R versions < 3.5 are not
# compatible with R versions >= 3.5
r_install ['remotes']
else
remotes_check = '!requireNamespace("remotes", quietly = TRUE)'
remotes_install = 'install.packages("remotes")'
sh.cmd "Rscript -e 'if (#{remotes_check}) #{remotes_install}'",
retry: true
end
end
@remotes_installed = true
end
def setup_devtools
unless @devtools_installed
case config[:os]
when 'linux'
# We can't use devtools binaries because R versions < 3.5 are not
# compatible with R versions >= 3.5
r_install ['devtools']
else
devtools_check = '!requireNamespace("devtools", quietly = TRUE)'
devtools_install = 'install.packages("devtools")'
sh.cmd "Rscript -e 'if (#{devtools_check}) #{devtools_install}'",
retry: true
end
end
@devtools_installed = true
end
def setup_latex
case config[:os]
when 'linux'
texlive_filename = 'texlive.tar.gz'
texlive_url = 'https://github.com/jimhester/ubuntu-bin/releases/download/latest/texlive.tar.gz'
sh.cmd "curl -fLo /tmp/#{texlive_filename} #{texlive_url}", retry: true
sh.cmd "tar xzf /tmp/#{texlive_filename} -C ~"
sh.export 'PATH', "${TRAVIS_HOME}/texlive/bin/x86_64-linux:$PATH"
sh.cmd 'tlmgr update --self', assert: false
when 'osx'
# We use basictex due to disk space constraints.
mactex = 'BasicTeX.pkg'
# TODO: Confirm that this will route us to the nearest mirror.
sh.cmd "curl -fLo \"/tmp/#{mactex}\" --retry 3 http://mirror.ctan.org/systems/mac/mactex/"\
"#{mactex}"
sh.echo 'Installing OS X binary package for MacTeX'
sh.cmd "sudo installer -pkg \"/tmp/#{mactex}\" -target /"
sh.rm "/tmp/#{mactex}"
sh.export 'PATH', '/usr/texbin:/Library/TeX/texbin:$PATH'
sh.cmd 'sudo tlmgr update --self', assert: false
# Install common packages
sh.cmd 'sudo tlmgr install inconsolata upquote '\
'courier courier-scaled helvetic', assert: false
end
end
def setup_pandoc
case config[:os]
when 'linux'
pandoc_filename = "pandoc-#{config[:pandoc_version]}-1-amd64.deb"
pandoc_url = "https://github.com/jgm/pandoc/releases/download/#{config[:pandoc_version]}/"\
"#{pandoc_filename}"
# Download and install pandoc
sh.cmd "curl -fLo /tmp/#{pandoc_filename} #{pandoc_url}"
sh.cmd "sudo dpkg -i /tmp/#{pandoc_filename}"
# Fix any missing dependencies
sh.cmd "sudo apt-get install -f"
# Cleanup
sh.rm "/tmp/#{pandoc_filename}"
when 'osx'
# Change OS name if requested version is less than 1.19.2.2
# Name change was introduced in v2.0 of pandoc.
# c.f. "Build Infrastructure Improvements" section of
# https://github.com/jgm/pandoc/releases/tag/2.0
# Lastly, the last binary for macOS before 2.0 is 1.19.2.1
os_short_name = version_check_less_than("#{config[:pandoc_version]}", "1.19.2.2") ? "macOS" : "osx"
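          # Editor's note: version_check_less_than(a, b) returns true when a is greater than b
          # (see its definition further down), so e.g. a pandoc_version of 2.2 resolves to
          # "macOS" while 1.19.2.1 resolves to "osx".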
pandoc_filename = "pandoc-#{config[:pandoc_version]}-#{os_short_name}.pkg"
pandoc_url = "https://github.com/jgm/pandoc/releases/download/#{config[:pandoc_version]}/"\
"#{pandoc_filename}"
# Download and install pandoc
sh.cmd "curl -fLo /tmp/#{pandoc_filename} #{pandoc_url}"
sh.cmd "sudo installer -pkg \"/tmp/#{pandoc_filename}\" -target /"
# Cleanup
sh.rm "/tmp/#{pandoc_filename}"
end
end
# Install gfortran libraries the precompiled binaries are linked to
def setup_fortran_osx
return unless (config[:os] == 'osx')
if r_version_less_than('3.4')
sh.cmd 'curl -fLo /tmp/gfortran.tar.bz2 http://r.research.att.com/libs/gfortran-4.8.2-darwin13.tar.bz2', retry: true
sh.cmd 'sudo tar fvxz /tmp/gfortran.tar.bz2 -C /'
sh.rm '/tmp/gfortran.tar.bz2'
else
sh.cmd "curl -fLo /tmp/gfortran61.dmg #{repos[:CRAN]}/contrib/extra/macOS/gfortran-6.1-ElCapitan.dmg", retry: true
sh.cmd 'sudo hdiutil attach /tmp/gfortran61.dmg -mountpoint /Volumes/gfortran'
sh.cmd 'sudo installer -pkg "/Volumes/gfortran/gfortran-6.1-ElCapitan/gfortran.pkg" -target /'
sh.cmd 'sudo hdiutil detach /Volumes/gfortran'
sh.rm '/tmp/gfortran61.dmg'
end
end
# Uninstalls the preinstalled homebrew
# See FAQ: https://docs.brew.sh/FAQ#how-do-i-uninstall-old-versions-of-a-formula
def disable_homebrew
return unless (config[:os] == 'osx')
sh.cmd "curl -fsSOL https://raw.githubusercontent.com/Homebrew/install/master/uninstall"
sh.cmd "sudo ruby uninstall --force"
sh.cmd "rm uninstall"
sh.cmd "hash -r"
end
# Abstract out version check
def version_check_less_than(version_str_new, version_str_old)
Gem::Version.new(version_str_old) < Gem::Version.new(version_str_new)
end
def r_version
@r_version ||= normalized_r_version
end
def r_version_less_than(str)
return if normalized_r_version == 'devel' # always false (devel is highest version)
version_check_less_than(str, normalized_r_version)
end
def normalized_r_version(v=Array(config[:r]).first.to_s)
case v
when 'release' then '3.6.2'
when 'oldrel' then '3.5.3'
when '3.0' then '3.0.3'
when '3.1' then '3.1.3'
when '3.2' then '3.2.5'
when '3.3' then '3.3.3'
when '3.4' then '3.4.4'
when '3.5' then '3.5.3'
when '3.6' then '3.6.2'
when 'bioc-devel'
config[:bioc_required] = true
config[:bioc_use_devel] = true
config[:r] = 'devel'
normalized_r_version('devel')
when 'bioc-release'
config[:bioc_required] = true
config[:bioc_use_devel] = false
config[:r] = 'release'
normalized_r_version('release')
else v
end
end
def r_latest
normalized_r_version('release')
end
def repos
@repos ||= normalized_repos
end
# If CRAN is not set in repos set it with cran
def normalized_repos
v = config[:repos]
if not v.has_key?(:CRAN)
v[:CRAN] = config[:cran]
end
# If the version is less than 3.2 we need to use http repositories
if r_version_less_than('3.2')
v.each {|_, url| url.sub!(/^https:/, "http:")}
config[:bioc].sub!(/^https:/, "http:")
end
v
end
end
end
end
end
| 1 | 17,437 | Did you mean to make this http rather than https? | travis-ci-travis-build | rb |
@@ -1,15 +1,14 @@
#appModules/audacity.py
#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2006-2010 NVDA Contributors <http://www.nvda-project.org/>
+#Copyright (C) 2006-2018 NVDA Contributors <https://www.nvaccess.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
-import winUser
import controlTypes
class AppModule(appModuleHandler.AppModule):
def event_NVDAObject_init(self,obj):
if obj.windowClassName=="Button" and not obj.role in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU]:
- obj.name=winUser.getWindowText(obj.windowHandle).replace('&','')
+ obj.name=obj.name.replace('&','') | 1 | #appModules/audacity.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2010 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
import winUser
import controlTypes
class AppModule(appModuleHandler.AppModule):
def event_NVDAObject_init(self,obj):
if obj.windowClassName=="Button" and not obj.role in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU]:
obj.name=winUser.getWindowText(obj.windowHandle).replace('&','')
| 1 | 21,979 | The updated copyright header should be: # Copyright (C) 2006-2018 NV Access Limited, yourname | nvaccess-nvda | py |
@@ -21,6 +21,7 @@ class PerformanceTestSamplesAggregator
$maxQueryCount = 0;
$isSuccessful = true;
$worstStatusCode = null;
+ $performanceTestSample = null;
foreach ($performanceTestSamplesOfUrl as $performanceTestSample) {
/* @var $performanceTestSample \Tests\ShopBundle\Performance\Page\PerformanceTestSample */ | 1 | <?php
namespace Tests\ShopBundle\Performance\Page;
class PerformanceTestSamplesAggregator
{
/**
* @param \Tests\ShopBundle\Performance\Page\PerformanceTestSample[] $performanceTestSamples
* @return \Tests\ShopBundle\Performance\Page\PerformanceTestSample[]
*/
public function getPerformanceTestSamplesAggregatedByUrl(
array $performanceTestSamples
) {
$aggregatedPerformanceTestSamples = [];
$performanceTestSamplesGroupedByUrl = $this->getPerformanceTestSamplesGroupedByUrl($performanceTestSamples);
foreach ($performanceTestSamplesGroupedByUrl as $url => $performanceTestSamplesOfUrl) {
$samplesCount = 0;
$totalDuration = 0;
$maxQueryCount = 0;
$isSuccessful = true;
$worstStatusCode = null;
foreach ($performanceTestSamplesOfUrl as $performanceTestSample) {
/* @var $performanceTestSample \Tests\ShopBundle\Performance\Page\PerformanceTestSample */
$samplesCount++;
$totalDuration += $performanceTestSample->getDuration();
if ($performanceTestSample->getQueryCount() > $maxQueryCount) {
$maxQueryCount = $performanceTestSample->getQueryCount();
}
if (!$performanceTestSample->isSuccessful()) {
$isSuccessful = false;
}
if ($performanceTestSample->isSuccessful() || $worstStatusCode === null) {
$worstStatusCode = $performanceTestSample->getStatusCode();
}
}
$aggregatedPerformanceTestSamples[$url] = new PerformanceTestSample(
$performanceTestSample->getRouteName(),
$url,
$totalDuration / $samplesCount,
$maxQueryCount,
$worstStatusCode,
$isSuccessful
);
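            // Note (editor): $performanceTestSample used above is whatever the inner loop left
            // behind; only its route name is read, which assumes every sample of the same URL
            // shares one route name.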
}
return $aggregatedPerformanceTestSamples;
}
/**
* @param \Tests\ShopBundle\Performance\Page\PerformanceTestSample[] $performanceTestSamples
*/
private function getPerformanceTestSamplesGroupedByUrl(array $performanceTestSamples)
{
$performanceTestSamplesGroupedByUrl = [];
foreach ($performanceTestSamples as $performanceTestSample) {
$performanceTestSamplesGroupedByUrl[$performanceTestSample->getUrl()][] = $performanceTestSample;
}
return $performanceTestSamplesGroupedByUrl;
}
}
| 1 | 16,264 | wow :+1: , i do not even know how this test works. | shopsys-shopsys | php |
@@ -2,14 +2,12 @@
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
-
-using MvvmCross.Plugins;
using MvvmCross.UI;
namespace MvvmCross.Plugin.Visibility.Platform.Uap
{
- public class Plugin
- : IMvxPlugin
+ [MvxPlugin]
+ public class Plugin : IMvxPlugin
{
public void Load()
{ | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using MvvmCross.Plugins;
using MvvmCross.UI;
namespace MvvmCross.Plugin.Visibility.Platform.Uap
{
public class Plugin
: IMvxPlugin
{
public void Load()
{
Mvx.RegisterSingleton<IMvxNativeVisibility>(new MvxWinRTVisibility());
}
}
}
| 1 | 13,751 | File should be renamed `PlugIn` -> `Plugin` | MvvmCross-MvvmCross | .cs |
@@ -17,6 +17,7 @@ package egress
import (
"context"
"fmt"
+ "net"
"testing"
"time"
| 1 | // Copyright 2021 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package egress
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"antrea.io/antrea/pkg/apis/controlplane"
"antrea.io/antrea/pkg/apis/crd/v1alpha2"
corev1a2 "antrea.io/antrea/pkg/apis/crd/v1alpha2"
"antrea.io/antrea/pkg/client/clientset/versioned"
fakeversioned "antrea.io/antrea/pkg/client/clientset/versioned/fake"
crdinformers "antrea.io/antrea/pkg/client/informers/externalversions"
"antrea.io/antrea/pkg/controller/egress/store"
"antrea.io/antrea/pkg/controller/grouping"
)
var (
node1 = "node1"
node2 = "node2"
node3 = "node3"
// Fake Pods
podFoo1 = newPod("default", "podFoo1", map[string]string{"app": "foo"}, node1, "1.1.1.1")
podFoo2 = newPod("default", "podFoo2", map[string]string{"app": "foo"}, node2, "1.1.2.1")
podBar1 = newPod("default", "podBar1", map[string]string{"app": "bar"}, node1, "1.1.1.2")
podFoo1InOtherNamespace = newPod("other", "podFoo1", map[string]string{"app": "foo"}, node1, "1.1.1.3")
podUnscheduled = newPod("default", "podUnscheduled", map[string]string{"app": "foo"}, "", "")
podNonIP = newPod("default", "podNonIP", map[string]string{"app": "foo"}, "node1", "")
// Fake Namespaces
nsDefault = newNamespace("default", map[string]string{"company": "default"})
nsOther = newNamespace("other", map[string]string{"company": "other"})
)
func newNamespace(name string, labels map[string]string) *v1.Namespace {
return &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
}
}
func newPod(namespace, name string, labels map[string]string, nodeName string, ip string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
Labels: labels,
},
Spec: v1.PodSpec{
NodeName: nodeName,
},
}
if len(ip) > 0 {
pod.Status.PodIP = ip
pod.Status.PodIPs = []v1.PodIP{{IP: ip}}
}
return pod
}
type egressController struct {
*EgressController
client kubernetes.Interface
crdClient versioned.Interface
informerFactory informers.SharedInformerFactory
crdInformerFactory crdinformers.SharedInformerFactory
groupingController *grouping.GroupEntityController
}
// objects is an initial set of K8s objects that is exposed through the client.
func newController(objects ...runtime.Object) *egressController {
client := fake.NewSimpleClientset(objects...)
crdClient := fakeversioned.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
crdInformerFactory := crdinformers.NewSharedInformerFactory(crdClient, resyncPeriod)
egressGroupStore := store.NewEgressGroupStore()
egressInformer := crdInformerFactory.Crd().V1alpha2().Egresses()
groupEntityIndex := grouping.NewGroupEntityIndex()
groupingController := grouping.NewGroupEntityController(groupEntityIndex,
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Namespaces(),
crdInformerFactory.Crd().V1alpha2().ExternalEntities())
controller := NewEgressController(groupEntityIndex, egressInformer, egressGroupStore)
return &egressController{
controller,
client,
crdClient,
informerFactory,
crdInformerFactory,
groupingController,
}
}
func TestAddEgress(t *testing.T) {
tests := []struct {
name string
inputEgress *v1alpha2.Egress
expectedEgressGroups map[string]*controlplane.EgressGroup
}{
{
name: "Egress with podSelector and namespaceSelector",
inputEgress: &v1alpha2.Egress{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
Spec: v1alpha2.EgressSpec{
AppliedTo: corev1a2.AppliedTo{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "foo"},
},
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: nsDefault.Labels,
},
},
EgressIP: "1.1.1.1",
},
},
expectedEgressGroups: map[string]*controlplane.EgressGroup{
node1: {
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
GroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo1.Name, Namespace: podFoo1.Namespace}},
},
},
node2: {
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
GroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo2.Name, Namespace: podFoo2.Namespace}},
},
},
node3: nil,
},
},
{
name: "Egress with namespaceSelector",
inputEgress: &v1alpha2.Egress{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
Spec: v1alpha2.EgressSpec{
AppliedTo: corev1a2.AppliedTo{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: nsDefault.Labels,
},
},
EgressIP: "1.1.1.1",
},
},
expectedEgressGroups: map[string]*controlplane.EgressGroup{
node1: {
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
GroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo1.Name, Namespace: podFoo1.Namespace}},
{Pod: &controlplane.PodReference{Name: podBar1.Name, Namespace: podBar1.Namespace}},
},
},
node2: {
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
GroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo2.Name, Namespace: podFoo2.Namespace}},
},
},
node3: nil,
},
},
{
name: "Egress with podSelector",
inputEgress: &v1alpha2.Egress{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
Spec: v1alpha2.EgressSpec{
AppliedTo: corev1a2.AppliedTo{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "foo"},
},
},
EgressIP: "1.1.1.1",
},
},
expectedEgressGroups: map[string]*controlplane.EgressGroup{
node1: {
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
GroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo1.Name, Namespace: podFoo1.Namespace}},
{Pod: &controlplane.PodReference{Name: podFoo1InOtherNamespace.Name, Namespace: podFoo1InOtherNamespace.Namespace}},
},
},
node2: {
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
GroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo2.Name, Namespace: podFoo2.Namespace}},
},
},
node3: nil,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
var fakeObjects []runtime.Object
fakeObjects = append(fakeObjects, nsDefault, nsOther)
fakeObjects = append(fakeObjects, podFoo1, podFoo2, podBar1, podFoo1InOtherNamespace, podUnscheduled, podNonIP)
controller := newController(fakeObjects...)
controller.informerFactory.Start(stopCh)
controller.crdInformerFactory.Start(stopCh)
go controller.groupingController.Run(stopCh)
go controller.Run(stopCh)
controller.crdClient.CrdV1alpha2().Egresses().Create(context.TODO(), tt.inputEgress, metav1.CreateOptions{})
for nodeName, expectedEgressGroup := range tt.expectedEgressGroups {
watcher, err := controller.egressGroupStore.Watch(context.TODO(), "", nil, fields.ParseSelectorOrDie(fmt.Sprintf("nodeName=%s", nodeName)))
assert.NoError(t, err)
gotEgressGroup := func() *controlplane.EgressGroup {
for {
select {
case <-stopCh:
return nil
case <-time.After(500 * time.Millisecond):
return nil
case event := <-watcher.ResultChan():
if event.Type == watch.Added {
return event.Object.(*controlplane.EgressGroup)
}
}
}
}()
if expectedEgressGroup == nil {
assert.Nil(t, gotEgressGroup)
} else {
require.NotNil(t, gotEgressGroup)
assert.Equal(t, expectedEgressGroup.ObjectMeta, gotEgressGroup.ObjectMeta)
assert.ElementsMatch(t, expectedEgressGroup.GroupMembers, gotEgressGroup.GroupMembers)
}
}
})
}
}
func TestUpdateEgress(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
controller := newController(nsDefault, podFoo1)
controller.informerFactory.Start(stopCh)
controller.crdInformerFactory.Start(stopCh)
go controller.groupingController.Run(stopCh)
go controller.Run(stopCh)
egress := &v1alpha2.Egress{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
Spec: v1alpha2.EgressSpec{
AppliedTo: corev1a2.AppliedTo{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "foo"},
},
},
EgressIP: "1.1.1.1",
},
}
controller.crdClient.CrdV1alpha2().Egresses().Create(context.TODO(), egress, metav1.CreateOptions{})
watcher, err := controller.egressGroupStore.Watch(context.TODO(), "", nil, fields.ParseSelectorOrDie(fmt.Sprintf("nodeName=%s", node1)))
assert.NoError(t, err)
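	// getEvent returns the next non-Bookmark event from the watcher, or nil if the test
	// is stopped or nothing arrives within 500ms.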
getEvent := func() *watch.Event {
for {
select {
case <-stopCh:
return nil
case <-time.After(500 * time.Millisecond):
return nil
case event := <-watcher.ResultChan():
if event.Type != watch.Bookmark {
return &event
}
}
}
}
assert.Equal(t, &watch.Event{
Type: watch.Added,
Object: &controlplane.EgressGroup{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
GroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo1.Name, Namespace: podFoo1.Namespace}},
},
},
}, getEvent())
// Add a Pod matching the Egress's selector and running on this Node.
controller.client.CoreV1().Pods(podFoo1InOtherNamespace.Namespace).Create(context.TODO(), podFoo1InOtherNamespace, metav1.CreateOptions{})
assert.Equal(t, &watch.Event{
Type: watch.Modified,
Object: &controlplane.EgressGroupPatch{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
AddedGroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo1InOtherNamespace.Name, Namespace: podFoo1InOtherNamespace.Namespace}},
},
},
}, getEvent())
// Delete the above Pod.
controller.client.CoreV1().Pods(podFoo1InOtherNamespace.Namespace).Delete(context.TODO(), podFoo1InOtherNamespace.Name, metav1.DeleteOptions{})
assert.Equal(t, &watch.Event{
Type: watch.Modified,
Object: &controlplane.EgressGroupPatch{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
RemovedGroupMembers: []controlplane.GroupMember{
{Pod: &controlplane.PodReference{Name: podFoo1InOtherNamespace.Name, Namespace: podFoo1InOtherNamespace.Namespace}},
},
},
}, getEvent())
// Updating the Egress's spec to make it match no Pods on this Node.
egress.Spec.AppliedTo = corev1a2.AppliedTo{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "non-existing-app"},
},
}
controller.crdClient.CrdV1alpha2().Egresses().Update(context.TODO(), egress, metav1.UpdateOptions{})
assert.Equal(t, &watch.Event{
Type: watch.Deleted,
Object: &controlplane.EgressGroup{
ObjectMeta: metav1.ObjectMeta{Name: "egressA", UID: "uidA"},
},
}, getEvent())
}
| 1 | 38,646 | I scanned the new test cases quickly. Do we have one for an egressIPPool with multiple different IP ranges? | antrea-io-antrea | go |
@@ -148,6 +148,17 @@ abstract class Abstract_Component implements Component {
add_action( 'init', [ $this, 'define_settings' ] );
}
+ /**
+ * Method to filter component loading if needed.
+ *
+ * @since 1.0.1
+ * @access public
+ * @return bool
+ */
+ public function should_component_be_active() {
+ return true;
+ }
+
/**
* Method to set protected properties for class.
* | 1 | <?php
/**
* Abstract Component class for Header Footer Grid.
*
* Name: Header Footer Grid
* Author: Bogdan Preda <bogdan.preda@themeisle.com>
*
* @version 1.0.0
* @package HFG
*/
namespace HFG\Core\Components;
use HFG\Core\Interfaces\Component;
use HFG\Core\Settings;
use HFG\Core\Settings\Manager as SettingsManager;
use HFG\Main;
use HFG\Traits\Core;
use WP_Customize_Manager;
/**
* Class Abstract_Component
*
* @package HFG\Core
*/
abstract class Abstract_Component implements Component {
use Core;
const ALIGNMENT_ID = 'component_align';
const PADDING_ID = 'component_padding';
const MARGIN_ID = 'component_margin';
/**
* Current id of the component.
*
* @since 1.0.0
* @access public
* @var null|string
*/
public static $current_component = null;
/**
	 * Default alignment value for the component.
*
* @since 1.0.0
* @access public
* @var null|string
*/
public $default_align = 'left';
/**
* Current X pos of the component if set.
*
* @since 1.0.0
* @access public
* @var mixed|null $current_x
*/
public $current_x = null;
/**
* Current Width of the component if set.
*
* @since 1.0.0
* @access public
* @var mixed|null $current_width
*/
public $current_width = null;
/**
* The ID of component.
*
* @since 1.0.0
* @access protected
* @var string $id
*/
protected $id;
/**
* The section name for the component
*
* @since 1.0.0
* @access protected
* @var string $section
*/
protected $section;
/**
* The component default width.
*
* @since 1.0.0
* @access protected
* @var int $width
*/
protected $width = 1;
/**
* The component label
*
* @since 1.0.0
* @access protected
* @var string $label
*/
protected $label;
/**
* The component description
*
* @since 1.0.1
* @access protected
* @var string $description
*/
protected $description;
/**
* The component priority in customizer
*
* @since 1.0.0
* @access protected
* @var int $priority
*/
protected $priority = 30;
/**
* The name of the component panel
*
* @since 1.0.0
* @access protected
* @var string $panel
*/
protected $panel;
/**
* Holds component builder id.
*
* @var string $builder_id Builder id.
*/
private $builder_id;
/**
* Can override the default css selector.
* Allows child components to specify their own selector for inherited style rules.
*
* @since 1.0.0
* @access protected
* @var null|string $default_selector
*/
protected $default_selector = null;
/**
* Abstract_Component constructor.
*
* @param string $panel Builder panel.
*/
public function __construct( $panel ) {
$this->init();
$this->set_property( 'panel', $panel );
if ( $this->section === null ) {
$this->set_property( 'section', $this->get_id() );
}
add_action( 'init', [ $this, 'define_settings' ] );
}
/**
* Method to set protected properties for class.
*
* @param string $key The property key name.
* @param string $value The property value.
*
* @return bool
* @since 1.0.0
* @access protected
*/
protected function set_property( $key = '', $value = '' ) {
if ( ! property_exists( $this, $key ) ) {
return false;
}
$this->$key = $value;
return true;
}
/**
* Utility method to return the component ID.
*
* @return string
* @since 1.0.0
* @access public
*/
public function get_id() {
return $this->id;
}
/**
* Return the settings for the component.
*
* @since 1.0.0
* @updated 1.0.1
* @access public
* @return array
*/
public function get_settings() {
return array(
'name' => $this->label,
'description' => $this->description,
'id' => $this->id,
'width' => $this->width,
'section' => $this->section, // Customizer section to focus when click settings.
);
}
/**
* Get the section id.
*
* @return string
* @since 1.0.0
* @access public
*/
public function get_section_id() {
return $this->section;
}
/**
* Method to get protected properties for class.
*
* @param string $key The property key name.
*
* @return bool
* @since 1.0.0
* @access protected
*/
public function get_property( $key = '' ) {
if ( ! property_exists( $this, $key ) ) {
return false;
}
return $this->$key;
}
/**
* Define global settings.
*/
public function define_settings() {
$this->add_settings();
SettingsManager::get_instance()->add(
[
'id' => self::ALIGNMENT_ID,
'group' => $this->get_id(),
'tab' => SettingsManager::TAB_LAYOUT,
'transport' => 'post' . $this->get_builder_id(),
'sanitize_callback' => 'wp_filter_nohtml_kses',
'default' => $this->default_align,
'label' => __( 'Component Alignment', 'neve' ),
'type' => '\Neve\Customizer\Controls\Radio_Image',
'options' => [
'choices' => [
'left' => [
'url' => 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAS8AAADYBAMAAABIEHj+AAAAHlBMVEU/xPuk3/2w4/3P7f7V7/7a8f72/P/6/f/7/f////+OFjDPAAAA2ElEQVR42u3boQ2AMBCG0WLwBIElBMEarMBkWLZFV1RcQmibvG+CZy+XPz1tdicwMDAwMDAwMDAwMDAwMDAwMDCwfmHXFur4DbamUCMYGBgYGBgYGBgYWF+wcwq1ON/AwMDAwMDAwMDAwD6BBT8j5fa651u5AQwMDAwMDAwMDAysRVjwM1JudleCgYGBgYGBgYGBgdmMgIGBgYGBgYGBgYG1DrMZAQMDAwMDAwMDAwOrArMZAQMDAwMDAwMDAwOzGXFXgoGBgYGBgYGBgYGBgYGBgYGBgeWwF756V4XSI6GKAAAAAElFTkSuQmCC',
],
'center' => [
'url' => 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAS8AAADYBAMAAABIEHj+AAAAHlBMVEU/xPuk3/2w4/3P7f7V7/7a8f72/P/6/f/7/f////+OFjDPAAAA1UlEQVR42u3bMQ2AMBRF0SKhwQBpMIIFXCABCWysuGVma8hPU8i5Cs76hpeuPjsTGBgYGBgYGBgYGBgYGBgYGBgY2H9gpbqjLSxVt4GBgYGBgYGBgYGBfRCWq9vNNzAwMDAwMDAwMDCw97C1BDUHw6YU1AAGBgYGBgYGBgYG1iNsyUGNdiUYGBgYGBgYGBgYmM8IGBgYGBgYGBgYGNjXYD4jYGBgYGBgYGBgYGBNYD4jYGBgYGBgYGBgYGA+I3YlGBgYGBgYGBgYGBgYGBgYGBgY2BN2A1O85EFHf1n6AAAAAElFTkSuQmCC',
],
'right' => [
'url' => 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAS8AAADYBAMAAABIEHj+AAAAElBMVEU/xPuk3/2w4/3V7/7a8f7///90D081AAAA1ElEQVR42u3bsQ2AIBCGURoGsKA3JA7gCk5g4/6rWFtechow75vgtRf4yzVmZwEDAwMDAwMDAwMDAwMDAwMDAwP7I+zoobbPYGsJVcHAwMDAwMDAwMDA5oLtS6jmfAMDAwMDAwMDAwMDC8B6VtkvIyWrCgYGBgYGBgYGBgY2ImzJqrkrwcDAwMDAwMDAwMBsRvy4AwMDAwMDAwMDA5sXZjMCBgYGBgYGBgYGBvYizGYEDAwMDAwMDAwMDMxmxF0JBgYGBgYGBgYGBgYGBgYGBgYG9oTdBpDUhkRAaPoAAAAASUVORK5CYII=',
],
],
],
'section' => $this->section,
]
);
SettingsManager::get_instance()->add(
[
'id' => self::PADDING_ID,
'group' => $this->get_id(),
'tab' => SettingsManager::TAB_LAYOUT,
'transport' => 'post' . $this->get_id(),
'sanitize_callback' => array( $this, 'sanitize_spacing_array' ),
'default' => array(
'desktop' => array(
'top' => '',
'right' => '',
'bottom' => '',
'left' => '',
),
'tablet' => array(
'top' => '',
'right' => '',
'bottom' => '',
'left' => '',
),
'mobile' => array(
'top' => '',
'right' => '',
'bottom' => '',
'left' => '',
),
'desktop-unit' => 'px',
'tablet-unit' => 'px',
'mobile-unit' => 'px',
),
'label' => __( 'Padding', 'neve' ),
'type' => '\HFG\Core\Customizer\SpacingControl',
'options' => [
'linked_choices' => true,
'unit_choices' => array( 'px', 'em', '%' ),
'choices' => array(
'top' => __( 'Top', 'neve' ),
'right' => __( 'Right', 'neve' ),
'bottom' => __( 'Bottom', 'neve' ),
'left' => __( 'Left', 'neve' ),
),
],
'section' => $this->section,
]
);
SettingsManager::get_instance()->add(
[
'id' => self::MARGIN_ID,
'group' => $this->get_id(),
'tab' => SettingsManager::TAB_LAYOUT,
'transport' => 'post' . $this->get_id(),
'sanitize_callback' => array( $this, 'sanitize_spacing_array' ),
'default' => array(
'desktop' => array(
'top' => '',
'right' => '',
'bottom' => '',
'left' => '',
),
'tablet' => array(
'top' => '',
'right' => '',
'bottom' => '',
'left' => '',
),
'mobile' => array(
'top' => '',
'right' => '',
'bottom' => '',
'left' => '',
),
'desktop-unit' => 'px',
'tablet-unit' => 'px',
'mobile-unit' => 'px',
),
'label' => __( 'Margin', 'neve' ),
'type' => '\HFG\Core\Customizer\SpacingControl',
'options' => [
'linked_choices' => true,
'unit_choices' => array( 'px', 'em', '%' ),
'choices' => array(
'top' => __( 'Top', 'neve' ),
'right' => __( 'Right', 'neve' ),
'bottom' => __( 'Bottom', 'neve' ),
'left' => __( 'Left', 'neve' ),
),
],
'section' => $this->section,
]
);
do_action( 'hfg_component_settings', $this->get_id() );
}
/**
* Get builder where component can be used.
*
* @return string Assigned builder.
*/
public function get_builder_id() {
return $this->builder_id;
}
/**
* Called to register component controls.
*
* @param WP_Customize_Manager $wp_customize The Customize Manager.
*
* @return WP_Customize_Manager
* @since 1.0.0
* @updated 1.0.1
* @access public
*/
public function customize_register( WP_Customize_Manager $wp_customize ) {
$description = ( isset( $this->description ) && ! empty( $this->description ) )
? $this->description
: '';
$wp_customize->add_section(
$this->section,
array(
'title' => $this->label,
'description' => $description,
'description_hidden' => ( $description !== '' ),
'priority' => $this->priority,
'panel' => $this->panel,
)
);
$wp_customize->register_control_type( '\HFG\Core\Customizer\SpacingControl' );
Settings\Manager::get_instance()->load( $this->get_id(), $wp_customize );
$wp_customize->selective_refresh->add_partial(
$this->get_id() . '_partial',
array(
'selector' => '.builder-item--' . $this->get_id(),
'settings' => Settings\Manager::get_instance()->get_transport_group( $this->get_id() ),
'render_callback' => [ $this, 'render' ],
)
);
return $wp_customize;
}
/**
* Render component markup.
*/
public function render() {
self::$current_component = $this->get_id();
if ( is_customize_preview() ) {
$style = $this->css_array_to_css( $this->add_style() );
echo '<style type="text/css">' . $style . '</style>'; // WPCS: XSS OK.
}
Main::get_instance()->load( 'component-wrapper' );
}
/**
* Method to add Component css styles.
*
* @param array $css_array An array containing css rules.
*
* @return array
* @since 1.0.0
* @access public
*/
public function add_style( array $css_array = array() ) {
$layout_padding = SettingsManager::get_instance()->get( $this->get_id() . '_' . self::PADDING_ID, null );
$selector = '.builder-item--' . $this->get_id() . ' > :first-child';
if ( $this->default_selector !== null ) {
$selector = $this->default_selector;
}
if ( isset( $layout_padding['mobile'] ) ) {
$css_array[' @media (max-width: 576px)'][ $selector ]['padding'] = $layout_padding['mobile']['top'] . $layout_padding['mobile-unit'] . ' ' .
$layout_padding['mobile']['right'] . $layout_padding['mobile-unit'] . ' ' .
$layout_padding['mobile']['bottom'] . $layout_padding['mobile-unit'] . ' ' .
$layout_padding['mobile']['left'] . $layout_padding['mobile-unit'];
}
if ( isset( $layout_padding['tablet'] ) ) {
$css_array[' @media (min-width: 576px)'][ $selector ]['padding'] = $layout_padding['tablet']['top'] . $layout_padding['tablet-unit'] . ' ' .
$layout_padding['tablet']['right'] . $layout_padding['tablet-unit'] . ' ' .
$layout_padding['tablet']['bottom'] . $layout_padding['tablet-unit'] . ' ' .
$layout_padding['tablet']['left'] . $layout_padding['tablet-unit'];
}
if ( isset( $layout_padding['desktop'] ) ) {
$css_array[' @media (min-width: 961px)'][ $selector ]['padding'] = $layout_padding['desktop']['top'] . $layout_padding['desktop-unit'] . ' ' .
$layout_padding['desktop']['right'] . $layout_padding['desktop-unit'] . ' ' .
$layout_padding['desktop']['bottom'] . $layout_padding['desktop-unit'] . ' ' .
$layout_padding['desktop']['left'] . $layout_padding['desktop-unit'];
}
$layout_margin = SettingsManager::get_instance()->get( $this->get_id() . '_' . self::MARGIN_ID, null );
$selector = '.builder-item--' . $this->get_id();
if ( isset( $layout_margin['mobile'] ) ) {
$css_array[' @media (max-width: 576px)'][ $selector ]['margin'] = $layout_margin['mobile']['top'] . $layout_margin['mobile-unit'] . ' ' .
$layout_margin['mobile']['right'] . $layout_margin['mobile-unit'] . ' ' .
$layout_margin['mobile']['bottom'] . $layout_margin['mobile-unit'] . ' ' .
$layout_margin['mobile']['left'] . $layout_margin['mobile-unit'];
}
if ( isset( $layout_margin['tablet'] ) ) {
$css_array[' @media (min-width: 576px)'][ $selector ]['margin'] = $layout_margin['tablet']['top'] . $layout_margin['tablet-unit'] . ' ' .
$layout_margin['tablet']['right'] . $layout_margin['tablet-unit'] . ' ' .
$layout_margin['tablet']['bottom'] . $layout_margin['tablet-unit'] . ' ' .
$layout_margin['tablet']['left'] . $layout_margin['tablet-unit'];
}
if ( isset( $layout_margin['desktop'] ) ) {
$css_array[' @media (min-width: 961px)'][ $selector ]['margin'] = $layout_margin['desktop']['top'] . $layout_margin['desktop-unit'] . ' ' .
$layout_margin['desktop']['right'] . $layout_margin['desktop-unit'] . ' ' .
$layout_margin['desktop']['bottom'] . $layout_margin['desktop-unit'] . ' ' .
$layout_margin['desktop']['left'] . $layout_margin['desktop-unit'];
}
return $css_array;
}
/**
* Assign component to builder.
*
* @param string $builder_id Builder unique id.
*/
public function assign_builder( $builder_id ) {
$this->builder_id = $builder_id;
}
}
 | 1 | 19,231 | You can use a different name, like `maybe_activate` or `is_active`, without `component` in the method name, since it is already part of the class name. E.g. `$component->should_component_be_active()` uses the word `component` twice. | Codeinwp-neve | php |
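A minimal sketch of the rename suggested above (using `is_active`, one of the proposed alternatives; the body is unchanged from the patch and the final name is the maintainers' call):

	/**
	 * Method to filter component loading if needed.
	 *
	 * Sketch only: same body as the proposed should_component_be_active(),
	 * renamed so a call site reads $component->is_active() without
	 * repeating the word "component".
	 *
	 * @since 1.0.1
	 * @access public
	 * @return bool
	 */
	public function is_active() {
		return true;
	}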
@@ -114,6 +114,11 @@ class OrderedBulkOperation extends BulkOperationBase {
*/
execute(_writeConcern, options, callback) {
const ret = this.bulkExecute(_writeConcern, options, callback);
+
+ if (!(ret && ret.options && ret.callback)) {
+ return ret;
+ }
+
options = ret.options;
callback = ret.callback;
| 1 | 'use strict';
const common = require('./common');
const BulkOperationBase = common.BulkOperationBase;
const utils = require('../utils');
const toError = utils.toError;
const handleCallback = utils.handleCallback;
const BulkWriteResult = common.BulkWriteResult;
const Batch = common.Batch;
const mergeBatchResults = common.mergeBatchResults;
const executeOperation = utils.executeOperation;
const MongoWriteConcernError = require('mongodb-core').MongoWriteConcernError;
const handleMongoWriteConcernError = require('./common').handleMongoWriteConcernError;
const bson = common.bson;
/**
* Add to internal list of Operations
*
* @param {OrderedBulkOperation} bulkOperation
* @param {number} docType number indicating the document type
* @param {object} document
* @return {OrderedBulkOperation}
*/
function addToOperationsList(bulkOperation, docType, document) {
// Get the bsonSize
const bsonSize = bson.calculateObjectSize(document, {
checkKeys: false
});
// Throw error if the doc is bigger than the max BSON size
if (bsonSize >= bulkOperation.s.maxBatchSizeBytes)
throw toError('document is larger than the maximum size ' + bulkOperation.s.maxBatchSizeBytes);
// Create a new batch object if we don't have a current one
if (bulkOperation.s.currentBatch == null)
bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);
const maxKeySize = bulkOperation.s.maxKeySize;
// Check if we need to create a new batch
if (
bulkOperation.s.currentBatchSize + 1 >= bulkOperation.s.maxWriteBatchSize ||
bulkOperation.s.currentBatchSizeBytes + maxKeySize + bsonSize >=
bulkOperation.s.maxBatchSizeBytes ||
bulkOperation.s.currentBatch.batchType !== docType
) {
// Save the batch to the execution stack
bulkOperation.s.batches.push(bulkOperation.s.currentBatch);
// Create a new batch
bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);
// Reset the current size trackers
bulkOperation.s.currentBatchSize = 0;
bulkOperation.s.currentBatchSizeBytes = 0;
}
if (docType === common.INSERT) {
bulkOperation.s.bulkResult.insertedIds.push({
index: bulkOperation.s.currentIndex,
_id: document._id
});
}
// We have an array of documents
if (Array.isArray(document)) {
throw toError('operation passed in cannot be an Array');
}
bulkOperation.s.currentBatch.originalIndexes.push(bulkOperation.s.currentIndex);
bulkOperation.s.currentBatch.operations.push(document);
bulkOperation.s.currentBatchSize += 1;
bulkOperation.s.currentBatchSizeBytes += maxKeySize + bsonSize;
bulkOperation.s.currentIndex += 1;
// Return bulkOperation
return bulkOperation;
}
/**
* Create a new OrderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @property {number} length Get the number of operations in the bulk.
* @return {OrderedBulkOperation} a OrderedBulkOperation instance.
*/
class OrderedBulkOperation extends BulkOperationBase {
constructor(topology, collection, options) {
options = options || {};
options = Object.assign(options, { addToOperationsList });
super(topology, collection, options, true);
}
/**
* The callback format for results
* @callback OrderedBulkOperation~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {BulkWriteResult} result The bulk write result.
*/
/**
* Execute the ordered bulk operation
*
* @method
* @param {object} [options] Optional settings.
* @param {(number|string)} [options.w] The write concern.
* @param {number} [options.wtimeout] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.fsync=false] Specify a file sync write concern.
* @param {OrderedBulkOperation~resultCallback} [callback] The result callback
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
execute(_writeConcern, options, callback) {
const ret = this.bulkExecute(_writeConcern, options, callback);
options = ret.options;
callback = ret.callback;
return executeOperation(this.s.topology, executeCommands, [this, options, callback]);
}
}
/**
* Execute next write command in a chain
*
* @param {OrderedBulkOperation} bulkOperation
* @param {object} options
* @param {function} callback
*/
function executeCommands(bulkOperation, options, callback) {
if (bulkOperation.s.batches.length === 0) {
return handleCallback(callback, null, new BulkWriteResult(bulkOperation.s.bulkResult));
}
// Ordered execution of the command
const batch = bulkOperation.s.batches.shift();
function resultHandler(err, result) {
// Error is a driver related error not a bulk op error, terminate
if (((err && err.driver) || (err && err.message)) && !(err instanceof MongoWriteConcernError)) {
return handleCallback(callback, err);
}
    // If we have an error
if (err) err.ok = 0;
if (err instanceof MongoWriteConcernError) {
return handleMongoWriteConcernError(batch, bulkOperation.s.bulkResult, true, err, callback);
}
// Merge the results together
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult);
const mergeResult = mergeBatchResults(true, batch, bulkOperation.s.bulkResult, err, result);
if (mergeResult != null) {
return handleCallback(callback, null, writeResult);
}
if (bulkOperation.handleWriteError(callback, writeResult)) return;
// Execute the next command in line
executeCommands(bulkOperation, options, callback);
}
bulkOperation.finalOptionsHandler({ options, batch, resultHandler }, callback);
}
/**
* Returns an unordered batch object
* @ignore
*/
function initializeOrderedBulkOp(topology, collection, options) {
return new OrderedBulkOperation(topology, collection, options);
}
initializeOrderedBulkOp.OrderedBulkOperation = OrderedBulkOperation;
module.exports = initializeOrderedBulkOp;
module.exports.Bulk = OrderedBulkOperation;
| 1 | 15,187 | I think this might not be a complete enough check: what if `options` is `null`/`undefined`? | mongodb-node-mongodb-native | js |
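A sketch of a more defensive guard along the lines of that comment (not the driver's actual fix; whether `bulkExecute` can return a Promise here is an assumption based on the surrounding callback/Promise handling):

  execute(_writeConcern, options, callback) {
    const ret = this.bulkExecute(_writeConcern, options, callback);

    // bulkExecute may already have resolved the call (e.g. returned a Promise
    // or nothing); only continue when it handed back options and a callback.
    if (ret == null || ret.options == null || typeof ret.callback !== 'function') {
      return ret;
    }

    options = ret.options;
    callback = ret.callback;
    return executeOperation(this.s.topology, executeCommands, [this, options, callback]);
  }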
@@ -0,0 +1,19 @@
+require "rails_helper"
+
+feature "Masquerade" do
+ scenario "admin masquerades as a user" do
+ email = "foo@bar.com"
+ user = create(:user, email: email)
+ admin = create(:user, admin: true)
+
+ sign_in_as(admin)
+ visit admin_users_path
+ click_on("Masquerade")
+
+ expect(page).to have_flash("Now masquerading as #{email}")
+ end
+
+ def have_flash(text)
+ have_css(".flash", text: text)
+ end
+end | 1 | 1 | 15,566 | Useless assignment to variable - `user`. | thoughtbot-upcase | rb |
|
@@ -74,8 +74,8 @@ module Travis
sh.echo "#{release} is not installed. Downloading and installing pre-build binary.", ansi: :yellow
sh.echo "Downloading archive: ${archive_url}", ansi: :yellow
- sh.cmd "wget -o ${TRAVIS_HOME}/erlang.tar.bz2 ${archive_url}"
- sh.cmd "mkdir -p ~/otp && tar -xf #{archive_name(release)} -C ~/otp/", echo: true
+ sh.cmd "wget -O ${TRAVIS_HOME}/erlang.tar.bz2 ${archive_url}"
+ sh.cmd "mkdir -p ~/otp && tar -xf ${TRAVIS_HOME}/erlang.tar.bz2 -C ~/otp/", echo: true
sh.cmd "mkdir -p ~/.kerl", echo: true
sh.cmd "echo '#{release},#{release}' >> ~/.kerl/otp_builds", echo: true
sh.cmd "echo '#{release} ${TRAVIS_HOME}/otp/#{release}' >> ~/.kerl/otp_builds", echo: true | 1 | module Travis
module Build
class Script
class Erlang < Script
DEFAULTS = {
otp_release: 'R14B04'
}
def export
super
sh.export 'TRAVIS_OTP_RELEASE', otp_release, echo: false
end
def setup
super
sh.if "! -f #{activate_file}" do
install_erlang otp_release
end
sh.cmd "source #{activate_file}"
end
def install
sh.if "#{rebar_configured} && -f ./rebar" do
sh.cmd './rebar get-deps', fold: 'install', retry: true
end
sh.elif rebar_configured do
sh.if "-z $(command -v rebar3)" do
sh.cmd 'rebar get-deps', fold: 'install', retry: true
end
end
end
def script
sh.if "#{rebar_configured} && -f ./rebar" do
sh.cmd './rebar compile && ./rebar skip_deps=true eunit'
end
sh.elif rebar_configured do
sh.if "-n $(command -v rebar3)" do
sh.cmd 'rebar3 eunit'
end
sh.else do
sh.cmd 'rebar compile && rebar skip_deps=true eunit'
end
end
sh.else do
sh.cmd 'make test'
end
end
def cache_slug
super << '--otp-' << otp_release
end
private
def otp_release
Array(config[:otp_release]).first.to_s
end
def rebar_configured
'(-f rebar.config || -f Rebar.config)'
end
def activate_file
"${TRAVIS_HOME}/otp/#{otp_release}/activate"
end
def archive_name(release)
"erlang-#{release}-nonroot.tar.bz2"
end
def install_erlang(release)
sh.raw archive_url_for('travis-otp-releases',release, 'erlang').sub(/\.tar\.bz2/, '-nonroot.tar.bz2')
sh.echo "#{release} is not installed. Downloading and installing pre-build binary.", ansi: :yellow
sh.echo "Downloading archive: ${archive_url}", ansi: :yellow
sh.cmd "wget -o ${TRAVIS_HOME}/erlang.tar.bz2 ${archive_url}"
sh.cmd "mkdir -p ~/otp && tar -xf #{archive_name(release)} -C ~/otp/", echo: true
sh.cmd "mkdir -p ~/.kerl", echo: true
sh.cmd "echo '#{release},#{release}' >> ~/.kerl/otp_builds", echo: true
sh.cmd "echo '#{release} ${TRAVIS_HOME}/otp/#{release}' >> ~/.kerl/otp_builds", echo: true
sh.raw "rm -f ${TRAVIS_HOME}/erlang.tar.bz2"
end
end
end
end
end
| 1 | 17,373 | I think we would want to keep `-O` instead. I wonder how this worked before, though. | travis-ci-travis-build | rb |
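For reference, the distinction behind that comment (generic wget behaviour, not project code): lowercase `-o` only redirects wget's log output, so the old command left the archive in the working directory under its server-side name, whereas uppercase `-O` writes the downloaded file to the given path, which is what the later `tar -xf ${TRAVIS_HOME}/erlang.tar.bz2` step expects.

# -o <file>: write wget's *log messages* to <file>; the download keeps its own name
wget -o ${TRAVIS_HOME}/wget.log "${archive_url}"
# -O <file>: write the *downloaded archive* to <file>
wget -O ${TRAVIS_HOME}/erlang.tar.bz2 "${archive_url}"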
@@ -26,13 +26,17 @@ namespace OpenTelemetry.Metrics
internal sealed class AggregatorStore
{
private static readonly ObjectArrayEqualityComparer ObjectArrayComparer = new ObjectArrayEqualityComparer();
+ private static readonly StringArrayEqualityComparer StringArrayComparer = new StringArrayEqualityComparer();
private readonly object lockZeroTags = new object();
private readonly HashSet<string> tagKeysInteresting;
private readonly int tagsKeysInterestingCount;
+ private readonly ConcurrentDictionary<string[], string[]> tagKeyCombinations =
+ new ConcurrentDictionary<string[], string[]>(StringArrayComparer);
+
// Two-Level lookup. TagKeys x [ TagValues x Metrics ]
private readonly ConcurrentDictionary<string[], ConcurrentDictionary<object[], int>> keyValue2MetricAggs =
- new ConcurrentDictionary<string[], ConcurrentDictionary<object[], int>>(new StringArrayEqualityComparer());
+ new ConcurrentDictionary<string[], ConcurrentDictionary<object[], int>>(StringArrayComparer);
private readonly AggregationTemporality temporality;
private readonly string name; | 1 | // <copyright file="AggregatorStore.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading;
using OpenTelemetry.Internal;
namespace OpenTelemetry.Metrics
{
internal sealed class AggregatorStore
{
private static readonly ObjectArrayEqualityComparer ObjectArrayComparer = new ObjectArrayEqualityComparer();
private readonly object lockZeroTags = new object();
private readonly HashSet<string> tagKeysInteresting;
private readonly int tagsKeysInterestingCount;
// Two-Level lookup. TagKeys x [ TagValues x Metrics ]
private readonly ConcurrentDictionary<string[], ConcurrentDictionary<object[], int>> keyValue2MetricAggs =
new ConcurrentDictionary<string[], ConcurrentDictionary<object[], int>>(new StringArrayEqualityComparer());
private readonly AggregationTemporality temporality;
private readonly string name;
private readonly string metricPointCapHitMessage;
private readonly bool outputDelta;
private readonly MetricPoint[] metricPoints;
private readonly int[] currentMetricPointBatch;
private readonly AggregationType aggType;
private readonly double[] histogramBounds;
private readonly UpdateLongDelegate updateLongCallback;
private readonly UpdateDoubleDelegate updateDoubleCallback;
private readonly int maxMetricPoints;
private int metricPointIndex = 0;
private int batchSize = 0;
private int metricCapHitMessageLogged;
private bool zeroTagMetricPointInitialized;
private DateTimeOffset startTimeExclusive;
private DateTimeOffset endTimeInclusive;
internal AggregatorStore(
string name,
AggregationType aggType,
AggregationTemporality temporality,
int maxMetricPoints,
double[] histogramBounds,
string[] tagKeysInteresting = null)
{
this.name = name;
this.maxMetricPoints = maxMetricPoints;
this.metricPointCapHitMessage = $"Maximum MetricPoints limit reached for this Metric stream. Configured limit: {this.maxMetricPoints}";
this.metricPoints = new MetricPoint[maxMetricPoints];
this.currentMetricPointBatch = new int[maxMetricPoints];
this.aggType = aggType;
this.temporality = temporality;
this.outputDelta = temporality == AggregationTemporality.Delta ? true : false;
this.histogramBounds = histogramBounds;
this.startTimeExclusive = DateTimeOffset.UtcNow;
if (tagKeysInteresting == null)
{
this.updateLongCallback = this.UpdateLong;
this.updateDoubleCallback = this.UpdateDouble;
}
else
{
this.updateLongCallback = this.UpdateLongCustomTags;
this.updateDoubleCallback = this.UpdateDoubleCustomTags;
var hs = new HashSet<string>(tagKeysInteresting, StringComparer.Ordinal);
this.tagKeysInteresting = hs;
this.tagsKeysInterestingCount = hs.Count;
}
}
private delegate void UpdateLongDelegate(long value, ReadOnlySpan<KeyValuePair<string, object>> tags);
private delegate void UpdateDoubleDelegate(double value, ReadOnlySpan<KeyValuePair<string, object>> tags);
internal void Update(long value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
this.updateLongCallback(value, tags);
}
internal void Update(double value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
this.updateDoubleCallback(value, tags);
}
internal int Snapshot()
{
this.batchSize = 0;
var indexSnapshot = Math.Min(this.metricPointIndex, this.maxMetricPoints - 1);
if (this.temporality == AggregationTemporality.Delta)
{
this.SnapshotDelta(indexSnapshot);
}
else
{
this.SnapshotCumulative(indexSnapshot);
}
this.endTimeInclusive = DateTimeOffset.UtcNow;
return this.batchSize;
}
internal void SnapshotDelta(int indexSnapshot)
{
for (int i = 0; i <= indexSnapshot; i++)
{
ref var metricPoint = ref this.metricPoints[i];
if (metricPoint.MetricPointStatus == MetricPointStatus.NoCollectPending)
{
continue;
}
metricPoint.TakeSnapshot(this.outputDelta);
this.currentMetricPointBatch[this.batchSize] = i;
this.batchSize++;
}
if (this.endTimeInclusive != default)
{
this.startTimeExclusive = this.endTimeInclusive;
}
}
internal void SnapshotCumulative(int indexSnapshot)
{
for (int i = 0; i <= indexSnapshot; i++)
{
ref var metricPoint = ref this.metricPoints[i];
if (metricPoint.StartTime == default)
{
continue;
}
metricPoint.TakeSnapshot(this.outputDelta);
this.currentMetricPointBatch[this.batchSize] = i;
this.batchSize++;
}
}
internal MetricPointsAccessor GetMetricPoints()
{
return new MetricPointsAccessor(this.metricPoints, this.currentMetricPointBatch, this.batchSize, this.startTimeExclusive, this.endTimeInclusive);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void InitializeZeroTagPointIfNotInitialized()
{
if (!this.zeroTagMetricPointInitialized)
{
lock (this.lockZeroTags)
{
if (!this.zeroTagMetricPointInitialized)
{
var dt = DateTimeOffset.UtcNow;
this.metricPoints[0] = new MetricPoint(this.aggType, dt, null, null, this.histogramBounds);
this.zeroTagMetricPointInitialized = true;
}
}
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private int LookupAggregatorStore(string[] tagKeys, object[] tagValues, int length)
{
int aggregatorIndex;
string[] seqKey = null;
// GetOrAdd by TagKeys at 1st Level of 2-level dictionary structure.
// Get back a Dictionary of [ Values x Metrics[] ].
if (!this.keyValue2MetricAggs.TryGetValue(tagKeys, out var value2metrics))
{
// Note: We are using storage from ThreadStatic, so need to make a deep copy for Dictionary storage.
seqKey = new string[length];
tagKeys.CopyTo(seqKey, 0);
value2metrics = new ConcurrentDictionary<object[], int>(ObjectArrayComparer);
if (!this.keyValue2MetricAggs.TryAdd(seqKey, value2metrics))
{
this.keyValue2MetricAggs.TryGetValue(seqKey, out value2metrics);
}
}
            // GetOrAdd by TagValues at 2nd Level of 2-level dictionary structure.
// Get back Metrics[].
if (!value2metrics.TryGetValue(tagValues, out aggregatorIndex))
{
aggregatorIndex = this.metricPointIndex;
if (aggregatorIndex >= this.maxMetricPoints)
{
// sorry! out of data points.
// TODO: Once we support cleanup of
// unused points (typically with delta)
// we can re-claim them here.
return -1;
}
lock (value2metrics)
{
// check again after acquiring lock.
if (!value2metrics.TryGetValue(tagValues, out aggregatorIndex))
{
aggregatorIndex = Interlocked.Increment(ref this.metricPointIndex);
if (aggregatorIndex >= this.maxMetricPoints)
{
// sorry! out of data points.
// TODO: Once we support cleanup of
// unused points (typically with delta)
// we can re-claim them here.
return -1;
}
// Note: We are using storage from ThreadStatic, so need to make a deep copy for Dictionary storage.
if (seqKey == null)
{
seqKey = new string[length];
tagKeys.CopyTo(seqKey, 0);
}
var seqVal = new object[length];
tagValues.CopyTo(seqVal, 0);
ref var metricPoint = ref this.metricPoints[aggregatorIndex];
var dt = DateTimeOffset.UtcNow;
metricPoint = new MetricPoint(this.aggType, dt, seqKey, seqVal, this.histogramBounds);
// Add to dictionary *after* initializing MetricPoint
// as other threads can start writing to the
// MetricPoint, if dictionary entry found.
value2metrics.TryAdd(seqVal, aggregatorIndex);
}
}
}
return aggregatorIndex;
}
private void UpdateLong(long value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
try
{
var index = this.FindMetricAggregatorsDefault(tags);
if (index < 0)
{
if (Interlocked.CompareExchange(ref this.metricCapHitMessageLogged, 1, 0) == 0)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, this.metricPointCapHitMessage, "Modify instrumentation to reduce the number of unique key/value pair combinations. Or use MeterProviderBuilder.SetMaxMetricPointsPerMetricStream to set higher limit.");
}
return;
}
this.metricPoints[index].Update(value);
}
catch (Exception)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, "SDK internal error occurred.", "Contact SDK owners.");
}
}
private void UpdateLongCustomTags(long value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
try
{
var index = this.FindMetricAggregatorsCustomTag(tags);
if (index < 0)
{
if (Interlocked.CompareExchange(ref this.metricCapHitMessageLogged, 1, 0) == 0)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, this.metricPointCapHitMessage, "Modify instrumentation to reduce the number of unique key/value pair combinations. Or use MeterProviderBuilder.SetMaxMetricPointsPerMetricStream to set higher limit.");
}
return;
}
this.metricPoints[index].Update(value);
}
catch (Exception)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, "SDK internal error occurred.", "Contact SDK owners.");
}
}
private void UpdateDouble(double value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
try
{
var index = this.FindMetricAggregatorsDefault(tags);
if (index < 0)
{
if (Interlocked.CompareExchange(ref this.metricCapHitMessageLogged, 1, 0) == 0)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, this.metricPointCapHitMessage, "Modify instrumentation to reduce the number of unique key/value pair combinations. Or use MeterProviderBuilder.SetMaxMetricPointsPerMetricStream to set higher limit.");
}
return;
}
this.metricPoints[index].Update(value);
}
catch (Exception)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, "SDK internal error occurred.", "Contact SDK owners.");
}
}
private void UpdateDoubleCustomTags(double value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
try
{
var index = this.FindMetricAggregatorsCustomTag(tags);
if (index < 0)
{
if (Interlocked.CompareExchange(ref this.metricCapHitMessageLogged, 1, 0) == 0)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, this.metricPointCapHitMessage, "Modify instrumentation to reduce the number of unique key/value pair combinations. Or use MeterProviderBuilder.SetMaxMetricPointsPerMetricStream to set higher limit.");
}
return;
}
this.metricPoints[index].Update(value);
}
catch (Exception)
{
OpenTelemetrySdkEventSource.Log.MeasurementDropped(this.name, "SDK internal error occurred.", "Contact SDK owners.");
}
}
private int FindMetricAggregatorsDefault(ReadOnlySpan<KeyValuePair<string, object>> tags)
{
int tagLength = tags.Length;
if (tagLength == 0)
{
this.InitializeZeroTagPointIfNotInitialized();
return 0;
}
var storage = ThreadStaticStorage.GetStorage();
storage.SplitToKeysAndValues(tags, tagLength, out var tagKeys, out var tagValues);
if (tagLength > 1)
{
Array.Sort(tagKeys, tagValues);
}
return this.LookupAggregatorStore(tagKeys, tagValues, tagLength);
}
private int FindMetricAggregatorsCustomTag(ReadOnlySpan<KeyValuePair<string, object>> tags)
{
int tagLength = tags.Length;
if (tagLength == 0 || this.tagsKeysInterestingCount == 0)
{
this.InitializeZeroTagPointIfNotInitialized();
return 0;
}
// TODO: Get only interesting tags
// from the incoming tags
var storage = ThreadStaticStorage.GetStorage();
storage.SplitToKeysAndValues(tags, tagLength, this.tagKeysInteresting, out var tagKeys, out var tagValues, out var actualLength);
// Actual number of tags depend on how many
// of the incoming tags has user opted to
// select.
if (actualLength == 0)
{
this.InitializeZeroTagPointIfNotInitialized();
return 0;
}
if (actualLength > 1)
{
Array.Sort(tagKeys, tagValues);
}
return this.LookupAggregatorStore(tagKeys, tagValues, actualLength);
}
}
}
 | 1 | 23,156 | Another alternative that avoids the risk of too many entries (when the user keeps providing keys in a different order): keep the dictionary as before; if the tagKeys lookup fails, sort and look up again; if that also fails, insert both the original tagKeys and the sorted copy into the dictionary. That way we store at most 2 entries per key set, and the hot path does a single lookup instead of 2. | open-telemetry-opentelemetry-dotnet | .cs |
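A rough sketch of that alternative (hypothetical helper, not the SDK's code): try the caller's key order first, fall back to the sorted order, and register at most those two orders per key set. Note the second-level value lookup would then also have to use the sorted value order consistently.

        // Hypothetical helper illustrating the "at most two entries per key set" idea.
        private ConcurrentDictionary<object[], int> LookupValue2MetricsSketch(string[] tagKeys, object[] tagValues, int length)
        {
            // Hot path: a single lookup using the order the caller provided.
            if (this.keyValue2MetricAggs.TryGetValue(tagKeys, out var value2metrics))
            {
                return value2metrics;
            }

            // Slow path: sort copies of the keys/values; the sorted keys are the canonical entry.
            var sortedKeys = new string[length];
            var sortedValues = new object[length];
            Array.Copy(tagKeys, sortedKeys, length);
            Array.Copy(tagValues, sortedValues, length);
            Array.Sort(sortedKeys, sortedValues);

            value2metrics = this.keyValue2MetricAggs.GetOrAdd(
                sortedKeys,
                _ => new ConcurrentDictionary<object[], int>(ObjectArrayComparer));

            // Also remember the caller's original order so the next Update() with this
            // order succeeds on the first TryGetValue; at most 2 keys share this value.
            var originalKeys = new string[length];
            Array.Copy(tagKeys, originalKeys, length);
            this.keyValue2MetricAggs.TryAdd(originalKeys, value2metrics);
            return value2metrics;
        }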
@@ -370,6 +370,13 @@ void PlanNode::releaseSymbols() {
}
}
+void PlanNode::updateSymbols() {
+ auto symTbl = qctx_->symTable();
+ for (auto out : outputVars_) {
+ out && symTbl->updateWrittenBy(out->name, out->name, this);
+ }
+}
+
std::ostream& operator<<(std::ostream& os, PlanNode::Kind kind) {
os << PlanNode::toString(kind);
return os; | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "graph/planner/plan/PlanNode.h"
#include <folly/String.h>
#include <folly/json.h>
#include <memory>
#include <vector>
#include "common/graph/Response.h"
#include "graph/context/QueryContext.h"
#include "graph/util/ToJson.h"
namespace nebula {
namespace graph {
PlanNode::PlanNode(QueryContext* qctx, Kind kind) : qctx_(qctx), kind_(kind) {
DCHECK(qctx != nullptr);
id_ = qctx_->genId();
auto varName = folly::stringPrintf("__%s_%ld", toString(kind_), id_);
auto* variable = qctx_->symTable()->newVariable(varName);
VLOG(1) << "New variable: " << varName;
outputVars_.emplace_back(variable);
qctx_->symTable()->writtenBy(varName, this);
}
// static
const char* PlanNode::toString(PlanNode::Kind kind) {
switch (kind) {
case Kind::kUnknown:
return "Unknown";
case Kind::kStart:
return "Start";
case Kind::kGetNeighbors:
return "GetNeighbors";
case Kind::kGetVertices:
return "GetVertices";
case Kind::kGetEdges:
return "GetEdges";
case Kind::kIndexScan:
return "IndexScan";
case Kind::kTagIndexFullScan:
return "TagIndexFullScan";
case Kind::kTagIndexRangeScan:
return "TagIndexRangeScan";
case Kind::kTagIndexPrefixScan:
return "TagIndexPrefixScan";
case Kind::kEdgeIndexFullScan:
return "EdgeIndexFullScan";
case Kind::kEdgeIndexRangeScan:
return "EdgeIndexRangeScan";
case Kind::kEdgeIndexPrefixScan:
return "EdgeIndexPrefixScan";
case Kind::kScanVertices:
return "ScanVertices";
case Kind::kScanEdges:
return "ScanEdges";
case Kind::kFilter:
return "Filter";
case Kind::kUnion:
return "Union";
case Kind::kUnionAllVersionVar:
return "UnionAllVersionVar";
case Kind::kIntersect:
return "Intersect";
case Kind::kMinus:
return "Minus";
case Kind::kProject:
return "Project";
case Kind::kUnwind:
return "Unwind";
case Kind::kSort:
return "Sort";
case Kind::kTopN:
return "TopN";
case Kind::kLimit:
return "Limit";
case Kind::kSample:
return "Sample";
case Kind::kAggregate:
return "Aggregate";
case Kind::kSelect:
return "Select";
case Kind::kLoop:
return "Loop";
case Kind::kDedup:
return "Dedup";
case Kind::kPassThrough:
return "PassThrough";
case Kind::kAssign:
return "Assign";
case Kind::kSwitchSpace:
return "RegisterSpaceToSession";
case Kind::kCreateSpace:
return "CreateSpace";
case Kind::kCreateSpaceAs:
return "CreateSpaceAs";
case Kind::kCreateTag:
return "CreateTag";
case Kind::kCreateEdge:
return "CreateEdge";
case Kind::kDescSpace:
return "DescSpace";
case Kind::kDescTag:
return "DescTag";
case Kind::kDescEdge:
return "DescEdge";
case Kind::kAlterTag:
return "AlterTag";
case Kind::kAlterEdge:
return "AlterEdge";
case Kind::kCreateTagIndex:
return "CreateTagIndex";
case Kind::kCreateEdgeIndex:
return "CreateEdgeIndex";
case Kind::kCreateFTIndex:
return "CreateFTIndex";
case Kind::kDropTagIndex:
return "DropTagIndex";
case Kind::kDropEdgeIndex:
return "DropEdgeIndex";
case Kind::kDropFTIndex:
return "DropFTIndex";
case Kind::kDescTagIndex:
return "DescTagIndex";
case Kind::kDescEdgeIndex:
return "DescEdgeIndex";
case Kind::kInsertVertices:
return "InsertVertices";
case Kind::kInsertEdges:
return "InsertEdges";
case Kind::kDataCollect:
return "DataCollect";
// ACL
case Kind::kCreateUser:
return "CreateUser";
case Kind::kDropUser:
return "DropUser";
case Kind::kUpdateUser:
return "UpdateUser";
case Kind::kGrantRole:
return "GrantRole";
case Kind::kRevokeRole:
return "RevokeRole";
case Kind::kChangePassword:
return "ChangePassword";
case Kind::kListUserRoles:
return "ListUserRoles";
case Kind::kListUsers:
return "ListUsers";
case Kind::kDescribeUser:
return "DescribeUser";
case Kind::kListRoles:
return "ListRoles";
case Kind::kShowCreateSpace:
return "ShowCreateSpace";
case Kind::kShowCreateTag:
return "ShowCreateTag";
case Kind::kShowCreateEdge:
return "ShowCreateEdge";
case Kind::kShowCreateTagIndex:
return "ShowCreateTagIndex";
case Kind::kShowCreateEdgeIndex:
return "ShowCreateEdgeIndex";
case Kind::kDropSpace:
return "DropSpace";
case Kind::kDropTag:
return "DropTag";
case Kind::kDropEdge:
return "DropEdge";
case Kind::kShowSpaces:
return "ShowSpaces";
case Kind::kAlterSpace:
return "AlterSpaces";
case Kind::kShowTags:
return "ShowTags";
case Kind::kShowEdges:
return "ShowEdges";
case Kind::kShowTagIndexes:
return "ShowTagIndexes";
case Kind::kShowEdgeIndexes:
return "ShowEdgeIndexes";
case Kind::kShowTagIndexStatus:
return "ShowTagIndexStatus";
case Kind::kShowEdgeIndexStatus:
return "ShowEdgeIndexStatus";
case Kind::kCreateSnapshot:
return "CreateSnapshot";
case Kind::kDropSnapshot:
return "DropSnapshot";
case Kind::kShowSnapshots:
return "ShowSnapshots";
case Kind::kSubmitJob:
return "SubmitJob";
case Kind::kLeftJoin:
return "LeftJoin";
case Kind::kInnerJoin:
return "InnerJoin";
case Kind::kDeleteVertices:
return "DeleteVertices";
case Kind::kDeleteTags:
return "DeleteTags";
case Kind::kDeleteEdges:
return "DeleteEdges";
case Kind::kUpdateVertex:
return "UpdateVertex";
case Kind::kUpdateEdge:
return "UpdateEdge";
case Kind::kShowHosts:
return "ShowHosts";
case Kind::kShowMetaLeader:
return "ShowMetaLeader";
case Kind::kShowParts:
return "ShowParts";
case Kind::kShowCharset:
return "ShowCharset";
case Kind::kShowCollation:
return "ShowCollation";
case Kind::kShowConfigs:
return "ShowConfigs";
case Kind::kSetConfig:
return "SetConfig";
case Kind::kGetConfig:
return "GetConfig";
case Kind::kBFSShortest:
return "BFSShortest";
case Kind::kProduceSemiShortestPath:
return "ProduceSemiShortestPath";
case Kind::kConjunctPath:
return "ConjunctPath";
case Kind::kProduceAllPaths:
return "ProduceAllPaths";
case Kind::kCartesianProduct:
return "CartesianProduct";
case Kind::kSubgraph:
return "Subgraph";
case Kind::kAddHosts:
return "AddHosts";
case Kind::kDropHosts:
return "DropHosts";
// Zone
case Kind::kMergeZone:
return "MergeZone";
case Kind::kRenameZone:
return "RenameZone";
case Kind::kDropZone:
return "DropZone";
case Kind::kDivideZone:
return "DivideZone";
case Kind::kDescribeZone:
return "DescribeZone";
case Kind::kAddHostsIntoZone:
return "AddHostsIntoZone";
case Kind::kShowZones:
return "ShowZones";
case Kind::kAddListener:
return "AddListener";
case Kind::kRemoveListener:
return "RemoveListener";
case Kind::kShowListener:
return "ShowListener";
case Kind::kShowStats:
return "ShowStats";
// service search
case Kind::kShowServiceClients:
return "ShowServiceClients";
case Kind::kShowFTIndexes:
return "ShowFTIndexes";
case Kind::kSignInService:
return "SignInService";
case Kind::kSignOutService:
return "SignOutService";
case Kind::kDownload:
return "Download";
case Kind::kIngest:
return "Ingest";
case Kind::kShowSessions:
return "ShowSessions";
case Kind::kUpdateSession:
return "UpdateSession";
case Kind::kShowQueries:
return "ShowQueries";
case Kind::kKillQuery:
return "KillQuery";
case Kind::kTraverse:
return "Traverse";
case Kind::kAppendVertices:
return "AppendVertices";
case Kind::kBiLeftJoin:
return "BiLeftJoin";
case Kind::kBiInnerJoin:
return "BiInnerJoin";
case Kind::kBiCartesianProduct:
return "BiCartesianProduct";
case Kind::kArgument:
return "Argument";
    // no default so the compiler will warn when a case is missing
}
LOG(FATAL) << "Impossible kind plan node " << static_cast<int>(kind);
}
std::string PlanNode::toString() const {
return folly::stringPrintf("%s_%ld", toString(kind_), id_);
}
// static
void PlanNode::addDescription(std::string key, std::string value, PlanNodeDescription* desc) {
if (desc->description == nullptr) {
desc->description = std::make_unique<std::vector<Pair>>();
}
desc->description->emplace_back(Pair{std::move(key), std::move(value)});
}
void PlanNode::readVariable(const std::string& varname) {
auto varPtr = qctx_->symTable()->getVar(varname);
readVariable(varPtr);
}
void PlanNode::readVariable(Variable* varPtr) {
DCHECK(varPtr != nullptr);
inputVars_.emplace_back(varPtr);
qctx_->symTable()->readBy(varPtr->name, this);
}
void PlanNode::calcCost() {
VLOG(1) << "unimplemented cost calculation.";
}
void PlanNode::setOutputVar(const std::string& var) {
DCHECK_EQ(1, outputVars_.size());
auto* outputVarPtr = qctx_->symTable()->getVar(var);
DCHECK(outputVarPtr != nullptr);
auto oldVar = outputVars_[0]->name;
outputVars_[0] = outputVarPtr;
qctx_->symTable()->updateWrittenBy(oldVar, var, this);
}
void PlanNode::setInputVar(const std::string& varname, size_t idx) {
std::string oldVar = inputVar(idx);
auto symTable = qctx_->symTable();
auto varPtr = symTable->getVar(varname);
DCHECK(varPtr != nullptr);
inputVars_[idx] = varPtr;
if (!oldVar.empty()) {
symTable->updateReadBy(oldVar, varname, this);
} else {
symTable->readBy(varname, this);
}
}
std::unique_ptr<PlanNodeDescription> PlanNode::explain() const {
auto desc = std::make_unique<PlanNodeDescription>();
desc->id = id_;
desc->name = toString(kind_);
desc->outputVar = folly::toJson(util::toJson(outputVars_));
return desc;
}
void PlanNode::releaseSymbols() {
auto symTbl = qctx_->symTable();
for (auto in : inputVars_) {
in && symTbl->deleteReadBy(in->name, this);
}
for (auto out : outputVars_) {
out && symTbl->deleteWrittenBy(out->name, this);
}
}
std::ostream& operator<<(std::ostream& os, PlanNode::Kind kind) {
os << PlanNode::toString(kind);
return os;
}
SingleInputNode::SingleInputNode(QueryContext* qctx, Kind kind, const PlanNode* dep)
: SingleDependencyNode(qctx, kind, dep) {
if (dep != nullptr) {
readVariable(dep->outputVarPtr());
} else {
inputVars_.emplace_back(nullptr);
}
}
std::unique_ptr<PlanNodeDescription> SingleDependencyNode::explain() const {
auto desc = PlanNode::explain();
DCHECK(desc->dependencies == nullptr);
desc->dependencies.reset(new std::vector<int64_t>{dep()->id()});
return desc;
}
std::unique_ptr<PlanNodeDescription> SingleInputNode::explain() const {
auto desc = SingleDependencyNode::explain();
addDescription("inputVar", inputVar(), desc.get());
return desc;
}
BinaryInputNode::BinaryInputNode(QueryContext* qctx,
Kind kind,
const PlanNode* left,
const PlanNode* right)
: PlanNode(qctx, kind) {
addDep(left);
readVariable(left->outputVarPtr());
addDep(right);
readVariable(right->outputVarPtr());
}
std::unique_ptr<PlanNodeDescription> BinaryInputNode::explain() const {
auto desc = PlanNode::explain();
DCHECK(desc->dependencies == nullptr);
desc->dependencies.reset(new std::vector<int64_t>{left()->id(), right()->id()});
folly::dynamic inputVar = folly::dynamic::object();
inputVar.insert("leftVar", leftInputVar());
inputVar.insert("rightVar", rightInputVar());
addDescription("inputVar", folly::toJson(inputVar), desc.get());
return desc;
}
std::unique_ptr<PlanNodeDescription> VariableDependencyNode::explain() const {
auto desc = PlanNode::explain();
DCHECK(desc->dependencies == nullptr);
desc->dependencies.reset(new std::vector<int64_t>(dependIds()));
return desc;
}
void PlanNode::setColNames(std::vector<std::string> cols) {
qctx_->symTable()->setAliasGeneratedBy(cols, outputVarPtr(0)->name);
outputVarPtr(0)->colNames = std::move(cols);
}
} // namespace graph
} // namespace nebula
| 1 | 33,336 | if (out != nullptr) | vesoft-inc-nebula | cpp |
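i.e. the short-circuiting expression written as an explicit null check (a sketch of the suggestion; behaviour is identical):

void PlanNode::updateSymbols() {
  auto symTbl = qctx_->symTable();
  for (auto out : outputVars_) {
    if (out != nullptr) {
      symTbl->updateWrittenBy(out->name, out->name, this);
    }
  }
}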
@@ -79,6 +79,18 @@ def profile(user_name):
user = _get_user(user_name)
+ need_exact = request.args.get("exact")
+ if not need_exact:
+ listen_count = db_conn.get_listen_count_for_user(user_name)
+ # round off to nearest 10 as we can't guarantee that the values
+ # are not old, so show approximate values instead
+ if listen_count > 10:
+ listen_count = (listen_count / 10) * 10
+ have_exact = False
+ else:
+ listen_count = db_conn.get_listen_count_for_user(user_name, need_exact = True)
+ have_exact = True
+
# Getting data for current page
max_ts = request.args.get("max_ts")
if max_ts is not None: | 1 | from __future__ import absolute_import
from flask import Blueprint, render_template, request, url_for, Response, redirect, flash, current_app
from flask_login import current_user, login_required
from werkzeug.exceptions import NotFound, BadRequest, RequestEntityTooLarge, InternalServerError
from werkzeug.utils import secure_filename
from webserver.decorators import crossdomain
from datetime import datetime
from time import time
import webserver
from webserver import flash
import db.user
from db.exceptions import DatabaseException
from flask import make_response
from webserver.views.api_tools import convert_backup_to_native_format, insert_payload, validate_listen, \
MAX_ITEMS_PER_MESSYBRAINZ_LOOKUP, LISTEN_TYPE_IMPORT
from webserver.utils import sizeof_readable
from webserver.login import User
from webserver.redis_connection import _redis
from os import path, makedirs
import ujson
import zipfile
import re
import os
import pytz
LISTENS_PER_PAGE = 25
user_bp = Blueprint("user", __name__)
@user_bp.route("/<user_name>/scraper.js")
@crossdomain()
def lastfmscraper(user_name):
""" Fetch the scraper.js with proper variable injecting
"""
user_token = request.args.get("user_token")
lastfm_username = request.args.get("lastfm_username")
if user_token is None or lastfm_username is None:
raise NotFound
scraper = render_template(
"user/scraper.js",
base_url=url_for("api_v1.submit_listen", user_name=user_name, _external=True),
user_token=user_token,
lastfm_username=lastfm_username,
user_name=user_name,
lastfm_api_key=current_app.config['LASTFM_API_KEY'],
lastfm_api_url=current_app.config['LASTFM_API_URL'],
)
return Response(scraper, content_type="text/javascript")
@user_bp.route("/resettoken", methods=["GET", "POST"])
@login_required
def reset_token():
if request.method == "POST":
token = request.form.get("token")
if token != current_user.auth_token:
raise BadRequest("Can only reset token of currently logged in user")
reset = request.form.get("reset")
if reset == "yes":
try:
db.user.update_token(current_user.id)
flash.info("Access token reset")
except DatabaseException as e:
flash.error("Something went wrong! Unable to reset token right now.")
return redirect(url_for("user.import_data"))
else:
token = current_user.auth_token
return render_template(
"user/resettoken.html",
token = token,
)
@user_bp.route("/<user_name>")
def profile(user_name):
    # Which database to use for showing user listens.
db_conn = webserver.influx_connection._influx
# Which database to use to show playing_now stream.
playing_now_conn = webserver.redis_connection._redis
user = _get_user(user_name)
# Getting data for current page
max_ts = request.args.get("max_ts")
if max_ts is not None:
try:
max_ts = int(max_ts)
except ValueError:
raise BadRequest("Incorrect timestamp argument max_ts:" % request.args.get("max_ts"))
min_ts = request.args.get("min_ts")
if min_ts is not None:
try:
min_ts = int(min_ts)
except ValueError:
raise BadRequest("Incorrect timestamp argument min_ts:" % request.args.get("min_ts"))
if max_ts == None and min_ts == None:
max_ts = int(time())
args = {}
if max_ts:
args['to_ts'] = max_ts
else:
args['from_ts'] = min_ts
listens = []
for listen in db_conn.fetch_listens(user_name, limit=LISTENS_PER_PAGE, **args):
# Let's fetch one more listen, so we know to show a next page link or not
listens.append({
"track_metadata": listen.data,
"listened_at": listen.ts_since_epoch,
"listened_at_iso": listen.timestamp.isoformat() + "Z",
})
# Calculate if we need to show next/prev buttons
previous_listen_ts = None
next_listen_ts = None
if listens:
(min_ts_per_user, max_ts_per_user) = db_conn.get_timestamps_for_user(user_name)
if min_ts_per_user >= 0:
if listens[-1]['listened_at'] > min_ts_per_user:
next_listen_ts = listens[-1]['listened_at']
else:
next_listen_ts = None
if listens[0]['listened_at'] < max_ts_per_user:
previous_listen_ts = listens[0]['listened_at']
else:
previous_listen_ts = None
# If there are no previous listens then display now_playing
if not previous_listen_ts:
playing_now = playing_now_conn.get_playing_now(user_name)
if playing_now:
listen = {
"track_metadata": playing_now.data,
"playing_now": "true",
}
listens.insert(0, listen)
return render_template(
"user/profile.html",
user=user,
listens=listens,
previous_listen_ts=previous_listen_ts,
next_listen_ts=next_listen_ts,
spotify_uri=_get_spotify_uri_for_listens(listens)
)
@user_bp.route("/import")
@login_required
def import_data():
""" Displays the import page to user, giving various options """
# Return error if LASTFM_API_KEY is not given in config.py
if 'LASTFM_API_KEY' not in current_app.config or current_app.config['LASTFM_API_KEY'] == "":
return NotFound("LASTFM_API_KEY not specified.")
alpha_import_status = "NO_REQUEST"
redis_connection = _redis.redis
user_key = "{} {}".format(current_app.config['IMPORTER_SET_KEY_PREFIX'], current_user.musicbrainz_id)
if redis_connection.exists(user_key):
alpha_import_status = redis_connection.get(user_key)
return render_template(
"user/import.html",
user=current_user,
alpha_import_status=alpha_import_status,
scraper_url=url_for(
"user.lastfmscraper",
user_name=current_user.musicbrainz_id,
_external=True,
),
)
@user_bp.route("/export", methods=["GET", "POST"])
@login_required
def export_data():
""" Exporting the data to json """
if request.method == "POST":
db_conn = webserver.create_influx(current_app)
filename = current_user.musicbrainz_id + "_lb-" + datetime.today().strftime('%Y-%m-%d') + ".json"
# Fetch output and convert it into dict with keys as indexes
output = []
for index, obj in enumerate(db_conn.fetch_listens(current_user.musicbrainz_id)):
dic = obj.data
dic['timestamp'] = obj.ts_since_epoch
dic['album_msid'] = None if obj.album_msid is None else str(obj.album_msid)
dic['artist_msid'] = None if obj.artist_msid is None else str(obj.artist_msid)
dic['recording_msid'] = None if obj.recording_msid is None else str(obj.recording_msid)
output.append(dic)
response = make_response(ujson.dumps(output))
response.headers["Content-Disposition"] = "attachment; filename=" + filename
response.headers['Content-Type'] = 'application/json; charset=utf-8'
response.mimetype = "text/json"
return response
else:
return render_template("user/export.html", user=current_user)
@user_bp.route("/upload", methods=['GET', 'POST'])
@login_required
def upload():
if request.method == 'POST':
try:
f = request.files['file']
if f.filename == '':
flash('No file selected.')
return redirect(request.url)
except RequestEntityTooLarge:
raise RequestEntityTooLarge('Maximum filesize upload limit exceeded. File must be <=' + \
sizeof_readable(current_app.config['MAX_CONTENT_LENGTH']))
except:
raise InternalServerError("Something went wrong. Could not upload the file")
# Check upload folder
if not 'UPLOAD_FOLDER' in current_app.config:
raise InternalServerError("Could not upload the file. Upload folder not specified")
upload_path = path.join(path.abspath(current_app.config['UPLOAD_FOLDER']), current_user.musicbrainz_id)
if not path.isdir(upload_path):
makedirs(upload_path)
# Write to a file
filename = path.join(upload_path, secure_filename(f.filename))
f.save(filename)
if not zipfile.is_zipfile(filename):
raise BadRequest('Not a valid zip file.')
success = failure = 0
regex = re.compile('json/scrobbles/scrobbles-*')
try:
zf = zipfile.ZipFile(filename, 'r')
files = zf.namelist()
# Iterate over file that match the regex
for f in [f for f in files if regex.match(f)]:
try:
# Load listens file
jsonlist = ujson.loads(zf.read(f))
if not isinstance(jsonlist, list):
raise ValueError
except ValueError:
failure += 1
continue
payload = convert_backup_to_native_format(jsonlist)
for listen in payload:
validate_listen(listen, LISTEN_TYPE_IMPORT)
insert_payload(payload, current_user)
success += 1
except Exception, e:
raise BadRequest('Not a valid lastfm-backup-file.')
finally:
os.remove(filename)
flash('Congratulations! Your listens from %d files have been uploaded successfully.' % (success))
return redirect(url_for("user.import_data"))
def _get_user(user_name):
""" Get current username """
if current_user.is_authenticated() and \
current_user.musicbrainz_id == user_name:
return current_user
else:
user = db.user.get_by_mb_id(user_name)
if user is None:
raise NotFound("Cannot find user: %s" % user_name)
return User.from_dbrow(user)
def _get_spotify_uri_for_listens(listens):
def get_track_id_from_listen(listen):
additional_info = listen["track_metadata"]["additional_info"]
if "spotify_id" in additional_info:
return additional_info["spotify_id"].rsplit('/', 1)[-1]
else:
return None
track_ids = [get_track_id_from_listen(l) for l in listens]
track_ids = [t_id for t_id in track_ids if t_id]
if track_ids:
return "spotify:trackset:Recent listens:" + ",".join(track_ids)
else:
return None
@user_bp.route("/import/alpha")
@login_required
def import_from_alpha():
""" Just push the task into redis queue and then return to user page.
"""
redis_connection = _redis.redis
# push into the queue
value = "{} {}".format(current_user.musicbrainz_id, current_user.auth_token)
redis_connection.rpush(current_app.config['IMPORTER_QUEUE_KEY'], value)
# push username into redis so that we know that this user is in waiting
redis_connection.set("{} {}".format(current_app.config['IMPORTER_SET_KEY_PREFIX'], current_user.musicbrainz_id), "WAITING")
return redirect(url_for("user.import_data"))
| 1 | 14,070 | In the listenstore you catch the exception and then pass it on via Raise. However, here you do not catch the exception. You should catch the exception, but since this is a minor aspect of this page, perhaps show an error message when the count cannot be loaded in time. Then the rest of the page can still be rendered, rather than failing the whole page. | metabrainz-listenbrainz-server | py |
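A sketch of that suggestion (the exception type and message are placeholders, not the merged fix): wrap only the count lookup so a slow or failing listenstore degrades the count instead of the whole profile page.

    try:
        listen_count = db_conn.get_listen_count_for_user(user_name, need_exact=bool(need_exact))
        if not need_exact and listen_count > 10:
            # round off to the nearest 10, since the cached value may be stale
            listen_count = (listen_count / 10) * 10
    except Exception:  # placeholder: catch whatever the listenstore raises on timeout
        listen_count = None
        flash.error("Couldn't load your listen count right now; showing the rest of the page.")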
@@ -406,13 +406,14 @@ class TabbedBrowser(QWidget):
else:
yes_action()
- def close_tab(self, tab, *, add_undo=True, new_undo=True):
+ def close_tab(self, tab, *, add_undo=True, new_undo=True, transfer=False):
"""Close a tab.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
+ transfer: Whether the tab is closing because it is moving to a new window.
"""
if config.val.tabs.tabs_are_windows:
last_close = 'close' | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""The main tabbed browser widget."""
import collections
import functools
import weakref
import datetime
import dataclasses
from typing import (
Any, Deque, List, Mapping, MutableMapping, MutableSequence, Optional, Tuple)
from PyQt5.QtWidgets import QSizePolicy, QWidget, QApplication
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QTimer, QUrl
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.mainwindow import tabwidget, mainwindow
from qutebrowser.browser import signalfilter, browsertab, history
from qutebrowser.utils import (log, usertypes, utils, qtutils, objreg,
urlutils, message, jinja, version)
from qutebrowser.misc import quitter
@dataclasses.dataclass
class _UndoEntry:
"""Information needed for :undo."""
url: QUrl
history: bytes
index: int
pinned: bool
created_at: datetime.datetime = dataclasses.field(
default_factory=datetime.datetime.now)
UndoStackType = MutableSequence[MutableSequence[_UndoEntry]]
class TabDeque:
"""Class which manages the 'last visited' tab stack.
Instead of handling deletions by clearing old entries, they are handled by
checking if they exist on access. This allows us to save an iteration on
every tab delete.
Currently, we assume we will switch to the tab returned by any of the
getter functions. This is done because the on_switch functions will be
called upon switch, and we don't want to duplicate entries in the stack
for a single switch.
"""
def __init__(self) -> None:
size = config.val.tabs.focus_stack_size
if size < 0:
size = None
self._stack: Deque[weakref.ReferenceType[QWidget]] = collections.deque(
maxlen=size)
# Items that have been removed from the primary stack.
self._stack_deleted: List[weakref.ReferenceType[QWidget]] = []
self._ignore_next = False
self._keep_deleted_next = False
def on_switch(self, old_tab: QWidget) -> None:
"""Record tab switch events."""
if self._ignore_next:
self._ignore_next = False
self._keep_deleted_next = False
return
tab = weakref.ref(old_tab)
if self._stack_deleted and not self._keep_deleted_next:
self._stack_deleted = []
self._keep_deleted_next = False
self._stack.append(tab)
def prev(self, cur_tab: QWidget) -> QWidget:
"""Get the 'previous' tab in the stack.
Throws IndexError on failure.
"""
tab: Optional[QWidget] = None
while tab is None or tab.pending_removal or tab is cur_tab:
tab = self._stack.pop()()
self._stack_deleted.append(weakref.ref(cur_tab))
self._ignore_next = True
return tab
def next(self, cur_tab: QWidget, *, keep_overflow: bool = True) -> QWidget:
"""Get the 'next' tab in the stack.
Throws IndexError on failure.
"""
tab: Optional[QWidget] = None
while tab is None or tab.pending_removal or tab is cur_tab:
tab = self._stack_deleted.pop()()
# On next tab-switch, current tab will be added to stack as normal.
# However, we shouldn't wipe the overflow stack as normal.
if keep_overflow:
self._keep_deleted_next = True
return tab
def last(self, cur_tab: QWidget) -> QWidget:
"""Get the last tab.
Throws IndexError on failure.
"""
try:
return self.next(cur_tab, keep_overflow=False)
except IndexError:
return self.prev(cur_tab)
def update_size(self) -> None:
"""Update the maxsize of this TabDeque."""
newsize = config.val.tabs.focus_stack_size
if newsize < 0:
newsize = None
# We can't resize a collections.deque so just recreate it >:(
self._stack = collections.deque(self._stack, maxlen=newsize)
class TabDeletedError(Exception):
"""Exception raised when _tab_index is called for a deleted tab."""
class TabbedBrowser(QWidget):
"""A TabWidget with QWebViews inside.
Provides methods to manage tabs, convenience methods to interact with the
current tab (cur_*) and filters signals to re-emit them when they occurred
in the currently visible tab.
For all tab-specific signals (cur_*) emitted by a tab, this happens:
- the signal gets filtered with _filter_signals and self.cur_* gets
emitted if the signal occurred in the current tab.
Attributes:
search_text/search_options: Search parameters which are shared between
all tabs.
_win_id: The window ID this tabbedbrowser is associated with.
_filter: A SignalFilter instance.
_now_focused: The tab which is focused now.
_tab_insert_idx_left: Where to insert a new tab with
tabs.new_tab_position set to 'prev'.
_tab_insert_idx_right: Same as above, for 'next'.
undo_stack: List of lists of _UndoEntry objects of closed tabs.
is_shutting_down: Whether we're currently shutting down.
_local_marks: Jump markers local to each page
_global_marks: Jump markers used across all pages
default_window_icon: The qutebrowser window icon
is_private: Whether private browsing is on for this window.
Signals:
cur_progress: Progress of the current tab changed (load_progress).
cur_load_started: Current tab started loading (load_started)
cur_load_finished: Current tab finished loading (load_finished)
cur_url_changed: Current URL changed.
cur_link_hovered: Link hovered in current tab (link_hovered)
cur_scroll_perc_changed: Scroll percentage of current tab changed.
arg 1: x-position in %.
arg 2: y-position in %.
cur_load_status_changed: Loading status of current tab changed.
close_window: The last tab was closed, close this window.
resized: Emitted when the browser window has resized, so the completion
widget can adjust its size to it.
arg: The new size.
current_tab_changed: The current tab changed to the emitted tab.
new_tab: Emits the new WebView and its index when a new tab is opened.
shutting_down: This TabbedBrowser will be deleted soon.
"""
cur_progress = pyqtSignal(int)
cur_load_started = pyqtSignal()
cur_load_finished = pyqtSignal(bool)
cur_url_changed = pyqtSignal(QUrl)
cur_link_hovered = pyqtSignal(str)
cur_scroll_perc_changed = pyqtSignal(int, int)
cur_load_status_changed = pyqtSignal(usertypes.LoadStatus)
cur_fullscreen_requested = pyqtSignal(bool)
cur_caret_selection_toggled = pyqtSignal(browsertab.SelectionState)
close_window = pyqtSignal()
resized = pyqtSignal('QRect')
current_tab_changed = pyqtSignal(browsertab.AbstractTab)
new_tab = pyqtSignal(browsertab.AbstractTab, int)
shutting_down = pyqtSignal()
def __init__(self, *, win_id, private, parent=None):
if private:
assert not qtutils.is_single_process()
super().__init__(parent)
self.widget = tabwidget.TabWidget(win_id, parent=self)
self._win_id = win_id
self._tab_insert_idx_left = 0
self._tab_insert_idx_right = -1
self.is_shutting_down = False
self.widget.tabCloseRequested.connect(self.on_tab_close_requested)
self.widget.new_tab_requested.connect(self.tabopen)
self.widget.currentChanged.connect(self._on_current_changed)
self.cur_fullscreen_requested.connect(self.widget.tabBar().maybe_hide)
self.widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
# load_finished instead of load_started as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-65223
self.cur_load_finished.connect(self._leave_modes_on_load)
# This init is never used, it is immediately thrown away in the next
# line.
self.undo_stack: UndoStackType = collections.deque()
self._update_stack_size()
self._filter = signalfilter.SignalFilter(win_id, self)
self._now_focused = None
self.search_text = None
self.search_options: Mapping[str, Any] = {}
self._local_marks: MutableMapping[QUrl, MutableMapping[str, int]] = {}
self._global_marks: MutableMapping[str, Tuple[int, QUrl]] = {}
self.default_window_icon = self.widget.window().windowIcon()
self.is_private = private
self.tab_deque = TabDeque()
config.instance.changed.connect(self._on_config_changed)
quitter.instance.shutting_down.connect(self.shutdown)
def _update_stack_size(self):
newsize = config.instance.get('tabs.undo_stack_size')
if newsize < 0:
newsize = None
# We can't resize a collections.deque so just recreate it >:(
self.undo_stack = collections.deque(self.undo_stack, maxlen=newsize)
def __repr__(self):
return utils.get_repr(self, count=self.widget.count())
@pyqtSlot(str)
def _on_config_changed(self, option):
if option == 'tabs.favicons.show':
self._update_favicons()
elif option == 'window.title_format':
self._update_window_title()
elif option == 'tabs.undo_stack_size':
self._update_stack_size()
elif option in ['tabs.title.format', 'tabs.title.format_pinned']:
self.widget.update_tab_titles()
elif option == "tabs.focus_stack_size":
self.tab_deque.update_size()
def _tab_index(self, tab):
"""Get the index of a given tab.
Raises TabDeletedError if the tab doesn't exist anymore.
"""
try:
idx = self.widget.indexOf(tab)
except RuntimeError as e:
log.webview.debug("Got invalid tab ({})!".format(e))
raise TabDeletedError(e)
if idx == -1:
log.webview.debug("Got invalid tab (index is -1)!")
raise TabDeletedError("index is -1!")
return idx
def widgets(self):
"""Get a list of open tab widgets.
We don't implement this as generator so we can delete tabs while
iterating over the list.
"""
widgets = []
for i in range(self.widget.count()):
widget = self.widget.widget(i)
if widget is None:
log.webview.debug( # type: ignore[unreachable]
"Got None-widget in tabbedbrowser!")
else:
widgets.append(widget)
return widgets
def _update_window_title(self, field=None):
"""Change the window title to match the current tab.
Args:
idx: The tab index to update.
field: A field name which was updated. If given, the title
is only set if the given field is in the template.
"""
title_format = config.cache['window.title_format']
if field is not None and ('{' + field + '}') not in title_format:
return
idx = self.widget.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating window title because index is -1")
return
fields = self.widget.get_tab_fields(idx)
fields['id'] = self._win_id
title = title_format.format(**fields)
self.widget.window().setWindowTitle(title)
def _connect_tab_signals(self, tab):
"""Set up the needed signals for tab."""
# filtered signals
tab.link_hovered.connect(
self._filter.create(self.cur_link_hovered, tab))
tab.load_progress.connect(
self._filter.create(self.cur_progress, tab))
tab.load_finished.connect(
self._filter.create(self.cur_load_finished, tab))
tab.load_started.connect(
self._filter.create(self.cur_load_started, tab))
tab.scroller.perc_changed.connect(
self._filter.create(self.cur_scroll_perc_changed, tab))
tab.url_changed.connect(
self._filter.create(self.cur_url_changed, tab))
tab.load_status_changed.connect(
self._filter.create(self.cur_load_status_changed, tab))
tab.fullscreen_requested.connect(
self._filter.create(self.cur_fullscreen_requested, tab))
tab.caret.selection_toggled.connect(
self._filter.create(self.cur_caret_selection_toggled, tab))
# misc
tab.scroller.perc_changed.connect(self._on_scroll_pos_changed)
tab.scroller.before_jump_requested.connect(lambda: self.set_mark("'"))
tab.url_changed.connect(
functools.partial(self._on_url_changed, tab))
tab.title_changed.connect(
functools.partial(self._on_title_changed, tab))
tab.icon_changed.connect(
functools.partial(self._on_icon_changed, tab))
tab.pinned_changed.connect(
functools.partial(self._on_pinned_changed, tab))
tab.load_progress.connect(
functools.partial(self._on_load_progress, tab))
tab.load_finished.connect(
functools.partial(self._on_load_finished, tab))
tab.load_started.connect(
functools.partial(self._on_load_started, tab))
tab.load_status_changed.connect(
functools.partial(self._on_load_status_changed, tab))
tab.window_close_requested.connect(
functools.partial(self._on_window_close_requested, tab))
tab.renderer_process_terminated.connect(
functools.partial(self._on_renderer_process_terminated, tab))
tab.audio.muted_changed.connect(
functools.partial(self._on_audio_changed, tab))
tab.audio.recently_audible_changed.connect(
functools.partial(self._on_audio_changed, tab))
tab.new_tab_requested.connect(self.tabopen)
if not self.is_private:
tab.history_item_triggered.connect(
history.web_history.add_from_tab)
def current_url(self):
"""Get the URL of the current tab.
Intended to be used from command handlers.
Return:
The current URL as QUrl.
"""
idx = self.widget.currentIndex()
return self.widget.tab_url(idx)
def shutdown(self):
"""Try to shut down all tabs cleanly."""
self.is_shutting_down = True
# Reverse tabs so we don't have to recalculate tab titles over and over
# Removing first causes [2..-1] to be recomputed
# Removing the last causes nothing to be recomputed
for idx, tab in enumerate(reversed(self.widgets())):
self._remove_tab(tab, new_undo=idx == 0)
self.shutting_down.emit()
def tab_close_prompt_if_pinned(
self, tab, force, yes_action,
text="Are you sure you want to close a pinned tab?"):
"""Helper method for tab_close.
If tab is pinned, prompt. If not, run yes_action.
If tab is destroyed, abort question.
"""
if tab.data.pinned and not force:
message.confirm_async(
title='Pinned Tab',
text=text,
yes_action=yes_action, default=False, abort_on=[tab.destroyed])
else:
yes_action()
def close_tab(self, tab, *, add_undo=True, new_undo=True):
"""Close a tab.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
"""
if config.val.tabs.tabs_are_windows:
last_close = 'close'
else:
last_close = config.val.tabs.last_close
count = self.widget.count()
if last_close == 'ignore' and count == 1:
return
self._remove_tab(tab, add_undo=add_undo, new_undo=new_undo)
if count == 1: # We just closed the last tab above.
if last_close == 'close':
self.close_window.emit()
elif last_close == 'blank':
self.load_url(QUrl('about:blank'), newtab=True)
elif last_close == 'startpage':
for url in config.val.url.start_pages:
self.load_url(url, newtab=True)
elif last_close == 'default-page':
self.load_url(config.val.url.default_page, newtab=True)
def _remove_tab(self, tab, *, add_undo=True, new_undo=True, crashed=False):
"""Remove a tab from the tab list and delete it properly.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
crashed: Whether we're closing a tab with crashed renderer process.
"""
idx = self.widget.indexOf(tab)
if idx == -1:
if crashed:
return
raise TabDeletedError("tab {} is not contained in "
"TabbedWidget!".format(tab))
if tab is self._now_focused:
self._now_focused = None
tab.pending_removal = True
if tab.url().isEmpty():
# There are some good reasons why a URL could be empty
# (target="_blank" with a download, see [1]), so we silently ignore
# this.
# [1] https://github.com/qutebrowser/qutebrowser/issues/163
pass
elif not tab.url().isValid():
# We display a warning for URLs which are not empty but invalid -
# but we don't return here because we want the tab to close either
# way.
urlutils.invalid_url_error(tab.url(), "saving tab")
elif add_undo:
try:
history_data = tab.history.private_api.serialize()
except browsertab.WebTabError:
pass # special URL
else:
entry = _UndoEntry(url=tab.url(),
history=history_data,
index=idx,
pinned=tab.data.pinned)
if new_undo or not self.undo_stack:
self.undo_stack.append([entry])
else:
self.undo_stack[-1].append(entry)
tab.private_api.shutdown()
self.widget.removeTab(idx)
tab.deleteLater()
def undo(self, depth=1):
"""Undo removing of a tab or tabs."""
# Remove unused tab which may be created after the last tab is closed
last_close = config.val.tabs.last_close
use_current_tab = False
last_close_replaces = last_close in [
'blank', 'startpage', 'default-page'
]
only_one_tab_open = self.widget.count() == 1
if only_one_tab_open and last_close_replaces:
no_history = len(self.widget.widget(0).history) == 1
urls = {
'blank': QUrl('about:blank'),
'startpage': config.val.url.start_pages[0],
'default-page': config.val.url.default_page,
}
first_tab_url = self.widget.widget(0).url()
last_close_urlstr = urls[last_close].toString().rstrip('/')
first_tab_urlstr = first_tab_url.toString().rstrip('/')
last_close_url_used = first_tab_urlstr == last_close_urlstr
use_current_tab = (only_one_tab_open and no_history and
last_close_url_used)
entries = self.undo_stack[-depth]
del self.undo_stack[-depth]
for entry in reversed(entries):
if use_current_tab:
newtab = self.widget.widget(0)
use_current_tab = False
else:
newtab = self.tabopen(background=False, idx=entry.index)
newtab.history.private_api.deserialize(entry.history)
newtab.set_pinned(entry.pinned)
@pyqtSlot('QUrl', bool)
def load_url(self, url, newtab):
"""Open a URL, used as a slot.
Args:
url: The URL to open as QUrl.
newtab: True to open URL in a new tab, False otherwise.
"""
qtutils.ensure_valid(url)
if newtab or self.widget.currentWidget() is None:
self.tabopen(url, background=False)
else:
self.widget.currentWidget().load_url(url)
@pyqtSlot(int)
def on_tab_close_requested(self, idx):
"""Close a tab via an index."""
tab = self.widget.widget(idx)
if tab is None:
log.webview.debug( # type: ignore[unreachable]
"Got invalid tab {} for index {}!".format(tab, idx))
return
self.tab_close_prompt_if_pinned(
tab, False, lambda: self.close_tab(tab))
@pyqtSlot(browsertab.AbstractTab)
def _on_window_close_requested(self, widget):
"""Close a tab with a widget given."""
try:
self.close_tab(widget)
except TabDeletedError:
log.webview.debug("Requested to close {!r} which does not "
"exist!".format(widget))
@pyqtSlot('QUrl')
@pyqtSlot('QUrl', bool)
@pyqtSlot('QUrl', bool, bool)
def tabopen(
self, url: QUrl = None,
background: bool = None,
related: bool = True,
idx: int = None,
) -> browsertab.AbstractTab:
"""Open a new tab with a given URL.
Inner logic for open-tab and open-tab-bg.
Also connect all the signals we need to _filter_signals.
Args:
url: The URL to open as QUrl or None for an empty tab.
background: Whether to open the tab in the background.
if None, the `tabs.background` setting decides.
related: Whether the tab was opened from another existing tab.
If this is set, the new position might be different. With
the default settings we handle it like Chromium does:
- Tabs from clicked links etc. are to the right of
the current (related=True).
- Explicitly opened tabs are at the very right
(related=False)
idx: The index where the new tab should be opened.
Return:
The opened WebView instance.
"""
if url is not None:
qtutils.ensure_valid(url)
log.webview.debug("Creating new tab with URL {}, background {}, "
"related {}, idx {}".format(
url, background, related, idx))
prev_focus = QApplication.focusWidget()
if config.val.tabs.tabs_are_windows and self.widget.count() > 0:
window = mainwindow.MainWindow(private=self.is_private)
window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=window.win_id)
return tabbed_browser.tabopen(url=url, background=background,
related=related)
tab = browsertab.create(win_id=self._win_id,
private=self.is_private,
parent=self.widget)
self._connect_tab_signals(tab)
if idx is None:
idx = self._get_new_tab_idx(related)
self.widget.insertTab(idx, tab, "")
if url is not None:
tab.load_url(url)
if background is None:
background = config.val.tabs.background
if background:
# Make sure the background tab has the correct initial size.
# With a foreground tab, it's going to be resized correctly by the
# layout anyways.
tab.resize(self.widget.currentWidget().size())
self.widget.tab_index_changed.emit(self.widget.currentIndex(),
self.widget.count())
# Refocus webview in case we lost it by spawning a bg tab
self.widget.currentWidget().setFocus()
else:
self.widget.setCurrentWidget(tab)
mode = modeman.instance(self._win_id).mode
if mode in [usertypes.KeyMode.command, usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno]:
# If we were in a command prompt, restore old focus
# The above commands need to be run to switch tabs
if prev_focus is not None:
prev_focus.setFocus()
tab.show()
self.new_tab.emit(tab, idx)
return tab
def _get_new_tab_idx(self, related):
"""Get the index of a tab to insert.
Args:
related: Whether the tab was opened from another tab (as a "child")
Return:
The index of the new tab.
"""
if related:
pos = config.val.tabs.new_position.related
else:
pos = config.val.tabs.new_position.unrelated
if pos == 'prev':
if config.val.tabs.new_position.stacking:
idx = self._tab_insert_idx_left
# On first sight, we'd think we have to decrement
# self._tab_insert_idx_left here, as we want the next tab to be
# *before* the one we just opened. However, since we opened a
# tab *before* the currently focused tab, indices will shift by
# 1 automatically.
else:
idx = self.widget.currentIndex()
elif pos == 'next':
if config.val.tabs.new_position.stacking:
idx = self._tab_insert_idx_right
else:
idx = self.widget.currentIndex() + 1
self._tab_insert_idx_right += 1
elif pos == 'first':
idx = 0
elif pos == 'last':
idx = -1
else:
raise ValueError("Invalid tabs.new_position '{}'.".format(pos))
log.webview.debug("tabs.new_position {} -> opening new tab at {}, "
"next left: {} / right: {}".format(
pos, idx, self._tab_insert_idx_left,
self._tab_insert_idx_right))
return idx
def _update_favicons(self):
"""Update favicons when config was changed."""
for tab in self.widgets():
self.widget.update_tab_favicon(tab)
@pyqtSlot()
def _on_load_started(self, tab):
"""Clear icon and update title when a tab started loading.
Args:
tab: The tab where the signal belongs to.
"""
if tab.data.keep_icon:
tab.data.keep_icon = False
else:
if (config.cache['tabs.tabs_are_windows'] and
tab.data.should_show_icon()):
self.widget.window().setWindowIcon(self.default_window_icon)
@pyqtSlot()
def _on_load_status_changed(self, tab):
"""Update tab/window titles if the load status changed."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.update_tab_title(idx)
if idx == self.widget.currentIndex():
self._update_window_title()
@pyqtSlot()
def _leave_modes_on_load(self):
"""Leave insert/hint mode when loading started."""
try:
url = self.current_url()
if not url.isValid():
url = None
except qtutils.QtValueError:
url = None
if config.instance.get('input.insert_mode.leave_on_load',
url=url):
modeman.leave(self._win_id, usertypes.KeyMode.insert,
'load started', maybe=True)
else:
log.modes.debug("Ignoring leave_on_load request due to setting.")
if config.cache['hints.leave_on_load']:
modeman.leave(self._win_id, usertypes.KeyMode.hint,
'load started', maybe=True)
else:
log.modes.debug("Ignoring leave_on_load request due to setting.")
@pyqtSlot(browsertab.AbstractTab, str)
def _on_title_changed(self, tab, text):
"""Set the title of a tab.
Slot for the title_changed signal of any tab.
Args:
tab: The WebView where the title was changed.
text: The text to set.
"""
if not text:
log.webview.debug("Ignoring title change to '{}'.".format(text))
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
log.webview.debug("Changing title for idx {} to '{}'".format(
idx, text))
self.widget.set_page_title(idx, text)
if idx == self.widget.currentIndex():
self._update_window_title()
@pyqtSlot(browsertab.AbstractTab, QUrl)
def _on_url_changed(self, tab, url):
"""Set the new URL as title if there's no title yet.
Args:
tab: The WebView where the title was changed.
url: The new URL.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if not self.widget.page_title(idx):
self.widget.set_page_title(idx, url.toDisplayString())
@pyqtSlot(browsertab.AbstractTab)
def _on_icon_changed(self, tab):
"""Set the icon of a tab.
Slot for the iconChanged signal of any tab.
Args:
tab: The WebView where the title was changed.
"""
try:
self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.update_tab_favicon(tab)
@pyqtSlot(usertypes.KeyMode)
def on_mode_entered(self, mode):
"""Save input mode when tabs.mode_on_change = restore."""
if (config.val.tabs.mode_on_change == 'restore' and
mode in modeman.INPUT_MODES):
tab = self.widget.currentWidget()
if tab is not None:
tab.data.input_mode = mode
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left."""
widget = self.widget.currentWidget()
if widget is None:
return # type: ignore[unreachable]
if mode in [usertypes.KeyMode.command] + modeman.PROMPT_MODES:
log.modes.debug("Left status-input mode, focusing {!r}".format(
widget))
widget.setFocus()
if config.val.tabs.mode_on_change == 'restore':
widget.data.input_mode = usertypes.KeyMode.normal
@pyqtSlot(int)
def _on_current_changed(self, idx):
"""Add prev tab to stack and leave hinting mode when focus changed."""
mode_on_change = config.val.tabs.mode_on_change
if idx == -1 or self.is_shutting_down:
# closing the last tab (before quitting) or shutting down
return
tab = self.widget.widget(idx)
if tab is None:
log.webview.debug( # type: ignore[unreachable]
"on_current_changed got called with invalid index {}"
.format(idx))
return
log.modes.debug("Current tab changed, focusing {!r}".format(tab))
tab.setFocus()
modes_to_leave = [usertypes.KeyMode.hint, usertypes.KeyMode.caret]
mm_instance = modeman.instance(self._win_id)
current_mode = mm_instance.mode
log.modes.debug("Mode before tab change: {} (mode_on_change = {})"
.format(current_mode.name, mode_on_change))
if mode_on_change == 'normal':
modes_to_leave += modeman.INPUT_MODES
for mode in modes_to_leave:
modeman.leave(self._win_id, mode, 'tab changed', maybe=True)
if (mode_on_change == 'restore' and
current_mode not in modeman.PROMPT_MODES):
modeman.enter(self._win_id, tab.data.input_mode, 'restore')
if self._now_focused is not None:
self.tab_deque.on_switch(self._now_focused)
log.modes.debug("Mode after tab change: {} (mode_on_change = {})"
.format(current_mode.name, mode_on_change))
self._now_focused = tab
self.current_tab_changed.emit(tab)
QTimer.singleShot(0, self._update_window_title)
self._tab_insert_idx_left = self.widget.currentIndex()
self._tab_insert_idx_right = self.widget.currentIndex() + 1
@pyqtSlot()
def on_cmd_return_pressed(self):
"""Set focus when the commandline closes."""
log.modes.debug("Commandline closed, focusing {!r}".format(self))
def _on_load_progress(self, tab, perc):
"""Adjust tab indicator on load progress."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
start = config.cache['colors.tabs.indicator.start']
stop = config.cache['colors.tabs.indicator.stop']
system = config.cache['colors.tabs.indicator.system']
color = qtutils.interpolate_color(start, stop, perc, system)
self.widget.set_tab_indicator_color(idx, color)
self.widget.update_tab_title(idx)
if idx == self.widget.currentIndex():
self._update_window_title()
def _on_load_finished(self, tab, ok):
"""Adjust tab indicator when loading finished."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if ok:
start = config.cache['colors.tabs.indicator.start']
stop = config.cache['colors.tabs.indicator.stop']
system = config.cache['colors.tabs.indicator.system']
color = qtutils.interpolate_color(start, stop, 100, system)
else:
color = config.cache['colors.tabs.indicator.error']
self.widget.set_tab_indicator_color(idx, color)
if idx == self.widget.currentIndex():
tab.private_api.handle_auto_insert_mode(ok)
@pyqtSlot()
def _on_scroll_pos_changed(self):
"""Update tab and window title when scroll position changed."""
idx = self.widget.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating scroll position because index is "
"-1")
return
self._update_window_title('scroll_pos')
self.widget.update_tab_title(idx, 'scroll_pos')
def _on_pinned_changed(self, tab):
"""Update the tab's pinned status."""
idx = self.widget.indexOf(tab)
self.widget.update_tab_favicon(tab)
self.widget.update_tab_title(idx)
def _on_audio_changed(self, tab, _muted):
"""Update audio field in tab when mute or recentlyAudible changed."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.update_tab_title(idx, 'audio')
if idx == self.widget.currentIndex():
self._update_window_title('audio')
def _on_renderer_process_terminated(self, tab, status, code):
"""Show an error when a renderer process terminated."""
if status == browsertab.TerminationStatus.normal:
return
messages = {
browsertab.TerminationStatus.abnormal: "Renderer process exited",
browsertab.TerminationStatus.crashed: "Renderer process crashed",
browsertab.TerminationStatus.killed: "Renderer process was killed",
browsertab.TerminationStatus.unknown: "Renderer process did not start",
}
msg = messages[status] + f" (status {code})"
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-91715
versions = version.qtwebengine_versions()
is_qtbug_91715 = (
status == browsertab.TerminationStatus.unknown and
code == 1002 and
versions.webengine == utils.VersionNumber(5, 15, 3))
def show_error_page(html):
tab.set_html(html)
log.webview.error(msg)
if is_qtbug_91715:
log.webview.error(msg)
log.webview.error('')
log.webview.error(
'NOTE: If you see this and "Network service crashed, restarting '
'service.", please see:')
log.webview.error('https://github.com/qutebrowser/qutebrowser/issues/6235')
log.webview.error(
'You can set the "qt.workarounds.locale" setting in qutebrowser to '
'work around the issue.')
log.webview.error(
'A proper fix is likely available in QtWebEngine soon (which is why '
'the workaround is disabled by default).')
log.webview.error('')
else:
url_string = tab.url(requested=True).toDisplayString()
error_page = jinja.render(
'error.html', title="Error loading {}".format(url_string),
url=url_string, error=msg)
QTimer.singleShot(100, lambda: show_error_page(error_page))
def resizeEvent(self, e):
"""Extend resizeEvent of QWidget to emit a resized signal afterwards.
Args:
e: The QResizeEvent
"""
super().resizeEvent(e)
self.resized.emit(self.geometry())
def wheelEvent(self, e):
"""Override wheelEvent of QWidget to forward it to the focused tab.
Args:
e: The QWheelEvent
"""
if self._now_focused is not None:
self._now_focused.wheelEvent(e)
else:
e.ignore()
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
# strip the fragment as it may interfere with scrolling
try:
url = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
# show an error only if the mark is not automatically set
if key != "'":
message.error("Failed to set mark: url invalid")
return
point = self.widget.currentWidget().scroller.pos_px()
if key.isupper():
self._global_marks[key] = point, url
else:
if url not in self._local_marks:
self._local_marks[url] = {}
self._local_marks[url][key] = point
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
try:
# consider urls that differ only in fragment to be identical
urlkey = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
urlkey = None
tab = self.widget.currentWidget()
if key.isupper():
if key in self._global_marks:
point, url = self._global_marks[key]
def callback(ok):
"""Scroll once loading finished."""
if ok:
self.cur_load_finished.disconnect(callback)
tab.scroller.to_point(point)
self.load_url(url, newtab=False)
self.cur_load_finished.connect(callback)
else:
message.error("Mark {} is not set".format(key))
elif urlkey is None:
message.error("Current URL is invalid!")
elif urlkey in self._local_marks and key in self._local_marks[urlkey]:
point = self._local_marks[urlkey][key]
# save the pre-jump position in the special ' mark
# this has to happen after we read the mark, otherwise jump_mark
# "'" would just jump to the current position every time
tab.scroller.before_jump_requested.emit()
tab.scroller.to_point(point)
else:
message.error("Mark {} is not set".format(key))
| 1 | 26,501 | wouldn't it be simpler to just add `or transfer` here? That way the more complicated set of conditionals down below don't have to get more clauses. | qutebrowser-qutebrowser | py |
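A sketch of the simplification the reviewer appears to have in mind, assuming the comment is attached to the tabs_are_windows check near the top of close_tab(); this illustrates the idea only and is not the merged code (the trailing ellipsis stands for the unchanged remainder of the method).

    def close_tab(self, tab, *, add_undo=True, new_undo=True, transfer=False):
        # Reviewer's suggestion (assumed): fold `or transfer` into the
        # existing check so a transferred tab is treated like a plain
        # window-style close and the branches further down need no extra
        # `transfer` clauses.
        if config.val.tabs.tabs_are_windows or transfer:
            last_close = 'close'
        else:
            last_close = config.val.tabs.last_close
        ...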
@@ -13,8 +13,8 @@
# limitations under the License.
"""Wrapper for the BigQuery API client."""
-from googleapiclient import errors
from httplib2 import HttpLib2Error
+from googleapiclient import errors
from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the BigQuery API client."""
from googleapiclient import errors
from httplib2 import HttpLib2Error
from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.gcp_api import repository_mixins
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
class BigQueryRepositoryClient(_base_repository.BaseRepositoryClient):
"""Big Query API Respository."""
def __init__(self,
quota_max_calls=None,
quota_period=100.0,
use_rate_limiter=True):
"""Constructor.
Args:
quota_max_calls (int): Allowed requests per <quota_period> for the
API.
quota_period (float): The time period to track requests over.
use_rate_limiter (bool): Set to false to disable the use of a rate
limiter for this service.
"""
if not quota_max_calls:
use_rate_limiter = False
self._projects = None
self._datasets = None
super(BigQueryRepositoryClient, self).__init__(
'bigquery', versions=['v2'],
quota_max_calls=quota_max_calls,
quota_period=quota_period,
use_rate_limiter=use_rate_limiter)
# Turn off docstrings for properties.
# pylint: disable=missing-return-doc, missing-return-type-doc
@property
def projects(self):
"""Returns a _BigQueryProjectsRepository instance."""
if not self._projects:
self._projects = self._init_repository(
_BigQueryProjectsRepository)
return self._projects
@property
def datasets(self):
"""Returns a _BigQueryDatasetsRepository instance."""
if not self._datasets:
self._datasets = self._init_repository(
_BigQueryDatasetsRepository)
return self._datasets
# pylint: enable=missing-return-doc, missing-return-type-doc
class _BigQueryProjectsRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Big Query Projects repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_BigQueryProjectsRepository, self).__init__(
key_field=None, component='projects', **kwargs)
class _BigQueryDatasetsRepository(
repository_mixins.GetQueryMixin,
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Big Query Datasets repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_BigQueryDatasetsRepository, self).__init__(
key_field='projectId', entity_field='datasetId',
component='datasets', **kwargs)
class BigQueryClient(object):
"""BigQuery Client manager."""
DEFAULT_QUOTA_PERIOD = 100.0
def __init__(self, global_configs, **kwargs):
"""Initialize.
Args:
global_configs (dict): Forseti config.
**kwargs (dict): The kwargs.
"""
max_calls = global_configs.get('max_bigquery_api_calls_per_100_seconds')
self.repository = BigQueryRepositoryClient(
quota_max_calls=max_calls,
quota_period=self.DEFAULT_QUOTA_PERIOD,
use_rate_limiter=kwargs.get('use_rate_limiter', True))
def get_bigquery_projectids(self):
"""Request and page through bigquery projectids.
Returns:
list: A list of project_ids enabled for bigquery.
['project-id',
'project-id',
'...']
If there are no project_ids enabled for bigquery an empty list will
be returned.
"""
try:
results = self.repository.projects.list(
fields='nextPageToken,projects/id')
flattened_results = api_helpers.flatten_list_results(
results, 'projects')
LOGGER.debug('Request and page through bigquery '
                         'projectids, flattened_results = %s',
flattened_results)
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('bigquery', e)
project_ids = [result.get('id') for result in flattened_results
if 'id' in result]
return project_ids
def get_datasets_for_projectid(self, project_id):
"""Return BigQuery datasets stored in the requested project_id.
Args:
project_id (str): String representing the project id.
Returns:
list: A list of datasetReference objects for a given project_id.
[{'datasetId': 'dataset-id',
'projectId': 'project-id'},
{...}]
"""
try:
results = self.repository.datasets.list(
resource=project_id, all=True)
flattened_results = api_helpers.flatten_list_results(
results, 'datasets')
LOGGER.debug('Getting bigquery datasets for a given project,'
' project_id = %s, flattened_results = %s',
project_id, flattened_results)
return flattened_results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
def get_dataset_access(self, project_id, dataset_id):
"""Return the access portion of the dataset resource object.
Args:
project_id (str): String representing the project id.
dataset_id (str): String representing the dataset id.
Returns:
list: A list of access lists for a given project_id and dataset_id.
[{'role': 'WRITER', 'specialGroup': 'projectWriters'},
{'role': 'OWNER', 'specialGroup': 'projectOwners'},
{'role': 'OWNER', 'userByEmail': 'user@domain.com'},
{'role': 'READER', 'specialGroup': 'projectReaders'}]
"""
try:
results = self.repository.datasets.get(resource=project_id,
target=dataset_id,
fields='access')
access = results.get('access', [])
            LOGGER.debug('Getting the access portion of the dataset'
' resource object, project_id = %s, dataset_id = %s,'
' results = %s', project_id, dataset_id, access)
return access
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
| 1 | 29,252 | ditto; please fix everywhere | forseti-security-forseti-security | py |
@@ -164,6 +164,18 @@ Home directory can be found in a shared folder called "home"
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
+ }, {
+ Name: "subsystem",
+ Default: "sftp",
+ Help: "Specifies the SSH2 subsystem on the remote host.",
+ Advanced: true,
+ }, {
+ Name: "server_command",
+ Default: "",
+ Help: `Specifies the path or command to run a sftp server on the remote host.
+
+The subsystem option is ignored when server_command is defined.`,
+ Advanced: true,
}},
}
fs.Register(fsi) | 1 | // Package sftp provides a filesystem interface using github.com/pkg/sftp
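A rough Go sketch of how the two new options could plausibly be wired into the connection setup, matching the behaviour the help text describes (server_command takes precedence, otherwise the configured subsystem is requested). This is an assumption for illustration, not necessarily the backend's actual implementation; it uses only the ssh and sftp packages already imported by this file.

// newSftpClient is a hypothetical helper: start either an explicit remote
// sftp server command or the configured SSH2 subsystem, then speak SFTP
// over the session's stdin/stdout pipes.
func newSftpClient(conn *ssh.Client, subsystem, serverCommand string) (*sftp.Client, error) {
	s, err := conn.NewSession()
	if err != nil {
		return nil, err
	}
	pw, err := s.StdinPipe()
	if err != nil {
		return nil, err
	}
	pr, err := s.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if serverCommand != "" {
		// An explicit server command overrides the subsystem option.
		err = s.Start(serverCommand)
	} else {
		err = s.RequestSubsystem(subsystem)
	}
	if err != nil {
		return nil, err
	}
	return sftp.NewClientPipe(pr, pw)
}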
// +build !plan9
package sftp
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
)
const (
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
currentUser = readCurrentUser()
)
func init() {
fsi := &fs.RegInfo{
Name: "sftp",
Description: "SSH/SFTP Connection",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "SSH host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "example.com",
Help: "Connect to example.com",
}},
}, {
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser,
}, {
Name: "port",
Help: "SSH port, leave blank to use default (22)",
}, {
Name: "pass",
Help: "SSH password, leave blank to use ssh-agent.",
IsPassword: true,
}, {
Name: "key_pem",
Help: "Raw PEM-encoded private key, If specified, will override key_file parameter.",
}, {
Name: "key_file",
Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
}, {
Name: "key_file_pass",
Help: `The passphrase to decrypt the PEM-encoded private key file.
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.
When key-file is also set, the ".pub" file of the specified key-file is read and only the associated key is
requested from the ssh-agent. This allows to avoid ` + "`Too many authentication failures for *username*`" + ` errors
when the ssh-agent contains many keys.`,
Default: false,
}, {
Name: "use_insecure_cipher",
Help: `Enable the use of insecure ciphers and key exchange methods.
This enables the use of the following insecure ciphers and key exchange methods:
- aes128-cbc
- aes192-cbc
- aes256-cbc
- 3des-cbc
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1
Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
Default: false,
Examples: []fs.OptionExample{
{
Value: "false",
Help: "Use default Cipher list.",
}, {
Value: "true",
Help: "Enables the use of the aes128-cbc cipher and diffie-hellman-group-exchange-sha256, diffie-hellman-group-exchange-sha1 key exchange.",
},
},
}, {
Name: "disable_hashcheck",
Default: false,
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for SFTP password when needed.
If this is set and no password is supplied then rclone will:
- ask for a password
- not contact the ssh agent
`,
Advanced: true,
}, {
Name: "path_override",
Default: "",
Help: `Override path used by SSH connection.
This allows checksum calculation when SFTP and SSH paths are
different. This issue affects among others Synology NAS boxes.
Shared folders can be found in directories representing volumes
rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory
Home directory can be found in a shared folder called "home"
rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
Advanced: true,
}, {
Name: "set_modtime",
Default: true,
Help: "Set the modified time on the remote if set.",
Advanced: true,
}, {
Name: "md5sum_command",
Default: "",
Help: "The command used to read md5 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "sha1sum_command",
Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "skip_links",
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
}
// Fs stores the interface to the remote SFTP files
type Fs struct {
name string
root string
absRoot string
opt Options // parsed options
m configmap.Mapper // config
features *fs.Features // optional features
config *ssh.ClientConfig
url string
mkdirLock *stringLock
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
remote string
size int64 // size of the object
modTime time.Time // modification time of the object
mode os.FileMode // mode bits from the file
md5sum *string // Cached MD5 checksum
sha1sum *string // Cached SHA1 checksum
}
// readCurrentUser finds the current user name or "" if not found
func readCurrentUser() (userName string) {
usr, err := user.Current()
if err == nil {
return usr.Username
}
// Fall back to reading $USER then $LOGNAME
userName = os.Getenv("USER")
if userName != "" {
return userName
}
return os.Getenv("LOGNAME")
}
// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fshttp.NewDialer(fs.Config)
conn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
if err != nil {
return nil, err
}
fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
return ssh.NewClient(c, chans, reqs), nil
}
// conn encapsulates an ssh client and corresponding sftp client
type conn struct {
sshClient *ssh.Client
sftpClient *sftp.Client
err chan error
}
// Wait for connection to close
func (c *conn) wait() {
c.err <- c.sshClient.Conn.Wait()
}
// Closes the connection
func (c *conn) close() error {
sftpErr := c.sftpClient.Close()
sshErr := c.sshClient.Close()
if sftpErr != nil {
return sftpErr
}
return sshErr
}
// Returns an error if closed
func (c *conn) closed() error {
select {
case err := <-c.err:
return err
default:
}
return nil
}
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
// Rate limit rate of new connections
c = &conn{
err: make(chan error, 1),
}
c.sshClient, err = f.dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH")
}
c.sftpClient, err = sftp.NewClient(c.sshClient)
if err != nil {
_ = c.sshClient.Close()
return nil, errors.Wrap(err, "couldn't initialise SFTP")
}
go c.wait()
return c, nil
}
// Get an SFTP connection from the pool, or open a new one
func (f *Fs) getSftpConnection() (c *conn, err error) {
f.poolMu.Lock()
for len(f.pool) > 0 {
c = f.pool[0]
f.pool = f.pool[1:]
err := c.closed()
if err == nil {
break
}
fs.Errorf(f, "Discarding closed SSH connection: %v", err)
c = nil
}
f.poolMu.Unlock()
if c != nil {
return c, nil
}
err = f.pacer.Call(func() (bool, error) {
c, err = f.sftpConnection()
if err != nil {
return true, err
}
return false, nil
})
return c, err
}
// Return an SFTP connection to the pool
//
// It nils the pointed to connection out so it can't be reused
//
// if err is not nil then it checks the connection is alive using a
// Getwd request
func (f *Fs) putSftpConnection(pc **conn, err error) {
c := *pc
*pc = nil
if err != nil {
// work out if this is an expected error
underlyingErr := errors.Cause(err)
isRegularError := false
switch underlyingErr {
case os.ErrNotExist:
isRegularError = true
default:
switch underlyingErr.(type) {
case *sftp.StatusError, *os.PathError:
isRegularError = true
}
}
// If not a regular SFTP error code then check the connection
if !isRegularError {
_, nopErr := c.sftpClient.Getwd()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
_ = c.close()
return
}
fs.Debugf(f, "Connection OK after error: %v", err)
}
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
f.poolMu.Unlock()
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.User == "" {
opt.User = currentUser
}
if opt.Port == "" {
opt.Port = "22"
}
sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: fs.Config.ConnectTimeout,
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
}
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
}
keyFile := env.ShellExpand(opt.KeyFile)
//keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
}
signers, err := sshAgentClient.Signers()
if err != nil {
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
}
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
if err != nil {
return nil, errors.Wrap(err, "failed to read public key file")
}
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to parse public key file")
}
pubM := pub.Marshal()
found := false
for _, s := range signers {
if bytes.Equal(pubM, s.PublicKey().Marshal()) {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(s))
found = true
break
}
}
if !found {
return nil, errors.New("private key not found in the ssh-agent")
}
} else {
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
}
}
// Load key file if specified
if keyFile != "" || opt.KeyPem != "" {
var key []byte
if opt.KeyPem == "" {
key, err = ioutil.ReadFile(keyFile)
if err != nil {
return nil, errors.Wrap(err, "failed to read private key file")
}
} else {
// wrap in quotes because the config is a coming as a literal without them.
opt.KeyPem, err = strconv.Unquote("\"" + opt.KeyPem + "\"")
if err != nil {
return nil, errors.Wrap(err, "pem key not formatted properly")
}
key = []byte(opt.KeyPem)
}
clearpass := ""
if opt.KeyFilePass != "" {
clearpass, err = obscure.Reveal(opt.KeyFilePass)
if err != nil {
return nil, err
}
}
var signer ssh.Signer
if clearpass == "" {
signer, err = ssh.ParsePrivateKey(key)
} else {
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
}
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
}
// Auth from password if specified
if opt.Pass != "" {
clearpass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, err
}
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
// Ask for password if none was defined and we're allowed to
if opt.Pass == "" && opt.AskPassword {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
clearpass := config.ReadPassword()
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
}
// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
absRoot: root,
opt: *opt,
m: m,
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
SlowHash: true,
}).Fill(f)
// Make a connection and pool it to return errors early
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
cwd, err := c.sftpClient.Getwd()
f.putSftpConnection(&c, nil)
if err != nil {
fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
} else if !path.IsAbs(f.root) {
f.absRoot = path.Join(cwd, f.root)
fs.Debugf(f, "Using absolute root directory %q", f.absRoot)
}
if root != "" {
// Check to see if the root actually an existing file
oldAbsRoot := f.absRoot
remote := path.Base(root)
f.root = path.Dir(root)
f.absRoot = path.Dir(f.absRoot)
if f.root == "." {
f.root = ""
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
f.root = root
f.absRoot = oldAbsRoot
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return f.url
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote sftp file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// NewObject creates a new remote sftp file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.stat()
if err != nil {
return nil, err
}
return o, nil
}
// dirExists returns true,nil if the directory exists, false, nil if
// it doesn't or false, err
func (f *Fs) dirExists(dir string) (bool, error) {
if dir == "" {
dir = "."
}
c, err := f.getSftpConnection()
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
info, err := c.sftpClient.Stat(dir)
f.putSftpConnection(&c, err)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, errors.Wrap(err, "dirExists stat failed")
}
if !info.IsDir() {
return false, fs.ErrorIsFile
}
return true, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
root := path.Join(f.absRoot, dir)
ok, err := f.dirExists(root)
if err != nil {
return nil, errors.Wrap(err, "List failed")
}
if !ok {
return nil, fs.ErrorDirNotFound
}
sftpDir := root
if sftpDir == "" {
sftpDir = "."
}
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "List")
}
infos, err := c.sftpClient.ReadDir(sftpDir)
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
for _, info := range infos {
remote := path.Join(dir, info.Name())
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() && !info.IsDir() {
if f.opt.SkipLinks {
// skip non regular file if SkipLinks is set
continue
}
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
}
info = oldInfo
}
}
if info.IsDir() {
d := fs.NewDir(remote, info.ModTime())
entries = append(entries, d)
} else {
o := &Object{
fs: f,
remote: remote,
}
o.setMetadata(info)
entries = append(entries, o)
}
}
return entries, nil
}
// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)>
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
err := f.mkParentDir(src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
// Temporary object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
err = o.Update(ctx, in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
parent := path.Dir(remote)
return f.mkdir(path.Join(f.absRoot, parent))
}
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
f.mkdirLock.Lock(dirPath)
defer f.mkdirLock.Unlock(dirPath)
if dirPath == "." || dirPath == "/" {
return nil
}
ok, err := f.dirExists(dirPath)
if err != nil {
return errors.Wrap(err, "mkdir dirExists failed")
}
if ok {
return nil
}
parent := path.Dir(dirPath)
err = f.mkdir(parent)
if err != nil {
return err
}
c, err := f.getSftpConnection()
if err != nil {
return errors.Wrap(err, "mkdir")
}
err = c.sftpClient.Mkdir(dirPath)
f.putSftpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "mkdir %q failed", dirPath)
}
return nil
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
root := path.Join(f.absRoot, dir)
return f.mkdir(root)
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Check to see if directory is empty as some servers will
// delete recursively with RemoveDirectory
entries, err := f.List(ctx, dir)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
}
// Remove the directory
root := path.Join(f.absRoot, dir)
c, err := f.getSftpConnection()
if err != nil {
return errors.Wrap(err, "Rmdir")
}
err = c.sftpClient.RemoveDirectory(root)
f.putSftpConnection(&c, err)
return err
}
// Move renames a remote sftp file object
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "Move")
}
err = c.sftpClient.Rename(
srcObj.path(),
path.Join(f.absRoot, remote),
)
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.absRoot, srcRemote)
dstPath := path.Join(f.absRoot, dstRemote)
// Check if destination exists
ok, err := f.dirExists(dstPath)
if err != nil {
return errors.Wrap(err, "DirMove dirExists dst failed")
}
if ok {
return fs.ErrorDirExists
}
// Make sure the parent directory exists
err = f.mkdir(path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}
// Do the move
c, err := f.getSftpConnection()
if err != nil {
return errors.Wrap(err, "DirMove")
}
err = c.sftpClient.Rename(
srcPath,
dstPath,
)
f.putSftpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
}
return nil
}
// run runs cmd on the remote end returning standard output
func (f *Fs) run(cmd string) ([]byte, error) {
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP connection")
}
defer f.putSftpConnection(&c, err)
session, err := c.sshClient.NewSession()
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP sessiion")
}
defer func() {
_ = session.Close()
}()
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
err = session.Run(cmd)
if err != nil {
return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
}
return stdout.Bytes(), nil
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
if f.opt.DisableHashCheck {
return hash.Set(hash.None)
}
if f.cachedHashes != nil {
return *f.cachedHashes
}
// look for a hash command which works
checkHash := func(commands []string, expected string, hashCommand *string, changed *bool) bool {
if *hashCommand == hashCommandNotSupported {
return false
}
if *hashCommand != "" {
return true
}
*changed = true
for _, command := range commands {
output, err := f.run(command)
if err != nil {
continue
}
output = bytes.TrimSpace(output)
fs.Debugf(f, "checking %q command: %q", command, output)
if parseHash(output) == expected {
*hashCommand = command
return true
}
}
*hashCommand = hashCommandNotSupported
return false
}
changed := false
md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
if changed {
f.m.Set("md5sum_command", f.opt.Md5sumCommand)
f.m.Set("sha1sum_command", f.opt.Sha1sumCommand)
}
set := hash.NewHashSet()
if sha1Works {
set.Add(hash.SHA1)
}
if md5Works {
set.Add(hash.MD5)
}
f.cachedHashes = &set
return set
}
// About gets usage stats
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
escapedPath := shellEscape(f.root)
if f.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(f.opt.PathOverride, f.root))
}
if len(escapedPath) == 0 {
escapedPath = "/"
}
stdout, err := f.run("df -k " + escapedPath)
if err != nil {
return nil, errors.Wrap(err, "your remote may not support About")
}
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
usage := &fs.Usage{}
if usageTotal >= 0 {
usage.Total = fs.NewUsageValue(usageTotal)
}
if usageUsed >= 0 {
usage.Used = fs.NewUsageValue(usageUsed)
}
if usageAvail >= 0 {
usage.Free = fs.NewUsageValue(usageAvail)
}
return usage, nil
}
// Fs is the filesystem this remote sftp file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the URL to the remote SFTP file
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the name of the remote SFTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if o.fs.opt.DisableHashCheck {
return "", nil
}
_ = o.fs.Hashes()
var hashCmd string
if r == hash.MD5 {
if o.md5sum != nil {
return *o.md5sum, nil
}
hashCmd = o.fs.opt.Md5sumCommand
} else if r == hash.SHA1 {
if o.sha1sum != nil {
return *o.sha1sum, nil
}
hashCmd = o.fs.opt.Sha1sumCommand
} else {
return "", hash.ErrUnsupported
}
if hashCmd == "" || hashCmd == hashCommandNotSupported {
return "", hash.ErrUnsupported
}
c, err := o.fs.getSftpConnection()
if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection")
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
return "", errors.Wrap(err, "Hash put SFTP connection")
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
err = session.Run(hashCmd + " " + escapedPath)
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
if err != nil {
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
return "", nil
}
_ = session.Close()
b := stdout.Bytes()
fs.Debugf(nil, "sftp output = %q", b)
str := parseHash(b)
fs.Debugf(nil, "sftp hash = %q", str)
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {
o.sha1sum = &str
}
return str, nil
}
var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\n-]")
// Escape a string s.t. it cannot cause unintended behavior
// when sending it to a shell.
func shellEscape(str string) string {
safe := shellEscapeRegex.ReplaceAllString(str, `\$0`)
return strings.Replace(safe, "\n", "'\n'", -1)
}
// Converts a byte array from the SSH session returned by
// an invocation of md5sum/sha1sum to a hash string
// as expected by the rest of this application
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
}
// Parses the byte array output from the SSH session
// returned by an invocation of df into
// the disk size, used space, and available space on the disk, in that order.
// Only works when `df` has output info on only one disk
func parseUsage(bytes []byte) (spaceTotal int64, spaceUsed int64, spaceAvail int64) {
spaceTotal, spaceUsed, spaceAvail = -1, -1, -1
lines := strings.Split(string(bytes), "\n")
if len(lines) < 2 {
return
}
split := strings.Fields(lines[1])
if len(split) < 6 {
return
}
spaceTotal, err := strconv.ParseInt(split[1], 10, 64)
if err != nil {
spaceTotal = -1
}
spaceUsed, err = strconv.ParseInt(split[2], 10, 64)
if err != nil {
spaceUsed = -1
}
spaceAvail, err = strconv.ParseInt(split[3], 10, 64)
if err != nil {
spaceAvail = -1
}
return spaceTotal * 1024, spaceUsed * 1024, spaceAvail * 1024
}
// Size returns the size in bytes of the remote sftp file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote sftp file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// path returns the native path of the object
func (o *Object) path() string {
return path.Join(o.fs.absRoot, o.remote)
}
// setMetadata updates the info in the object from the stat result passed in
func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.size = info.Size()
o.mode = info.Mode()
}
// stat stats the file or directory at the remote given
func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "stat")
}
absPath := path.Join(f.absRoot, remote)
info, err = c.sftpClient.Stat(absPath)
f.putSftpConnection(&c, err)
return info, err
}
// stat updates the info in the Object
func (o *Object) stat() error {
info, err := o.fs.stat(o.remote)
if err != nil {
if os.IsNotExist(err) {
return fs.ErrorObjectNotFound
}
return errors.Wrap(err, "stat failed")
}
if info.IsDir() {
return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
}
o.setMetadata(info)
return nil
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.SetModTime {
c, err := o.fs.getSftpConnection()
if err != nil {
return errors.Wrap(err, "SetModTime")
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
}
err := o.stat()
if err != nil {
return errors.Wrap(err, "SetModTime stat failed")
}
return nil
}
// Storable returns whether the remote sftp file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
func (o *Object) Storable() bool {
return o.mode.IsRegular()
}
// objectReader represents a file open for reading on the SFTP server
type objectReader struct {
sftpFile *sftp.File
pipeReader *io.PipeReader
done chan struct{}
}
func newObjectReader(sftpFile *sftp.File) *objectReader {
pipeReader, pipeWriter := io.Pipe()
file := &objectReader{
sftpFile: sftpFile,
pipeReader: pipeReader,
done: make(chan struct{}),
}
go func() {
// Use sftpFile.WriteTo to pump data so that it gets a
// chance to build the window up.
_, err := sftpFile.WriteTo(pipeWriter)
// Close the pipeWriter so the pipeReader fails with
// the same error or EOF if err == nil
_ = pipeWriter.CloseWithError(err)
// signal that we've finished
close(file.done)
}()
return file
}
// Read from a remote sftp file object reader
func (file *objectReader) Read(p []byte) (n int, err error) {
n, err = file.pipeReader.Read(p)
return n, err
}
// Close a reader of a remote sftp file
func (file *objectReader) Close() (err error) {
// Close the sftpFile - this will likely cause the WriteTo to error
err = file.sftpFile.Close()
// Close the pipeReader so writes to the pipeWriter fail
_ = file.pipeReader.Close()
// Wait for the background process to finish
<-file.done
return err
}
// Open a remote sftp file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
c, err := o.fs.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "Open")
}
sftpFile, err := c.sftpClient.Open(o.path())
o.fs.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
if offset > 0 {
off, err := sftpFile.Seek(offset, io.SeekStart)
if err != nil || off != offset {
return nil, errors.Wrap(err, "Open Seek failed")
}
}
in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
return in, nil
}
// Update a remote sftp file using the data <in> and ModTime from <src>
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// Clear the hash cache since we are about to update the object
o.md5sum = nil
o.sha1sum = nil
c, err := o.fs.getSftpConnection()
if err != nil {
return errors.Wrap(err, "Update")
}
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "Update Create failed")
}
// remove the file if upload failed
remove := func() {
c, removeErr := o.fs.getSftpConnection()
if removeErr != nil {
fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
return
}
removeErr = c.sftpClient.Remove(o.path())
o.fs.putSftpConnection(&c, removeErr)
if removeErr != nil {
fs.Debugf(src, "Failed to remove: %v", removeErr)
} else {
fs.Debugf(src, "Removed after failed upload: %v", err)
}
}
_, err = file.ReadFrom(in)
if err != nil {
remove()
return errors.Wrap(err, "Update ReadFrom failed")
}
err = file.Close()
if err != nil {
remove()
return errors.Wrap(err, "Update Close failed")
}
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return errors.Wrap(err, "Update SetModTime failed")
}
return nil
}
// Remove a remote sftp file object
func (o *Object) Remove(ctx context.Context) error {
c, err := o.fs.getSftpConnection()
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.sftpClient.Remove(o.path())
o.fs.putSftpConnection(&c, err)
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Abouter = &Fs{}
_ fs.Object = &Object{}
)
| 1 | 11,571 | Can you break this line into two parts - the first line makes the option help text for `--sftp-server`, and it's too long! `Specifies the path or command to run a sftp server on the remote host. The subsystem option is ignored when sftp_server is defined.` | rclone-rclone | go |
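A minimal, hypothetical sketch of the split the review above asks for: keep the long sentence as the short first line of the help text and move the qualifier to its own line. The wording comes from the text quoted in the review; the surrounding code, names, and the exact way rclone renders help are assumptions, not taken from the actual patch.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical two-part help text: a short first line that fits a
	// compact flag summary, with the qualifier on a separate line below.
	help := `Specifies the path or command to run a sftp server on the remote host.

The subsystem option is ignored when sftp_server is defined.`

	// Many help renderers show only the first line as the short description,
	// which is why keeping that line short matters.
	fmt.Println(strings.SplitN(help, "\n", 2)[0])
}
```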
@@ -45,7 +45,7 @@ class LoopAnalyzer
Context &$inner_context = null,
bool $is_do = false,
bool $always_enters_loop = false
- ) {
+ ): ?bool {
$traverser = new PhpParser\NodeTraverser;
$assignment_mapper = new \Psalm\Internal\PhpVisitor\AssignmentMapVisitor($loop_scope->loop_context->self); | 1 | <?php
namespace Psalm\Internal\Analyzer\Statements\Block;
use PhpParser;
use Psalm\Internal\Analyzer\ScopeAnalyzer;
use Psalm\Internal\Analyzer\Statements\ExpressionAnalyzer;
use Psalm\Internal\Analyzer\StatementsAnalyzer;
use Psalm\Internal\Clause;
use Psalm\CodeLocation;
use Psalm\Config;
use Psalm\Context;
use Psalm\IssueBuffer;
use Psalm\Internal\Scope\LoopScope;
use Psalm\Type;
use Psalm\Type\Algebra;
use Psalm\Type\Reconciler;
use function array_merge;
use function array_keys;
use function array_unique;
use function array_intersect_key;
use function in_array;
/**
* @internal
*/
class LoopAnalyzer
{
/**
* Checks an array of statements in a loop
*
* @param array<PhpParser\Node\Stmt> $stmts
* @param PhpParser\Node\Expr[] $pre_conditions
* @param PhpParser\Node\Expr[] $post_expressions
* @param Context loop_scope->loop_context
* @param Context $loop_scope->loop_parent_context
*
* @return false|null
*/
public static function analyze(
StatementsAnalyzer $statements_analyzer,
array $stmts,
array $pre_conditions,
array $post_expressions,
LoopScope $loop_scope,
Context &$inner_context = null,
bool $is_do = false,
bool $always_enters_loop = false
) {
$traverser = new PhpParser\NodeTraverser;
$assignment_mapper = new \Psalm\Internal\PhpVisitor\AssignmentMapVisitor($loop_scope->loop_context->self);
$traverser->addVisitor($assignment_mapper);
$traverser->traverse(array_merge($stmts, $post_expressions));
$assignment_map = $assignment_mapper->getAssignmentMap();
$assignment_depth = 0;
$asserted_var_ids = [];
$pre_condition_clauses = [];
$original_protected_var_ids = $loop_scope->loop_parent_context->protected_var_ids;
$codebase = $statements_analyzer->getCodebase();
$inner_do_context = null;
if ($pre_conditions) {
foreach ($pre_conditions as $i => $pre_condition) {
$pre_condition_id = \spl_object_id($pre_condition);
$pre_condition_clauses[$i] = Algebra::getFormula(
$pre_condition_id,
$pre_condition_id,
$pre_condition,
$loop_scope->loop_context->self,
$statements_analyzer,
$codebase
);
}
} else {
$asserted_var_ids = Context::getNewOrUpdatedVarIds(
$loop_scope->loop_parent_context,
$loop_scope->loop_context
);
}
$final_actions = ScopeAnalyzer::getFinalControlActions(
$stmts,
$statements_analyzer->node_data,
Config::getInstance()->exit_functions,
$loop_scope->loop_context->break_types
);
$does_always_break = $final_actions === [ScopeAnalyzer::ACTION_BREAK];
if ($assignment_map) {
$first_var_id = array_keys($assignment_map)[0];
$assignment_depth = self::getAssignmentMapDepth($first_var_id, $assignment_map);
}
$loop_scope->loop_context->parent_context = $loop_scope->loop_parent_context;
$pre_outer_context = $loop_scope->loop_parent_context;
if ($assignment_depth === 0 || $does_always_break) {
$inner_context = clone $loop_scope->loop_context;
foreach ($inner_context->vars_in_scope as $context_var_id => $context_type) {
$inner_context->vars_in_scope[$context_var_id] = clone $context_type;
}
$inner_context->loop_scope = $loop_scope;
$inner_context->parent_context = $loop_scope->loop_context;
$old_referenced_var_ids = $inner_context->referenced_var_ids;
$inner_context->referenced_var_ids = [];
foreach ($pre_conditions as $condition_offset => $pre_condition) {
self::applyPreConditionToLoopContext(
$statements_analyzer,
$pre_condition,
$pre_condition_clauses[$condition_offset],
$inner_context,
$loop_scope->loop_parent_context,
$is_do
);
}
$inner_context->protected_var_ids = $loop_scope->protected_var_ids;
$statements_analyzer->analyze($stmts, $inner_context);
self::updateLoopScopeContexts($loop_scope, $loop_scope->loop_parent_context);
foreach ($post_expressions as $post_expression) {
if (ExpressionAnalyzer::analyze(
$statements_analyzer,
$post_expression,
$loop_scope->loop_context
) === false
) {
return false;
}
}
$new_referenced_var_ids = $inner_context->referenced_var_ids;
$inner_context->referenced_var_ids = $old_referenced_var_ids + $inner_context->referenced_var_ids;
$loop_scope->loop_parent_context->vars_possibly_in_scope = array_merge(
$inner_context->vars_possibly_in_scope,
$loop_scope->loop_parent_context->vars_possibly_in_scope
);
} else {
$pre_outer_context = clone $loop_scope->loop_parent_context;
$analyzer = $statements_analyzer->getCodebase()->analyzer;
$original_mixed_counts = $analyzer->getMixedCountsForFile($statements_analyzer->getFilePath());
$pre_condition_vars_in_scope = $loop_scope->loop_context->vars_in_scope;
IssueBuffer::startRecording();
if (!$is_do) {
foreach ($pre_conditions as $condition_offset => $pre_condition) {
$asserted_var_ids = array_merge(
self::applyPreConditionToLoopContext(
$statements_analyzer,
$pre_condition,
$pre_condition_clauses[$condition_offset],
$loop_scope->loop_context,
$loop_scope->loop_parent_context,
$is_do
),
$asserted_var_ids
);
}
}
// record all the vars that existed before we did the first pass through the loop
$pre_loop_context = clone $loop_scope->loop_context;
$inner_context = clone $loop_scope->loop_context;
foreach ($inner_context->vars_in_scope as $context_var_id => $context_type) {
$inner_context->vars_in_scope[$context_var_id] = clone $context_type;
}
$inner_context->parent_context = $loop_scope->loop_context;
$inner_context->loop_scope = $loop_scope;
$old_referenced_var_ids = $inner_context->referenced_var_ids;
$inner_context->referenced_var_ids = [];
$inner_context->protected_var_ids = $loop_scope->protected_var_ids;
$statements_analyzer->analyze($stmts, $inner_context);
self::updateLoopScopeContexts($loop_scope, $pre_outer_context);
$inner_context->protected_var_ids = $original_protected_var_ids;
if ($is_do) {
$inner_do_context = clone $inner_context;
foreach ($pre_conditions as $condition_offset => $pre_condition) {
$asserted_var_ids = array_merge(
self::applyPreConditionToLoopContext(
$statements_analyzer,
$pre_condition,
$pre_condition_clauses[$condition_offset],
$inner_context,
$loop_scope->loop_parent_context,
$is_do
),
$asserted_var_ids
);
}
}
$asserted_var_ids = array_unique($asserted_var_ids);
foreach ($post_expressions as $post_expression) {
if (ExpressionAnalyzer::analyze($statements_analyzer, $post_expression, $inner_context) === false) {
return false;
}
}
/**
* @var array<string, bool>
*/
$new_referenced_var_ids = $inner_context->referenced_var_ids;
$inner_context->referenced_var_ids = array_intersect_key(
$old_referenced_var_ids,
$inner_context->referenced_var_ids
);
$recorded_issues = IssueBuffer::clearRecordingLevel();
IssueBuffer::stopRecording();
for ($i = 0; $i < $assignment_depth; ++$i) {
$vars_to_remove = [];
$loop_scope->iteration_count++;
$has_changes = false;
// reset the $inner_context to what it was before we started the analysis,
// but union the types with what's in the loop scope
foreach ($inner_context->vars_in_scope as $var_id => $type) {
if (in_array($var_id, $asserted_var_ids, true)) {
// set the vars to whatever the while/foreach loop expects them to be
if (!isset($pre_loop_context->vars_in_scope[$var_id])
|| !$type->equals($pre_loop_context->vars_in_scope[$var_id])
) {
$has_changes = true;
}
} elseif (isset($pre_outer_context->vars_in_scope[$var_id])) {
if (!$type->equals($pre_outer_context->vars_in_scope[$var_id])) {
$has_changes = true;
// widen the foreach context type with the initial context type
$inner_context->vars_in_scope[$var_id] = Type::combineUnionTypes(
$inner_context->vars_in_scope[$var_id],
$pre_outer_context->vars_in_scope[$var_id]
);
// if there's a change, invalidate related clauses
$pre_loop_context->removeVarFromConflictingClauses($var_id);
}
if (isset($loop_scope->loop_context->vars_in_scope[$var_id])
&& !$type->equals($loop_scope->loop_context->vars_in_scope[$var_id])
) {
$has_changes = true;
// widen the foreach context type with the initial context type
$inner_context->vars_in_scope[$var_id] = Type::combineUnionTypes(
$inner_context->vars_in_scope[$var_id],
$loop_scope->loop_context->vars_in_scope[$var_id]
);
// if there's a change, invalidate related clauses
$pre_loop_context->removeVarFromConflictingClauses($var_id);
}
} else {
                        // give an opportunity to redeem UndefinedVariable issues
if ($recorded_issues) {
$has_changes = true;
}
// if we're in a do block we don't want to remove vars before evaluating
                        // the while conditional
if (!$is_do) {
$vars_to_remove[] = $var_id;
}
}
}
$inner_context->has_returned = false;
if ($codebase->find_unused_variables) {
foreach ($inner_context->unreferenced_vars as $var_id => $locations) {
if (!isset($pre_outer_context->vars_in_scope[$var_id])) {
$loop_scope->unreferenced_vars[$var_id] = $locations;
unset($inner_context->unreferenced_vars[$var_id]);
}
}
}
$loop_scope->loop_parent_context->vars_possibly_in_scope = array_merge(
$inner_context->vars_possibly_in_scope,
$loop_scope->loop_parent_context->vars_possibly_in_scope
);
// if there are no changes to the types, no need to re-examine
if (!$has_changes) {
break;
}
if ($codebase->find_unused_variables) {
foreach ($loop_scope->possibly_unreferenced_vars as $var_id => $locations) {
if (isset($inner_context->unreferenced_vars[$var_id])) {
$inner_context->unreferenced_vars[$var_id] += $locations;
} else {
$inner_context->unreferenced_vars[$var_id] = $locations;
}
}
}
// remove vars that were defined in the foreach
foreach ($vars_to_remove as $var_id) {
unset($inner_context->vars_in_scope[$var_id]);
}
$inner_context->clauses = $pre_loop_context->clauses;
$analyzer->setMixedCountsForFile($statements_analyzer->getFilePath(), $original_mixed_counts);
IssueBuffer::startRecording();
foreach ($pre_loop_context->vars_in_scope as $var_id => $_) {
if (!isset($pre_condition_vars_in_scope[$var_id])
&& isset($inner_context->vars_in_scope[$var_id])
&& \strpos($var_id, '->') === false
&& \strpos($var_id, '[') === false
) {
$inner_context->vars_in_scope[$var_id]->possibly_undefined = true;
}
}
if (!$is_do) {
foreach ($pre_conditions as $condition_offset => $pre_condition) {
self::applyPreConditionToLoopContext(
$statements_analyzer,
$pre_condition,
$pre_condition_clauses[$condition_offset],
$inner_context,
$loop_scope->loop_parent_context,
false
);
}
}
foreach ($asserted_var_ids as $var_id) {
if ((!isset($inner_context->vars_in_scope[$var_id])
|| $inner_context->vars_in_scope[$var_id]->getId()
!== $pre_loop_context->vars_in_scope[$var_id]->getId()
|| $inner_context->vars_in_scope[$var_id]->from_docblock
!== $pre_loop_context->vars_in_scope[$var_id]->from_docblock
)
) {
if (isset($pre_loop_context->vars_in_scope[$var_id])) {
$inner_context->vars_in_scope[$var_id] = clone $pre_loop_context->vars_in_scope[$var_id];
} else {
unset($inner_context->vars_in_scope[$var_id]);
}
}
}
$inner_context->clauses = $pre_loop_context->clauses;
$inner_context->protected_var_ids = $loop_scope->protected_var_ids;
$traverser = new PhpParser\NodeTraverser;
$traverser->addVisitor(
new \Psalm\Internal\PhpVisitor\NodeCleanerVisitor(
$statements_analyzer->node_data
)
);
$traverser->traverse($stmts);
$statements_analyzer->analyze($stmts, $inner_context);
self::updateLoopScopeContexts($loop_scope, $pre_outer_context);
$inner_context->protected_var_ids = $original_protected_var_ids;
if ($is_do) {
$inner_do_context = clone $inner_context;
foreach ($pre_conditions as $condition_offset => $pre_condition) {
self::applyPreConditionToLoopContext(
$statements_analyzer,
$pre_condition,
$pre_condition_clauses[$condition_offset],
$inner_context,
$loop_scope->loop_parent_context,
$is_do
);
}
}
foreach ($post_expressions as $post_expression) {
if (ExpressionAnalyzer::analyze($statements_analyzer, $post_expression, $inner_context) === false) {
return false;
}
}
$recorded_issues = IssueBuffer::clearRecordingLevel();
IssueBuffer::stopRecording();
}
if ($recorded_issues) {
foreach ($recorded_issues as $recorded_issue) {
// if we're not in any loops then this will just result in the issue being emitted
IssueBuffer::bubbleUp($recorded_issue);
}
}
}
$does_sometimes_break = in_array(ScopeAnalyzer::ACTION_BREAK, $loop_scope->final_actions, true);
$does_always_break = $loop_scope->final_actions === [ScopeAnalyzer::ACTION_BREAK];
if ($does_sometimes_break) {
if ($loop_scope->possibly_redefined_loop_parent_vars !== null) {
foreach ($loop_scope->possibly_redefined_loop_parent_vars as $var => $type) {
$loop_scope->loop_parent_context->vars_in_scope[$var] = Type::combineUnionTypes(
$type,
$loop_scope->loop_parent_context->vars_in_scope[$var]
);
}
}
}
foreach ($loop_scope->loop_parent_context->vars_in_scope as $var_id => $type) {
if (!isset($loop_scope->loop_context->vars_in_scope[$var_id])) {
continue;
}
if ($loop_scope->loop_context->vars_in_scope[$var_id]->getId() !== $type->getId()) {
$loop_scope->loop_parent_context->vars_in_scope[$var_id] = Type::combineUnionTypes(
$loop_scope->loop_parent_context->vars_in_scope[$var_id],
$loop_scope->loop_context->vars_in_scope[$var_id]
);
$loop_scope->loop_parent_context->removeVarFromConflictingClauses($var_id);
}
}
if (!$does_always_break) {
foreach ($loop_scope->loop_parent_context->vars_in_scope as $var_id => $type) {
if (!isset($inner_context->vars_in_scope[$var_id])) {
unset($loop_scope->loop_parent_context->vars_in_scope[$var_id]);
continue;
}
if ($inner_context->vars_in_scope[$var_id]->hasMixed()) {
$loop_scope->loop_parent_context->vars_in_scope[$var_id] =
$inner_context->vars_in_scope[$var_id];
$loop_scope->loop_parent_context->removeVarFromConflictingClauses($var_id);
continue;
}
if ($inner_context->vars_in_scope[$var_id]->getId() !== $type->getId()) {
$loop_scope->loop_parent_context->vars_in_scope[$var_id] = Type::combineUnionTypes(
$loop_scope->loop_parent_context->vars_in_scope[$var_id],
$inner_context->vars_in_scope[$var_id]
);
$loop_scope->loop_parent_context->removeVarFromConflictingClauses($var_id);
}
}
}
if ($pre_conditions && $pre_condition_clauses && !ScopeAnalyzer::doesEverBreak($stmts)) {
// if the loop contains an assertion and there are no break statements, we can negate that assertion
// and apply it to the current context
try {
$negated_pre_condition_clauses = Algebra::negateFormula(array_merge(...$pre_condition_clauses));
} catch (\Psalm\Exception\ComplicatedExpressionException $e) {
$negated_pre_condition_clauses = [];
}
$negated_pre_condition_types = Algebra::getTruthsFromFormula($negated_pre_condition_clauses);
if ($negated_pre_condition_types) {
$changed_var_ids = [];
$vars_in_scope_reconciled = Reconciler::reconcileKeyedTypes(
$negated_pre_condition_types,
[],
$inner_context->vars_in_scope,
$changed_var_ids,
[],
$statements_analyzer,
[],
true,
new CodeLocation($statements_analyzer->getSource(), $pre_conditions[0])
);
foreach ($changed_var_ids as $var_id => $_) {
if (isset($vars_in_scope_reconciled[$var_id])
&& isset($loop_scope->loop_parent_context->vars_in_scope[$var_id])
) {
$loop_scope->loop_parent_context->vars_in_scope[$var_id] = $vars_in_scope_reconciled[$var_id];
}
$loop_scope->loop_parent_context->removeVarFromConflictingClauses($var_id);
}
}
}
$loop_scope->loop_context->referenced_var_ids = array_merge(
array_intersect_key(
$inner_context->referenced_var_ids,
$pre_outer_context->vars_in_scope
),
$loop_scope->loop_context->referenced_var_ids
);
if ($codebase->find_unused_variables) {
foreach ($loop_scope->possibly_unreferenced_vars as $var_id => $locations) {
if (isset($inner_context->unreferenced_vars[$var_id])) {
$inner_context->unreferenced_vars[$var_id] += $locations;
}
}
foreach ($inner_context->unreferenced_vars as $var_id => $locations) {
if (!isset($new_referenced_var_ids[$var_id])
|| !isset($pre_outer_context->vars_in_scope[$var_id])
|| $does_always_break
) {
if (!isset($loop_scope->loop_context->unreferenced_vars[$var_id])) {
$loop_scope->loop_context->unreferenced_vars[$var_id] = $locations;
} else {
$loop_scope->loop_context->unreferenced_vars[$var_id] += $locations;
}
} else {
$statements_analyzer->registerVariableUses($locations);
}
}
foreach ($loop_scope->unreferenced_vars as $var_id => $locations) {
if (isset($loop_scope->referenced_var_ids[$var_id])) {
$statements_analyzer->registerVariableUses($locations);
} else {
if (!isset($loop_scope->loop_context->unreferenced_vars[$var_id])) {
$loop_scope->loop_context->unreferenced_vars[$var_id] = $locations;
} else {
$loop_scope->loop_context->unreferenced_vars[$var_id] += $locations;
}
}
}
}
if ($always_enters_loop) {
foreach ($inner_context->vars_in_scope as $var_id => $type) {
// if there are break statements in the loop it's not certain
// that the loop has finished executing, so the assertions at the end
                // of the loop in the while conditional may not hold
if (in_array(ScopeAnalyzer::ACTION_BREAK, $loop_scope->final_actions, true)
|| in_array(ScopeAnalyzer::ACTION_CONTINUE, $loop_scope->final_actions, true)
) {
if (isset($loop_scope->possibly_defined_loop_parent_vars[$var_id])) {
$loop_scope->loop_parent_context->vars_in_scope[$var_id] = Type::combineUnionTypes(
$type,
$loop_scope->possibly_defined_loop_parent_vars[$var_id]
);
}
} else {
if ($codebase->find_unused_variables
&& !isset($loop_scope->loop_parent_context->vars_in_scope[$var_id])
&& isset($inner_context->unreferenced_vars[$var_id])
) {
$loop_scope->loop_parent_context->unreferenced_vars[$var_id]
= $inner_context->unreferenced_vars[$var_id];
}
$loop_scope->loop_parent_context->vars_in_scope[$var_id] = $type;
}
}
}
if ($inner_do_context) {
$inner_context = $inner_do_context;
}
}
private static function updateLoopScopeContexts(
LoopScope $loop_scope,
Context $pre_outer_context
): void {
$updated_loop_vars = [];
if (!in_array(ScopeAnalyzer::ACTION_CONTINUE, $loop_scope->final_actions, true)) {
$loop_scope->loop_context->vars_in_scope = $pre_outer_context->vars_in_scope;
} else {
if ($loop_scope->redefined_loop_vars !== null) {
foreach ($loop_scope->redefined_loop_vars as $var => $type) {
$loop_scope->loop_context->vars_in_scope[$var] = $type;
$updated_loop_vars[$var] = true;
}
}
if ($loop_scope->possibly_redefined_loop_vars) {
foreach ($loop_scope->possibly_redefined_loop_vars as $var => $type) {
if ($loop_scope->loop_context->hasVariable($var)
&& !isset($updated_loop_vars[$var])
) {
$loop_scope->loop_context->vars_in_scope[$var] = Type::combineUnionTypes(
$loop_scope->loop_context->vars_in_scope[$var],
$type
);
}
}
}
}
// merge vars possibly in scope at the end of each loop
$loop_scope->loop_context->vars_possibly_in_scope = array_merge(
$loop_scope->loop_context->vars_possibly_in_scope,
$loop_scope->vars_possibly_in_scope
);
}
/**
* @param array<int, Clause> $pre_condition_clauses
*
* @return string[]
*/
private static function applyPreConditionToLoopContext(
StatementsAnalyzer $statements_analyzer,
PhpParser\Node\Expr $pre_condition,
array $pre_condition_clauses,
Context $loop_context,
Context $outer_context,
bool $is_do
): array {
$pre_referenced_var_ids = $loop_context->referenced_var_ids;
$loop_context->referenced_var_ids = [];
$loop_context->inside_conditional = true;
$suppressed_issues = $statements_analyzer->getSuppressedIssues();
if ($is_do) {
if (!in_array('RedundantCondition', $suppressed_issues, true)) {
$statements_analyzer->addSuppressedIssues(['RedundantCondition']);
}
if (!in_array('RedundantConditionGivenDocblockType', $suppressed_issues, true)) {
$statements_analyzer->addSuppressedIssues(['RedundantConditionGivenDocblockType']);
}
if (!in_array('TypeDoesNotContainType', $suppressed_issues, true)) {
$statements_analyzer->addSuppressedIssues(['TypeDoesNotContainType']);
}
}
if (ExpressionAnalyzer::analyze($statements_analyzer, $pre_condition, $loop_context) === false) {
return [];
}
$loop_context->inside_conditional = false;
$new_referenced_var_ids = $loop_context->referenced_var_ids;
$loop_context->referenced_var_ids = array_merge($pre_referenced_var_ids, $new_referenced_var_ids);
$asserted_var_ids = Context::getNewOrUpdatedVarIds($outer_context, $loop_context);
$loop_context->clauses = Algebra::simplifyCNF(
array_merge($outer_context->clauses, $pre_condition_clauses)
);
$active_while_types = [];
$reconcilable_while_types = Algebra::getTruthsFromFormula(
$loop_context->clauses,
\spl_object_id($pre_condition),
$new_referenced_var_ids
);
$changed_var_ids = [];
if ($reconcilable_while_types) {
$pre_condition_vars_in_scope_reconciled = Reconciler::reconcileKeyedTypes(
$reconcilable_while_types,
$active_while_types,
$loop_context->vars_in_scope,
$changed_var_ids,
$new_referenced_var_ids,
$statements_analyzer,
[],
true,
new CodeLocation($statements_analyzer->getSource(), $pre_condition)
);
$loop_context->vars_in_scope = $pre_condition_vars_in_scope_reconciled;
}
if ($is_do) {
if (!in_array('RedundantCondition', $suppressed_issues, true)) {
$statements_analyzer->removeSuppressedIssues(['RedundantCondition']);
}
if (!in_array('RedundantConditionGivenDocblockType', $suppressed_issues, true)) {
$statements_analyzer->removeSuppressedIssues(['RedundantConditionGivenDocblockType']);
}
if (!in_array('TypeDoesNotContainType', $suppressed_issues, true)) {
$statements_analyzer->removeSuppressedIssues(['TypeDoesNotContainType']);
}
}
if ($is_do) {
return [];
}
foreach ($asserted_var_ids as $var_id) {
$loop_context->clauses = Context::filterClauses(
$var_id,
$loop_context->clauses,
null,
$statements_analyzer
);
}
return $asserted_var_ids;
}
/**
* @param array<string, array<string, bool>> $assignment_map
*
*/
private static function getAssignmentMapDepth(string $first_var_id, array $assignment_map): int
{
$max_depth = 0;
$assignment_var_ids = $assignment_map[$first_var_id];
unset($assignment_map[$first_var_id]);
foreach ($assignment_var_ids as $assignment_var_id => $_) {
$depth = 1;
if (isset($assignment_map[$assignment_var_id])) {
$depth = 1 + self::getAssignmentMapDepth($assignment_var_id, $assignment_map);
}
if ($depth > $max_depth) {
$max_depth = $depth;
}
}
return $max_depth;
}
}
| 1 | 9,158 | I reverted that one in a previous PR because of a CI failure but it was actually unrelated | vimeo-psalm | php |
@@ -112,12 +112,13 @@ func Add2ToolsList(toolList map[string]types.ToolsInstaller, flagData map[string
if kubeVer == "" {
var latestVersion string
for i := 0; i < util.RetryTimes; i++ {
- latestVersion, err := util.GetLatestVersion()
+ version, err := util.GetLatestVersion()
if err != nil {
return err
}
- if len(latestVersion) != 0 {
- kubeVer = latestVersion[1:]
+ if len(version) != 0 {
+ kubeVer = version[1:]
+ latestVersion = version
break
}
} | 1 | /*
Copyright 2019 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"io"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
types "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/common"
"github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/util"
)
var (
	cloudInitLongDescription = `
"keadm init" command installs KubeEdge's master node (on the cloud) component.
It checks if the Kubernetes Master is installed already.
If not installed, please install Kubernetes first.
`
cloudInitExample = `
keadm init
- This command will download and install the default version of KubeEdge cloud component
keadm init --kubeedge-version=1.2.1 --kube-config=/root/.kube/config
	- kube-config is the absolute path of kubeconfig which is used to secure connectivity between cloudcore and kube-apiserver
`
)
// NewCloudInit represents the keadm init command for cloud component
func NewCloudInit(out io.Writer, init *types.InitOptions) *cobra.Command {
if init == nil {
init = newInitOptions()
}
tools := make(map[string]types.ToolsInstaller, 0)
flagVals := make(map[string]types.FlagData, 0)
var cmd = &cobra.Command{
Use: "init",
		Short:   "Bootstraps cloud component. Checks and installs (if required) the pre-requisites.",
Long: cloudInitLongDescription,
Example: cloudInitExample,
RunE: func(cmd *cobra.Command, args []string) error {
checkFlags := func(f *pflag.Flag) {
util.AddToolVals(f, flagVals)
}
cmd.Flags().VisitAll(checkFlags)
err := Add2ToolsList(tools, flagVals, init)
if err != nil {
return err
}
return Execute(tools)
},
}
addJoinOtherFlags(cmd, init)
return cmd
}
//newInitOptions will initialise a new instance of options every time
func newInitOptions() *types.InitOptions {
var opts *types.InitOptions
opts = &types.InitOptions{}
opts.KubeConfig = types.DefaultKubeConfig
return opts
}
func addJoinOtherFlags(cmd *cobra.Command, initOpts *types.InitOptions) {
cmd.Flags().StringVar(&initOpts.KubeEdgeVersion, types.KubeEdgeVersion, initOpts.KubeEdgeVersion,
"Use this key to download and use the required KubeEdge version")
cmd.Flags().Lookup(types.KubeEdgeVersion).NoOptDefVal = initOpts.KubeEdgeVersion
cmd.Flags().StringVar(&initOpts.KubeConfig, types.KubeConfig, initOpts.KubeConfig,
"Use this key to set kube-config path, eg: $HOME/.kube/config")
cmd.Flags().StringVar(&initOpts.Master, types.Master, initOpts.Master,
"Use this key to set K8s master address, eg: http://127.0.0.1:8080")
}
//Add2ToolsList Reads the flagData (containing val and default val) and join options to fill the list of tools.
func Add2ToolsList(toolList map[string]types.ToolsInstaller, flagData map[string]types.FlagData, initOptions *types.InitOptions) error {
toolList["Kubernetes"] = &util.K8SInstTool{
Common: util.Common{
KubeConfig: initOptions.KubeConfig,
Master: initOptions.Master,
},
}
var kubeVer string
flgData, ok := flagData[types.KubeEdgeVersion]
if ok {
kubeVer = util.CheckIfAvailable(flgData.Val.(string), flgData.DefVal.(string))
}
if kubeVer == "" {
var latestVersion string
for i := 0; i < util.RetryTimes; i++ {
latestVersion, err := util.GetLatestVersion()
if err != nil {
return err
}
if len(latestVersion) != 0 {
kubeVer = latestVersion[1:]
break
}
}
if len(latestVersion) == 0 {
fmt.Println("Failed to get the latest KubeEdge release version, will use default version")
kubeVer = types.DefaultKubeEdgeVersion
}
}
toolList["Cloud"] = &util.KubeCloudInstTool{
Common: util.Common{
ToolVersion: kubeVer,
KubeConfig: initOptions.KubeConfig,
Master: initOptions.Master,
},
}
return nil
}
//Execute the installation for each tool and start cloudcore
func Execute(toolList map[string]types.ToolsInstaller) error {
for name, tool := range toolList {
if name != "Cloud" {
err := tool.InstallTools()
if err != nil {
return err
}
}
}
return toolList["Cloud"].InstallTools()
}
| 1 | 16,228 | The for loop is useless here, since any error will make the function return. | kubeedge-kubeedge | go |
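To illustrate the review above with a small, self-contained sketch (hypothetical names, not the keadm code): because the patched loop returns as soon as `util.GetLatestVersion()` errors, a second iteration can never run, so a loop that is meant to retry has to record the error and continue instead of returning.

```go
package main

import (
	"errors"
	"fmt"
)

// latestWithRetry keeps retrying instead of returning on the first error,
// which is what a retry loop needs to do to be useful. This is only a
// sketch of the reviewer's point; names and behaviour are assumptions.
func latestWithRetry(retries int, get func() (string, error)) (string, error) {
	var lastErr error
	for i := 0; i < retries; i++ {
		v, err := get()
		if err != nil {
			lastErr = err
			continue // retry instead of returning immediately
		}
		if v != "" {
			return v, nil
		}
	}
	return "", lastErr
}

func main() {
	calls := 0
	v, err := latestWithRetry(3, func() (string, error) {
		calls++
		if calls == 1 {
			return "", errors.New("transient network error")
		}
		return "v1.2.1", nil
	})
	fmt.Println(v, err) // succeeds on the second attempt: "v1.2.1 <nil>"
}
```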
@@ -0,0 +1,16 @@
+#! /usr/bin/env python
+
+
+# import unittest2 as unittest
+
+# from nupic.bindings.algorithms import FlatSpatialPooler as FlatSpatialPooler
+
+# import spatial_pooler_py_api_test as pytest
+
+# pytest.SpatialPooler = FlatSpatialPooler
+
+# SpatialPoolerFlatAPITest = pytest.SpatialPoolerAPITest
+
+
+# if __name__ == "__main__":
+# unittest.main() | 1 | 1 | 12,613 | Fix file endings here and elsewhere. | numenta-nupic | py |
|
@@ -1188,7 +1188,7 @@ void CLIENT_STATE::check_project_timeout() {
if (p->possibly_backed_off && now > p->min_rpc_time) {
p->possibly_backed_off = false;
char buf[256];
- snprintf(buf, sizeof(buf), "Backoff ended for %s", p->get_project_name());
+ snprintf(buf, sizeof(buf), "Backoff ended for %.128s", p->get_project_name());
request_work_fetch(buf);
}
} | 1 | // This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
// High-level logic for communicating with scheduling servers,
// and for merging the result of a scheduler RPC into the client state
// The scheduler RPC mechanism is in scheduler_op.C
#include "cpp.h"
#ifdef _WIN32
#include "boinc_win.h"
#else
#include "config.h"
#include <cstdio>
#include <cmath>
#include <ctime>
#include <cstring>
#include <map>
#include <set>
#endif
#ifdef _MSC_VER
#define snprintf _snprintf
#endif
#include "crypt.h"
#include "error_numbers.h"
#include "file_names.h"
#include "filesys.h"
#include "parse.h"
#include "str_util.h"
#include "str_replace.h"
#include "url.h"
#include "util.h"
#include "client_msgs.h"
#include "cs_notice.h"
#include "cs_trickle.h"
#include "project.h"
#include "result.h"
#include "scheduler_op.h"
#include "sandbox.h"
#include "client_state.h"
using std::max;
using std::vector;
using std::string;
// quantities like avg CPU time decay by a factor of e every week
//
#define EXP_DECAY_RATE (1./(SECONDS_PER_DAY*7))
// try to report results this much before their deadline
//
#define REPORT_DEADLINE_CUSHION ((double)SECONDS_PER_DAY)
// report results within this time after completion
//
#define MAX_REPORT_DELAY 3600
#ifndef SIM
// Write a scheduler request to a disk file,
// to be sent to a scheduling server
//
int CLIENT_STATE::make_scheduler_request(PROJECT* p) {
char buf[1024];
MIOFILE mf;
unsigned int i;
RESULT* rp;
get_sched_request_filename(*p, buf, sizeof(buf));
FILE* f = boinc_fopen(buf, "wb");
if (!f) return ERR_FOPEN;
double trs = total_resource_share();
double rrs = runnable_resource_share(RSC_TYPE_ANY);
double prrs = potentially_runnable_resource_share();
double resource_share_fraction, rrs_fraction, prrs_fraction;
if (trs) {
resource_share_fraction = p->resource_share / trs;
} else {
resource_share_fraction = 1;
}
if (rrs) {
rrs_fraction = p->resource_share / rrs;
} else {
rrs_fraction = 1;
}
if (prrs) {
prrs_fraction = p->resource_share / prrs;
} else {
prrs_fraction = 1;
}
// if hostid is zero, rpc_seqno better be also
//
if (!p->hostid) {
p->rpc_seqno = 0;
}
mf.init_file(f);
fprintf(f,
"<scheduler_request>\n"
" <authenticator>%s</authenticator>\n"
" <hostid>%d</hostid>\n"
" <rpc_seqno>%d</rpc_seqno>\n"
" <core_client_major_version>%d</core_client_major_version>\n"
" <core_client_minor_version>%d</core_client_minor_version>\n"
" <core_client_release>%d</core_client_release>\n"
" <resource_share_fraction>%f</resource_share_fraction>\n"
" <rrs_fraction>%f</rrs_fraction>\n"
" <prrs_fraction>%f</prrs_fraction>\n"
" <duration_correction_factor>%f</duration_correction_factor>\n"
" <allow_multiple_clients>%d</allow_multiple_clients>\n"
" <sandbox>%d</sandbox>\n"
" <dont_send_work>%d</dont_send_work>\n",
p->authenticator,
p->hostid,
p->rpc_seqno,
core_client_version.major,
core_client_version.minor,
core_client_version.release,
resource_share_fraction,
rrs_fraction,
prrs_fraction,
p->duration_correction_factor,
cc_config.allow_multiple_clients?1:0,
g_use_sandbox?1:0,
p->dont_request_more_work?1:0
);
work_fetch.write_request(f, p);
// write client capabilities
//
fprintf(f,
" <client_cap_plan_class>1</client_cap_plan_class>\n"
);
write_platforms(p, mf.f);
if (strlen(p->code_sign_key)) {
fprintf(f, " <code_sign_key>\n%s\n</code_sign_key>\n", p->code_sign_key);
}
// send working prefs
//
fprintf(f, "<working_global_preferences>\n");
global_prefs.write(mf);
fprintf(f, "</working_global_preferences>\n");
// send master global preferences if present and not host-specific
//
if (!global_prefs.host_specific && boinc_file_exists(GLOBAL_PREFS_FILE_NAME)) {
FILE* fprefs = fopen(GLOBAL_PREFS_FILE_NAME, "r");
if (fprefs) {
copy_stream(fprefs, f);
fclose(fprefs);
}
PROJECT* pp = lookup_project(global_prefs.source_project);
if (pp && strlen(pp->email_hash)) {
fprintf(f,
"<global_prefs_source_email_hash>%s</global_prefs_source_email_hash>\n",
pp->email_hash
);
}
}
// Of the projects with same email hash as this one,
// send the oldest cross-project ID.
// Use project URL as tie-breaker.
//
PROJECT* winner = p;
for (i=0; i<projects.size(); i++ ) {
PROJECT* project = projects[i];
if (project == p) continue;
if (strcmp(project->email_hash, p->email_hash)) continue;
if (project->cpid_time < winner->cpid_time) {
winner = project;
} else if (project->cpid_time == winner->cpid_time) {
if (strcmp(project->master_url, winner->master_url) < 0) {
winner = project;
}
}
}
fprintf(f,
"<cross_project_id>%s</cross_project_id>\n",
winner->cross_project_id
);
time_stats.write(mf, true);
net_stats.write(mf);
if (global_prefs.daily_xfer_period_days) {
daily_xfer_history.write_scheduler_request(
mf, global_prefs.daily_xfer_period_days
);
}
// update hardware info, and write host info
//
host_info.get_host_info(false);
set_ncpus();
host_info.write(mf, !cc_config.suppress_net_info, false);
// get and write disk usage
//
get_disk_usages();
get_disk_shares();
fprintf(f,
" <disk_usage>\n"
" <d_boinc_used_total>%f</d_boinc_used_total>\n"
" <d_boinc_used_project>%f</d_boinc_used_project>\n"
" <d_project_share>%f</d_project_share>\n"
" </disk_usage>\n",
total_disk_usage, p->disk_usage, p->disk_share
);
if (coprocs.n_rsc > 1) {
work_fetch.copy_requests();
coprocs.write_xml(mf, true);
}
// report completed jobs
//
unsigned int last_reported_index = 0;
p->nresults_returned = 0;
for (i=0; i<results.size(); i++) {
rp = results[i];
if (rp->project == p && rp->ready_to_report) {
p->nresults_returned++;
rp->write(mf, true);
}
if (cc_config.max_tasks_reported
&& (p->nresults_returned >= cc_config.max_tasks_reported)
) {
last_reported_index = i;
break;
}
}
read_trickle_files(p, f);
// report sticky files as needed
//
for (i=0; i<file_infos.size(); i++) {
FILE_INFO* fip = file_infos[i];
if (fip->project != p) continue;
if (!fip->sticky) continue;
fprintf(f,
" <file_info>\n"
" <name>%s</name>\n"
" <nbytes>%f</nbytes>\n"
" <status>%d</status>\n"
" </file_info>\n",
fip->name, fip->nbytes, fip->status
);
}
if (p->send_time_stats_log) {
fprintf(f, "<time_stats_log>\n");
time_stats.get_log_after(p->send_time_stats_log, mf);
fprintf(f, "</time_stats_log>\n");
}
if (p->send_job_log) {
fprintf(f, "<job_log>\n");
job_log_filename(*p, buf, sizeof(buf));
send_log_after(buf, p->send_job_log, mf);
fprintf(f, "</job_log>\n");
}
// send descriptions of app versions
//
fprintf(f, "<app_versions>\n");
int j=0;
for (i=0; i<app_versions.size(); i++) {
APP_VERSION* avp = app_versions[i];
if (avp->project != p) continue;
avp->write(mf, false);
avp->index = j++;
}
fprintf(f, "</app_versions>\n");
// send descriptions of jobs in progress for this project
//
fprintf(f, "<other_results>\n");
for (i=0; i<results.size(); i++) {
rp = results[i];
if (rp->project != p) continue;
if ((last_reported_index && (i > last_reported_index)) || !rp->ready_to_report) {
fprintf(f,
" <other_result>\n"
" <name>%s</name>\n"
" <app_version>%d</app_version>\n",
rp->name,
rp->avp->index
);
// the following is for backwards compatibility w/ old schedulers
//
if (strlen(rp->avp->plan_class)) {
fprintf(f,
" <plan_class>%s</plan_class>\n",
rp->avp->plan_class
);
}
fprintf(f,
" </other_result>\n"
);
}
}
fprintf(f, "</other_results>\n");
// if requested by project, send summary of all in-progress results
// (for EDF simulation by scheduler)
//
if (p->send_full_workload) {
fprintf(f, "<in_progress_results>\n");
for (i=0; i<results.size(); i++) {
rp = results[i];
double x = rp->estimated_runtime_remaining();
if (x == 0) continue;
safe_strcpy(buf, "");
int rt = rp->avp->gpu_usage.rsc_type;
if (rt) {
if (rt == rsc_index(GPU_TYPE_NVIDIA)) {
snprintf(buf, sizeof(buf),
" <ncudas>%f</ncudas>\n",
rp->avp->gpu_usage.usage
);
} else if (rt == rsc_index(GPU_TYPE_ATI)) {
snprintf(buf, sizeof(buf),
" <natis>%f</natis>\n",
rp->avp->gpu_usage.usage
);
} else if (rt == rsc_index(GPU_TYPE_INTEL)) {
snprintf(buf, sizeof(buf),
" <nintel_gpus>%f</nintel_gpus>\n",
rp->avp->gpu_usage.usage
);
}
}
fprintf(f,
" <ip_result>\n"
" <name>%s</name>\n"
" <report_deadline>%.0f</report_deadline>\n"
" <time_remaining>%.2f</time_remaining>\n"
" <avg_ncpus>%f</avg_ncpus>\n"
"%s"
" </ip_result>\n",
rp->name,
rp->report_deadline,
x,
rp->avp->avg_ncpus,
buf
);
}
fprintf(f, "</in_progress_results>\n");
}
FILE* cof = boinc_fopen(CLIENT_OPAQUE_FILENAME, "r");
if (cof) {
fprintf(f, "<client_opaque>\n<![CDATA[\n");
copy_stream(cof, f);
fprintf(f, "\n]]>\n</client_opaque>\n");
fclose(cof);
}
if (strlen(client_brand)) {
fprintf(f, " <client_brand>%s</client_brand>\n", client_brand);
}
if (acct_mgr_info.using_am()) {
acct_mgr_info.user_keywords.write(f);
}
fprintf(f, "</scheduler_request>\n");
fclose(f);
return 0;
}
// the project is uploading, and it started recently
//
static inline bool actively_uploading(PROJECT* p) {
for (unsigned int i=0; i<gstate.file_xfers->file_xfers.size(); i++) {
FILE_XFER* fxp = gstate.file_xfers->file_xfers[i];
if (fxp->fip->project != p) continue;
if (!fxp->is_upload) continue;
if (gstate.now - fxp->start_time > WF_UPLOAD_DEFER_INTERVAL) continue;
//msg_printf(p, MSG_INFO, "actively uploading");
return true;
}
//msg_printf(p, MSG_INFO, "not actively uploading");
return false;
}
// If there is a request for an idle instance, return true.
// Clear other requests
//
static inline bool idle_request() {
bool found = false;
for (int i=0; i<coprocs.n_rsc; i++) {
RSC_WORK_FETCH &rwf = rsc_work_fetch[i];
if (rwf.req_instances) {
found = true;
} else {
rwf.req_secs = 0;
}
}
return found;
}
// Called once/sec.
// Initiate scheduler RPC activity if needed and possible
//
bool CLIENT_STATE::scheduler_rpc_poll() {
PROJECT *p;
static double last_time=0;
static double last_work_fetch_time = 0;
double elapsed_time;
// are we currently doing a scheduler RPC?
// If so, see if it's finished
//
if (scheduler_op->state != SCHEDULER_OP_STATE_IDLE) {
last_time = now;
scheduler_op->poll();
return (scheduler_op->state == SCHEDULER_OP_STATE_IDLE);
}
if (network_suspended) return false;
// check only every 5 sec
//
if (!clock_change && now - last_time < SCHEDULER_RPC_POLL_PERIOD) return false;
last_time = now;
if (scheduler_op->check_master_fetch_start()) {
return true;
}
// If we haven't run benchmarks yet, don't do a scheduler RPC.
// We need to know CPU speed to handle app versions
//
if (!host_info.p_calculated) return false;
// check for various reasons to contact particular projects.
// If we need to contact a project,
// see if we should ask it for work as well.
//
p = next_project_sched_rpc_pending();
if (p) {
if (log_flags.sched_op_debug) {
msg_printf(p, MSG_INFO, "sched RPC pending: %s",
rpc_reason_string(p->sched_rpc_pending)
);
}
// if the user requested the RPC,
// clear backoffs to allow work requests
//
if (p->sched_rpc_pending == RPC_REASON_USER_REQ) {
for (int i=0; i<coprocs.n_rsc; i++) {
p->rsc_pwf[i].clear_backoff();
}
}
work_fetch.piggyback_work_request(p);
scheduler_op->init_op_project(p, p->sched_rpc_pending);
return true;
}
p = next_project_trickle_up_pending();
if (p) {
work_fetch.piggyback_work_request(p);
scheduler_op->init_op_project(p, RPC_REASON_TRICKLE_UP);
return true;
}
// stuff from here on is checked only once/minute,
// or if work fetch was requested.
//
if (must_check_work_fetch) {
last_work_fetch_time = 0;
}
elapsed_time = now - last_work_fetch_time;
if (!clock_change && elapsed_time < WORK_FETCH_PERIOD) return false;
must_check_work_fetch = false;
last_work_fetch_time = now;
// check if we should report finished results
//
bool suspend_soon = global_prefs.net_times.suspended(now + 1800);
suspend_soon |= global_prefs.cpu_times.suspended(now + 1800);
p = find_project_with_overdue_results(suspend_soon);
if (p) {
work_fetch.piggyback_work_request(p);
scheduler_op->init_op_project(p, RPC_REASON_RESULTS_DUE);
return true;
}
// check if we should fetch work (do this last)
//
switch (suspend_reason) {
case 0:
case SUSPEND_REASON_CPU_THROTTLE:
break;
default:
return false;
}
if (cc_config.fetch_minimal_work && had_or_requested_work) {
return false;
}
p = work_fetch.choose_project();
if (p) {
if (actively_uploading(p)) {
bool dont_request = true;
if (p->pwf.request_if_idle_and_uploading) {
if (idle_request()) {
dont_request = false;
}
}
if (dont_request) {
if (log_flags.work_fetch_debug) {
msg_printf(p, MSG_INFO,
"[work_fetch] deferring work fetch; upload active"
);
}
p->sched_rpc_pending = 0;
return false;
}
}
scheduler_op->init_op_project(p, RPC_REASON_NEED_WORK);
return true;
}
return false;
}
// Handle the reply from a scheduler
//
int CLIENT_STATE::handle_scheduler_reply(
PROJECT* project, char* scheduler_url
) {
SCHEDULER_REPLY sr;
FILE* f;
int retval;
unsigned int i;
bool signature_valid, update_global_prefs=false, update_project_prefs=false;
char buf[1024], filename[256];
string old_gui_urls = project->gui_urls;
PROJECT* p2;
vector<RESULT*>new_results;
project->last_rpc_time = now;
if (work_fetch.requested_work()) {
had_or_requested_work = true;
}
get_sched_reply_filename(*project, filename, sizeof(filename));
f = fopen(filename, "rb");
if (!f) return ERR_FOPEN;
retval = sr.parse(f, project);
fclose(f);
if (retval) return retval;
if (log_flags.sched_ops) {
if (work_fetch.requested_work()) {
snprintf(buf, sizeof(buf), ": got %d new tasks", (int)sr.results.size());
} else {
safe_strcpy(buf, "");
}
msg_printf(project, MSG_INFO, "Scheduler request completed%s", buf);
}
if (log_flags.sched_op_debug) {
if (sr.scheduler_version) {
msg_printf(project, MSG_INFO,
"[sched_op] Server version %d",
sr.scheduler_version
);
}
}
// check that master URL is correct
//
if (strlen(sr.master_url)) {
canonicalize_master_url(sr.master_url, sizeof(sr.master_url));
string url1 = sr.master_url;
string url2 = project->master_url;
downcase_string(url1);
downcase_string(url2);
if (url1 != url2) {
p2 = lookup_project(sr.master_url);
if (p2) {
msg_printf(project, MSG_USER_ALERT,
"You are attached to this project twice. Please remove projects named %s, then add %s",
project->project_name,
sr.master_url
);
} else {
msg_printf(project, MSG_USER_ALERT,
_("This project is using an old URL. When convenient, remove the project, then add %s"),
sr.master_url
);
}
}
}
// make sure we don't already have a project of same name
//
bool dup_name = false;
for (i=0; i<projects.size(); i++) {
p2 = projects[i];
if (project == p2) continue;
if (!strcmp(p2->project_name, project->project_name)) {
dup_name = true;
break;
}
}
if (dup_name) {
msg_printf(project, MSG_INFO,
"Already attached to a project named %s (possibly with wrong URL)",
project->project_name
);
msg_printf(project, MSG_INFO,
"Consider detaching this project, then trying again"
);
}
// show messages from server
//
bool got_notice = false;
for (i=0; i<sr.messages.size(); i++) {
USER_MESSAGE& um = sr.messages[i];
int prio = MSG_INFO;
if (!strcmp(um.priority.c_str(), "notice")) {
prio = MSG_SCHEDULER_ALERT;
got_notice = true;
}
msg_printf(project, prio, "%s", um.message.c_str());
}
// if we requested work and didn't get notices,
// clear scheduler notices from this project
//
if (work_fetch.requested_work() && !got_notice) {
notices.remove_notices(project, REMOVE_SCHEDULER_MSG);
}
if (log_flags.sched_op_debug && sr.request_delay) {
msg_printf(project, MSG_INFO,
"Project requested delay of %.0f seconds", sr.request_delay
);
}
// if project is down, return error (so that we back off)
// and don't do anything else
//
if (sr.project_is_down) {
if (sr.request_delay) {
double x = now + sr.request_delay;
project->set_min_rpc_time(x, "project requested delay");
}
return ERR_PROJECT_DOWN;
}
// if the scheduler reply includes global preferences,
// insert extra elements, write to disk, and parse
//
if (sr.global_prefs_xml) {
// ignore prefs if we're using prefs from account mgr
// BAM! currently has mixed http, https; trim off
char* p = strchr(global_prefs.source_project, '/');
char* q = strchr(gstate.acct_mgr_info.master_url, '/');
if (gstate.acct_mgr_info.using_am() && p && q && !strcmp(p, q)) {
if (log_flags.sched_op_debug) {
msg_printf(project, MSG_INFO,
"ignoring prefs from project; using prefs from AM"
);
}
} else if (!global_prefs.host_specific || sr.scheduler_version >= 507) {
// ignore prefs if we have host-specific prefs
// and we're talking to an old scheduler
//
retval = save_global_prefs(
sr.global_prefs_xml, project->master_url, scheduler_url
);
if (retval) {
return retval;
}
update_global_prefs = true;
} else {
if (log_flags.sched_op_debug) {
msg_printf(project, MSG_INFO,
"ignoring prefs from old server; we have host-specific prefs"
);
}
}
}
// see if we have a new venue from this project
// (this must go AFTER the above, since otherwise
// global_prefs_source_project() is meaningless)
//
if (strcmp(project->host_venue, sr.host_venue)) {
safe_strcpy(project->host_venue, sr.host_venue);
msg_printf(project, MSG_INFO, "New computer location: %s", sr.host_venue);
update_project_prefs = true;
#ifdef USE_NET_PREFS
if (project == global_prefs_source_project()) {
safe_strcpy(main_host_venue, sr.host_venue);
update_global_prefs = true;
}
#endif
}
if (update_global_prefs) {
read_global_prefs();
}
// deal with project preferences (should always be there)
// If they've changed, write to account file,
// then parse to get our venue, and pass to running apps
//
if (sr.project_prefs_xml) {
if (strcmp(project->project_prefs.c_str(), sr.project_prefs_xml)) {
project->project_prefs = string(sr.project_prefs_xml);
update_project_prefs = true;
}
}
// the account file has GUI URLs and project prefs.
// rewrite if either of these has changed
//
if (project->gui_urls != old_gui_urls || update_project_prefs) {
retval = project->write_account_file();
if (retval) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Can't write account file: %s", boincerror(retval)
);
return retval;
}
}
if (update_project_prefs) {
project->parse_account_file();
project->parse_preferences_for_user_files();
active_tasks.request_reread_prefs(project);
}
// notices here serve no purpose.
// The only thing that may have changed is project prefs,
// and there's no reason to tell the user what they just did.
//
//project->show_no_work_notice();
// if the scheduler reply includes a code-signing key,
// accept it if we don't already have one from the project.
// Otherwise verify its signature, using the key we already have.
//
if (sr.code_sign_key) {
if (!strlen(project->code_sign_key)) {
safe_strcpy(project->code_sign_key, sr.code_sign_key);
} else {
if (sr.code_sign_key_signature) {
retval = check_string_signature2(
sr.code_sign_key, sr.code_sign_key_signature,
project->code_sign_key, signature_valid
);
if (!retval && signature_valid) {
safe_strcpy(project->code_sign_key, sr.code_sign_key);
} else {
msg_printf(project, MSG_INTERNAL_ERROR,
"New code signing key doesn't validate"
);
}
} else {
msg_printf(project, MSG_INTERNAL_ERROR,
"Missing code sign key signature"
);
}
}
}
// copy new entities to client state
//
for (i=0; i<sr.apps.size(); i++) {
APP* app = lookup_app(project, sr.apps[i].name);
if (app) {
// update app attributes; they may have changed on server
//
safe_strcpy(app->user_friendly_name, sr.apps[i].user_friendly_name);
app->non_cpu_intensive = sr.apps[i].non_cpu_intensive;
app->fraction_done_exact = sr.apps[i].fraction_done_exact;
} else {
app = new APP;
*app = sr.apps[i];
retval = link_app(project, app);
if (retval) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Can't handle application %s in scheduler reply", app->name
);
delete app;
} else {
apps.push_back(app);
}
}
}
FILE_INFO* fip;
for (i=0; i<sr.file_infos.size(); i++) {
fip = lookup_file_info(project, sr.file_infos[i].name);
if (fip) {
fip->merge_info(sr.file_infos[i]);
} else {
fip = new FILE_INFO;
*fip = sr.file_infos[i];
if (fip->sticky_lifetime) {
fip->sticky_expire_time = now + fip->sticky_lifetime;
}
retval = link_file_info(project, fip);
if (retval) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Can't handle file %s in scheduler reply", fip->name
);
delete fip;
} else {
file_infos.push_back(fip);
}
}
}
for (i=0; i<sr.file_deletes.size(); i++) {
fip = lookup_file_info(project, sr.file_deletes[i].c_str());
if (fip) {
if (log_flags.file_xfer_debug) {
msg_printf(project, MSG_INFO,
"[file_xfer_debug] Got server request to delete file %s",
fip->name
);
}
fip->sticky = false;
}
}
for (i=0; i<sr.app_versions.size(); i++) {
if (project->anonymous_platform) {
msg_printf(project, MSG_INTERNAL_ERROR,
"App version returned from anonymous platform project; ignoring"
);
continue;
}
APP_VERSION& avpp = sr.app_versions[i];
if (strlen(avpp.platform) == 0) {
safe_strcpy(avpp.platform, get_primary_platform());
} else {
if (!is_supported_platform(avpp.platform)) {
msg_printf(project, MSG_INTERNAL_ERROR,
"App version has unsupported platform %s", avpp.platform
);
continue;
}
}
if (avpp.missing_coproc) {
msg_printf(project, MSG_INTERNAL_ERROR,
"App version uses non-existent %s GPU",
avpp.missing_coproc_name
);
}
APP* app = lookup_app(project, avpp.app_name);
if (!app) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Missing app %s", avpp.app_name
);
continue;
}
APP_VERSION* avp = lookup_app_version(
app, avpp.platform, avpp.version_num, avpp.plan_class
);
if (avp) {
// update app version attributes in case they changed on server
//
avp->avg_ncpus = avpp.avg_ncpus;
avp->flops = avpp.flops;
safe_strcpy(avp->cmdline, avpp.cmdline);
avp->gpu_usage = avpp.gpu_usage;
strlcpy(avp->api_version, avpp.api_version, sizeof(avp->api_version));
avp->dont_throttle = avpp.dont_throttle;
avp->needs_network = avpp.needs_network;
// if we had download failures, clear them
//
avp->clear_errors();
continue;
}
avp = new APP_VERSION;
*avp = avpp;
retval = link_app_version(project, avp);
if (retval) {
delete avp;
continue;
}
app_versions.push_back(avp);
}
for (i=0; i<sr.workunits.size(); i++) {
if (lookup_workunit(project, sr.workunits[i].name)) continue;
WORKUNIT* wup = new WORKUNIT;
*wup = sr.workunits[i];
wup->project = project;
retval = link_workunit(project, wup);
if (retval) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Can't handle task %s in scheduler reply", wup->name
);
delete wup;
continue;
}
wup->clear_errors();
workunits.push_back(wup);
}
double est_rsc_runtime[MAX_RSC];
bool got_work_for_rsc[MAX_RSC];
for (int j=0; j<coprocs.n_rsc; j++) {
est_rsc_runtime[j] = 0;
got_work_for_rsc[j] = false;
}
for (i=0; i<sr.results.size(); i++) {
RESULT* rp2 = lookup_result(project, sr.results[i].name);
if (rp2) {
// see if project wants to change the job's deadline
//
if (sr.results[i].report_deadline != rp2->report_deadline) {
rp2->report_deadline = sr.results[i].report_deadline;
} else {
msg_printf(project, MSG_INTERNAL_ERROR,
"Already have task %s\n", sr.results[i].name
);
}
continue;
}
RESULT* rp = new RESULT;
*rp = sr.results[i];
retval = link_result(project, rp);
if (retval) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Can't handle task %s in scheduler reply", rp->name
);
delete rp;
continue;
}
if (strlen(rp->platform) == 0) {
safe_strcpy(rp->platform, get_primary_platform());
rp->version_num = latest_version(rp->wup->app, rp->platform);
}
rp->avp = lookup_app_version(
rp->wup->app, rp->platform, rp->version_num, rp->plan_class
);
if (!rp->avp) {
msg_printf(project, MSG_INTERNAL_ERROR,
"No app version found for app %s platform %s ver %d class %s; discarding %s",
rp->wup->app->name, rp->platform, rp->version_num, rp->plan_class, rp->name
);
delete rp;
continue;
}
if (rp->avp->missing_coproc) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Missing coprocessor for task %s; aborting", rp->name
);
rp->abort_inactive(EXIT_MISSING_COPROC);
} else {
rp->set_state(RESULT_NEW, "handle_scheduler_reply");
got_work_for_rsc[0] = true;
int rt = rp->avp->gpu_usage.rsc_type;
if (rt > 0) {
est_rsc_runtime[rt] += rp->estimated_runtime();
got_work_for_rsc[rt] = true;
gpus_usable = true;
// trigger a check of whether GPU is actually usable
} else {
est_rsc_runtime[0] += rp->estimated_runtime();
}
}
rp->wup->version_num = rp->version_num;
rp->received_time = now;
new_results.push_back(rp);
results.push_back(rp);
}
// find the resources for which we requested work and didn't get any
// This is currently used for AM starvation mechanism.
//
if (!sr.too_recent) {
for (int j=0; j<coprocs.n_rsc; j++) {
RSC_WORK_FETCH& rwf = rsc_work_fetch[j];
if (got_work_for_rsc[j]) {
project->sched_req_no_work[j] = false;
} else if (rwf.req_secs>0 || rwf.req_instances>0) {
project->sched_req_no_work[j] = true;
}
}
}
sort_results();
if (log_flags.sched_op_debug) {
if (sr.results.size()) {
for (int j=0; j<coprocs.n_rsc; j++) {
msg_printf(project, MSG_INFO,
"[sched_op] estimated total %s task duration: %.0f seconds",
rsc_name_long(j),
est_rsc_runtime[j]/time_stats.availability_frac(j)
);
}
}
}
// update records for ack'ed results
//
for (i=0; i<sr.result_acks.size(); i++) {
if (log_flags.sched_op_debug) {
msg_printf(project, MSG_INFO,
"[sched_op] handle_scheduler_reply(): got ack for task %s\n",
sr.result_acks[i].name
);
}
RESULT* rp = lookup_result(project, sr.result_acks[i].name);
if (rp) {
rp->got_server_ack = true;
} else {
msg_printf(project, MSG_INTERNAL_ERROR,
"Got ack for task %s, but can't find it", sr.result_acks[i].name
);
}
}
// handle result abort requests
//
for (i=0; i<sr.result_abort.size(); i++) {
RESULT* rp = lookup_result(project, sr.result_abort[i].name);
if (rp) {
ACTIVE_TASK* atp = lookup_active_task_by_result(rp);
if (atp) {
atp->abort_task(EXIT_ABORTED_BY_PROJECT,
"aborted by project - no longer usable"
);
} else {
rp->abort_inactive(EXIT_ABORTED_BY_PROJECT);
}
} else {
msg_printf(project, MSG_INTERNAL_ERROR,
"Server requested abort of unknown task %s",
sr.result_abort[i].name
);
}
}
for (i=0; i<sr.result_abort_if_not_started.size(); i++) {
RESULT* rp = lookup_result(project, sr.result_abort_if_not_started[i].name);
if (!rp) {
msg_printf(project, MSG_INTERNAL_ERROR,
"Server requested conditional abort of unknown task %s",
sr.result_abort_if_not_started[i].name
);
continue;
}
if (rp->not_started) {
rp->abort_inactive(EXIT_ABORTED_BY_PROJECT);
}
}
// remove acked trickle files
//
if (sr.message_ack) {
remove_trickle_files(project);
}
if (sr.send_full_workload) {
project->send_full_workload = true;
}
project->dont_use_dcf = sr.dont_use_dcf;
project->send_time_stats_log = sr.send_time_stats_log;
project->send_job_log = sr.send_job_log;
project->trickle_up_pending = false;
// The project returns a hostid only if it has created a new host record.
// In that case reset RPC seqno
//
if (sr.hostid) {
if (project->hostid) {
// if we already have a host ID for this project,
// we must have sent it a stale seqno,
// which usually means our state file was copied from another host.
// So generate a new host CPID.
//
generate_new_host_cpid();
msg_printf(project, MSG_INFO,
"Generated new computer cross-project ID: %s",
host_info.host_cpid
);
}
//msg_printf(project, MSG_INFO, "Changing host ID from %d to %d", project->hostid, sr.hostid);
project->hostid = sr.hostid;
project->rpc_seqno = 0;
}
#ifdef ENABLE_AUTO_UPDATE
if (sr.auto_update.present) {
if (!sr.auto_update.validate_and_link(project)) {
auto_update = sr.auto_update;
}
}
#endif
project->project_files = sr.project_files;
project->link_project_files();
project->create_project_file_symlinks();
if (log_flags.state_debug) {
msg_printf(project, MSG_INFO,
"[state] handle_scheduler_reply(): State after handle_scheduler_reply():"
);
print_summary();
}
// the following must precede the backoff and request_delay checks,
// since it overrides them
//
if (sr.next_rpc_delay) {
project->next_rpc_time = now + sr.next_rpc_delay;
} else {
project->next_rpc_time = 0;
}
work_fetch.handle_reply(project, &sr, new_results);
project->nrpc_failures = 0;
project->min_rpc_time = 0;
if (sr.request_delay) {
double x = now + sr.request_delay;
project->set_min_rpc_time(x, "requested by project");
}
if (sr.got_rss_feeds) {
handle_sr_feeds(sr.sr_feeds, project);
}
update_trickle_up_urls(project, sr.trickle_up_urls);
// garbage collect in case the project sent us some irrelevant FILE_INFOs;
// avoid starting transfers for them
//
gstate.garbage_collect_always();
// if the user provided app_config.xml for this project,
// apply it to any app versions we just got
//
project->app_configs.config_app_versions(project, false);
// make sure we don't set no_rsc_apps[] for all processor types
//
if (!project->anonymous_platform) {
project->check_no_rsc_apps();
}
return 0;
}
#endif // SIM
void CLIENT_STATE::check_project_timeout() {
unsigned int i;
for (i=0; i<projects.size(); i++) {
PROJECT* p = projects[i];
if (p->possibly_backed_off && now > p->min_rpc_time) {
p->possibly_backed_off = false;
char buf[256];
snprintf(buf, sizeof(buf), "Backoff ended for %s", p->get_project_name());
request_work_fetch(buf);
}
}
}
// find a project that needs to have its master file fetched
//
PROJECT* CLIENT_STATE::next_project_master_pending() {
unsigned int i;
PROJECT* p;
for (i=0; i<projects.size(); i++) {
p = projects[i];
if (p->waiting_until_min_rpc_time()) continue;
if (p->suspended_via_gui) continue;
if (p->master_url_fetch_pending) {
return p;
}
}
return 0;
}
// find a project for which a scheduler RPC has been requested
// - by user
// - by an account manager
// - by the project
// - because the project was just attached (for verification)
//
PROJECT* CLIENT_STATE::next_project_sched_rpc_pending() {
unsigned int i;
PROJECT* p;
for (i=0; i<projects.size(); i++) {
p = projects[i];
bool honor_backoff = true;
bool honor_suspend = true;
// is a scheduler-requested RPC due?
//
if (!p->sched_rpc_pending && p->next_rpc_time && p->next_rpc_time<now) {
// don't do it if project is set to no new work
// and has no jobs currently
//
if (!p->dont_request_more_work || p->has_results()) {
p->sched_rpc_pending = RPC_REASON_PROJECT_REQ;
}
}
switch (p->sched_rpc_pending) {
case RPC_REASON_USER_REQ:
honor_backoff = false;
honor_suspend = false;
break;
case RPC_REASON_ACCT_MGR_REQ:
// This is critical for acct mgrs, to propagate new host CPIDs
honor_suspend = false;
break;
case RPC_REASON_INIT:
// always do the initial RPC so we can get project name etc.
honor_suspend = false;
break;
case RPC_REASON_PROJECT_REQ:
break;
default:
continue;
}
if (honor_backoff && p->waiting_until_min_rpc_time()) {
continue;
}
if (honor_suspend && p->suspended_via_gui) {
continue;
}
if (p->sched_rpc_pending) {
return p;
}
}
return 0;
}
PROJECT* CLIENT_STATE::next_project_trickle_up_pending() {
unsigned int i;
PROJECT* p;
for (i=0; i<projects.size(); i++) {
p = projects[i];
if (p->waiting_until_min_rpc_time()) continue;
if (p->suspended_via_gui) continue;
if (p->trickle_up_pending) {
return p;
}
}
return 0;
}
// find a project with finished results that should be reported.
// This means:
// - we're not backing off contacting the project
// - no upload for that project is active
// - the result is ready_to_report (compute done; files uploaded)
// - we're within a day of the report deadline,
// or at least a day has elapsed since the result was completed,
// or we have a sporadic connection
// or the project is in "don't request more work" state
// or a network suspend period is coming up soon
// or the project has > RESULT_REPORT_IF_AT_LEAST_N results ready to report
//
PROJECT* CLIENT_STATE::find_project_with_overdue_results(
bool network_suspend_soon
) {
unsigned int i;
RESULT* r;
for (i=0; i<projects.size(); i++) {
PROJECT* p = projects[i];
p->n_ready = 0;
p->dont_contact = false;
if (p->waiting_until_min_rpc_time()) p->dont_contact = true;
if (p->suspended_via_gui) p->dont_contact = true;
#ifndef SIM
if (actively_uploading(p)) p->dont_contact = true;
#endif
}
for (i=0; i<results.size(); i++) {
r = results[i];
if (!r->ready_to_report) continue;
PROJECT* p = r->project;
if (p->dont_contact) continue;
if (p->dont_request_more_work) {
return p;
}
if (r->report_immediately) {
return p;
}
if (cc_config.report_results_immediately) {
return p;
}
if (p->report_results_immediately) {
return p;
}
if (r->app->report_results_immediately) {
return p;
}
if (net_status.have_sporadic_connection) {
return p;
}
if (network_suspend_soon) {
return p;
}
double cushion = std::max(REPORT_DEADLINE_CUSHION, work_buf_min());
if (gstate.now > r->report_deadline - cushion) {
return p;
}
if (gstate.now > r->completed_time + MAX_REPORT_DELAY) {
return p;
}
p->n_ready++;
if (p->n_ready >= RESULT_REPORT_IF_AT_LEAST_N) {
return p;
}
}
return 0;
}
// trigger work fetch
//
void CLIENT_STATE::request_work_fetch(const char* where) {
if (log_flags.work_fetch_debug) {
msg_printf(0, MSG_INFO, "[work_fetch] Request work fetch: %s", where);
}
must_check_work_fetch = true;
}
| 1 | 13,728 | I'd better increase `buf` length to MAXPATHLEN and not cut p->get_project_name() output twice | BOINC-boinc | php |
@@ -172,6 +172,7 @@ func (*impl) createStorageMiner(vmctx runtime.InvocationContext, params CreateSt
SectorSize: params.SectorSize,
})
if err != nil {
+ fmt.Printf("here it is: %s\n", err)
return fmt.Errorf("Could not set power table at address: %s", actorIDAddr)
}
return nil | 1 | package power
import (
"context"
"fmt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/ipfs/go-hamt-ipld"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-filecoin/internal/pkg/enccid"
"github.com/filecoin-project/go-filecoin/internal/pkg/encoding"
"github.com/filecoin-project/go-filecoin/internal/pkg/types"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor/builtin/initactor"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/actor/builtin/miner"
vmaddr "github.com/filecoin-project/go-filecoin/internal/pkg/vm/address"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/dispatch"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/pattern"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/runtime"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm/internal/storage"
"github.com/filecoin-project/specs-actors/actors/builtin"
)
func init() {
encoding.RegisterIpldCborType(State{})
encoding.RegisterIpldCborType(TableEntry{})
}
// Actor provides bookkeeping for the storage power of registered miners.
// It updates power based on faults and storage proofs.
// It also tracks pledge collateral conditions.
type Actor struct{}
// State keeps track of power and collateral of registered miner actors
type State struct {
// PowerTable is a lookup mapping actorAddr -> PowerTableEntry
PowerTable enccid.Cid
}
// TableEntry tracks a single miner actor's power and collateral
type TableEntry struct {
ActivePower *types.BytesAmount
InactivePower *types.BytesAmount
AvailableBalance types.AttoFIL
LockedPledgeCollateral types.AttoFIL
SectorSize *types.BytesAmount
}
// Actor Methods
const (
CreateStorageMiner types.MethodID = iota + 32
RemoveStorageMiner
GetTotalPower
EnsurePledgeCollateralSatisfied
ProcessPowerReport
GetPowerReport
ProcessFaultReport
GetSectorSize
// ReportConsensusFault
// Surprise
// AddBalance ? (review: is this a runtime builtin?)
// WithdrawBalance ? (review: is this a runtime builtin?)
)
// NewActor returns a new power actor
func NewActor() *actor.Actor {
return actor.NewActor(builtin.StoragePowerActorCodeID, abi.NewTokenAmount(0))
}
//
// ExecutableActor impl for Actor
//
var _ dispatch.Actor = (*Actor)(nil)
// Exports implements `dispatch.Actor`
func (a *Actor) Exports() []interface{} {
return []interface{}{
CreateStorageMiner: (*impl)(a).createStorageMiner,
ProcessPowerReport: (*impl)(a).processPowerReport,
}
}
// InitializeState stores the actor's initial data structure.
func (*Actor) InitializeState(handle runtime.ActorStateHandle, _ interface{}) error {
handle.Create(&State{})
return nil
}
//
// vm methods for actor
//
type impl Actor
const (
// ErrDeleteMinerWithPower signals that RemoveStorageMiner was called on an actor with nonzero power
ErrDeleteMinerWithPower = 100
// ErrUnknownEntry entry is returned when the actor attempts to access a power table entry at an address not in the power table
ErrUnknownEntry = 101
// ErrDuplicateEntry is returned when there is an attempt to create a new power table entry at an existing addrErr
ErrDuplicateEntry = 102
)
// Errors map error codes to revert errors this actor may return.
var Errors = map[uint8]error{
ErrDeleteMinerWithPower: fmt.Errorf("cannot delete miner with power from power table"),
ErrUnknownEntry: fmt.Errorf("cannot find address in power table"),
ErrDuplicateEntry: fmt.Errorf("duplicate create power table entry attempt"),
}
// CreateStorageMinerParams is the params for the CreateStorageMiner method.
type CreateStorageMinerParams struct {
OwnerAddr address.Address
WorkerAddr address.Address
PeerID peer.ID
SectorSize *types.BytesAmount
}
// CreateStorageMiner creates a new record of a miner in the power table.
func (*impl) createStorageMiner(vmctx runtime.InvocationContext, params CreateStorageMinerParams) address.Address {
vmctx.ValidateCaller(pattern.Any{})
initParams := miner.ConstructorParams{
OwnerAddr: vmctx.Message().Caller(),
WorkerAddr: vmctx.Message().Caller(),
PeerID: params.PeerID,
SectorSize: params.SectorSize,
}
constructorParams, err := encoding.Encode(initParams)
if err != nil {
panic(err)
}
actorCodeCid := builtin.StorageMinerActorCodeID
epoch := vmctx.Runtime().CurrentEpoch()
if epoch == 0 {
actorCodeCid = types.BootstrapMinerActorCodeCid
}
// create miner actor by messaging the init actor and sending it collateral
ret := vmctx.Send(vmaddr.InitAddress, initactor.ExecMethodID, vmctx.Message().ValueReceived(), initactor.ExecParams{
ActorCodeCid: actorCodeCid,
ConstructorParams: constructorParams,
})
actorIDAddr := ret.(address.Address)
var state State
ret, err = vmctx.State().Transaction(&state, func() (interface{}, error) {
// Update power table.
ctx := context.Background()
newPowerTable, err := actor.WithLookup(ctx, vmctx.Runtime().Storage(), state.PowerTable.Cid, func(lookup storage.Lookup) error {
// Do not overwrite table entry if it already exists
err := lookup.Find(ctx, string(actorIDAddr.Bytes()), nil)
if err != hamt.ErrNotFound { // we expect to not find the power table entry
if err == nil {
return Errors[ErrDuplicateEntry]
}
return fmt.Errorf("Error looking for new entry in power table at addres %s", actorIDAddr)
}
// Create fresh entry
err = lookup.Set(ctx, string(actorIDAddr.Bytes()), TableEntry{
ActivePower: types.NewBytesAmount(0),
InactivePower: types.NewBytesAmount(0),
AvailableBalance: types.ZeroAttoFIL,
LockedPledgeCollateral: types.ZeroAttoFIL,
SectorSize: params.SectorSize,
})
if err != nil {
return fmt.Errorf("Could not set power table at address: %s", actorIDAddr)
}
return nil
})
if err != nil {
return nil, err
}
state.PowerTable = enccid.NewCid(newPowerTable)
return actorIDAddr, nil
})
if err != nil {
panic(err)
}
return ret.(address.Address)
}
// RemoveStorageMiner removes the given miner address from the power table. This call will fail if
// the miner has any power remaining in the table or if the actor does not already exist in the table.
func (*impl) removeStorageMiner(vmctx runtime.InvocationContext, delAddr address.Address) (uint8, error) {
// TODO #3649 we need proper authentication. Totally insecure as it is.
var state State
_, err := vmctx.State().Transaction(&state, func() (interface{}, error) {
ctx := context.Background()
newPowerTable, err := actor.WithLookup(ctx, vmctx.Runtime().Storage(), state.PowerTable.Cid, func(lookup storage.Lookup) error {
// Find entry to delete.
var delEntry TableEntry
err := lookup.Find(ctx, string(delAddr.Bytes()), &delEntry)
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownEntry]
}
return fmt.Errorf("Could not retrieve power table entry with ID: %s", delAddr)
}
// Never delete an entry that still has power
if delEntry.ActivePower.IsPositive() || delEntry.InactivePower.IsPositive() {
return Errors[ErrDeleteMinerWithPower]
}
// All clear to delete
return lookup.Delete(ctx, string(delAddr.Bytes()))
})
if err != nil {
return nil, err
}
state.PowerTable = enccid.NewCid(newPowerTable)
return nil, nil
})
if err != nil {
return 1, err
}
return 0, nil
}
// GetTotalPower returns the total power (in bytes) held by all miners registered in the system
func (*impl) getTotalPower(vmctx runtime.InvocationContext) (*types.BytesAmount, uint8, error) {
// TODO #3649 we need proper authentication. Totally insecure without.
var state State
ret, err := vmctx.State().Transaction(&state, func() (interface{}, error) {
ctx := context.Background()
total := types.NewBytesAmount(0)
err := actor.WithLookupForReading(ctx, vmctx.Runtime().Storage(), state.PowerTable.Cid, func(lookup storage.Lookup) error {
// TODO https://github.com/filecoin-project/specs/issues/634 this is inefficient
return lookup.ForEachValue(ctx, TableEntry{}, func(k string, value interface{}) error {
entry, ok := value.(TableEntry)
if !ok {
return fmt.Errorf("Expected TableEntry from power table lookup")
}
total = total.Add(entry.ActivePower)
total = total.Add(entry.InactivePower)
return nil
})
})
return total, err
})
if err != nil {
return nil, 1, err
}
return ret.(*types.BytesAmount), 0, nil
}
func (*impl) getPowerReport(vmctx runtime.InvocationContext, addr address.Address) (types.PowerReport, uint8, error) {
var state State
ret, err := vmctx.State().Transaction(&state, func() (interface{}, error) {
ctx := context.Background()
var tableEntry TableEntry
var report types.PowerReport
err := actor.WithLookupForReading(ctx, vmctx.Runtime().Storage(), state.PowerTable.Cid, func(lookup storage.Lookup) error {
err := lookup.Find(ctx, string(addr.Bytes()), &tableEntry)
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownEntry]
}
return fmt.Errorf("Could not retrieve power table entry with ID: %s", addr)
}
report.ActivePower = tableEntry.ActivePower
report.InactivePower = tableEntry.InactivePower
return nil
})
return report, err
})
if err != nil {
return types.PowerReport{}, 1, err
}
return ret.(types.PowerReport), 0, nil
}
func (*impl) getSectorSize(vmctx runtime.InvocationContext, addr address.Address) (*types.BytesAmount, uint8, error) {
var state State
ret, err := vmctx.State().Transaction(&state, func() (interface{}, error) {
ctx := context.Background()
ss := types.NewBytesAmount(0)
err := actor.WithLookupForReading(ctx, vmctx.Runtime().Storage(), state.PowerTable.Cid, func(lookup storage.Lookup) error {
return lookup.ForEachValue(ctx, TableEntry{}, func(k string, value interface{}) error {
entry, ok := value.(TableEntry)
if !ok {
return fmt.Errorf("Expected TableEntry from power table lookup")
}
ss = entry.SectorSize
return nil
})
})
return ss, err
})
if err != nil {
return nil, 1, err
}
return ret.(*types.BytesAmount), 0, nil
}
// ProcessPowerReportParams is what is says.
type ProcessPowerReportParams struct {
Report types.PowerReport
UpdateAddr address.Address
}
// ProcessPowerReport updates a registered miner's power table entry according to the power report.
func (*impl) processPowerReport(vmctx runtime.InvocationContext, params ProcessPowerReportParams) {
vmctx.ValidateCaller(pattern.Any{})
var state State
_, err := vmctx.State().Transaction(&state, func() (interface{}, error) {
ctx := context.Background()
newPowerTable, err := actor.WithLookup(ctx, vmctx.Runtime().Storage(), state.PowerTable.Cid, func(lookup storage.Lookup) error {
// Find entry to update.
var updateEntry TableEntry
err := lookup.Find(ctx, string(params.UpdateAddr.Bytes()), &updateEntry)
if err != nil {
if err == hamt.ErrNotFound {
return Errors[ErrUnknownEntry]
}
return fmt.Errorf("Could not retrieve power table entry with ID: %s", params.UpdateAddr)
}
// All clear to update
updateEntry.ActivePower = params.Report.ActivePower
updateEntry.InactivePower = params.Report.InactivePower
return lookup.Set(ctx, string(params.UpdateAddr.Bytes()), updateEntry)
})
if err != nil {
return nil, err
}
state.PowerTable = enccid.NewCid(newPowerTable)
return nil, nil
})
if err != nil {
panic(err)
}
}
| 1 | 22,891 | Please remove the prints, even though this code will be trashed. | filecoin-project-venus | go |
@@ -26,8 +26,8 @@ import (
"runtime/debug"
"github.com/projectcalico/felix/config"
- "github.com/projectcalico/felix/dataplane/external"
- "github.com/projectcalico/felix/dataplane/linux"
+ extdataplane "github.com/projectcalico/felix/dataplane/external"
+ intdataplane "github.com/projectcalico/felix/dataplane/linux"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/logutils" | 1 | // +build !windows
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dataplane
import (
"math/bits"
"net"
"os/exec"
log "github.com/sirupsen/logrus"
"runtime/debug"
"github.com/projectcalico/felix/config"
"github.com/projectcalico/felix/dataplane/external"
"github.com/projectcalico/felix/dataplane/linux"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/markbits"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/libcalico-go/lib/health"
)
func StartDataplaneDriver(configParams *config.Config,
healthAggregator *health.HealthAggregator,
configChangedRestartCallback func()) (DataplaneDriver, *exec.Cmd) {
if configParams.UseInternalDataplaneDriver {
log.Info("Using internal (linux) dataplane driver.")
// If kube ipvs interface is present, enable ipvs support.
kubeIPVSSupportEnabled := ifacemonitor.IsInterfacePresent(intdataplane.KubeIPVSInterface)
if kubeIPVSSupportEnabled {
log.Info("Kube-proxy in ipvs mode, enabling felix kube-proxy ipvs support.")
}
if configChangedRestartCallback == nil {
log.Panic("Starting dataplane with nil callback func.")
}
markBitsManager := markbits.NewMarkBitsManager(configParams.IptablesMarkMask, "felix-iptables")
// Dedicated mark bits for accept and pass actions. These are long lived bits
// that we use for communicating between chains.
markAccept, _ := markBitsManager.NextSingleBitMark()
markPass, _ := markBitsManager.NextSingleBitMark()
// Short-lived mark bits for local calculations within a chain.
markScratch0, _ := markBitsManager.NextSingleBitMark()
markScratch1, _ := markBitsManager.NextSingleBitMark()
if markAccept == 0 || markPass == 0 || markScratch0 == 0 || markScratch1 == 0 {
log.WithFields(log.Fields{
"Name": "felix-iptables",
"MarkMask": configParams.IptablesMarkMask,
}).Panic("Not enough mark bits available.")
}
// Mark bits for end point mark. Currently felix takes the rest bits from mask available for use.
markEndpointMark, allocated := markBitsManager.NextBlockBitsMark(markBitsManager.AvailableMarkBitCount())
if kubeIPVSSupportEnabled && allocated == 0 {
log.WithFields(log.Fields{
"Name": "felix-iptables",
"MarkMask": configParams.IptablesMarkMask,
}).Panic("Not enough mark bits available for endpoint mark.")
}
// Take lowest bit position (position 1) from endpoint mark mask reserved for non-calico endpoint.
markEndpointNonCaliEndpoint := uint32(1) << uint(bits.TrailingZeros32(markEndpointMark))
log.WithFields(log.Fields{
"acceptMark": markAccept,
"passMark": markPass,
"scratch0Mark": markScratch0,
"scratch1Mark": markScratch1,
"endpointMark": markEndpointMark,
"endpointMarkNonCali": markEndpointNonCaliEndpoint,
}).Info("Calculated iptables mark bits")
dpConfig := intdataplane.Config{
IfaceMonitorConfig: ifacemonitor.Config{
InterfaceExcludes: configParams.InterfaceExclude,
},
RulesConfig: rules.Config{
WorkloadIfacePrefixes: configParams.InterfacePrefixes(),
IPSetConfigV4: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV4,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
rules.LegacyV4IPSetNames,
),
IPSetConfigV6: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV6,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
nil,
),
KubeNodePortRanges: configParams.KubeNodePortRanges,
KubeIPVSSupportEnabled: kubeIPVSSupportEnabled,
OpenStackSpecialCasesEnabled: configParams.OpenstackActive(),
OpenStackMetadataIP: net.ParseIP(configParams.MetadataAddr),
OpenStackMetadataPort: uint16(configParams.MetadataPort),
IptablesMarkAccept: markAccept,
IptablesMarkPass: markPass,
IptablesMarkScratch0: markScratch0,
IptablesMarkScratch1: markScratch1,
IptablesMarkEndpoint: markEndpointMark,
IptablesMarkNonCaliEndpoint: markEndpointNonCaliEndpoint,
IPIPEnabled: configParams.IpInIpEnabled,
IPIPTunnelAddress: configParams.IpInIpTunnelAddr,
IptablesLogPrefix: configParams.LogPrefix,
EndpointToHostAction: configParams.DefaultEndpointToHostAction,
IptablesFilterAllowAction: configParams.IptablesFilterAllowAction,
IptablesMangleAllowAction: configParams.IptablesMangleAllowAction,
FailsafeInboundHostPorts: configParams.FailsafeInboundHostPorts,
FailsafeOutboundHostPorts: configParams.FailsafeOutboundHostPorts,
DisableConntrackInvalid: configParams.DisableConntrackInvalidCheck,
NATPortRange: configParams.NATPortRange,
IptablesNATOutgoingInterfaceFilter: configParams.IptablesNATOutgoingInterfaceFilter,
},
IPIPMTU: configParams.IpInIpMtu,
IptablesRefreshInterval: configParams.IptablesRefreshInterval,
RouteRefreshInterval: configParams.RouteRefreshInterval,
IPSetsRefreshInterval: configParams.IpsetsRefreshInterval,
IptablesPostWriteCheckInterval: configParams.IptablesPostWriteCheckIntervalSecs,
IptablesInsertMode: configParams.ChainInsertMode,
IptablesLockFilePath: configParams.IptablesLockFilePath,
IptablesLockTimeout: configParams.IptablesLockTimeoutSecs,
IptablesLockProbeInterval: configParams.IptablesLockProbeIntervalMillis,
MaxIPSetSize: configParams.MaxIpsetSize,
IgnoreLooseRPF: configParams.IgnoreLooseRPF,
IPv6Enabled: configParams.Ipv6Support,
StatusReportingInterval: configParams.ReportingIntervalSecs,
NetlinkTimeout: configParams.NetlinkTimeoutSecs,
ConfigChangedRestartCallback: configChangedRestartCallback,
PostInSyncCallback: func() {
// The initial resync uses a lot of scratch space so now is
// a good time to force a GC and return any RAM that we can.
debug.FreeOSMemory()
if configParams.DebugMemoryProfilePath == "" {
return
}
logutils.DumpHeapMemoryProfile(configParams.DebugMemoryProfilePath)
},
HealthAggregator: healthAggregator,
DebugSimulateDataplaneHangAfter: configParams.DebugSimulateDataplaneHangAfter,
ExternalNodesCidrs: configParams.ExternalNodesCIDRList,
}
intDP := intdataplane.NewIntDataplaneDriver(dpConfig)
intDP.Start()
return intDP, nil
} else {
log.WithField("driver", configParams.DataplaneDriver).Info(
"Using external dataplane driver.")
return extdataplane.StartExtDataplaneDriver(configParams.DataplaneDriver)
}
}
| 1 | 16,847 | Not sure if you added these deliberately, but I've seen them popping up; is goimports adding them? | projectcalico-felix | go
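Editor's note (not part of the dataset row above): the named imports the reviewer asks about are what goimports emits when a package's directory name ("external", "linux") differs from its declared package name. The old file already refers to intdataplane.Config and extdataplane.StartExtDataplaneDriver while importing the bare paths, which only compiles because of those declared names; the patch just makes the names explicit. A minimal hedged sketch, assuming the declared package name inside dataplane/linux really is intdataplane:

package main

import (
	// the directory is .../dataplane/linux, but the package inside is assumed
	// to declare itself as intdataplane, so goimports pins an explicit name
	intdataplane "github.com/projectcalico/felix/dataplane/linux"
)

func main() {
	// identifiers resolve via the declared package name, not the directory name
	_ = intdataplane.Config{}
}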
@@ -75,6 +75,9 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { // nolint
dst.Spec.NetworkSpec.VPC.AvailabilityZoneSelection = restored.Spec.NetworkSpec.VPC.AvailabilityZoneSelection
}
+ dst.Spec.NetworkSpec.SecurityGroupOverrides = restored.Spec.NetworkSpec.SecurityGroupOverrides
+ dst.Spec.NetworkSpec.TagSecurityGroups = restored.Spec.NetworkSpec.TagSecurityGroups
+
return nil
}
| 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"reflect"
apiconversion "k8s.io/apimachinery/pkg/conversion"
"k8s.io/utils/pointer"
infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
v1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
"sigs.k8s.io/controller-runtime/pkg/conversion"
)
// ConvertTo converts this AWSCluster to the Hub version (v1alpha3).
func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { // nolint
dst := dstRaw.(*infrav1alpha3.AWSCluster)
if err := Convert_v1alpha2_AWSCluster_To_v1alpha3_AWSCluster(src, dst, nil); err != nil {
return err
}
// Manually convert Status.APIEndpoints to Spec.ControlPlaneEndpoint.
if len(src.Status.APIEndpoints) > 0 {
endpoint := src.Status.APIEndpoints[0]
dst.Spec.ControlPlaneEndpoint.Host = endpoint.Host
dst.Spec.ControlPlaneEndpoint.Port = int32(endpoint.Port)
}
// Manually restore data.
restored := &infrav1alpha3.AWSCluster{}
if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
return err
}
// override the SSHKeyName conversion if we are roundtripping from v1alpha3 and the v1alpha3 value is nil
if src.Spec.SSHKeyName == "" && restored.Spec.SSHKeyName == nil {
dst.Spec.SSHKeyName = nil
}
dst.Spec.ImageLookupFormat = restored.Spec.ImageLookupFormat
dst.Spec.ImageLookupOrg = restored.Spec.ImageLookupOrg
dst.Spec.ImageLookupBaseOS = restored.Spec.ImageLookupBaseOS
if restored.Spec.ControlPlaneLoadBalancer != nil {
dst.Spec.ControlPlaneLoadBalancer = restored.Spec.ControlPlaneLoadBalancer
}
dst.Spec.NetworkSpec.CNI = restored.Spec.NetworkSpec.CNI
dst.Status.FailureDomains = restored.Status.FailureDomains
dst.Status.Network.APIServerELB.AvailabilityZones = restored.Status.Network.APIServerELB.AvailabilityZones
dst.Status.Network.APIServerELB.Attributes.CrossZoneLoadBalancing = restored.Status.Network.APIServerELB.Attributes.CrossZoneLoadBalancing
if restored.Status.Bastion != nil {
restored.Status.Bastion.DeepCopyInto(dst.Status.Bastion)
}
if restored.Spec.NetworkSpec.VPC.AvailabilityZoneUsageLimit != nil {
dst.Spec.NetworkSpec.VPC.AvailabilityZoneUsageLimit = restored.Spec.NetworkSpec.VPC.AvailabilityZoneUsageLimit
}
if restored.Spec.NetworkSpec.VPC.AvailabilityZoneSelection != nil {
dst.Spec.NetworkSpec.VPC.AvailabilityZoneSelection = restored.Spec.NetworkSpec.VPC.AvailabilityZoneSelection
}
return nil
}
// ConvertFrom converts from the Hub version (v1alpha3) to this version.
func (dst *AWSCluster) ConvertFrom(srcRaw conversion.Hub) error { // nolint
src := srcRaw.(*infrav1alpha3.AWSCluster)
if err := Convert_v1alpha3_AWSCluster_To_v1alpha2_AWSCluster(src, dst, nil); err != nil {
return err
}
// Manually convert Spec.ControlPlaneEndpoint to Status.APIEndpoints.
if !src.Spec.ControlPlaneEndpoint.IsZero() {
dst.Status.APIEndpoints = []APIEndpoint{
{
Host: src.Spec.ControlPlaneEndpoint.Host,
Port: int(src.Spec.ControlPlaneEndpoint.Port),
},
}
}
// Preserve Hub data on down-conversion.
if err := utilconversion.MarshalData(src, dst); err != nil {
return err
}
return nil
}
// ConvertTo converts this AWSClusterList to the Hub version (v1alpha3).
func (src *AWSClusterList) ConvertTo(dstRaw conversion.Hub) error { // nolint
dst := dstRaw.(*infrav1alpha3.AWSClusterList)
return Convert_v1alpha2_AWSClusterList_To_v1alpha3_AWSClusterList(src, dst, nil)
}
// ConvertFrom converts from the Hub version (v1alpha3) to this version.
func (dst *AWSClusterList) ConvertFrom(srcRaw conversion.Hub) error { // nolint
src := srcRaw.(*infrav1alpha3.AWSClusterList)
return Convert_v1alpha3_AWSClusterList_To_v1alpha2_AWSClusterList(src, dst, nil)
}
// Convert_v1alpha2_AWSClusterStatus_To_v1alpha3_AWSClusterStatus converts AWSCluster.Status from v1alpha2 to v1alpha3.
func Convert_v1alpha2_AWSClusterStatus_To_v1alpha3_AWSClusterStatus(in *AWSClusterStatus, out *v1alpha3.AWSClusterStatus, s apiconversion.Scope) error { // nolint
if err := autoConvert_v1alpha2_AWSClusterStatus_To_v1alpha3_AWSClusterStatus(in, out, s); err != nil {
return err
}
// Manually convert Status.Bastion.
if !reflect.DeepEqual(in.Bastion, Instance{}) {
out.Bastion = &v1alpha3.Instance{}
if err := Convert_v1alpha2_Instance_To_v1alpha3_Instance(&in.Bastion, out.Bastion, s); err != nil {
return err
}
}
return nil
}
// Convert_v1alpha2_AWSClusterSpec_To_v1alpha3_AWSClusterSpec.
func Convert_v1alpha2_AWSClusterSpec_To_v1alpha3_AWSClusterSpec(in *AWSClusterSpec, out *infrav1alpha3.AWSClusterSpec, s apiconversion.Scope) error { //nolint
if err := autoConvert_v1alpha2_AWSClusterSpec_To_v1alpha3_AWSClusterSpec(in, out, s); err != nil {
return err
}
// Manually convert Bastion.
out.Bastion.Enabled = !in.DisableBastionHost
// Manually convert SSHKeyName
out.SSHKeyName = pointer.StringPtr(in.SSHKeyName)
return nil
}
// Convert_v1alpha3_AWSClusterSpec_To_v1alpha2_AWSClusterSpec converts from the Hub version (v1alpha3) of the AWSClusterSpec to this version.
// Requires manual conversion as infrav1alpha3.AWSClusterSpec.ImageLookupOrg does not exist in AWSClusterSpec.
func Convert_v1alpha3_AWSClusterSpec_To_v1alpha2_AWSClusterSpec(in *infrav1alpha3.AWSClusterSpec, out *AWSClusterSpec, s apiconversion.Scope) error { // nolint
if err := autoConvert_v1alpha3_AWSClusterSpec_To_v1alpha2_AWSClusterSpec(in, out, s); err != nil {
return err
}
// Manually convert DisableBastionHost.
out.DisableBastionHost = !in.Bastion.Enabled
// Manually convert SSHKeyName
if in.SSHKeyName != nil {
out.SSHKeyName = *in.SSHKeyName
}
return nil
}
// Convert_v1alpha3_AWSClusterStatus_To_v1alpha2_AWSClusterStatus.
func Convert_v1alpha3_AWSClusterStatus_To_v1alpha2_AWSClusterStatus(in *infrav1alpha3.AWSClusterStatus, out *AWSClusterStatus, s apiconversion.Scope) error { //nolint
if err := autoConvert_v1alpha3_AWSClusterStatus_To_v1alpha2_AWSClusterStatus(in, out, s); err != nil {
return err
}
// Manually convert Status.Bastion.
if in.Bastion != nil {
if err := Convert_v1alpha3_Instance_To_v1alpha2_Instance(in.Bastion, &out.Bastion, s); err != nil {
return err
}
}
return nil
}
// Convert_v1alpha3_ClassicELB_To_v1alpha2_ClassicELB.
func Convert_v1alpha3_ClassicELB_To_v1alpha2_ClassicELB(in *infrav1alpha3.ClassicELB, out *ClassicELB, s apiconversion.Scope) error { //nolint
return autoConvert_v1alpha3_ClassicELB_To_v1alpha2_ClassicELB(in, out, s)
}
// Convert_v1alpha3_AWSLoadBalancerSpec_To_v1alpha2_AWSLoadBalancerSpec.
func Convert_v1alpha3_AWSLoadBalancerSpec_To_v1alpha2_AWSLoadBalancerSpec(in *infrav1alpha3.AWSLoadBalancerSpec, out *AWSLoadBalancerSpec, s apiconversion.Scope) error { //nolint
return autoConvert_v1alpha3_AWSLoadBalancerSpec_To_v1alpha2_AWSLoadBalancerSpec(in, out, s)
}
func Convert_v1alpha3_ClassicELBAttributes_To_v1alpha2_ClassicELBAttributes(in *infrav1alpha3.ClassicELBAttributes, out *ClassicELBAttributes, s apiconversion.Scope) error { //nolint
return autoConvert_v1alpha3_ClassicELBAttributes_To_v1alpha2_ClassicELBAttributes(in, out, s)
}
// Convert_v1alpha3_VPCSpec_To_v1alpha2_VPCSpec is an autogenerated conversion function.
func Convert_v1alpha3_VPCSpec_To_v1alpha2_VPCSpec(in *infrav1alpha3.VPCSpec, out *VPCSpec, s apiconversion.Scope) error {
return autoConvert_v1alpha3_VPCSpec_To_v1alpha2_VPCSpec(in, out, s)
}
//Convert_v1alpha3_NetworkSpec_To_v1alpha2_NetworkSpec
func Convert_v1alpha3_NetworkSpec_To_v1alpha2_NetworkSpec(in *infrav1alpha3.NetworkSpec, out *NetworkSpec, s apiconversion.Scope) error {
return autoConvert_v1alpha3_NetworkSpec_To_v1alpha2_NetworkSpec(in, out, s)
}
| 1 | 14,391 | I think we might need some special handling for `Spec.ControlPlaneLoadBalancer.SecurityGroups`; it looks like we are currently only handling the case where `Spec.ControlPlaneLoadBalancer` is nil. | kubernetes-sigs-cluster-api-provider-aws | go
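Editor's note (not part of the dataset row above): a minimal sketch of the special handling the reviewer is suggesting, shown as a hypothetical helper for ConvertTo. The SecurityGroups field name is taken from the review comment and is not verified against the real v1alpha3 API types; treat it as an assumption, not the project's implementation.

// restoreControlPlaneLoadBalancer is a hypothetical helper: it restores the
// load balancer spec field-by-field from the annotation-preserved v1alpha3
// object instead of only copying the whole struct when it is non-nil.
func restoreControlPlaneLoadBalancer(dst, restored *infrav1alpha3.AWSCluster) {
	if restored.Spec.ControlPlaneLoadBalancer == nil {
		return
	}
	if dst.Spec.ControlPlaneLoadBalancer == nil {
		dst.Spec.ControlPlaneLoadBalancer = &infrav1alpha3.AWSLoadBalancerSpec{}
	}
	// SecurityGroups (assumed field) has no v1alpha2 counterpart, so it can
	// only be recovered from the restored v1alpha3 copy.
	dst.Spec.ControlPlaneLoadBalancer.SecurityGroups =
		restored.Spec.ControlPlaneLoadBalancer.SecurityGroups
}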
@@ -0,0 +1,18 @@
+_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py'
+
+model = dict(
+ pretrained='open-mmlab://dla60',
+ backbone=dict(
+ type='DLANet',
+ depth=60,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ style='pytorch'),
+ neck=dict(
+ type='FPN',
+ in_channels=[128, 256, 512, 1024],
+ out_channels=256,
+ num_outs=5)) | 1 | 1 | 22,386 | Is this from a third-party library? | open-mmlab-mmdetection | py |
|
@@ -70,3 +70,18 @@ dom.isNativelyFocusable = function(el) {
}
return false;
};
+
+/**
+ * Determines if an element is in the focus order, but would not be if its
+ * tabindex were unspecified.
+ * @method insertedIntoFocusOrder
+ * @memberof axe.commons.dom
+ * @instance
+ * @param {HTMLElement} el The HTMLElement
+ * @return {Boolean} True if the element is in the focus order but wouldn't be
+ * if its tabindex were removed. Else, false.
+ */
+dom.insertedIntoFocusOrder = function(el) {
+ return (el.tabIndex > -1 && dom.isFocusable(el) &&
+ !dom.isNativelyFocusable(el));
+}; | 1 | /* global dom */
/* jshint maxcomplexity: 20 */
/**
* Determines if focusing has been disabled on an element.
* @param {HTMLElement} el The HTMLElement
* @return {Boolean} Whether focusing has been disabled on an element.
*/
function focusDisabled(el) {
return el.disabled ||
(!dom.isVisible(el, true) && el.nodeName.toUpperCase() !== 'AREA');
}
/**
* Determines if an element is focusable
* @method isFocusable
* @memberof axe.commons.dom
* @instance
* @param {HTMLElement} el The HTMLElement
* @return {Boolean} The element's focusability status
*/
dom.isFocusable = function (el) {
'use strict';
if (focusDisabled(el)) {
return false;
} else if (dom.isNativelyFocusable(el)) {
return true;
}
// check if the tabindex is specified and a parseable number
var tabindex = el.getAttribute('tabindex');
if (tabindex && !isNaN(parseInt(tabindex, 10))) {
return true;
}
return false;
};
/**
* Determines if an element is focusable without considering its tabindex
* @method isNativelyFocusable
* @memberof axe.commons.dom
* @instance
* @param {HTMLElement} el The HTMLElement
* @return {Boolean} True if the element is in the focus order but wouldn't be
* if its tabindex were removed. Else, false.
*/
dom.isNativelyFocusable = function(el) {
'use strict';
if (!el || focusDisabled(el)) {
return false;
}
switch (el.nodeName.toUpperCase()) {
case 'A':
case 'AREA':
if (el.href) {
return true;
}
break;
case 'INPUT':
return el.type !== 'hidden';
case 'TEXTAREA':
case 'SELECT':
case 'DETAILS':
case 'BUTTON':
return true;
}
return false;
};
| 1 | 11,960 | Please put the `el.tabIndex > -1` in the first part of the expression. The matcher will run on every node on the page, so we should put the fastest part first. | dequelabs-axe-core | js |
@@ -20,14 +20,14 @@ package mysterium
import (
"path/filepath"
- log "github.com/cihub/seelog"
"github.com/mitchellh/go-homedir"
+ "github.com/mysteriumnetwork/go-openvpn/openvpn3"
"github.com/mysteriumnetwork/node/cmd"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/node"
- "github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/metadata"
- service_noop "github.com/mysteriumnetwork/node/services/noop"
+ "github.com/mysteriumnetwork/node/services/openvpn"
+ "github.com/mysteriumnetwork/node/services/openvpn/session"
)
// MobileNode represents node object tuned for mobile devices | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mysterium
import (
"path/filepath"
log "github.com/cihub/seelog"
"github.com/mitchellh/go-homedir"
"github.com/mysteriumnetwork/node/cmd"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/metadata"
service_noop "github.com/mysteriumnetwork/node/services/noop"
)
// MobileNode represents node object tuned for mobile devices
type MobileNode struct {
di cmd.Dependencies
}
// MobileNetworkOptions alias for node.OptionsNetwork to be visible from mobile framework
type MobileNetworkOptions node.OptionsNetwork
// NewNode function creates new Node
func NewNode(appPath string, optionsNetwork *MobileNetworkOptions) (*MobileNode, error) {
var di cmd.Dependencies
var dataDir, currentDir string
if appPath == "" {
currentDir, err := homedir.Dir()
if err != nil {
panic(err)
}
dataDir = filepath.Join(currentDir, ".mysterium")
} else {
dataDir = filepath.Join(appPath, ".mysterium")
currentDir = appPath
}
err := di.Bootstrap(node.Options{
Directories: node.OptionsDirectory{
Data: dataDir,
Storage: filepath.Join(dataDir, "db"),
Keystore: filepath.Join(dataDir, "keystore"),
// TODO Where to save runtime data
Runtime: currentDir,
},
TequilapiAddress: "127.0.0.1",
TequilapiPort: 4050,
// TODO Make Openvpn pluggable connection optional
Openvpn: noOpenvpnYet{},
Location: node.OptionsLocation{
IpifyUrl: "https://api.ipify.org/",
Country: "LT",
},
OptionsNetwork: node.OptionsNetwork(*optionsNetwork),
})
if err != nil {
return nil, err
}
di.ConnectionRegistry.Register("openvpn", service_noop.NewConnectionCreator())
return &MobileNode{di}, nil
}
// DefaultNetworkOptions returns default network options to connect with
func DefaultNetworkOptions() *MobileNetworkOptions {
return &MobileNetworkOptions{
Testnet: true,
DiscoveryAPIAddress: metadata.TestnetDefinition.DiscoveryAPIAddress,
BrokerAddress: metadata.TestnetDefinition.BrokerAddress,
EtherClientRPC: metadata.TestnetDefinition.EtherClientRPC,
EtherPaymentsAddress: metadata.DefaultNetwork.PaymentsContractAddress.String(),
}
}
// TestConnectFlow checks whenever connection can be successfully established
func (mobNode *MobileNode) TestConnectFlow(providerAddress string) error {
consumers := mobNode.di.IdentityManager.GetIdentities()
var consumerID identity.Identity
if len(consumers) < 1 {
created, err := mobNode.di.IdentityManager.CreateNewIdentity("")
if err != nil {
return err
}
consumerID = created
} else {
consumerID = consumers[0]
}
log.Infof("Unlocking consumer: %#v", consumerID)
err := mobNode.di.IdentityManager.Unlock(consumerID.Address, "")
if err != nil {
return err
}
providerId := identity.FromAddress(providerAddress)
log.Infof("Connecting to provider: %#v", providerId)
err = mobNode.di.ConnectionManager.Connect(consumerID, providerId, connection.ConnectParams{})
if err != nil {
return err
}
connectionStatus := mobNode.di.ConnectionManager.Status()
log.Infof("Connection status: %#v", connectionStatus)
return mobNode.di.ConnectionManager.Disconnect()
}
// Shutdown function stops running mobile node
func (mobNode *MobileNode) Shutdown() error {
return mobNode.di.Node.Kill()
}
// WaitUntilDies function returns when node stops
func (mobNode *MobileNode) WaitUntilDies() error {
return mobNode.di.Node.Wait()
}
type noOpenvpnYet struct {
}
func (noOpenvpnYet) Check() error {
return nil
}
// BinaryPath returns noop binary path
func (noOpenvpnYet) BinaryPath() string {
return "no openvpn binary available on mobile"
}
var _ node.Openvpn = noOpenvpnYet{}
| 1 | 12,553 | `identity` is unused now; is it for future usage? | mysteriumnetwork-node | go
@@ -9,11 +9,14 @@ Puppet::Functions.create_function(:prompt) do
# @param prompt The prompt to display.
# @param options A hash of additional options.
# @option options [Boolean] sensitive Disable echo back and mark the response as sensitive.
+ # The returned value will be wrapped by the `Sensitive` data type. To access the raw
+ # value, use the `unwrap` function (i.e. `$sensitive_value.unwrap`).
# @return The response to the prompt.
# @example Prompt the user if plan execution should continue
# $response = prompt('Continue executing plan? [Y\N]')
# @example Prompt the user for sensitive information
# $password = prompt('Enter your password', 'sensitive' => true)
+ # out::message("Password is: ${password.unwrap}")
dispatch :prompt do
param 'String', :prompt
optional_param 'Hash[String[1], Any]', :options | 1 | # frozen_string_literal: true
require 'bolt/error'
# Display a prompt and wait for a response.
#
# > **Note:** Not available in apply block
Puppet::Functions.create_function(:prompt) do
# @param prompt The prompt to display.
# @param options A hash of additional options.
# @option options [Boolean] sensitive Disable echo back and mark the response as sensitive.
# @return The response to the prompt.
# @example Prompt the user if plan execution should continue
# $response = prompt('Continue executing plan? [Y\N]')
# @example Prompt the user for sensitive information
# $password = prompt('Enter your password', 'sensitive' => true)
dispatch :prompt do
param 'String', :prompt
optional_param 'Hash[String[1], Any]', :options
return_type 'Variant[String, Sensitive]'
end
def prompt(prompt, options = {})
unless Puppet[:tasks]
raise Puppet::ParseErrorWithIssue
.from_issue_and_stack(Bolt::PAL::Issues::PLAN_OPERATION_NOT_SUPPORTED_WHEN_COMPILING,
action: 'prompt')
end
options = options.transform_keys(&:to_sym)
executor = Puppet.lookup(:bolt_executor)
# Send analytics report
executor.report_function_call(self.class.name)
response = executor.prompt(prompt, options)
if options[:sensitive]
Puppet::Pops::Types::PSensitiveType::Sensitive.new(response)
else
response
end
end
end
| 1 | 16,555 | This would have assisted me a lot! I am on the "functions" page more than any other page in the docs | puppetlabs-bolt | rb |
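A short Ruby sketch of the behaviour the added documentation describes, assuming the puppet gem is on the load path; the Sensitive.new call mirrors the one in the function above, and the sample value is invented for illustration:

require 'puppet'

# Same wrapping the prompt function applies when 'sensitive' => true is passed.
wrapped = Puppet::Pops::Types::PSensitiveType::Sensitive.new('hunter2')

# The wrapper is meant to render in a redacted form in plan output; the raw
# string is only reachable through unwrap, which is what the new docs call out.
puts wrapped.unwrap # => hunter2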
@@ -971,7 +971,7 @@ class JobTask(BaseHadoopJobTask):
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
- d = d.replace(b'(c__main__', "(c" + module_name)
+ d = d.replace(b'c__main__', b'c' + module_name.encode('ascii'))
open(file_name, "wb").write(d)
else: | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Run Hadoop MapReduce jobs using Hadoop Streaming. To run a job, you need
to subclass :py:class:`luigi.contrib.hadoop.JobTask` and implement the
``mapper`` and ``reducer`` methods. See :doc:`/example_top_artists` for
an example of how to run a Hadoop job.
"""
import abc
import datetime
import glob
import logging
import os
import pickle
import random
import re
import shutil
import signal
from io import StringIO
import subprocess
import sys
import tempfile
import warnings
from hashlib import md5
from itertools import groupby
from luigi import configuration
import luigi
import luigi.task
import luigi.contrib.gcs
import luigi.contrib.hdfs
import luigi.contrib.s3
from luigi.contrib import mrrunner
try:
# See benchmark at https://gist.github.com/mvj3/02dca2bcc8b0ef1bbfb5
import ujson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
_attached_packages = []
TRACKING_RE = re.compile(r'(tracking url|the url to track the job):\s+(?P<url>.+)$')
class hadoop(luigi.task.Config):
pool = luigi.OptionalParameter(
default=None,
description=(
'Hadoop pool to use for Hadoop tasks. To specify pools per task, '
'see BaseHadoopJobTask.pool'
),
)
def attach(*packages):
"""
Attach a python package to hadoop map reduce tarballs to make those packages available
on the hadoop cluster.
"""
_attached_packages.extend(packages)
def dereference(f):
if os.path.islink(f):
# by joining with the dirname we are certain to get the absolute path
return dereference(os.path.join(os.path.dirname(f), os.readlink(f)))
else:
return f
def get_extra_files(extra_files):
result = []
for f in extra_files:
if isinstance(f, str):
src, dst = f, os.path.basename(f)
elif isinstance(f, tuple):
src, dst = f
else:
raise Exception()
if os.path.isdir(src):
src_prefix = os.path.join(src, '')
for base, dirs, files in os.walk(src):
for f in files:
f_src = os.path.join(base, f)
f_src_stripped = f_src[len(src_prefix):]
f_dst = os.path.join(dst, f_src_stripped)
result.append((f_src, f_dst))
else:
result.append((src, dst))
return result
def create_packages_archive(packages, filename):
"""
Create a tar archive which will contain the files for the packages listed in packages.
"""
import tarfile
tar = tarfile.open(filename, "w")
def add(src, dst):
logger.debug('adding to tar: %s -> %s', src, dst)
tar.add(src, dst)
def add_files_for_package(sub_package_path, root_package_path, root_package_name):
for root, dirs, files in os.walk(sub_package_path):
if '.svn' in dirs:
dirs.remove('.svn')
for f in files:
if not f.endswith(".pyc") and not f.startswith("."):
add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)
for package in packages:
# Put a submodule's entire package in the archive. This is the
# magic that usually packages everything you need without
# having to attach packages/modules explicitly
if not getattr(package, "__path__", None) and '.' in package.__name__:
package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')
n = package.__name__.replace(".", "/")
if getattr(package, "__path__", None):
# TODO: (BUG) picking only the first path does not
# properly deal with namespaced packages in different
# directories
p = package.__path__[0]
if p.endswith('.egg') and os.path.isfile(p):
raise Exception('egg files not supported!!!')
# Add the entire egg file
# p = p[:p.find('.egg') + 4]
# add(dereference(p), os.path.basename(p))
else:
# include __init__ files from parent projects
root = []
for parent in package.__name__.split('.')[0:-1]:
root.append(parent)
module_name = '.'.join(root)
directory = '/'.join(root)
add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
directory + "/__init__.py")
add_files_for_package(p, p, n)
# include egg-info directories that are parallel:
for egg_info_path in glob.glob(p + '*.egg-info'):
logger.debug(
'Adding package metadata to archive for "%s" found at "%s"',
package.__name__,
egg_info_path
)
add_files_for_package(egg_info_path, p, n)
else:
f = package.__file__
if f.endswith("pyc"):
f = f[:-3] + "py"
if n.find(".") == -1:
add(dereference(f), os.path.basename(f))
else:
add(dereference(f), n + ".py")
tar.close()
def flatten(sequence):
"""
A simple generator which flattens a sequence.
Only one level is flattened.
.. code-block:: python
(1, (2, 3), 4) -> (1, 2, 3, 4)
"""
for item in sequence:
if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
for i in item:
yield i
else:
yield item
class HadoopRunContext:
def __init__(self):
self.job_id = None
self.application_id = None
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def kill_job(self, captured_signal=None, stack_frame=None):
if self.application_id:
logger.info('Job interrupted, killing application %s' % self.application_id)
subprocess.call(['yarn', 'application', '-kill', self.application_id])
elif self.job_id:
logger.info('Job interrupted, killing job %s', self.job_id)
subprocess.call(['mapred', 'job', '-kill', self.job_id])
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
class HadoopJobError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(HadoopJobError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def __str__(self):
return self.message
def run_and_track_hadoop_job(arglist, tracking_url_callback=None, env=None):
"""
Runs the job by invoking the command from the given arglist.
Finds tracking urls from the output and attempts to fetch errors using those urls if the job fails.
Throws HadoopJobError with information about the error
(including stdout and stderr from the process)
on failure and returns normally otherwise.
:param arglist:
:param tracking_url_callback:
:param env:
:return:
"""
logger.info('%s', subprocess.list2cmdline(arglist))
def write_luigi_history(arglist, history):
"""
Writes history to a file in the job's output directory in JSON format.
Currently just for tracking the job ID in a configuration where
no history is stored in the output directory by Hadoop.
"""
history_filename = configuration.get_config().get('core', 'history-filename', '')
if history_filename and '-output' in arglist:
output_dir = arglist[arglist.index('-output') + 1]
f = luigi.contrib.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open('w')
f.write(json.dumps(history))
f.close()
def track_process(arglist, tracking_url_callback, env=None):
# Dump stdout to a temp file, poll stderr and log it
temp_stdout = tempfile.TemporaryFile('w+t')
proc = subprocess.Popen(arglist, stdout=temp_stdout, stderr=subprocess.PIPE, env=env, close_fds=True, universal_newlines=True)
# We parse the output to try to find the tracking URL.
# This URL is useful for fetching the logs of the job.
tracking_url = None
job_id = None
application_id = None
err_lines = []
with HadoopRunContext() as hadoop_context:
while proc.poll() is None:
err_line = proc.stderr.readline()
err_lines.append(err_line)
err_line = err_line.strip()
if err_line:
logger.info('%s', err_line)
err_line = err_line.lower()
tracking_url_match = TRACKING_RE.search(err_line)
if tracking_url_match:
tracking_url = tracking_url_match.group('url')
try:
tracking_url_callback(tracking_url)
except Exception as e:
logger.error("Error in tracking_url_callback, disabling! %s", e)
def tracking_url_callback(x):
return None
if err_line.find('running job') != -1:
# hadoop jar output
job_id = err_line.split('running job: ')[-1]
if err_line.find('submitted hadoop job:') != -1:
# scalding output
job_id = err_line.split('submitted hadoop job: ')[-1]
if err_line.find('submitted application ') != -1:
application_id = err_line.split('submitted application ')[-1]
hadoop_context.job_id = job_id
hadoop_context.application_id = application_id
# Read the rest + stdout
err = ''.join(err_lines + [an_err_line for an_err_line in proc.stderr])
temp_stdout.seek(0)
out = ''.join(temp_stdout.readlines())
if proc.returncode == 0:
write_luigi_history(arglist, {'job_id': job_id})
return (out, err)
# Try to fetch error logs if possible
message = 'Streaming job failed with exit code %d. ' % proc.returncode
if not tracking_url:
raise HadoopJobError(message + 'Also, no tracking url found.', out, err)
try:
task_failures = fetch_task_failures(tracking_url)
except Exception as e:
raise HadoopJobError(message + 'Additionally, an error occurred when fetching data from %s: %s' %
(tracking_url, e), out, err)
if not task_failures:
raise HadoopJobError(message + 'Also, could not fetch output from tasks.', out, err)
else:
raise HadoopJobError(message + 'Output from tasks below:\n%s' % task_failures, out, err)
if tracking_url_callback is None:
def tracking_url_callback(x): return None
return track_process(arglist, tracking_url_callback, env)
def fetch_task_failures(tracking_url):
"""
Uses mechanize to fetch the actual task logs from the task tracker.
This is highly opportunistic, and we might not succeed.
So we set a low timeout and hope it works.
If it does not, it's not the end of the world.
TODO: Yarn has a REST API that we should probably use instead:
http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
"""
import mechanize
timeout = 3.0
failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
logger.debug('Fetching data from %s', failures_url)
b = mechanize.Browser()
b.open(failures_url, timeout=timeout)
links = list(b.links(text_regex='Last 4KB')) # For some reason text_regex='All' doesn't work... no idea why
links = random.sample(links, min(10, len(links))) # Fetch a random subset of all failed tasks, so not to be biased towards the early fails
error_text = []
for link in links:
task_url = link.url.replace('&start=-4097', '&start=-100000') # Increase the offset
logger.debug('Fetching data from %s', task_url)
b2 = mechanize.Browser()
try:
r = b2.open(task_url, timeout=timeout)
data = r.read()
except Exception as e:
logger.debug('Error fetching data from %s: %s', task_url, e)
continue
# Try to get the hex-encoded traceback back from the output
for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
error_text.append('---------- %s:' % task_url)
error_text.append(exc.split('=')[-1].decode('hex'))
return '\n'.join(error_text)
class JobRunner:
run_job = NotImplemented
class HadoopJobRunner(JobRunner):
"""
Takes care of uploading & executing a Hadoop job using Hadoop streaming.
TODO: add code to support Elastic Mapreduce (using boto) and local execution.
"""
def __init__(self, streaming_jar, modules=None, streaming_args=None,
libjars=None, libjars_in_hdfs=None, jobconfs=None,
input_format=None, output_format=None,
end_job_with_atomic_move_dir=True, archives=None):
def get(x, default):
return x is not None and x or default
self.streaming_jar = streaming_jar
self.modules = get(modules, [])
self.streaming_args = get(streaming_args, [])
self.libjars = get(libjars, [])
self.libjars_in_hdfs = get(libjars_in_hdfs, [])
self.archives = get(archives, [])
self.jobconfs = get(jobconfs, {})
self.input_format = input_format
self.output_format = output_format
self.end_job_with_atomic_move_dir = end_job_with_atomic_move_dir
self.tmp_dir = False
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
packages = [luigi] + self.modules + job.extra_modules() + list(_attached_packages)
# find the module containing the job
packages.append(__import__(job.__module__, None, None, 'dummy'))
# find the path to our runner.py
runner_path = mrrunner.__file__
# assume source is next to compiled
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
base_tmp_dir = configuration.get_config().get('core', 'tmp-dir', None)
if base_tmp_dir:
warnings.warn("The core.tmp-dir configuration item is"
" deprecated, please use the TMPDIR"
" environment variable if you wish"
" to control where luigi.contrib.hadoop may"
" create temporary files and directories.")
self.tmp_dir = os.path.join(base_tmp_dir, 'hadoop_job_%016x' % random.getrandbits(64))
os.makedirs(self.tmp_dir)
else:
self.tmp_dir = tempfile.mkdtemp()
logger.debug("Tmp dir: %s", self.tmp_dir)
# build arguments
config = configuration.get_config()
python_executable = config.get('hadoop', 'python-executable', 'python')
runner_arg = 'mrrunner.pex' if job.package_binary is not None else 'mrrunner.py'
command = '{0} {1} {{step}}'.format(python_executable, runner_arg)
map_cmd = command.format(step='map')
cmb_cmd = command.format(step='combiner')
red_cmd = command.format(step='reduce')
output_final = job.output().path
# atomic output: replace output with a temporary work directory
if self.end_job_with_atomic_move_dir:
illegal_targets = (
luigi.contrib.s3.S3FlagTarget, luigi.contrib.gcs.GCSFlagTarget)
if isinstance(job.output(), illegal_targets):
raise TypeError("end_job_with_atomic_move_dir is not supported"
" for {}".format(illegal_targets))
output_hadoop = '{output}-temp-{time}'.format(
output=output_final,
time=datetime.datetime.now().isoformat().replace(':', '-'))
else:
output_hadoop = output_final
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', self.streaming_jar]
# 'libjars' is a generic option, so place it first
libjars = [libjar for libjar in self.libjars]
for libjar in self.libjars_in_hdfs:
run_cmd = luigi.contrib.hdfs.load_hadoop_cmd() + ['fs', '-get', libjar, self.tmp_dir]
logger.debug(subprocess.list2cmdline(run_cmd))
subprocess.call(run_cmd)
libjars.append(os.path.join(self.tmp_dir, os.path.basename(libjar)))
if libjars:
arglist += ['-libjars', ','.join(libjars)]
# 'archives' is also a generic option
archives = []
extra_archives = job.extra_archives()
if self.archives:
archives = self.archives
if extra_archives:
archives += extra_archives
if archives:
arglist += ['-archives', ','.join(archives)]
# Add static files and directories
extra_files = get_extra_files(job.extra_files())
files = []
for src, dst in extra_files:
dst_tmp = '%s_%09d' % (dst.replace('/', '_'), random.randint(0, 999999999))
files += ['%s#%s' % (src, dst_tmp)]
# -files doesn't support subdirectories, so we need to create the dst_tmp -> dst manually
job.add_link(dst_tmp, dst)
if files:
arglist += ['-files', ','.join(files)]
jobconfs = job.jobconfs()
for k, v in self.jobconfs.items():
jobconfs.append('%s=%s' % (k, v))
for conf in jobconfs:
arglist += ['-D', conf]
arglist += self.streaming_args
# Add additional non-generic per-job streaming args
extra_streaming_args = job.extra_streaming_arguments()
for (arg, value) in extra_streaming_args:
if not arg.startswith('-'): # safety first
arg = '-' + arg
arglist += [arg, value]
arglist += ['-mapper', map_cmd]
if job.combiner != NotImplemented:
arglist += ['-combiner', cmb_cmd]
if job.reducer != NotImplemented:
arglist += ['-reducer', red_cmd]
packages_fn = 'mrrunner.pex' if job.package_binary is not None else 'packages.tar'
files = [
runner_path if job.package_binary is None else None,
os.path.join(self.tmp_dir, packages_fn),
os.path.join(self.tmp_dir, 'job-instance.pickle'),
]
for f in filter(None, files):
arglist += ['-file', f]
if self.output_format:
arglist += ['-outputformat', self.output_format]
if self.input_format:
arglist += ['-inputformat', self.input_format]
allowed_input_targets = (
luigi.contrib.hdfs.HdfsTarget,
luigi.contrib.s3.S3Target,
luigi.contrib.gcs.GCSTarget)
for target in luigi.task.flatten(job.input_hadoop()):
if not isinstance(target, allowed_input_targets):
raise TypeError('target must one of: {}'.format(
allowed_input_targets))
arglist += ['-input', target.path]
allowed_output_targets = (
luigi.contrib.hdfs.HdfsTarget,
luigi.contrib.s3.S3FlagTarget,
luigi.contrib.gcs.GCSFlagTarget)
if not isinstance(job.output(), allowed_output_targets):
raise TypeError('output must be one of: {}'.format(
allowed_output_targets))
arglist += ['-output', output_hadoop]
# submit job
if job.package_binary is not None:
shutil.copy(job.package_binary, os.path.join(self.tmp_dir, 'mrrunner.pex'))
else:
create_packages_archive(packages, os.path.join(self.tmp_dir, 'packages.tar'))
job.dump(self.tmp_dir)
run_and_track_hadoop_job(arglist, tracking_url_callback=job.set_tracking_url)
if self.end_job_with_atomic_move_dir:
luigi.contrib.hdfs.HdfsTarget(output_hadoop).move_dir(output_final)
self.finish()
def finish(self):
# FIXME: check for isdir?
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.debug('Removing directory %s', self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def __del__(self):
self.finish()
class DefaultHadoopJobRunner(HadoopJobRunner):
"""
The default job runner just reads from config and sets stuff.
"""
def __init__(self):
config = configuration.get_config()
streaming_jar = config.get('hadoop', 'streaming-jar')
super(DefaultHadoopJobRunner, self).__init__(streaming_jar=streaming_jar)
# TODO: add more configurable options
class LocalJobRunner(JobRunner):
"""
Will run the job locally.
This is useful for debugging and also unit testing. Tries to mimic Hadoop Streaming.
TODO: integrate with JobTask
"""
def __init__(self, samplelines=None):
self.samplelines = samplelines
def sample(self, input_stream, n, output):
for i, line in enumerate(input_stream):
if n is not None and i >= n:
break
output.write(line)
def group(self, input_stream):
output = StringIO()
lines = []
for i, line in enumerate(input_stream):
parts = line.rstrip('\n').split('\t')
blob = md5(str(i).encode('ascii')).hexdigest() # pseudo-random blob to make sure the input isn't sorted
lines.append((parts[:-1], blob, line))
for _, _, line in sorted(lines):
output.write(line)
output.seek(0)
return output
def run_job(self, job):
map_input = StringIO()
for i in luigi.task.flatten(job.input_hadoop()):
self.sample(i.open('r'), self.samplelines, map_input)
map_input.seek(0)
if job.reducer == NotImplemented:
# Map only job; no combiner, no reducer
map_output = job.output().open('w')
job.run_mapper(map_input, map_output)
map_output.close()
return
# run job now...
map_output = StringIO()
job.run_mapper(map_input, map_output)
map_output.seek(0)
if job.combiner == NotImplemented:
reduce_input = self.group(map_output)
else:
combine_input = self.group(map_output)
combine_output = StringIO()
job.run_combiner(combine_input, combine_output)
combine_output.seek(0)
reduce_input = self.group(combine_output)
reduce_output = job.output().open('w')
job.run_reducer(reduce_input, reduce_output)
reduce_output.close()
class BaseHadoopJobTask(luigi.Task):
pool = luigi.OptionalParameter(default=None, significant=False, positional=False)
# This value can be set to change the default batching increment. Default is 1 for backwards compatibility.
batch_counter_default = 1
final_mapper = NotImplemented
final_combiner = NotImplemented
final_reducer = NotImplemented
mr_priority = NotImplemented
package_binary = None
_counter_dict = {}
task_id = None
def _get_pool(self):
""" Protected method """
if self.pool:
return self.pool
if hadoop().pool:
return hadoop().pool
@abc.abstractmethod
def job_runner(self):
pass
def jobconfs(self):
jcs = []
jcs.append('mapred.job.name=%s' % self)
if self.mr_priority != NotImplemented:
jcs.append('mapred.job.priority=%s' % self.mr_priority())
pool = self._get_pool()
if pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs.append('mapred.fairscheduler.pool=%s' % pool)
elif scheduler_type == 'capacity':
jcs.append('mapred.job.queue.name=%s' % pool)
return jcs
def init_local(self):
"""
Implement any work to set up any internal data structures etc. here.
You can add extra input using the requires_local/input_local methods.
Anything you set on the object will be pickled and available on the Hadoop nodes.
"""
pass
def init_hadoop(self):
pass
# available formats are "python" and "json".
data_interchange_format = "python"
def run(self):
# The best solution is to store them as lazy `cached_property`, but it
# has extraneous dependency. And `property` is slow (need to be
# calculated every time when called), so we save them as attributes
# directly.
self.serialize = DataInterchange[self.data_interchange_format]['serialize']
self.internal_serialize = DataInterchange[self.data_interchange_format]['internal_serialize']
self.deserialize = DataInterchange[self.data_interchange_format]['deserialize']
self.init_local()
self.job_runner().run_job(self)
def requires_local(self):
"""
Default impl - override this method if you need any local input to be accessible in init().
"""
return []
def requires_hadoop(self):
return self.requires() # default impl
def input_local(self):
return luigi.task.getpaths(self.requires_local())
def input_hadoop(self):
return luigi.task.getpaths(self.requires_hadoop())
def deps(self):
# Overrides the default implementation
return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local())
def on_failure(self, exception):
if isinstance(exception, HadoopJobError):
return """Hadoop job failed with message: {message}
stdout:
{stdout}
stderr:
{stderr}
""".format(message=exception.message, stdout=exception.out, stderr=exception.err)
else:
return super(BaseHadoopJobTask, self).on_failure(exception)
DataInterchange = {
"python": {"serialize": str,
"internal_serialize": repr,
"deserialize": eval},
"json": {"serialize": json.dumps,
"internal_serialize": json.dumps,
"deserialize": json.loads}
}
class JobTask(BaseHadoopJobTask):
jobconf_truncate = 20000
n_reduce_tasks = 25
reducer = NotImplemented
def jobconfs(self):
jcs = super(JobTask, self).jobconfs()
if self.reducer == NotImplemented:
jcs.append('mapred.reduce.tasks=0')
else:
jcs.append('mapred.reduce.tasks=%s' % self.n_reduce_tasks)
if self.jobconf_truncate >= 0:
jcs.append('stream.jobconf.truncate.limit=%i' % self.jobconf_truncate)
return jcs
def init_mapper(self):
pass
def init_combiner(self):
pass
def init_reducer(self):
pass
def _setup_remote(self):
self._setup_links()
def job_runner(self):
# We recommend that you define a subclass, override this method and set up your own config
"""
Get the MapReduce runner for this job.
If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used.
Otherwise, the LocalJobRunner which streams all data through the local machine
will be used (great for testing).
"""
outputs = luigi.task.flatten(self.output())
for output in outputs:
if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
warnings.warn("Job is using one or more non-HdfsTarget outputs" +
" so it will be run in local mode")
return LocalJobRunner()
else:
return DefaultHadoopJobRunner()
def reader(self, input_stream):
"""
Reader is a method which iterates over input lines and outputs records.
The default implementation yields one argument containing the line for each line in the input."""
for line in input_stream:
yield line,
def writer(self, outputs, stdout, stderr=sys.stderr):
"""
Writer is a method which iterates over the output records
from the reducer and formats them for output.
The default implementation outputs tab separated items.
"""
for output in outputs:
try:
output = flatten(output)
if self.data_interchange_format == "json":
# Only dump one json string, and skip another one, maybe key or value.
output = filter(lambda x: x, output)
else:
# JSON is already serialized, so we put `self.serialize` in an else statement.
output = map(self.serialize, output)
print("\t".join(output), file=stdout)
except BaseException:
print(output, file=stderr)
raise
def mapper(self, item):
"""
Re-define to process an input item (usually a line of input data).
Defaults to identity mapper that sends all lines to the same reducer.
"""
yield None, item
combiner = NotImplemented
def incr_counter(self, *args, **kwargs):
"""
Increments a Hadoop counter.
Since counters can be a bit slow to update, this batches the updates.
"""
threshold = kwargs.get("threshold", self.batch_counter_default)
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
key = (group_name,)
else:
group, name, count = args
key = (group, name)
ct = self._counter_dict.get(key, 0)
ct += count
if ct >= threshold:
new_arg = list(key) + [ct]
self._incr_counter(*new_arg)
ct = 0
self._counter_dict[key] = ct
def _flush_batch_incr_counter(self):
"""
Increments any unflushed counter values.
"""
for key, count in self._counter_dict.items():
if count == 0:
continue
args = list(key) + [count]
self._incr_counter(*args)
self._counter_dict[key] = 0
def _incr_counter(self, *args):
"""
Increments a Hadoop counter.
Note that this seems to be a bit slow, ~1 ms
Don't overuse this function by updating very frequently.
"""
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
print('reporter:counter:%s,%s' % (group_name, count), file=sys.stderr)
else:
group, name, count = args
print('reporter:counter:%s,%s,%s' % (group, name, count), file=sys.stderr)
def extra_modules(self):
return [] # can be overridden in subclass
def extra_files(self):
"""
Can be overridden in a subclass.
Each element is either a string, or a pair of two strings (src, dst).
* `src` can be a directory (in which case everything will be copied recursively).
* `dst` can include subdirectories (foo/bar/baz.txt etc)
Uses Hadoop's -files option so that the same file is reused across tasks.
"""
return []
def extra_streaming_arguments(self):
"""
Extra arguments to Hadoop command line.
Return here a list of (parameter, value) tuples.
"""
return []
def extra_archives(self):
"""List of paths to archives """
return []
def add_link(self, src, dst):
if not hasattr(self, '_links'):
self._links = []
self._links.append((src, dst))
def _setup_links(self):
if hasattr(self, '_links'):
missing = []
for src, dst in self._links:
d = os.path.dirname(dst)
if d:
try:
os.makedirs(d)
except OSError:
pass
if not os.path.exists(src):
missing.append(src)
continue
if not os.path.exists(dst):
# If the combiner runs, the file might already exist,
# so no reason to create the link again
os.link(src, dst)
if missing:
raise HadoopJobError(
'Missing files for distributed cache: ' +
', '.join(missing))
def dump(self, directory=''):
"""
Dump instance to file.
"""
with self.no_unpicklable_properties():
file_name = os.path.join(directory, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace(b'(c__main__', "(c" + module_name)
open(file_name, "wb").write(d)
else:
pickle.dump(self, open(file_name, "wb"))
def _map_input(self, input_stream):
"""
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
the arguments will be split into key and value.
"""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter()
def _reduce_input(self, inputs, reducer, final=NotImplemented):
"""
Iterate over input, collect values with the same key, and call the reducer for each unique key.
"""
for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])):
for output in reducer(self.deserialize(key), (v[1] for v in values)):
yield output
if final != NotImplemented:
for output in final():
yield output
self._flush_batch_incr_counter()
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout)
def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the reducer on the hadoop node.
"""
self.init_hadoop()
self.init_reducer()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.reducer, self.final_reducer)
self.writer(outputs, stdout)
def run_combiner(self, stdin=sys.stdin, stdout=sys.stdout):
self.init_hadoop()
self.init_combiner()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.combiner, self.final_combiner)
self.internal_writer(outputs, stdout)
def internal_reader(self, input_stream):
"""
Reader which uses python eval on each part of a tab separated string.
Yields a tuple of python objects.
"""
for input_line in input_stream:
yield list(map(self.deserialize, input_line.split("\t")))
def internal_writer(self, outputs, stdout):
"""
Writer which outputs the python repr for each item.
"""
for output in outputs:
print("\t".join(map(self.internal_serialize, output)), file=stdout)
| 1 | 20,031 | I think it's better we keep `(` and encode with default encoding (utf-8). | spotify-luigi | py |
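A minimal sketch of the alternative the comment above suggests for JobTask.dump - keep the leading '(' byte in the pattern and let encode default to UTF-8 - shown with the surrounding lines from the method for context; this illustrates the reviewer's suggestion, not the change that was merged:

if self.__module__ == '__main__':
    d = pickle.dumps(self)
    module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
    # Keep the '(' that precedes the module reference in the protocol-0 pickle
    # stream, and encode the module name with the default UTF-8 codec.
    d = d.replace(b'(c__main__', b'(c' + module_name.encode())
    open(file_name, "wb").write(d)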
@@ -52,11 +52,12 @@ class BaselineFormat extends AbstractBaselinePlugin {
java.removeUnusedImports();
// use empty string to specify one group for all non-static imports
java.importOrder("");
- java.trimTrailingWhitespace();
if (eclipseFormattingEnabled(project)) {
java.eclipse().configFile(project.file(eclipseXml.toString()));
}
+
+ java.trimTrailingWhitespace();
});
// necessary because SpotlessPlugin creates tasks in an afterEvaluate block | 1 | /*
* (c) Copyright 2018 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.plugins;
import com.diffplug.gradle.spotless.SpotlessExtension;
import java.nio.file.Files;
import java.nio.file.Path;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.file.ConfigurableFileCollection;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.tasks.compile.JavaCompile;
class BaselineFormat extends AbstractBaselinePlugin {
// TODO(dfox): remove this feature flag when we've refined the eclipse.xml sufficiently
private static final String ECLIPSE_FORMATTING = "com.palantir.baseline-format.eclipse";
@Override
public void apply(Project project) {
this.project = project;
project.getPluginManager().withPlugin("java", plugin -> {
project.getPluginManager().apply("com.diffplug.gradle.spotless");
Path eclipseXml = eclipseConfigFile(project);
project.getExtensions().getByType(SpotlessExtension.class).java(java -> {
// Configure a lazy FileCollection then pass it as the target
ConfigurableFileCollection allJavaFiles = project.files();
project
.getConvention()
.getPlugin(JavaPluginConvention.class)
.getSourceSets()
.all(sourceSet -> allJavaFiles.from(
sourceSet.getAllJava().filter(file -> !file.toString().contains("/generated"))));
java.target(allJavaFiles);
java.removeUnusedImports();
// use empty string to specify one group for all non-static imports
java.importOrder("");
java.trimTrailingWhitespace();
if (eclipseFormattingEnabled(project)) {
java.eclipse().configFile(project.file(eclipseXml.toString()));
}
});
// necessary because SpotlessPlugin creates tasks in an afterEvaluate block
Task formatTask = project.task("format");
project.afterEvaluate(p -> {
Task spotlessJava = project.getTasks().getByName("spotlessJava");
Task spotlessApply = project.getTasks().getByName("spotlessApply");
if (eclipseFormattingEnabled(project) && !Files.exists(eclipseXml)) {
spotlessJava.dependsOn(project.getTasks().findByPath(":baselineUpdateConfig"));
}
formatTask.dependsOn(spotlessApply);
project.getTasks().withType(JavaCompile.class).configureEach(spotlessJava::mustRunAfter);
});
});
}
static boolean eclipseFormattingEnabled(Project project) {
return project.hasProperty(ECLIPSE_FORMATTING);
}
static Path eclipseConfigFile(Project project) {
return project.getRootDir().toPath().resolve(".baseline/spotless/eclipse.xml");
}
}
| 1 | 7,348 | this is gonna be different in an IDE vs from gradlew?? | palantir-gradle-baseline | java |
@@ -140,3 +140,18 @@ end
Spork.each_run do
end
+
+shared_context "with isolated syntax" do
+ orig_matchers_syntax = nil
+ orig_mocks_syntax = nil
+
+ before(:each) do
+ orig_matchers_syntax = RSpec::Matchers.configuration.syntax
+ orig_mocks_syntax = RSpec::Mocks.configuration.syntax
+ end
+
+ after(:each) do
+ RSpec::Matchers.configuration.syntax = orig_matchers_syntax
+ RSpec::Mocks.configuration.syntax = orig_mocks_syntax
+ end
+end | 1 | require 'rubygems'
unless ENV['NO_COVERALLS']
require 'simplecov' if RUBY_VERSION.to_f > 1.8
require 'coveralls'
Coveralls.wear! do
add_filter '/bundle/'
add_filter '/spec/'
add_filter '/tmp/'
end
end
begin
require 'spork'
rescue LoadError
module Spork
def self.prefork
yield
end
def self.each_run
yield
end
end
end
Spork.prefork do
require 'rspec/autorun'
require 'autotest/rspec2'
require 'aruba/api'
if RUBY_PLATFORM == 'java'
# Works around https://jira.codehaus.org/browse/JRUBY-5678
require 'fileutils'
ENV['TMPDIR'] = File.expand_path('../../tmp', __FILE__)
FileUtils.mkdir_p(ENV['TMPDIR'])
end
Dir['./spec/support/**/*.rb'].map {|f| require f}
class NullObject
private
def method_missing(method, *args, &block)
# ignore
end
end
module Sandboxing
def self.sandboxed(&block)
@orig_config = RSpec.configuration
@orig_world = RSpec.world
new_config = RSpec::Core::Configuration.new
new_world = RSpec::Core::World.new(new_config)
RSpec.configuration = new_config
RSpec.world = new_world
object = Object.new
object.extend(RSpec::Core::SharedExampleGroup)
(class << RSpec::Core::ExampleGroup; self; end).class_eval do
alias_method :orig_run, :run
def run(reporter=nil)
orig_run(reporter || NullObject.new)
end
end
RSpec::Core::SandboxedMockSpace.sandboxed do
object.instance_eval(&block)
end
ensure
(class << RSpec::Core::ExampleGroup; self; end).class_eval do
remove_method :run
alias_method :run, :orig_run
remove_method :orig_run
end
RSpec.configuration = @orig_config
RSpec.world = @orig_world
end
end
def in_editor?
ENV.has_key?('TM_MODE') || ENV.has_key?('EMACS') || ENV.has_key?('VIM')
end
module EnvHelpers
def with_env_vars(vars)
original = ENV.to_hash
vars.each { |k, v| ENV[k] = v }
begin
yield
ensure
ENV.replace(original)
end
end
def without_env_vars(*vars)
original = ENV.to_hash
vars.each { |k| ENV.delete(k) }
begin
yield
ensure
ENV.replace(original)
end
end
end
RSpec.configure do |c|
# structural
c.alias_it_behaves_like_to 'it_has_behavior'
c.around {|example| Sandboxing.sandboxed { example.run }}
c.include(RSpecHelpers)
c.include Aruba::Api, :example_group => {
:file_path => /spec\/command_line/
}
c.expect_with :rspec do |expectations|
expectations.syntax = :expect
end
# runtime options
c.treat_symbols_as_metadata_keys_with_true_values = true
c.color = !in_editor?
c.filter_run :focus
c.include EnvHelpers
c.run_all_when_everything_filtered = true
c.filter_run_excluding :ruby => lambda {|version|
case version.to_s
when "!jruby"
RUBY_ENGINE == "jruby"
when /^> (.*)/
!(RUBY_VERSION.to_s > $1)
else
!(RUBY_VERSION.to_s =~ /^#{version.to_s}/)
end
}
end
end
Spork.each_run do
end
| 1 | 9,982 | Do we not already have something for isolating syntax? | rspec-rspec-core | rb |
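For reference, a minimal sketch of how a spec would opt into the shared context added by the patch above; the example group and its body are invented, while include_context and the syntax setter are standard rspec-core / rspec-expectations APIs:

describe "switching expectation syntax inside one group" do
  include_context "with isolated syntax"

  it "can enable :should locally" do
    RSpec::Matchers.configuration.syntax = :should
    # ... exercise code that depends on the :should syntax ...
  end

  # The shared context's after(:each) hook restores the original syntax, so
  # other example groups keep whatever syntax spec_helper configured.
end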
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
-## Copyright (C) 2012 CERN.
+## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as | 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from invenio.testutils import make_test_suite, run_test_suite, InvenioXmlTestCase
from invenio.config import CFG_SITE_URL, CFG_ETCDIR, CFG_INSPIRE_SITE
from invenio.bibrecord import create_record, record_xml_output, record_delete_field
if CFG_INSPIRE_SITE:
EXPECTED_RESPONSE = """<record>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">1</subfield>
<subfield code="h">D. Clowe, A. Gonzalez, and M. Markevitch</subfield>
<subfield code="s">Astrophys. J.,604,596</subfield>
<subfield code="y">2004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">2</subfield>
<subfield code="h">C. L. Sarazin, X-Ray Emission</subfield>
<subfield code="m">from Clusters of Galaxies (Cambridge University Press, Cambridge 1988)</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">3</subfield>
<subfield code="h">M. Girardi, G. Giuricin, F. Mardirossian, M. Mezzetti, and W. Boschin</subfield>
<subfield code="s">Astrophys. J.,505,74</subfield>
<subfield code="y">1998</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">4</subfield>
<subfield code="h">D. A. White, C. Jones, and W. Forman</subfield>
<subfield code="s">Mon. Not. R. Astron. Soc.,292,419</subfield>
<subfield code="y">1997</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">5</subfield>
<subfield code="h">V.C. Rubin, N. Thonnard, and W. K. Ford</subfield>
<subfield code="s">Astrophys. J.,238,471</subfield>
<subfield code="y">1980</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">6</subfield>
<subfield code="h">A. Bosma</subfield>
<subfield code="s">Astron. J.,86,1825</subfield>
<subfield code="y">1981</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">7</subfield>
<subfield code="h">S.M. Faber and J.S. Gallagher</subfield>
<subfield code="s">Annu. Rev. Astron. Astrophys.,17,135</subfield>
<subfield code="y">1979</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">8</subfield>
<subfield code="h">M. Persic, P. Salucci, and F. Stel</subfield>
<subfield code="s">Mon. Not. R. Astron. Soc.,281,27</subfield>
<subfield code="y">1996</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">9</subfield>
<subfield code="h">M. Lowewnstein and R. E. White</subfield>
<subfield code="s">Astrophys. J.,518,50</subfield>
<subfield code="y">1999</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">10</subfield>
<subfield code="h">D. P. Clemens</subfield>
<subfield code="s">Astrophys. J.,295,422</subfield>
<subfield code="y">1985</subfield>
</datafield>
</record>
"""
else:
EXPECTED_RESPONSE = """<record>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">1</subfield>
<subfield code="h">D. Clowe, A. Gonzalez, and M. Markevitch</subfield>
<subfield code="s">Astrophys. J. 604 (2004) 596</subfield>
<subfield code="y">2004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">2</subfield>
<subfield code="h">C. L. Sarazin, X-Ray Emission</subfield>
<subfield code="m">from Clusters of Galaxies (Cambridge University Press, Cambridge 1988)</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">3</subfield>
<subfield code="h">M. Girardi, G. Giuricin, F. Mardirossian, M. Mezzetti, and W. Boschin</subfield>
<subfield code="s">Astrophys. J. 505 (1998) 74</subfield>
<subfield code="y">1998</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">4</subfield>
<subfield code="h">D. A. White, C. Jones, and W. Forman</subfield>
<subfield code="s">Mon. Not. R. Astron. Soc. 292 (1997) 419</subfield>
<subfield code="y">1997</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">5</subfield>
<subfield code="h">V.C. Rubin, N. Thonnard, and W. K. Ford</subfield>
<subfield code="s">Astrophys. J. 238 (1980) 471</subfield>
<subfield code="y">1980</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">6</subfield>
<subfield code="h">A. Bosma</subfield>
<subfield code="s">Astron. J. 86 (1981) 1825</subfield>
<subfield code="y">1981</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">7</subfield>
<subfield code="h">S.M. Faber and J.S. Gallagher</subfield>
<subfield code="s">Annu. Rev. Astron. Astrophys. 17 (1979) 135</subfield>
<subfield code="y">1979</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">8</subfield>
<subfield code="h">M. Persic, P. Salucci, and F. Stel</subfield>
<subfield code="s">Mon. Not. R. Astron. Soc. 281 (1996) 27</subfield>
<subfield code="y">1996</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">9</subfield>
<subfield code="h">M. Lowewnstein and R. E. White</subfield>
<subfield code="s">Astrophys. J. 518 (1999) 50</subfield>
<subfield code="y">1999</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">10</subfield>
<subfield code="h">D. P. Clemens</subfield>
<subfield code="s">Astrophys. J. 295 (1985) 422</subfield>
<subfield code="y">1985</subfield>
</datafield>
</record>"""
def compare_references(test, a, b):
## Let's normalize records to remove the Invenio refextract signature
a = create_record(a)[0]
b = create_record(b)[0]
record_delete_field(a, '999', 'C', '6')
a = record_xml_output(a)
b = record_xml_output(b)
test.assertXmlEqual(a, b)
class DocExtractTest(InvenioXmlTestCase):
if HAS_REQUESTS:
def test_upload(self):
url = CFG_SITE_URL + '/textmining/api/extract-references-pdf'
pdf = open("%s/docextract/example.pdf" % CFG_ETCDIR, 'rb')
response = requests.post(url, files={'pdf': pdf})
# Remove stats tag
lines = response.content.split('\n')
lines[-6:-1] = []
compare_references(self, '\n'.join(lines), EXPECTED_RESPONSE)
def test_url(self):
url = CFG_SITE_URL + '/textmining/api/extract-references-pdf-url'
pdf = CFG_SITE_URL + '/textmining/example.pdf'
response = requests.post(url, data={'url': pdf})
compare_references(self, response.content, EXPECTED_RESPONSE)
def test_txt(self):
url = CFG_SITE_URL + '/textmining/api/extract-references-txt'
pdf = open("%s/docextract/example.txt" % CFG_ETCDIR, 'rb')
response = requests.post(url, files={'txt': pdf})
# Remove stats tag
lines = response.content.split('\n')
lines[-6:-1] = []
compare_references(self, '\n'.join(lines), EXPECTED_RESPONSE)
TEST_SUITE = make_test_suite(DocExtractTest)
if __name__ == '__main__':
run_test_suite(TEST_SUITE)
| 1 | 12,800 | 1: D100 Docstring missing 4: I102 copyright year is outdated, expected 2014 but got 2012 158: D103 Docstring missing 168: D101 Docstring missing 170: D102 Docstring missing 180: D102 Docstring missing 187: D102 Docstring missing | inveniosoftware-invenio | py |
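The codes in the comment above are pydocstyle/flake8 findings: D100 is a missing module docstring, D101 a missing class docstring, D102 a missing method docstring, D103 a missing function docstring, and I102 flags the outdated copyright year. A sketch, with invented wording and names taken from the file above, of the docstrings that would address the D1xx findings:

"""Unit tests for the refextract web API."""  # D100: module docstring


def compare_references(test, a, b):
    """Compare two reference records, ignoring the refextract signature."""  # D103
    ...


class DocExtractTest(InvenioXmlTestCase):
    """Regression tests for the reference extraction endpoints."""  # D101

    def test_upload(self):
        """Extract references from an uploaded PDF."""  # D102
        ...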
@@ -1,11 +1,14 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
+# Purpose
+# This code example demonstrates how deny uploads of objects without
+# server-side AWS KMS encryption to an Amazon Simple Storage Solution (Amazon S3).
+
+# snippet-start:[s3.ruby.s3_add_bucket_sses3_encryption_policy]
+
require 'aws-sdk-s3'
-# Denies uploads of objects without server-side AWS KMS encryption to
-# an Amazon S3 bucket.
-#
# Prerequisites:
#
# - The Amazon S3 bucket to deny uploading objects without | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
require 'aws-sdk-s3'
# Denies uploads of objects without server-side AWS KMS encryption to
# an Amazon S3 bucket.
#
# Prerequisites:
#
# - The Amazon S3 bucket to deny uploading objects without
# server-side AWS KMS encryption.
#
# @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
# @param bucket_name [String] The bucket's name.
# @return [Boolean] true if a policy was added to the bucket to
# deny uploading objects without server-side AWS KMS encryption;
# otherwise, false.
# @example
# if deny_uploads_without_server_side_aws_kms_encryption?(
# Aws::S3::Client.new(region: 'us-east-1'),
# 'doc-example-bucket'
# )
# puts 'Policy added.'
# else
# puts 'Policy not added.'
# end
def deny_uploads_without_server_side_aws_kms_encryption?(s3_client, bucket_name)
policy = {
'Version': '2012-10-17',
'Id': 'PutObjPolicy',
'Statement': [
{
'Sid': 'DenyIncorrectEncryptionHeader',
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::' + bucket_name + '/*',
'Condition': {
'StringNotEquals': {
's3:x-amz-server-side-encryption': 'aws:kms'
}
}
},
{
'Sid': 'DenyUnEncryptedObjectUploads',
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::' + bucket_name + '/*',
'Condition': {
'Null': {
's3:x-amz-server-side-encryption': 'true'
}
}
}
]
}.to_json
s3_client.put_bucket_policy(
bucket: bucket_name,
policy: policy
)
return true
rescue StandardError => e
puts "Error adding policy: #{e.message}"
return false
end
# Full example call:
def run_me
if deny_uploads_without_server_side_aws_kms_encryption?(
Aws::S3::Client.new(region: 'us-east-1'),
'doc-example-bucket'
)
puts 'Policy added.'
else
puts 'Policy not added.'
end
end
run_me if $PROGRAM_NAME == __FILE__
| 1 | 20,532 | how **to** deny | awsdocs-aws-doc-sdk-examples | rb |
@@ -19,6 +19,9 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core
/// </summary>
public class ListenOptions : IEndPointInformation, IConnectionBuilder
{
+ private const string Http2ExperimentSwitch = "Switch.Microsoft.AspNetCore.Server.Kestrel.Experiential.Http2";
+ private const string Http1AndHttp2ExperimentSwitch = "Switch.Microsoft.AspNetCore.Server.Kestrel.Experiential.Http1AndHttp2";
+
private FileHandleType _handleType;
private readonly List<Func<ConnectionDelegate, ConnectionDelegate>> _components = new List<Func<ConnectionDelegate, ConnectionDelegate>>();
| 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Protocols;
using Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;
namespace Microsoft.AspNetCore.Server.Kestrel.Core
{
/// <summary>
/// Describes either an <see cref="IPEndPoint"/>, Unix domain socket path, or a file descriptor for an already open
/// socket that Kestrel should bind to or open.
/// </summary>
public class ListenOptions : IEndPointInformation, IConnectionBuilder
{
private FileHandleType _handleType;
private readonly List<Func<ConnectionDelegate, ConnectionDelegate>> _components = new List<Func<ConnectionDelegate, ConnectionDelegate>>();
internal ListenOptions(IPEndPoint endPoint)
{
Type = ListenType.IPEndPoint;
IPEndPoint = endPoint;
}
internal ListenOptions(string socketPath)
{
Type = ListenType.SocketPath;
SocketPath = socketPath;
}
internal ListenOptions(ulong fileHandle)
: this(fileHandle, FileHandleType.Auto)
{
}
internal ListenOptions(ulong fileHandle, FileHandleType handleType)
{
Type = ListenType.FileHandle;
FileHandle = fileHandle;
switch (handleType)
{
case FileHandleType.Auto:
case FileHandleType.Tcp:
case FileHandleType.Pipe:
_handleType = handleType;
break;
default:
throw new NotSupportedException();
}
}
/// <summary>
/// The type of interface being described: either an <see cref="IPEndPoint"/>, Unix domain socket path, or a file descriptor.
/// </summary>
public ListenType Type { get; }
public FileHandleType HandleType
{
get => _handleType;
set
{
if (value == _handleType)
{
return;
}
if (Type != ListenType.FileHandle || _handleType != FileHandleType.Auto)
{
throw new InvalidOperationException();
}
switch (value)
{
case FileHandleType.Tcp:
case FileHandleType.Pipe:
_handleType = value;
break;
default:
throw new ArgumentException(nameof(HandleType));
}
}
}
// IPEndPoint is mutable so port 0 can be updated to the bound port.
/// <summary>
/// The <see cref="IPEndPoint"/> to bind to.
/// Only set if the <see cref="ListenOptions"/> <see cref="Type"/> is <see cref="ListenType.IPEndPoint"/>.
/// </summary>
public IPEndPoint IPEndPoint { get; set; }
/// <summary>
/// The absolute path to a Unix domain socket to bind to.
/// Only set if the <see cref="ListenOptions"/> <see cref="Type"/> is <see cref="ListenType.SocketPath"/>.
/// </summary>
public string SocketPath { get; }
/// <summary>
/// A file descriptor for the socket to open.
/// Only set if the <see cref="ListenOptions"/> <see cref="Type"/> is <see cref="ListenType.FileHandle"/>.
/// </summary>
public ulong FileHandle { get; }
/// <summary>
/// Enables an <see cref="IConnectionAdapter"/> to resolve and use services registered by the application during startup.
/// Only set if accessed from the callback of a <see cref="KestrelServerOptions"/> Listen* method.
/// </summary>
public KestrelServerOptions KestrelServerOptions { get; internal set; }
/// <summary>
/// Set to false to enable Nagle's algorithm for all connections.
/// </summary>
/// <remarks>
/// Defaults to true.
/// </remarks>
public bool NoDelay { get; set; } = true;
/// <summary>
/// The protocols enabled on this endpoint.
/// </summary>
/// <remarks>Defaults to HTTP/1.x only.</remarks>
public HttpProtocols Protocols { get; set; } = HttpProtocols.Http1;
/// <summary>
/// Gets the <see cref="List{IConnectionAdapter}"/> that allows each connection <see cref="System.IO.Stream"/>
/// to be intercepted and transformed.
/// Configured by the <c>UseHttps()</c> and <see cref="Hosting.ListenOptionsConnectionLoggingExtensions.UseConnectionLogging(ListenOptions)"/>
/// extension methods.
/// </summary>
/// <remarks>
/// Defaults to empty.
/// </remarks>
public List<IConnectionAdapter> ConnectionAdapters { get; } = new List<IConnectionAdapter>();
public IServiceProvider ApplicationServices => KestrelServerOptions?.ApplicationServices;
/// <summary>
/// Gets the name of this endpoint to display on command-line when the web server starts.
/// </summary>
internal virtual string GetDisplayName()
{
var scheme = ConnectionAdapters.Any(f => f.IsHttps)
? "https"
: "http";
switch (Type)
{
case ListenType.IPEndPoint:
return $"{scheme}://{IPEndPoint}";
case ListenType.SocketPath:
return $"{scheme}://unix:{SocketPath}";
case ListenType.FileHandle:
return $"{scheme}://<file handle>";
default:
throw new InvalidOperationException();
}
}
public override string ToString() => GetDisplayName();
public IConnectionBuilder Use(Func<ConnectionDelegate, ConnectionDelegate> middleware)
{
_components.Add(middleware);
return this;
}
public ConnectionDelegate Build()
{
ConnectionDelegate app = context =>
{
return Task.CompletedTask;
};
for (int i = _components.Count - 1; i >= 0; i--)
{
var component = _components[i];
app = component(app);
}
return app;
}
internal virtual async Task BindAsync(AddressBindContext context)
{
await AddressBinder.BindEndpointAsync(this, context).ConfigureAwait(false);
context.Addresses.Add(GetDisplayName());
}
}
}
| 1 | 14,612 | The only beef I have with this is that it's app domain global. | aspnet-KestrelHttpServer | .cs |
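For context on the remark above: switches like the ones named in the patch are typically read through AppContext, whose state is global to the process/app domain rather than scoped to a single ListenOptions. A minimal C# sketch of flipping such a switch from the hosting program; the switch name is copied from the patch, and exactly how Kestrel consumes it is an assumption here:

using System;

static class Program
{
    static void Main()
    {
        // AppContext switches are process-wide: once set, every ListenOptions
        // instance (and anything else querying the switch) sees the same value.
        const string http2Switch =
            "Switch.Microsoft.AspNetCore.Server.Kestrel.Experiential.Http2";

        AppContext.SetSwitch(http2Switch, isEnabled: true);

        if (AppContext.TryGetSwitch(http2Switch, out var enabled))
        {
            Console.WriteLine($"HTTP/2 experiment switch enabled: {enabled}");
        }

        // ... build and run the web host after this point ...
    }
}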
@@ -562,8 +562,11 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
if err = tc.SetKeepAlive(true); err != nil {
return
}
- if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
- return
+ // OpenBSD has no user-settable per-socket TCP keepalive
+ if runtime.GOOS != "openbsd" {
+ if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
+ return
+ }
}
return tc, nil
} | 1 | // Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package httpserver implements an HTTP server on top of Caddy.
package httpserver
import (
"context"
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/caddyserver/caddy"
"github.com/caddyserver/caddy/caddyhttp/staticfiles"
"github.com/caddyserver/caddy/caddytls"
"github.com/caddyserver/caddy/telemetry"
"github.com/lucas-clemente/quic-go/h2quic"
)
// Server is the HTTP server implementation.
type Server struct {
Server *http.Server
quicServer *h2quic.Server
sites []*SiteConfig
connTimeout time.Duration // max time to wait for a connection before force stop
tlsGovChan chan struct{} // close to stop the TLS maintenance goroutine
vhosts *vhostTrie
}
// ensure it satisfies the interface
var _ caddy.GracefulServer = new(Server)
var defaultALPN = []string{"h2", "http/1.1"}
// makeTLSConfig extracts TLS settings from each site config to
// build a tls.Config usable in Caddy HTTP servers. The returned
// config will be nil if TLS is disabled for these sites.
func makeTLSConfig(group []*SiteConfig) (*tls.Config, error) {
var tlsConfigs []*caddytls.Config
for i := range group {
if HTTP2 && len(group[i].TLS.ALPN) == 0 {
// if no application-level protocol was configured up to now,
// default to HTTP/2, then HTTP/1.1 if necessary
group[i].TLS.ALPN = defaultALPN
}
tlsConfigs = append(tlsConfigs, group[i].TLS)
}
return caddytls.MakeTLSConfig(tlsConfigs)
}
func getFallbacks(sites []*SiteConfig) []string {
fallbacks := []string{}
for _, sc := range sites {
if sc.FallbackSite {
fallbacks = append(fallbacks, sc.Addr.Host)
}
}
return fallbacks
}
// NewServer creates a new Server instance that will listen on addr
// and will serve the sites configured in group.
func NewServer(addr string, group []*SiteConfig) (*Server, error) {
s := &Server{
Server: makeHTTPServerWithTimeouts(addr, group),
vhosts: newVHostTrie(),
sites: group,
connTimeout: GracefulTimeout,
}
s.vhosts.fallbackHosts = append(s.vhosts.fallbackHosts, getFallbacks(group)...)
s.Server = makeHTTPServerWithHeaderLimit(s.Server, group)
s.Server.Handler = s // this is weird, but whatever
// extract TLS settings from each site config to build
// a tls.Config, which will not be nil if TLS is enabled
tlsConfig, err := makeTLSConfig(group)
if err != nil {
return nil, err
}
s.Server.TLSConfig = tlsConfig
// if TLS is enabled, make sure we prepare the Server accordingly
if s.Server.TLSConfig != nil {
// enable QUIC if desired (requires HTTP/2)
if HTTP2 && QUIC {
s.quicServer = &h2quic.Server{Server: s.Server}
s.Server.Handler = s.wrapWithSvcHeaders(s.Server.Handler)
}
// wrap the HTTP handler with a handler that does MITM detection
tlsh := &tlsHandler{next: s.Server.Handler}
s.Server.Handler = tlsh // this needs to be the "outer" handler when Serve() is called, for type assertion
// when Serve() creates the TLS listener later, that listener should
// be adding a reference the ClientHello info to a map; this callback
// will be sure to clear out that entry when the connection closes.
s.Server.ConnState = func(c net.Conn, cs http.ConnState) {
// when a connection closes or is hijacked, delete its entry
// in the map, because we are done with it.
if tlsh.listener != nil {
if cs == http.StateHijacked || cs == http.StateClosed {
tlsh.listener.helloInfosMu.Lock()
delete(tlsh.listener.helloInfos, c.RemoteAddr().String())
tlsh.listener.helloInfosMu.Unlock()
}
}
}
// As of Go 1.7, if the Server's TLSConfig is not nil, HTTP/2 is enabled only
// if TLSConfig.NextProtos includes the string "h2"
if HTTP2 && len(s.Server.TLSConfig.NextProtos) == 0 {
// some experimenting shows that this NextProtos must have at least
// one value that overlaps with the NextProtos of any other tls.Config
// that is returned from GetConfigForClient; if there is no overlap,
// the connection will fail (as of Go 1.8, Feb. 2017).
s.Server.TLSConfig.NextProtos = defaultALPN
}
}
// Compile custom middleware for every site (enables virtual hosting)
for _, site := range group {
stack := Handler(staticfiles.FileServer{Root: http.Dir(site.Root), Hide: site.HiddenFiles, IndexPages: site.IndexPages})
for i := len(site.middleware) - 1; i >= 0; i-- {
stack = site.middleware[i](stack)
}
site.middlewareChain = stack
s.vhosts.Insert(site.Addr.VHost(), site)
}
return s, nil
}
// makeHTTPServerWithHeaderLimit applies the smallest MaxRequestHeaderSize configured within a group to the given http.Server
func makeHTTPServerWithHeaderLimit(s *http.Server, group []*SiteConfig) *http.Server {
var min int64
for _, cfg := range group {
limit := cfg.Limits.MaxRequestHeaderSize
if limit == 0 {
continue
}
// not set yet
if min == 0 {
min = limit
}
// find a better one
if limit < min {
min = limit
}
}
if min > 0 {
s.MaxHeaderBytes = int(min)
}
return s
}
// makeHTTPServerWithTimeouts makes an http.Server from the group of
// configs in a way that configures timeouts (or, if not set, it uses
// the default timeouts) by combining the configuration of each
// SiteConfig in the group. (Timeouts are important for mitigating
// slowloris attacks.)
func makeHTTPServerWithTimeouts(addr string, group []*SiteConfig) *http.Server {
// find the minimum duration configured for each timeout
var min Timeouts
for _, cfg := range group {
if cfg.Timeouts.ReadTimeoutSet &&
(!min.ReadTimeoutSet || cfg.Timeouts.ReadTimeout < min.ReadTimeout) {
min.ReadTimeoutSet = true
min.ReadTimeout = cfg.Timeouts.ReadTimeout
}
if cfg.Timeouts.ReadHeaderTimeoutSet &&
(!min.ReadHeaderTimeoutSet || cfg.Timeouts.ReadHeaderTimeout < min.ReadHeaderTimeout) {
min.ReadHeaderTimeoutSet = true
min.ReadHeaderTimeout = cfg.Timeouts.ReadHeaderTimeout
}
if cfg.Timeouts.WriteTimeoutSet &&
(!min.WriteTimeoutSet || cfg.Timeouts.WriteTimeout < min.WriteTimeout) {
min.WriteTimeoutSet = true
min.WriteTimeout = cfg.Timeouts.WriteTimeout
}
if cfg.Timeouts.IdleTimeoutSet &&
(!min.IdleTimeoutSet || cfg.Timeouts.IdleTimeout < min.IdleTimeout) {
min.IdleTimeoutSet = true
min.IdleTimeout = cfg.Timeouts.IdleTimeout
}
}
// for the values that were not set, use defaults
if !min.ReadTimeoutSet {
min.ReadTimeout = defaultTimeouts.ReadTimeout
}
if !min.ReadHeaderTimeoutSet {
min.ReadHeaderTimeout = defaultTimeouts.ReadHeaderTimeout
}
if !min.WriteTimeoutSet {
min.WriteTimeout = defaultTimeouts.WriteTimeout
}
if !min.IdleTimeoutSet {
min.IdleTimeout = defaultTimeouts.IdleTimeout
}
// set the final values on the server and return it
return &http.Server{
Addr: addr,
ReadTimeout: min.ReadTimeout,
ReadHeaderTimeout: min.ReadHeaderTimeout,
WriteTimeout: min.WriteTimeout,
IdleTimeout: min.IdleTimeout,
}
}
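// wrapWithSvcHeaders advertises QUIC support by adding Alt-Svc information to the
// response headers (via SetQuicHeaders) before invoking the next handler.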
func (s *Server) wrapWithSvcHeaders(previousHandler http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if err := s.quicServer.SetQuicHeaders(w.Header()); err != nil {
log.Println("[Error] failed to set proper headers for QUIC: ", err)
}
previousHandler.ServeHTTP(w, r)
}
}
// Listen creates an active listener for s that can be
// used to serve requests.
func (s *Server) Listen() (net.Listener, error) {
if s.Server == nil {
return nil, fmt.Errorf("server field is nil")
}
ln, err := net.Listen("tcp", s.Server.Addr)
if err != nil {
var succeeded bool
if runtime.GOOS == "windows" {
// Windows has been known to keep sockets open even after closing the listeners.
// Tests reveal this error case easily because they call Start() then Stop()
// in succession. TODO: Better way to handle this? And why limit this to Windows?
for i := 0; i < 20; i++ {
time.Sleep(100 * time.Millisecond)
ln, err = net.Listen("tcp", s.Server.Addr)
if err == nil {
succeeded = true
break
}
}
}
if !succeeded {
return nil, err
}
}
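	// wrap plain TCP listeners so accepted connections get TCP keep-alive probes
	// (see tcpKeepAliveListener below)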
if tcpLn, ok := ln.(*net.TCPListener); ok {
ln = tcpKeepAliveListener{TCPListener: tcpLn}
}
cln := s.WrapListener(ln)
// Very important to return a concrete caddy.Listener
// implementation for graceful restarts.
return cln.(caddy.Listener), nil
}
// WrapListener wraps ln in the listener middlewares configured
// for this server.
func (s *Server) WrapListener(ln net.Listener) net.Listener {
if ln == nil {
return nil
}
cln := ln.(caddy.Listener)
for _, site := range s.sites {
for _, m := range site.listenerMiddleware {
cln = m(cln)
}
}
return cln
}
// ListenPacket creates a UDP connection for QUIC, if QUIC is enabled.
func (s *Server) ListenPacket() (net.PacketConn, error) {
if QUIC {
udpAddr, err := net.ResolveUDPAddr("udp", s.Server.Addr)
if err != nil {
return nil, err
}
return net.ListenUDP("udp", udpAddr)
}
return nil, nil
}
// Serve serves requests on ln. It blocks until ln is closed.
func (s *Server) Serve(ln net.Listener) error {
if s.Server.TLSConfig != nil {
// Create TLS listener - note that we do not replace s.listener
// with this TLS listener; tls.listener is unexported and does
// not implement the File() method we need for graceful restarts
// on POSIX systems.
// TODO: Is this ^ still relevant anymore? Maybe we can now that it's a net.Listener...
ln = newTLSListener(ln, s.Server.TLSConfig)
if handler, ok := s.Server.Handler.(*tlsHandler); ok {
handler.listener = ln.(*tlsHelloListener)
}
// Rotate TLS session ticket keys
s.tlsGovChan = caddytls.RotateSessionTicketKeys(s.Server.TLSConfig)
}
defer func() {
if s.quicServer != nil {
if err := s.quicServer.Close(); err != nil {
log.Println("[ERROR] failed to close QUIC server: ", err)
}
}
}()
err := s.Server.Serve(ln)
if err != nil && err != http.ErrServerClosed {
return err
}
return nil
}
// ServePacket serves QUIC requests on pc until it is closed.
func (s *Server) ServePacket(pc net.PacketConn) error {
if s.quicServer != nil {
err := s.quicServer.Serve(pc.(*net.UDPConn))
return fmt.Errorf("serving QUIC connections: %v", err)
}
return nil
}
// ServeHTTP is the entry point of all HTTP requests.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer func() {
// We absolutely need to be sure we stay alive up here,
// even though, in theory, the errors middleware does this.
if rec := recover(); rec != nil {
log.Printf("[PANIC] %v", rec)
DefaultErrorFunc(w, r, http.StatusInternalServerError)
}
}()
// record the User-Agent string (with a cap on its length to mitigate attacks)
ua := r.Header.Get("User-Agent")
if len(ua) > 512 {
ua = ua[:512]
}
uaHash := telemetry.FastHash([]byte(ua)) // this is a normalized field
go telemetry.SetNested("http_user_agent", uaHash, ua)
go telemetry.AppendUnique("http_user_agent_count", uaHash)
go telemetry.Increment("http_request_count")
// copy the original, unchanged URL into the context
// so it can be referenced by middlewares
urlCopy := *r.URL
if r.URL.User != nil {
userInfo := new(url.Userinfo)
*userInfo = *r.URL.User
urlCopy.User = userInfo
}
c := context.WithValue(r.Context(), OriginalURLCtxKey, urlCopy)
r = r.WithContext(c)
// Setup a replacer for the request that keeps track of placeholder
// values across plugins.
replacer := NewReplacer(r, nil, "")
c = context.WithValue(r.Context(), ReplacerCtxKey, replacer)
r = r.WithContext(c)
w.Header().Set("Server", caddy.AppName)
status, _ := s.serveHTTP(w, r)
// Fallback error response in case error handling wasn't chained in
if status >= 400 {
DefaultErrorFunc(w, r, status)
}
}
func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
// strip out the port because it's not used in virtual
// hosting; the port is irrelevant because each listener
// is on a different port.
hostname, _, err := net.SplitHostPort(r.Host)
if err != nil {
hostname = r.Host
}
// look up the virtualhost; if no match, serve error
vhost, pathPrefix := s.vhosts.Match(hostname + r.URL.Path)
c := context.WithValue(r.Context(), caddy.CtxKey("path_prefix"), pathPrefix)
r = r.WithContext(c)
if vhost == nil {
// check for ACME challenge even if vhost is nil;
// could be a new host coming online soon - choose any
// vhost's cert manager configuration, I guess
if len(s.sites) > 0 && s.sites[0].TLS.Manager.HandleHTTPChallenge(w, r) {
return 0, nil
}
// otherwise, log the error and write a message to the client
remoteHost, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
remoteHost = r.RemoteAddr
}
WriteSiteNotFound(w, r) // don't add headers outside of this function (http.forwardproxy)
log.Printf("[INFO] %s - No such site at %s (Remote: %s, Referer: %s)",
hostname, s.Server.Addr, remoteHost, r.Header.Get("Referer"))
return 0, nil
}
// we still check for ACME challenge if the vhost exists,
// because the HTTP challenge might be disabled by its config
if vhost.TLS.Manager.HandleHTTPChallenge(w, r) {
return 0, nil
}
// trim the path portion of the site address from the beginning of
// the URL path, so a request to example.com/foo/blog on the site
// defined as example.com/foo appears as /blog instead of /foo/blog.
if pathPrefix != "/" {
r.URL = trimPathPrefix(r.URL, pathPrefix)
}
// enforce strict host matching, which ensures that the SNI
// value (if any), matches the Host header; essential for
// sites that rely on TLS ClientAuth sharing a port with
// sites that do not - if mismatched, close the connection
if vhost.StrictHostMatching && r.TLS != nil &&
strings.ToLower(r.TLS.ServerName) != strings.ToLower(hostname) {
r.Close = true
log.Printf("[ERROR] %s - strict host matching: SNI (%s) and HTTP Host (%s) values differ",
vhost.Addr, r.TLS.ServerName, hostname)
return http.StatusForbidden, nil
}
return vhost.middlewareChain.ServeHTTP(w, r)
}
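// trimPathPrefix strips the site's path prefix from the request URL so handlers see
// paths relative to the site root; e.g. with prefix "/foo", "/foo/blog?x=1" becomes "/blog?x=1".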
func trimPathPrefix(u *url.URL, prefix string) *url.URL {
// We need to use URL.EscapedPath() when trimming the pathPrefix as
// URL.Path is ambiguous about / or %2f - see docs. See #1927
trimmedPath := strings.TrimPrefix(u.EscapedPath(), prefix)
if !strings.HasPrefix(trimmedPath, "/") {
trimmedPath = "/" + trimmedPath
}
// After trimming path reconstruct uri string with Query before parsing
trimmedURI := trimmedPath
	if u.RawQuery != "" || u.ForceQuery {
trimmedURI = trimmedPath + "?" + u.RawQuery
}
if u.Fragment != "" {
trimmedURI = trimmedURI + "#" + u.Fragment
}
trimmedURL, err := url.Parse(trimmedURI)
if err != nil {
log.Printf("[ERROR] Unable to parse trimmed URL %s: %v", trimmedURI, err)
return u
}
return trimmedURL
}
// Address returns the address s was assigned to listen on.
func (s *Server) Address() string {
return s.Server.Addr
}
// Stop stops s gracefully (or forcefully after timeout) and
// closes its listener.
func (s *Server) Stop() error {
ctx, cancel := context.WithTimeout(context.Background(), s.connTimeout)
defer cancel()
err := s.Server.Shutdown(ctx)
if err != nil {
return err
}
// signal any TLS governor goroutines to exit
if s.tlsGovChan != nil {
close(s.tlsGovChan)
}
return nil
}
// OnStartupComplete lists the sites served by this server
// and any relevant information, assuming caddy.Quiet == false.
func (s *Server) OnStartupComplete() {
if !caddy.Quiet {
firstSite := s.sites[0]
scheme := "HTTP"
if firstSite.TLS.Enabled {
scheme = "HTTPS"
}
fmt.Println("")
fmt.Printf("Serving %s on port "+firstSite.Port()+" \n", scheme)
s.outputSiteInfo(false)
fmt.Println("")
}
// Print out process log without header comment
s.outputSiteInfo(true)
}
func (s *Server) outputSiteInfo(isProcessLog bool) {
for _, site := range s.sites {
output := site.Addr.String()
if caddy.IsLoopback(s.Address()) && !caddy.IsLoopback(site.Addr.Host) {
output += " (only accessible on this machine)"
}
if isProcessLog {
log.Printf("[INFO] Serving %s \n", output)
} else {
fmt.Println(output)
}
}
}
// defaultTimeouts stores the default timeout values to use
// if left unset by user configuration. NOTE: Most default
// timeouts are disabled (see issues #1464 and #1733).
var defaultTimeouts = Timeouts{IdleTimeout: 5 * time.Minute}
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
//
// Borrowed from the Go standard library.
type tcpKeepAliveListener struct {
*net.TCPListener
}
// Accept accepts the connection with a keep-alive enabled.
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
if err = tc.SetKeepAlive(true); err != nil {
return
}
if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
return
}
return tc, nil
}
// File implements caddy.Listener; it returns the underlying file of the listener.
func (ln tcpKeepAliveListener) File() (*os.File, error) {
return ln.TCPListener.File()
}
// ErrMaxBytesExceeded is the error returned by MaxBytesReader
// when the request body exceeds the limit imposed
var ErrMaxBytesExceeded = errors.New("http: request body too large")
// DefaultErrorFunc responds to an HTTP request with a simple description
// of the specified HTTP status code.
func DefaultErrorFunc(w http.ResponseWriter, r *http.Request, status int) {
WriteTextResponse(w, status, fmt.Sprintf("%d %s\n", status, http.StatusText(status)))
}
const httpStatusMisdirectedRequest = 421 // RFC 7540, 9.1.2
// WriteSiteNotFound writes an appropriate error code to w, signaling that
// the requested host is not served by Caddy on the given port.
func WriteSiteNotFound(w http.ResponseWriter, r *http.Request) {
status := http.StatusNotFound
if r.ProtoMajor >= 2 {
// TODO: use http.StatusMisdirectedRequest when it gets defined
status = httpStatusMisdirectedRequest
}
WriteTextResponse(w, status, fmt.Sprintf("%d Site %s is not served on this interface\n", status, r.Host))
}
// WriteTextResponse writes body with code status to w. The body will
// be interpreted as plain text.
func WriteTextResponse(w http.ResponseWriter, status int, body string) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(status)
if _, err := w.Write([]byte(body)); err != nil {
log.Println("[Error] failed to write body: ", err)
}
}
// SafePath joins siteRoot and reqPath and converts it to a path that can
// be used to access a path on the local disk. It ensures the path does
// not traverse outside of the site root.
//
// If opening a file, use http.Dir instead.
func SafePath(siteRoot, reqPath string) string {
reqPath = filepath.ToSlash(reqPath)
reqPath = strings.Replace(reqPath, "\x00", "", -1) // NOTE: Go 1.9 checks for null bytes in the syscall package
if siteRoot == "" {
siteRoot = "."
}
return filepath.Join(siteRoot, filepath.FromSlash(path.Clean("/"+reqPath)))
}
// OriginalURLCtxKey is the key for accessing the original, incoming URL on an HTTP request.
const OriginalURLCtxKey = caddy.CtxKey("original_url")
| 1 | 13,654 | Can you link to the GitHub issue and/or PR so that it is easy for future readers to find out more about this? | caddyserver-caddy | go |
@@ -3009,3 +3009,18 @@ void StatelessValidation::PostCallRecordDestroyRenderPass(VkDevice device, VkRen
// Track the state necessary for checking vkCreateGraphicsPipeline (subpass usage of depth and color attachments)
renderpasses_states.erase(renderPass);
}
+
+bool StatelessValidation::manual_PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
+ bool skip = false;
+
+ if (pAllocateInfo) {
+ auto chained_prio_struct = lvl_find_in_chain<VkMemoryPriorityAllocateInfoEXT>(pAllocateInfo->pNext);
+ if (chained_prio_struct && (chained_prio_struct->priority < 0.0f || chained_prio_struct->priority > 1.0f)) {
+ skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
+ "VUID-VkMemoryPriorityAllocateInfoEXT-priority-02602",
+ "priority (=%f) must be between `0` and `1`, inclusive.", chained_prio_struct->priority);
+ }
+ }
+ return skip;
+} | 1 | /* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
* Copyright (C) 2015-2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@LunarG.com>
* Author: John Zulauf <jzulauf@lunarg.com>
*/
#define NOMINMAX
#include <math.h>
#include "chassis.h"
#include "stateless_validation.h"
static const int MaxParamCheckerStringLength = 256;
template <typename T>
inline bool in_inclusive_range(const T &value, const T &min, const T &max) {
// Using only < for generality and || for early abort
return !((value < min) || (max < value));
}
bool StatelessValidation::validate_string(const char *apiName, const ParameterName &stringName, const std::string &vuid,
const char *validateString) {
bool skip = false;
VkStringErrorFlags result = vk_string_validate(MaxParamCheckerStringLength, validateString);
if (result == VK_STRING_ERROR_NONE) {
return skip;
} else if (result & VK_STRING_ERROR_LENGTH) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: string %s exceeds max length %d", apiName, stringName.get_name().c_str(), MaxParamCheckerStringLength);
} else if (result & VK_STRING_ERROR_BAD_DATA) {
skip = log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: string %s contains invalid characters or is badly formed", apiName, stringName.get_name().c_str());
}
return skip;
}
bool StatelessValidation::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
const std::string &error_code, bool optional = false) {
bool skip = false;
if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
error_code,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name);
} else if (queueFamilyIndexMap.find(queue_family) == queueFamilyIndexMap.end()) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device), error_code,
"%s: %s (= %" PRIu32
") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
cmd_name, parameter_name, queue_family);
}
return skip;
}
bool StatelessValidation::ValidateQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families, const char *cmd_name,
const char *array_parameter_name, const std::string &unique_error_code,
const std::string &valid_error_code, bool optional = false) {
bool skip = false;
if (queue_families) {
std::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < queue_family_count; ++i) {
std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";
if (set.count(queue_families[i])) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
"%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name, parameter_name.c_str(),
queue_families[i], array_parameter_name);
} else {
set.insert(queue_families[i]);
skip |= ValidateDeviceQueueFamily(queue_families[i], cmd_name, parameter_name.c_str(), valid_error_code, optional);
}
}
}
return skip;
}
bool StatelessValidation::validate_api_version(uint32_t api_version, uint32_t effective_api_version) {
bool skip = false;
uint32_t api_version_nopatch = VK_MAKE_VERSION(VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), 0);
if (api_version_nopatch != effective_api_version) {
if (api_version_nopatch < VK_API_VERSION_1_0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
HandleToUint64(instance), kVUIDUndefined,
"Invalid CreateInstance->pCreateInfo->pApplicationInfo.apiVersion number (0x%08x). "
"Using VK_API_VERSION_%" PRIu32 "_%" PRIu32 ".",
api_version, VK_VERSION_MAJOR(effective_api_version), VK_VERSION_MINOR(effective_api_version));
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
HandleToUint64(instance), kVUIDUndefined,
"Unrecognized CreateInstance->pCreateInfo->pApplicationInfo.apiVersion number (0x%08x). "
"Assuming VK_API_VERSION_%" PRIu32 "_%" PRIu32 ".",
api_version, VK_VERSION_MAJOR(effective_api_version), VK_VERSION_MINOR(effective_api_version));
}
}
return skip;
}
bool StatelessValidation::validate_instance_extensions(const VkInstanceCreateInfo *pCreateInfo) {
bool skip = false;
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
skip |= validate_extension_reqs(instance_extensions, "VUID-vkCreateInstance-ppEnabledExtensionNames-01388", "instance",
pCreateInfo->ppEnabledExtensionNames[i]);
}
return skip;
}
template <typename ExtensionState>
bool extension_state_by_name(const ExtensionState &extensions, const char *extension_name) {
if (!extension_name) return false; // null strings specify nothing
auto info = ExtensionState::get_info(extension_name);
bool state = info.state ? extensions.*(info.state) : false; // unknown extensions can't be enabled in extension struct
return state;
}
bool StatelessValidation::manual_PreCallValidateCreateInstance(const VkInstanceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
bool skip = false;
// Note: From the spec--
// Providing a NULL VkInstanceCreateInfo::pApplicationInfo or providing an apiVersion of 0 is equivalent to providing
// an apiVersion of VK_MAKE_VERSION(1, 0, 0). (a.k.a. VK_API_VERSION_1_0)
uint32_t local_api_version = (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion)
? pCreateInfo->pApplicationInfo->apiVersion
: VK_API_VERSION_1_0;
skip |= validate_api_version(local_api_version, api_version);
skip |= validate_instance_extensions(pCreateInfo);
return skip;
}
void StatelessValidation::PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
// Copy extension data into local object
this->instance_extensions = instance_data->instance_extensions;
}
void StatelessValidation::PostCallRecordCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
auto device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_data->object_dispatch, LayerObjectTypeParameterValidation);
StatelessValidation *stateless_validation = static_cast<StatelessValidation *>(validation_data);
// Store queue family data
if ((pCreateInfo != nullptr) && (pCreateInfo->pQueueCreateInfos != nullptr)) {
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
stateless_validation->queueFamilyIndexMap.insert(
std::make_pair(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, pCreateInfo->pQueueCreateInfos[i].queueCount));
}
}
    // Parameter validation also uses extension data
stateless_validation->device_extensions = this->device_extensions;
VkPhysicalDeviceProperties device_properties = {};
    // Device limits are queried through the instance dispatch table, so fetch the instance-level layer data first
ValidationObject *instance_object = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
instance_object->instance_dispatch_table.GetPhysicalDeviceProperties(physicalDevice, &device_properties);
memcpy(&stateless_validation->device_limits, &device_properties.limits, sizeof(VkPhysicalDeviceLimits));
if (device_extensions.vk_nv_shading_rate_image) {
// Get the needed shading rate image limits
auto shading_rate_image_props = lvl_init_struct<VkPhysicalDeviceShadingRateImagePropertiesNV>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&shading_rate_image_props);
instance_object->instance_dispatch_table.GetPhysicalDeviceProperties2KHR(physicalDevice, &prop2);
phys_dev_ext_props.shading_rate_image_props = shading_rate_image_props;
}
if (device_extensions.vk_nv_mesh_shader) {
// Get the needed mesh shader limits
auto mesh_shader_props = lvl_init_struct<VkPhysicalDeviceMeshShaderPropertiesNV>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&mesh_shader_props);
instance_object->instance_dispatch_table.GetPhysicalDeviceProperties2KHR(physicalDevice, &prop2);
phys_dev_ext_props.mesh_shader_props = mesh_shader_props;
}
// Save app-enabled features in this device's validation object
// The enabled features can come from either pEnabledFeatures, or from the pNext chain
const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
if ((nullptr == enabled_features_found) && device_extensions.vk_khr_get_physical_device_properties_2) {
const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
if (features2) {
enabled_features_found = &(features2->features);
}
}
if (enabled_features_found) {
stateless_validation->physical_device_features = *enabled_features_found;
} else {
memset(&stateless_validation->physical_device_features, 0, sizeof(VkPhysicalDeviceFeatures));
}
}
bool StatelessValidation::manual_PreCallValidateCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
bool skip = false;
bool maint1 = false;
bool negative_viewport = false;
if ((pCreateInfo->enabledLayerCount > 0) && (pCreateInfo->ppEnabledLayerNames != NULL)) {
for (size_t i = 0; i < pCreateInfo->enabledLayerCount; i++) {
skip |= validate_string("vkCreateDevice", "pCreateInfo->ppEnabledLayerNames",
"VUID-VkDeviceCreateInfo-ppEnabledLayerNames-parameter", pCreateInfo->ppEnabledLayerNames[i]);
}
}
if ((pCreateInfo->enabledExtensionCount > 0) && (pCreateInfo->ppEnabledExtensionNames != NULL)) {
maint1 = extension_state_by_name(device_extensions, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
negative_viewport = extension_state_by_name(device_extensions, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME);
for (size_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
skip |= validate_string("vkCreateDevice", "pCreateInfo->ppEnabledExtensionNames",
"VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-parameter",
pCreateInfo->ppEnabledExtensionNames[i]);
skip |= validate_extension_reqs(device_extensions, "VUID-vkCreateDevice-ppEnabledExtensionNames-01387", "device",
pCreateInfo->ppEnabledExtensionNames[i]);
}
}
if (maint1 && negative_viewport) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374",
"VkDeviceCreateInfo->ppEnabledExtensionNames must not simultaneously include VK_KHR_maintenance1 and "
"VK_AMD_negative_viewport_height.");
}
if (pCreateInfo->pNext != NULL && pCreateInfo->pEnabledFeatures) {
// Check for get_physical_device_properties2 struct
const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
if (features2) {
// Cannot include VkPhysicalDeviceFeatures2KHR and have non-null pEnabledFeatures
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_InvalidUsage,
"VkDeviceCreateInfo->pNext includes a VkPhysicalDeviceFeatures2KHR struct when "
"pCreateInfo->pEnabledFeatures is non-NULL.");
}
}
// Validate pCreateInfo->pQueueCreateInfos
if (pCreateInfo->pQueueCreateInfos) {
std::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
const uint32_t requested_queue_family = pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex;
if (requested_queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32
"].queueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family "
"index value.",
i);
} else if (set.count(requested_queue_family)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32
") is not unique within pCreateInfo->pQueueCreateInfos array.",
i, requested_queue_family);
} else {
set.insert(requested_queue_family);
}
if (pCreateInfo->pQueueCreateInfos[i].pQueuePriorities != nullptr) {
for (uint32_t j = 0; j < pCreateInfo->pQueueCreateInfos[i].queueCount; ++j) {
const float queue_priority = pCreateInfo->pQueueCreateInfos[i].pQueuePriorities[j];
if (!(queue_priority >= 0.f) || !(queue_priority <= 1.f)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "VUID-VkDeviceQueueCreateInfo-pQueuePriorities-00383",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].pQueuePriorities[%" PRIu32
"] (=%f) is not between 0 and 1 (inclusive).",
i, j, queue_priority);
}
}
}
}
}
return skip;
}
bool StatelessValidation::require_device_extension(bool flag, char const *function_name, char const *extension_name) {
if (!flag) {
return log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_ExtensionNotEnabled,
"%s() called even though the %s extension was not enabled for this VkDevice.", function_name,
extension_name);
}
return false;
}
bool StatelessValidation::manual_PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
VkQueue *pQueue) {
bool skip = false;
skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex",
"VUID-vkGetDeviceQueue-queueFamilyIndex-00384");
const auto &queue_data = queueFamilyIndexMap.find(queueFamilyIndex);
if (queue_data != queueFamilyIndexMap.end() && queue_data->second <= queueIndex) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(device),
"VUID-vkGetDeviceQueue-queueIndex-00385",
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
") when the device was created (i.e. is not less than %" PRIu32 ").",
queueIndex, queueFamilyIndex, queue_data->second);
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
bool skip = false;
const LogMiscParams log_misc{VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, VK_NULL_HANDLE, "vkCreateBuffer"};
if (pCreateInfo != nullptr) {
skip |= ValidateGreaterThanZero(pCreateInfo->size, "pCreateInfo->size", "VUID-VkBufferCreateInfo-size-00912", log_misc);
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
// If sharingMode is VK_SHARING_MODE_CONCURRENT, queueFamilyIndexCount must be greater than 1
if (pCreateInfo->queueFamilyIndexCount <= 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-sharingMode-00914",
"vkCreateBuffer: if pCreateInfo->sharingMode is VK_SHARING_MODE_CONCURRENT, "
"pCreateInfo->queueFamilyIndexCount must be greater than 1.");
}
// If sharingMode is VK_SHARING_MODE_CONCURRENT, pQueueFamilyIndices must be a pointer to an array of
// queueFamilyIndexCount uint32_t values
if (pCreateInfo->pQueueFamilyIndices == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-sharingMode-00913",
"vkCreateBuffer: if pCreateInfo->sharingMode is VK_SHARING_MODE_CONCURRENT, "
"pCreateInfo->pQueueFamilyIndices must be a pointer to an array of "
"pCreateInfo->queueFamilyIndexCount uint32_t values.");
} else {
skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices", kVUID_PVError_InvalidUsage,
kVUID_PVError_InvalidUsage, false);
}
}
// If flags contains VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT or VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, it must also contain
// VK_BUFFER_CREATE_SPARSE_BINDING_BIT
if (((pCreateInfo->flags & (VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_ALIASED_BIT)) != 0) &&
((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-00918",
"vkCreateBuffer: if pCreateInfo->flags contains VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT or "
"VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, it must also contain VK_BUFFER_CREATE_SPARSE_BINDING_BIT.");
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
bool skip = false;
const LogMiscParams log_misc{VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, VK_NULL_HANDLE, "vkCreateImage"};
if (pCreateInfo != nullptr) {
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
// If sharingMode is VK_SHARING_MODE_CONCURRENT, queueFamilyIndexCount must be greater than 1
if (pCreateInfo->queueFamilyIndexCount <= 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-sharingMode-00942",
"vkCreateImage(): if pCreateInfo->sharingMode is VK_SHARING_MODE_CONCURRENT, "
"pCreateInfo->queueFamilyIndexCount must be greater than 1.");
}
// If sharingMode is VK_SHARING_MODE_CONCURRENT, pQueueFamilyIndices must be a pointer to an array of
// queueFamilyIndexCount uint32_t values
if (pCreateInfo->pQueueFamilyIndices == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-sharingMode-00941",
"vkCreateImage(): if pCreateInfo->sharingMode is VK_SHARING_MODE_CONCURRENT, "
"pCreateInfo->pQueueFamilyIndices must be a pointer to an array of "
"pCreateInfo->queueFamilyIndexCount uint32_t values.");
} else {
skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateImage",
"pCreateInfo->pQueueFamilyIndices", kVUID_PVError_InvalidUsage,
kVUID_PVError_InvalidUsage, false);
}
}
skip |= ValidateGreaterThanZero(pCreateInfo->extent.width, "pCreateInfo->extent.width",
"VUID-VkImageCreateInfo-extent-00944", log_misc);
skip |= ValidateGreaterThanZero(pCreateInfo->extent.height, "pCreateInfo->extent.height",
"VUID-VkImageCreateInfo-extent-00945", log_misc);
skip |= ValidateGreaterThanZero(pCreateInfo->extent.depth, "pCreateInfo->extent.depth",
"VUID-VkImageCreateInfo-extent-00946", log_misc);
skip |= ValidateGreaterThanZero(pCreateInfo->mipLevels, "pCreateInfo->mipLevels", "VUID-VkImageCreateInfo-mipLevels-00947",
log_misc);
skip |= ValidateGreaterThanZero(pCreateInfo->arrayLayers, "pCreateInfo->arrayLayers",
"VUID-VkImageCreateInfo-arrayLayers-00948", log_misc);
// InitialLayout must be PREINITIALIZED or UNDEFINED
if ((pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_UNDEFINED) &&
(pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_PREINITIALIZED)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-initialLayout-00993",
"vkCreateImage(): initialLayout is %s, must be VK_IMAGE_LAYOUT_UNDEFINED or VK_IMAGE_LAYOUT_PREINITIALIZED.",
string_VkImageLayout(pCreateInfo->initialLayout));
}
// If imageType is VK_IMAGE_TYPE_1D, both extent.height and extent.depth must be 1
if ((pCreateInfo->imageType == VK_IMAGE_TYPE_1D) &&
((pCreateInfo->extent.height != 1) || (pCreateInfo->extent.depth != 1))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00956",
"vkCreateImage(): if pCreateInfo->imageType is VK_IMAGE_TYPE_1D, both pCreateInfo->extent.height and "
"pCreateInfo->extent.depth must be 1.");
}
if (pCreateInfo->imageType == VK_IMAGE_TYPE_2D) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
if (pCreateInfo->extent.width != pCreateInfo->extent.height) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
VK_NULL_HANDLE, "VUID-VkImageCreateInfo-imageType-00954",
"vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, but "
"pCreateInfo->extent.width (=%" PRIu32 ") and pCreateInfo->extent.height (=%" PRIu32
") are not equal.",
pCreateInfo->extent.width, pCreateInfo->extent.height);
}
if (pCreateInfo->arrayLayers < 6) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
VK_NULL_HANDLE, "VUID-VkImageCreateInfo-imageType-00954",
"vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, but "
"pCreateInfo->arrayLayers (=%" PRIu32 ") is not greater than or equal to 6.",
pCreateInfo->arrayLayers);
}
}
if (pCreateInfo->extent.depth != 1) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00957",
"vkCreateImage(): if pCreateInfo->imageType is VK_IMAGE_TYPE_2D, pCreateInfo->extent.depth must be 1.");
}
}
// 3D image may have only 1 layer
if ((pCreateInfo->imageType == VK_IMAGE_TYPE_3D) && (pCreateInfo->arrayLayers != 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00961",
"vkCreateImage(): if pCreateInfo->imageType is VK_IMAGE_TYPE_3D, pCreateInfo->arrayLayers must be 1.");
}
// If multi-sample, validate type, usage, tiling and mip levels.
if ((pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) &&
((pCreateInfo->imageType != VK_IMAGE_TYPE_2D) || (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) ||
(pCreateInfo->mipLevels != 1) || (pCreateInfo->tiling != VK_IMAGE_TILING_OPTIMAL))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-samples-02257",
"vkCreateImage(): Multi-sample image with incompatible type, usage, tiling, or mips.");
}
if (0 != (pCreateInfo->usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT)) {
VkImageUsageFlags legal_flags = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
// At least one of the legal attachment bits must be set
if (0 == (pCreateInfo->usage & legal_flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00966",
"vkCreateImage(): Transient attachment image without a compatible attachment flag set.");
}
// No flags other than the legal attachment bits may be set
legal_flags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
if (0 != (pCreateInfo->usage & ~legal_flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00963",
"vkCreateImage(): Transient attachment image with incompatible usage flags set.");
}
}
// mipLevels must be less than or equal to the number of levels in the complete mipmap chain
uint32_t maxDim = std::max(std::max(pCreateInfo->extent.width, pCreateInfo->extent.height), pCreateInfo->extent.depth);
// Max mip levels is different for corner-sampled images vs normal images.
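        // e.g. a 16x16 image allows floor(log2(16)) + 1 = 5 mip levels normally,
        // but only ceil(log2(16)) = 4 when VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV is set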
uint32_t maxMipLevels = (pCreateInfo->flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) ? (uint32_t)(ceil(log2(maxDim)))
: (uint32_t)(floor(log2(maxDim)) + 1);
if (maxDim > 0 && pCreateInfo->mipLevels > maxMipLevels) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-mipLevels-00958",
"vkCreateImage(): pCreateInfo->mipLevels must be less than or equal to "
"floor(log2(max(pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->extent.depth)))+1.");
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT) && (pCreateInfo->imageType != VK_IMAGE_TYPE_3D)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, VK_NULL_HANDLE,
"VUID-VkImageCreateInfo-flags-00950",
"vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT but "
"pCreateInfo->imageType is not VK_IMAGE_TYPE_3D.");
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) && (!physical_device_features.sparseBinding)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, VK_NULL_HANDLE,
"VUID-VkImageCreateInfo-flags-00969",
"vkCreateImage(): pCreateInfo->flags contains VK_IMAGE_CREATE_SPARSE_BINDING_BIT, but the "
"VkPhysicalDeviceFeatures::sparseBinding feature is disabled.");
}
// If flags contains VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT or VK_IMAGE_CREATE_SPARSE_ALIASED_BIT, it must also contain
// VK_IMAGE_CREATE_SPARSE_BINDING_BIT
if (((pCreateInfo->flags & (VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)) != 0) &&
((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-00987",
"vkCreateImage: if pCreateInfo->flags contains VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT or "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT, it must also contain VK_IMAGE_CREATE_SPARSE_BINDING_BIT.");
}
// Check for combinations of attributes that are incompatible with having VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set
if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) != 0) {
// Linear tiling is unsupported
if (VK_IMAGE_TILING_LINEAR == pCreateInfo->tiling) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_InvalidUsage,
"vkCreateImage: if pCreateInfo->flags contains VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then image "
"tiling of VK_IMAGE_TILING_LINEAR is not supported");
}
// Sparse 1D image isn't valid
if (VK_IMAGE_TYPE_1D == pCreateInfo->imageType) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00970",
"vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 1D image.");
}
// Sparse 2D image when device doesn't support it
if ((VK_FALSE == physical_device_features.sparseResidencyImage2D) && (VK_IMAGE_TYPE_2D == pCreateInfo->imageType)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00971",
"vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 2D image if corresponding "
"feature is not enabled on the device.");
}
// Sparse 3D image when device doesn't support it
if ((VK_FALSE == physical_device_features.sparseResidencyImage3D) && (VK_IMAGE_TYPE_3D == pCreateInfo->imageType)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00972",
"vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 3D image if corresponding "
"feature is not enabled on the device.");
}
// Multi-sample 2D image when device doesn't support it
if (VK_IMAGE_TYPE_2D == pCreateInfo->imageType) {
if ((VK_FALSE == physical_device_features.sparseResidency2Samples) &&
(VK_SAMPLE_COUNT_2_BIT == pCreateInfo->samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00973",
"vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 2-sample image if "
"corresponding feature is not enabled on the device.");
} else if ((VK_FALSE == physical_device_features.sparseResidency4Samples) &&
(VK_SAMPLE_COUNT_4_BIT == pCreateInfo->samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00974",
"vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 4-sample image if "
"corresponding feature is not enabled on the device.");
} else if ((VK_FALSE == physical_device_features.sparseResidency8Samples) &&
(VK_SAMPLE_COUNT_8_BIT == pCreateInfo->samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00975",
"vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 8-sample image if "
"corresponding feature is not enabled on the device.");
} else if ((VK_FALSE == physical_device_features.sparseResidency16Samples) &&
(VK_SAMPLE_COUNT_16_BIT == pCreateInfo->samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-00976",
"vkCreateImage: cannot specify VK_IMAGE_CREATE_SPARSE_BINDING_BIT for 16-sample image if "
"corresponding feature is not enabled on the device.");
}
}
}
if (pCreateInfo->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) {
if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-imageType-02082",
"vkCreateImage: if usage includes VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, "
"imageType must be VK_IMAGE_TYPE_2D.");
}
if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-samples-02083",
"vkCreateImage: if usage includes VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, "
"samples must be VK_SAMPLE_COUNT_1_BIT.");
}
if (pCreateInfo->tiling != VK_IMAGE_TILING_OPTIMAL) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-tiling-02084",
"vkCreateImage: if usage includes VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, "
"tiling must be VK_IMAGE_TILING_OPTIMAL.");
}
}
if (pCreateInfo->flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) {
if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D && pCreateInfo->imageType != VK_IMAGE_TYPE_3D) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-02050",
"vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV, "
"imageType must be VK_IMAGE_TYPE_2D or VK_IMAGE_TYPE_3D.");
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) || FormatIsDepthOrStencil(pCreateInfo->format)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-02051",
"vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV, "
"it must not also contain VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT and format must "
"not be a depth/stencil format.");
}
if (pCreateInfo->imageType == VK_IMAGE_TYPE_2D && (pCreateInfo->extent.width == 1 || pCreateInfo->extent.height == 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-02052",
"vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV and "
"imageType is VK_IMAGE_TYPE_2D, extent.width and extent.height must be "
"greater than 1.");
} else if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D &&
(pCreateInfo->extent.width == 1 || pCreateInfo->extent.height == 1 || pCreateInfo->extent.depth == 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-02053",
"vkCreateImage: If flags contains VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV and "
"imageType is VK_IMAGE_TYPE_3D, extent.width, extent.height, and extent.depth "
"must be greater than 1.");
}
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
bool skip = false;
if (pCreateInfo != nullptr) {
// Validate chained VkImageViewUsageCreateInfo struct, if present
if (nullptr != pCreateInfo->pNext) {
auto chained_ivuci_struct = lvl_find_in_chain<VkImageViewUsageCreateInfoKHR>(pCreateInfo->pNext);
if (chained_ivuci_struct) {
if (0 == chained_ivuci_struct->usage) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageViewUsageCreateInfo-usage-requiredbitmask",
"vkCreateImageView: Chained VkImageViewUsageCreateInfo usage field must not be 0.");
} else if (chained_ivuci_struct->usage & ~AllVkImageUsageFlagBits) {
std::stringstream ss;
ss << "vkCreateImageView: Chained VkImageViewUsageCreateInfo usage field (0x" << std::hex
<< chained_ivuci_struct->usage << ") contains invalid flag bits.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageViewUsageCreateInfo-usage-parameter", "%s", ss.str().c_str());
}
}
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateViewport(const VkViewport &viewport, const char *fn_name, const char *param_name,
VkDebugReportObjectTypeEXT object_type, uint64_t object = 0) {
bool skip = false;
// Note: for numerical correctness
// - float comparisons should expect NaN (comparison always false).
// - VkPhysicalDeviceLimits::maxViewportDimensions is uint32_t, not float -> careful.
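    // f_lte_u32_exact compares a float against a uint32_t limit without casting the limit
    // to float: the value is split into integer and fractional parts so large limits are
    // not rounded during the comparison.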
const auto f_lte_u32_exact = [](const float v1_f, const uint32_t v2_u32) {
if (std::isnan(v1_f)) return false;
if (v1_f <= 0.0f) return true;
float intpart;
const float fract = modff(v1_f, &intpart);
assert(std::numeric_limits<float>::radix == 2);
const float u32_max_plus1 = ldexpf(1.0f, 32); // hopefully exact
if (intpart >= u32_max_plus1) return false;
uint32_t v1_u32 = static_cast<uint32_t>(intpart);
if (v1_u32 < v2_u32)
return true;
else if (v1_u32 == v2_u32 && fract == 0.0f)
return true;
else
return false;
};
const auto f_lte_u32_direct = [](const float v1_f, const uint32_t v2_u32) {
const float v2_f = static_cast<float>(v2_u32); // not accurate for > radix^digits; and undefined rounding mode
return (v1_f <= v2_f);
};
// width
bool width_healthy = true;
const auto max_w = device_limits.maxViewportDimensions[0];
if (!(viewport.width > 0.0f)) {
width_healthy = false;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-width-01770",
"%s: %s.width (=%f) is not greater than 0.0.", fn_name, param_name, viewport.width);
} else if (!(f_lte_u32_exact(viewport.width, max_w) || f_lte_u32_direct(viewport.width, max_w))) {
width_healthy = false;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-width-01771",
"%s: %s.width (=%f) exceeds VkPhysicalDeviceLimits::maxViewportDimensions[0] (=%" PRIu32 ").", fn_name,
param_name, viewport.width, max_w);
} else if (!f_lte_u32_exact(viewport.width, max_w) && f_lte_u32_direct(viewport.width, max_w)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, object_type, object, kVUID_PVError_NONE,
"%s: %s.width (=%f) technically exceeds VkPhysicalDeviceLimits::maxViewportDimensions[0] (=%" PRIu32
"), but it is within the static_cast<float>(maxViewportDimensions[0]) limit.",
fn_name, param_name, viewport.width, max_w);
}
// height
bool height_healthy = true;
const bool negative_height_enabled = api_version >= VK_API_VERSION_1_1 || device_extensions.vk_khr_maintenance1 ||
device_extensions.vk_amd_negative_viewport_height;
const auto max_h = device_limits.maxViewportDimensions[1];
if (!negative_height_enabled && !(viewport.height > 0.0f)) {
height_healthy = false;
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-height-01772",
                        "%s: %s.height (=%f) is not greater than 0.0.", fn_name, param_name, viewport.height);
} else if (!(f_lte_u32_exact(fabsf(viewport.height), max_h) || f_lte_u32_direct(fabsf(viewport.height), max_h))) {
height_healthy = false;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-height-01773",
"%s: Absolute value of %s.height (=%f) exceeds VkPhysicalDeviceLimits::maxViewportDimensions[1] (=%" PRIu32
").",
fn_name, param_name, viewport.height, max_h);
} else if (!f_lte_u32_exact(fabsf(viewport.height), max_h) && f_lte_u32_direct(fabsf(viewport.height), max_h)) {
height_healthy = false;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, object_type, object, kVUID_PVError_NONE,
"%s: Absolute value of %s.height (=%f) technically exceeds VkPhysicalDeviceLimits::maxViewportDimensions[1] (=%" PRIu32
"), but it is within the static_cast<float>(maxViewportDimensions[1]) limit.",
fn_name, param_name, viewport.height, max_h);
}
// x
bool x_healthy = true;
if (!(viewport.x >= device_limits.viewportBoundsRange[0])) {
x_healthy = false;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-x-01774",
"%s: %s.x (=%f) is less than VkPhysicalDeviceLimits::viewportBoundsRange[0] (=%f).", fn_name, param_name,
viewport.x, device_limits.viewportBoundsRange[0]);
}
// x + width
if (x_healthy && width_healthy) {
const float right_bound = viewport.x + viewport.width;
if (!(right_bound <= device_limits.viewportBoundsRange[1])) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-x-01232",
"%s: %s.x + %s.width (=%f + %f = %f) is greater than VkPhysicalDeviceLimits::viewportBoundsRange[1] (=%f).",
fn_name, param_name, param_name, viewport.x, viewport.width, right_bound, device_limits.viewportBoundsRange[1]);
}
}
// y
bool y_healthy = true;
if (!(viewport.y >= device_limits.viewportBoundsRange[0])) {
y_healthy = false;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-y-01775",
"%s: %s.y (=%f) is less than VkPhysicalDeviceLimits::viewportBoundsRange[0] (=%f).", fn_name, param_name,
viewport.y, device_limits.viewportBoundsRange[0]);
} else if (negative_height_enabled && !(viewport.y <= device_limits.viewportBoundsRange[1])) {
y_healthy = false;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-y-01776",
"%s: %s.y (=%f) exceeds VkPhysicalDeviceLimits::viewportBoundsRange[1] (=%f).", fn_name, param_name,
viewport.y, device_limits.viewportBoundsRange[1]);
}
// y + height
if (y_healthy && height_healthy) {
const float boundary = viewport.y + viewport.height;
if (!(boundary <= device_limits.viewportBoundsRange[1])) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-y-01233",
"%s: %s.y + %s.height (=%f + %f = %f) exceeds VkPhysicalDeviceLimits::viewportBoundsRange[1] (=%f).",
fn_name, param_name, param_name, viewport.y, viewport.height, boundary,
device_limits.viewportBoundsRange[1]);
} else if (negative_height_enabled && !(boundary >= device_limits.viewportBoundsRange[0])) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-y-01777",
"%s: %s.y + %s.height (=%f + %f = %f) is less than VkPhysicalDeviceLimits::viewportBoundsRange[0] (=%f).", fn_name,
param_name, param_name, viewport.y, viewport.height, boundary, device_limits.viewportBoundsRange[0]);
}
}
if (!device_extensions.vk_ext_depth_range_unrestricted) {
// minDepth
if (!(viewport.minDepth >= 0.0) || !(viewport.minDepth <= 1.0)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-minDepth-01234",
"%s: VK_EXT_depth_range_unrestricted extension is not enabled and %s.minDepth (=%f) is not within the "
"[0.0, 1.0] range.",
fn_name, param_name, viewport.minDepth);
}
// maxDepth
if (!(viewport.maxDepth >= 0.0) || !(viewport.maxDepth <= 1.0)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object, "VUID-VkViewport-maxDepth-01235",
"%s: VK_EXT_depth_range_unrestricted extension is not enabled and %s.maxDepth (=%f) is not within the "
"[0.0, 1.0] range.",
fn_name, param_name, viewport.maxDepth);
}
}
return skip;
}
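// Fragment width/height (in pixels) produced by each coarse shading rate palette entry.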
struct SampleOrderInfo {
VkShadingRatePaletteEntryNV shadingRate;
uint32_t width;
uint32_t height;
};
// All palette entries with more than one pixel per fragment
static SampleOrderInfo sampleOrderInfos[] = {
{VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 1, 2},
{VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV, 2, 1},
{VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV, 2, 2},
{VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV, 4, 2},
{VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV, 2, 4},
{VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV, 4, 4},
};
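// Validates a single custom sample ordering from VK_NV_shading_rate_image against the device limits
// and the custom-sample-order VUIDs. For illustration only, a valid ordering for a 1x2-pixel fragment
// with 2 coverage samples (assuming the implementation reports 2-sample support in
// framebufferNoAttachmentsSampleCounts) could look like:
//   VkCoarseSampleLocationNV locs[4] = {{0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}};  // {pixelX, pixelY, sample}
//   VkCoarseSampleOrderCustomNV order = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 4, locs};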
bool StatelessValidation::ValidateCoarseSampleOrderCustomNV(const VkCoarseSampleOrderCustomNV *order) {
bool skip = false;
SampleOrderInfo *sampleOrderInfo;
uint32_t infoIdx = 0;
for (sampleOrderInfo = nullptr; infoIdx < ARRAY_SIZE(sampleOrderInfos); ++infoIdx) {
if (sampleOrderInfos[infoIdx].shadingRate == order->shadingRate) {
sampleOrderInfo = &sampleOrderInfos[infoIdx];
break;
}
}
if (sampleOrderInfo == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleOrderCustomNV-shadingRate-02073",
"VkCoarseSampleOrderCustomNV shadingRate must be a shading rate "
"that generates fragments with more than one pixel.");
return skip;
}
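    // sampleCount must be a single VkSampleCountFlagBits bit (i.e. a power of two) that the device
    // supports for framebuffers with no attachments; (x & (x - 1)) is nonzero for non-powers-of-two.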
if (order->sampleCount == 0 || (order->sampleCount & (order->sampleCount - 1)) ||
!(order->sampleCount & device_limits.framebufferNoAttachmentsSampleCounts)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleOrderCustomNV-sampleCount-02074",
"VkCoarseSampleOrderCustomNV sampleCount (=%" PRIu32
") must "
"correspond to a sample count enumerated in VkSampleCountFlags whose corresponding bit "
"is set in framebufferNoAttachmentsSampleCounts.",
order->sampleCount);
}
if (order->sampleLocationCount != order->sampleCount * sampleOrderInfo->width * sampleOrderInfo->height) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075",
"VkCoarseSampleOrderCustomNV sampleLocationCount (=%" PRIu32
") must "
"be equal to the product of sampleCount (=%" PRIu32
"), the fragment width for shadingRate "
"(=%" PRIu32 "), and the fragment height for shadingRate (=%" PRIu32 ").",
order->sampleLocationCount, order->sampleCount, sampleOrderInfo->width, sampleOrderInfo->height);
}
if (order->sampleLocationCount > phys_dev_ext_props.shading_rate_image_props.shadingRateMaxCoarseSamples) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02076",
"VkCoarseSampleOrderCustomNV sampleLocationCount (=%" PRIu32
") must "
"be less than or equal to VkPhysicalDeviceShadingRateImagePropertiesNV shadingRateMaxCoarseSamples (=%" PRIu32 ").",
order->sampleLocationCount, phys_dev_ext_props.shading_rate_image_props.shadingRateMaxCoarseSamples);
}
// Accumulate a bitmask tracking which (x,y,sample) tuples are seen. Expect
// the first width*height*sampleCount bits to all be set. Note: There is no
// guarantee that 64 bits is enough, but practically it's unlikely for an
    // implementation to support more than 32 bits for the sample mask.
assert(phys_dev_ext_props.shading_rate_image_props.shadingRateMaxCoarseSamples <= 64);
uint64_t sampleLocationsMask = 0;
for (uint32_t i = 0; i < order->sampleLocationCount; ++i) {
const VkCoarseSampleLocationNV *sampleLoc = &order->pSampleLocations[i];
if (sampleLoc->pixelX >= sampleOrderInfo->width) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleLocationNV-pixelX-02078",
"pixelX must be less than the width (in pixels) of the fragment.");
}
if (sampleLoc->pixelY >= sampleOrderInfo->height) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleLocationNV-pixelY-02079",
"pixelY must be less than the height (in pixels) of the fragment.");
}
if (sampleLoc->sample >= order->sampleCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleLocationNV-sample-02080",
"sample must be less than the number of coverage samples in each pixel belonging to the fragment.");
}
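        // Flatten (pixelX, pixelY, sample) into a bit index; e.g. for a 2x2 fragment with 2 samples,
        // idx = sample + 2 * (pixelX + 2 * pixelY), yielding a distinct bit for each valid tuple.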
uint32_t idx = sampleLoc->sample + order->sampleCount * (sampleLoc->pixelX + sampleOrderInfo->width * sampleLoc->pixelY);
sampleLocationsMask |= 1ULL << idx;
}
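    // Every valid (pixelX, pixelY, sample) tuple must appear exactly once, so the low
    // sampleLocationCount bits should all be set. The 64-location case is handled separately because
    // shifting a 64-bit value by 64 is undefined behavior.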
uint64_t expectedMask = (order->sampleLocationCount == 64) ? ~0ULL : ((1ULL << order->sampleLocationCount) - 1);
if (sampleLocationsMask != expectedMask) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077",
"The array pSampleLocations must contain exactly one entry for "
"every combination of valid values for pixelX, pixelY, and sample in the structure VkCoarseSampleOrderCustomNV.");
}
return skip;
}
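// Manual stateless checks for vkCreateGraphicsPipelines, covering cross-member rules and parameters
// excluded from the generated validation code.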
bool StatelessValidation::manual_PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines) {
bool skip = false;
if (pCreateInfos != nullptr) {
for (uint32_t i = 0; i < createInfoCount; ++i) {
bool has_dynamic_viewport = false;
bool has_dynamic_scissor = false;
bool has_dynamic_line_width = false;
bool has_dynamic_viewport_w_scaling_nv = false;
bool has_dynamic_discard_rectangle_ext = false;
bool has_dynamic_sample_locations_ext = false;
bool has_dynamic_exclusive_scissor_nv = false;
bool has_dynamic_shading_rate_palette_nv = false;
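            // Record which states are dynamic up front so the corresponding static-state checks below
            // can be skipped for them.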
if (pCreateInfos[i].pDynamicState != nullptr) {
const auto &dynamic_state_info = *pCreateInfos[i].pDynamicState;
for (uint32_t state_index = 0; state_index < dynamic_state_info.dynamicStateCount; ++state_index) {
const auto &dynamic_state = dynamic_state_info.pDynamicStates[state_index];
if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT) has_dynamic_viewport = true;
if (dynamic_state == VK_DYNAMIC_STATE_SCISSOR) has_dynamic_scissor = true;
if (dynamic_state == VK_DYNAMIC_STATE_LINE_WIDTH) has_dynamic_line_width = true;
if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV) has_dynamic_viewport_w_scaling_nv = true;
if (dynamic_state == VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT) has_dynamic_discard_rectangle_ext = true;
if (dynamic_state == VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) has_dynamic_sample_locations_ext = true;
if (dynamic_state == VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV) has_dynamic_exclusive_scissor_nv = true;
if (dynamic_state == VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV)
has_dynamic_shading_rate_palette_nv = true;
}
}
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
if (pCreateInfos[i].pVertexInputState != nullptr) {
auto const &vertex_input_state = pCreateInfos[i].pVertexInputState;
if (vertex_input_state->vertexBindingDescriptionCount > device_limits.maxVertexInputBindings) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineVertexInputStateCreateInfo-vertexBindingDescriptionCount-00613",
"vkCreateGraphicsPipelines: pararameter "
"pCreateInfo[%d].pVertexInputState->vertexBindingDescriptionCount (%u) is "
"greater than VkPhysicalDeviceLimits::maxVertexInputBindings (%u).",
i, vertex_input_state->vertexBindingDescriptionCount, device_limits.maxVertexInputBindings);
}
if (vertex_input_state->vertexAttributeDescriptionCount > device_limits.maxVertexInputAttributes) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineVertexInputStateCreateInfo-vertexAttributeDescriptionCount-00614",
"vkCreateGraphicsPipelines: pararameter "
"pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptionCount (%u) is "
"greater than VkPhysicalDeviceLimits::maxVertexInputAttributes (%u).",
                                i, vertex_input_state->vertexAttributeDescriptionCount, device_limits.maxVertexInputAttributes);
}
std::unordered_set<uint32_t> vertex_bindings(vertex_input_state->vertexBindingDescriptionCount);
for (uint32_t d = 0; d < vertex_input_state->vertexBindingDescriptionCount; ++d) {
auto const &vertex_bind_desc = vertex_input_state->pVertexBindingDescriptions[d];
auto const &binding_it = vertex_bindings.find(vertex_bind_desc.binding);
if (binding_it != vertex_bindings.cend()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-00616",
"vkCreateGraphicsPipelines: parameter "
"pCreateInfo[%d].pVertexInputState->pVertexBindingDescription[%d].binding "
"(%" PRIu32 ") is not distinct.",
i, d, vertex_bind_desc.binding);
}
vertex_bindings.insert(vertex_bind_desc.binding);
if (vertex_bind_desc.binding >= device_limits.maxVertexInputBindings) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkVertexInputBindingDescription-binding-00618",
"vkCreateGraphicsPipelines: parameter "
"pCreateInfos[%u].pVertexInputState->pVertexBindingDescriptions[%u].binding (%u) is "
"greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings (%u).",
i, d, vertex_bind_desc.binding, device_limits.maxVertexInputBindings);
}
if (vertex_bind_desc.stride > device_limits.maxVertexInputBindingStride) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkVertexInputBindingDescription-stride-00619",
"vkCreateGraphicsPipelines: parameter "
"pCreateInfos[%u].pVertexInputState->pVertexBindingDescriptions[%u].stride (%u) is greater "
"than VkPhysicalDeviceLimits::maxVertexInputBindingStride (%u).",
i, d, vertex_bind_desc.stride, device_limits.maxVertexInputBindingStride);
}
}
std::unordered_set<uint32_t> attribute_locations(vertex_input_state->vertexAttributeDescriptionCount);
for (uint32_t d = 0; d < vertex_input_state->vertexAttributeDescriptionCount; ++d) {
auto const &vertex_attrib_desc = vertex_input_state->pVertexAttributeDescriptions[d];
auto const &location_it = attribute_locations.find(vertex_attrib_desc.location);
if (location_it != attribute_locations.cend()) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineVertexInputStateCreateInfo-pVertexAttributeDescriptions-00617",
"vkCreateGraphicsPipelines: parameter "
"pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].location (%u) is not distinct.",
i, d, vertex_attrib_desc.location);
}
attribute_locations.insert(vertex_attrib_desc.location);
auto const &binding_it = vertex_bindings.find(vertex_attrib_desc.binding);
if (binding_it == vertex_bindings.cend()) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineVertexInputStateCreateInfo-binding-00615",
"vkCreateGraphicsPipelines: parameter "
" pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].binding (%u) does not exist "
"in any pCreateInfo[%d].pVertexInputState->pVertexBindingDescription.",
i, d, vertex_attrib_desc.binding, i);
}
if (vertex_attrib_desc.location >= device_limits.maxVertexInputAttributes) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkVertexInputAttributeDescription-location-00620",
"vkCreateGraphicsPipelines: parameter "
"pCreateInfos[%u].pVertexInputState->pVertexAttributeDescriptions[%u].location (%u) is "
"greater than or equal to VkPhysicalDeviceLimits::maxVertexInputAttributes (%u).",
i, d, vertex_attrib_desc.location, device_limits.maxVertexInputAttributes);
}
if (vertex_attrib_desc.binding >= device_limits.maxVertexInputBindings) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkVertexInputAttributeDescription-binding-00621",
"vkCreateGraphicsPipelines: parameter "
"pCreateInfos[%u].pVertexInputState->pVertexAttributeDescriptions[%u].binding (%u) is "
"greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings (%u).",
i, d, vertex_attrib_desc.binding, device_limits.maxVertexInputBindings);
}
if (vertex_attrib_desc.offset > device_limits.maxVertexInputAttributeOffset) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkVertexInputAttributeDescription-offset-00622",
"vkCreateGraphicsPipelines: parameter "
"pCreateInfos[%u].pVertexInputState->pVertexAttributeDescriptions[%u].offset (%u) is "
"greater than VkPhysicalDeviceLimits::maxVertexInputAttributeOffset (%u).",
i, d, vertex_attrib_desc.offset, device_limits.maxVertexInputAttributeOffset);
}
}
}
if (pCreateInfos[i].pStages != nullptr) {
bool has_control = false;
bool has_eval = false;
for (uint32_t stage_index = 0; stage_index < pCreateInfos[i].stageCount; ++stage_index) {
if (pCreateInfos[i].pStages[stage_index].stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
has_control = true;
} else if (pCreateInfos[i].pStages[stage_index].stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) {
has_eval = true;
}
}
// pTessellationState is ignored without both tessellation control and tessellation evaluation shaders stages
if (has_control && has_eval) {
if (pCreateInfos[i].pTessellationState == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkGraphicsPipelineCreateInfo-pStages-00731",
"vkCreateGraphicsPipelines: if pCreateInfos[%d].pStages includes a tessellation control "
"shader stage and a tessellation evaluation shader stage, "
"pCreateInfos[%d].pTessellationState must not be NULL.",
i, i);
} else {
skip |= validate_struct_pnext(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pTessellationState->pNext", ParameterName::IndexVector{i}), NULL,
pCreateInfos[i].pTessellationState->pNext, 0, NULL, GeneratedVulkanHeaderVersion,
"VUID-VkGraphicsPipelineCreateInfo-pNext-pNext");
skip |= validate_reserved_flags(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pTessellationState->flags", ParameterName::IndexVector{i}),
pCreateInfos[i].pTessellationState->flags,
"VUID-VkPipelineTessellationStateCreateInfo-flags-zerobitmask");
if (pCreateInfos[i].pTessellationState->sType !=
VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineTessellationStateCreateInfo-sType-sType",
"vkCreateGraphicsPipelines: parameter pCreateInfos[%d].pTessellationState->sType must "
"be VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO.",
i);
}
if (pCreateInfos[i].pTessellationState->patchControlPoints == 0 ||
pCreateInfos[i].pTessellationState->patchControlPoints > device_limits.maxTessellationPatchSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214",
"vkCreateGraphicsPipelines: invalid parameter "
"pCreateInfos[%d].pTessellationState->patchControlPoints value %u. patchControlPoints "
"should be >0 and <=%u.",
i, pCreateInfos[i].pTessellationState->patchControlPoints,
device_limits.maxTessellationPatchSize);
}
}
}
}
// pViewportState, pMultisampleState, pDepthStencilState, and pColorBlendState ignored when rasterization is disabled
if ((pCreateInfos[i].pRasterizationState != nullptr) &&
(pCreateInfos[i].pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
if (pCreateInfos[i].pViewportState == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750",
"vkCreateGraphicsPipelines: Rasterization is enabled (pCreateInfos[%" PRIu32
"].pRasterizationState->rasterizerDiscardEnable is VK_FALSE), but pCreateInfos[%" PRIu32
"].pViewportState (=NULL) is not a valid pointer.",
i, i);
} else {
const auto &viewport_state = *pCreateInfos[i].pViewportState;
if (viewport_state.sType != VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-sType-sType",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"].pViewportState->sType is not VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO.",
i);
}
const VkStructureType allowed_structs_VkPipelineViewportStateCreateInfo[] = {
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV,
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV,
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV,
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV,
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV,
};
skip |= validate_struct_pnext(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pViewportState->pNext", ParameterName::IndexVector{i}),
"VkPipelineViewportSwizzleStateCreateInfoNV, VkPipelineViewportWScalingStateCreateInfoNV, "
"VkPipelineViewportExclusiveScissorStateCreateInfoNV, VkPipelineViewportShadingRateImageStateCreateInfoNV, "
"VkPipelineViewportCoarseSampleOrderStateCreateInfoNV",
viewport_state.pNext, ARRAY_SIZE(allowed_structs_VkPipelineViewportStateCreateInfo),
                        allowed_structs_VkPipelineViewportStateCreateInfo, GeneratedVulkanHeaderVersion,
"VUID-VkPipelineViewportStateCreateInfo-pNext-pNext");
skip |= validate_reserved_flags(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pViewportState->flags", ParameterName::IndexVector{i}),
viewport_state.flags, "VUID-VkPipelineViewportStateCreateInfo-flags-zerobitmask");
auto exclusive_scissor_struct = lvl_find_in_chain<VkPipelineViewportExclusiveScissorStateCreateInfoNV>(
pCreateInfos[i].pViewportState->pNext);
auto shading_rate_image_struct = lvl_find_in_chain<VkPipelineViewportShadingRateImageStateCreateInfoNV>(
pCreateInfos[i].pViewportState->pNext);
auto coarse_sample_order_struct = lvl_find_in_chain<VkPipelineViewportCoarseSampleOrderStateCreateInfoNV>(
pCreateInfos[i].pViewportState->pNext);
const auto vp_swizzle_struct =
lvl_find_in_chain<VkPipelineViewportSwizzleStateCreateInfoNV>(pCreateInfos[i].pViewportState->pNext);
if (!physical_device_features.multiViewport) {
if (viewport_state.viewportCount != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is "
"disabled, but pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32
") is not 1.",
i, viewport_state.viewportCount);
}
if (viewport_state.scissorCount != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is "
"disabled, but pCreateInfos[%" PRIu32 "].pViewportState->scissorCount (=%" PRIu32
") is not 1.",
i, viewport_state.scissorCount);
}
if (exclusive_scissor_struct && (exclusive_scissor_struct->exclusiveScissorCount != 0 &&
exclusive_scissor_struct->exclusiveScissorCount != 1)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE,
"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02027",
"vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is "
"disabled, but pCreateInfos[%" PRIu32
"] VkPipelineViewportExclusiveScissorStateCreateInfoNV::exclusiveScissorCount (=%" PRIu32
") is not 1.",
i, exclusive_scissor_struct->exclusiveScissorCount);
}
if (shading_rate_image_struct &&
(shading_rate_image_struct->viewportCount != 0 && shading_rate_image_struct->viewportCount != 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE,
"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02054",
"vkCreateGraphicsPipelines: The VkPhysicalDeviceFeatures::multiViewport feature is "
"disabled, but pCreateInfos[%" PRIu32
"] VkPipelineViewportShadingRateImageStateCreateInfoNV::viewportCount (=%" PRIu32
") is neither 0 nor 1.",
i, shading_rate_image_struct->viewportCount);
}
} else { // multiViewport enabled
if (viewport_state.viewportCount == 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->viewportCount is 0.", i);
} else if (viewport_state.viewportCount > device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"].pViewportState->viewportCount (=%" PRIu32
") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
i, viewport_state.viewportCount, device_limits.maxViewports);
}
if (viewport_state.scissorCount == 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->scissorCount is 0.", i);
} else if (viewport_state.scissorCount > device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"].pViewportState->scissorCount (=%" PRIu32
") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
i, viewport_state.scissorCount, device_limits.maxViewports);
}
}
if (exclusive_scissor_struct && exclusive_scissor_struct->exclusiveScissorCount > device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE,
"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02028",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] exclusiveScissorCount (=%" PRIu32
") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
i, exclusive_scissor_struct->exclusiveScissorCount, device_limits.maxViewports);
}
if (shading_rate_image_struct && shading_rate_image_struct->viewportCount > device_limits.maxViewports) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02055",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"] VkPipelineViewportShadingRateImageStateCreateInfoNV viewportCount (=%" PRIu32
") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
i, shading_rate_image_struct->viewportCount, device_limits.maxViewports);
}
if (viewport_state.scissorCount != viewport_state.viewportCount) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "].pViewportState->scissorCount (=%" PRIu32
") is not identical to pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ").",
i, viewport_state.scissorCount, i, viewport_state.viewportCount);
}
if (exclusive_scissor_struct && exclusive_scissor_struct->exclusiveScissorCount != 0 &&
exclusive_scissor_struct->exclusiveScissorCount != viewport_state.viewportCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE,
"VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-exclusiveScissorCount-02029",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32 "] exclusiveScissorCount (=%" PRIu32
") must be zero or identical to pCreateInfos[%" PRIu32
"].pViewportState->viewportCount (=%" PRIu32 ").",
i, exclusive_scissor_struct->exclusiveScissorCount, i, viewport_state.viewportCount);
}
if (shading_rate_image_struct && shading_rate_image_struct->shadingRateImageEnable &&
shading_rate_image_struct->viewportCount != viewport_state.viewportCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, VK_NULL_HANDLE,
"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-shadingRateImageEnable-02056",
"vkCreateGraphicsPipelines: If shadingRateImageEnable is enabled, pCreateInfos[%" PRIu32
"] "
"VkPipelineViewportShadingRateImageStateCreateInfoNV viewportCount (=%" PRIu32
") must identical to pCreateInfos[%" PRIu32 "].pViewportState->viewportCount (=%" PRIu32 ").",
i, shading_rate_image_struct->viewportCount, i, viewport_state.viewportCount);
}
if (!has_dynamic_viewport && viewport_state.viewportCount > 0 && viewport_state.pViewports == nullptr) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, VK_NULL_HANDLE,
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747",
"vkCreateGraphicsPipelines: The viewport state is static (pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_VIEWPORT), but pCreateInfos[%" PRIu32
"].pViewportState->pViewports (=NULL) is an invalid pointer.",
i, i);
}
if (!has_dynamic_scissor && viewport_state.scissorCount > 0 && viewport_state.pScissors == nullptr) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, VK_NULL_HANDLE,
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748",
"vkCreateGraphicsPipelines: The scissor state is static (pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_SCISSOR), but pCreateInfos[%" PRIu32
"].pViewportState->pScissors (=NULL) is an invalid pointer.",
i, i);
}
if (!has_dynamic_exclusive_scissor_nv && exclusive_scissor_struct &&
exclusive_scissor_struct->exclusiveScissorCount > 0 &&
exclusive_scissor_struct->pExclusiveScissors == nullptr) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportExclusiveScissorStateCreateInfoNV-pDynamicStates-02030",
"vkCreateGraphicsPipelines: The exclusive scissor state is static (pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV), but "
"pCreateInfos[%" PRIu32 "] pExclusiveScissors (=NULL) is an invalid pointer.",
i, i);
}
if (!has_dynamic_shading_rate_palette_nv && shading_rate_image_struct &&
shading_rate_image_struct->viewportCount > 0 &&
shading_rate_image_struct->pShadingRatePalettes == nullptr) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, VK_NULL_HANDLE,
"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-pDynamicStates-02057",
"vkCreateGraphicsPipelines: The shading rate palette state is static (pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV), "
"but pCreateInfos[%" PRIu32 "] pShadingRatePalettes (=NULL) is an invalid pointer.",
i, i);
}
if (vp_swizzle_struct) {
if (vp_swizzle_struct->viewportCount != viewport_state.viewportCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, "VUID-VkPipelineViewportSwizzleStateCreateInfoNV-viewportCount-01215",
"vkCreateGraphicsPipelines: The viewport swizzle state vieport count of %" PRIu32
" does "
"not match the viewport count of %" PRIu32 " in VkPipelineViewportStateCreateInfo.",
vp_swizzle_struct->viewportCount, viewport_state.viewportCount);
}
}
// validate the VkViewports
if (!has_dynamic_viewport && viewport_state.pViewports) {
for (uint32_t viewport_i = 0; viewport_i < viewport_state.viewportCount; ++viewport_i) {
const auto &viewport = viewport_state.pViewports[viewport_i]; // will crash on invalid ptr
const char fn_name[] = "vkCreateGraphicsPipelines";
const std::string param_name = "pCreateInfos[" + std::to_string(i) + "].pViewportState->pViewports[" +
std::to_string(viewport_i) + "]";
skip |= manual_PreCallValidateViewport(viewport, fn_name, param_name.c_str(),
VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
}
}
if (has_dynamic_viewport_w_scaling_nv && !device_extensions.vk_nv_clip_space_w_scaling) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, kVUID_PVError_ExtensionNotEnabled,
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, but "
"VK_NV_clip_space_w_scaling extension is not enabled.",
i);
}
if (has_dynamic_discard_rectangle_ext && !device_extensions.vk_ext_discard_rectangles) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, kVUID_PVError_ExtensionNotEnabled,
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, but "
"VK_EXT_discard_rectangles extension is not enabled.",
i);
}
if (has_dynamic_sample_locations_ext && !device_extensions.vk_ext_sample_locations) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, kVUID_PVError_ExtensionNotEnabled,
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but "
"VK_EXT_sample_locations extension is not enabled.",
i);
}
if (has_dynamic_exclusive_scissor_nv && !device_extensions.vk_nv_scissor_exclusive) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE, kVUID_PVError_ExtensionNotEnabled,
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates contains VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV, but "
"VK_NV_scissor_exclusive extension is not enabled.",
i);
}
if (coarse_sample_order_struct &&
coarse_sample_order_struct->sampleOrderType != VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV &&
coarse_sample_order_struct->customSampleOrderCount != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
VK_NULL_HANDLE,
"VUID-VkPipelineViewportCoarseSampleOrderStateCreateInfoNV-sampleOrderType-02072",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRIu32
"] "
"VkPipelineViewportCoarseSampleOrderStateCreateInfoNV sampleOrderType is not "
"VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV and customSampleOrderCount is not 0.",
i);
}
if (coarse_sample_order_struct) {
for (uint32_t order_i = 0; order_i < coarse_sample_order_struct->customSampleOrderCount; ++order_i) {
skip |= ValidateCoarseSampleOrderCustomNV(&coarse_sample_order_struct->pCustomSampleOrders[order_i]);
}
}
}
if (pCreateInfos[i].pMultisampleState == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751",
"vkCreateGraphicsPipelines: if pCreateInfos[%d].pRasterizationState->rasterizerDiscardEnable "
"is VK_FALSE, pCreateInfos[%d].pMultisampleState must not be NULL.",
i, i);
} else {
const VkStructureType valid_next_stypes[] = {LvlTypeMap<VkPipelineCoverageModulationStateCreateInfoNV>::kSType,
LvlTypeMap<VkPipelineCoverageToColorStateCreateInfoNV>::kSType,
LvlTypeMap<VkPipelineSampleLocationsStateCreateInfoEXT>::kSType};
const char *valid_struct_names =
"VkPipelineCoverageModulationStateCreateInfoNV, VkPipelineCoverageToColorStateCreateInfoNV, "
"VkPipelineSampleLocationsStateCreateInfoEXT";
skip |= validate_struct_pnext(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pMultisampleState->pNext", ParameterName::IndexVector{i}),
valid_struct_names, pCreateInfos[i].pMultisampleState->pNext, 3, valid_next_stypes,
GeneratedVulkanHeaderVersion, "VUID-VkPipelineMultisampleStateCreateInfo-pNext-pNext");
skip |= validate_reserved_flags(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pMultisampleState->flags", ParameterName::IndexVector{i}),
pCreateInfos[i].pMultisampleState->flags, "VUID-VkPipelineMultisampleStateCreateInfo-flags-zerobitmask");
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pMultisampleState->sampleShadingEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pMultisampleState->sampleShadingEnable);
skip |= validate_array(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pMultisampleState->rasterizationSamples", ParameterName::IndexVector{i}),
ParameterName("pCreateInfos[%i].pMultisampleState->pSampleMask", ParameterName::IndexVector{i}),
pCreateInfos[i].pMultisampleState->rasterizationSamples, &pCreateInfos[i].pMultisampleState->pSampleMask,
true, false, kVUIDUndefined, kVUIDUndefined);
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pMultisampleState->alphaToCoverageEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pMultisampleState->alphaToCoverageEnable);
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pMultisampleState->alphaToOneEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pMultisampleState->alphaToOneEnable);
if (pCreateInfos[i].pMultisampleState->sType != VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_InvalidStructSType,
"vkCreateGraphicsPipelines: parameter pCreateInfos[%d].pMultisampleState->sType must be "
"VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO",
i);
}
if (pCreateInfos[i].pMultisampleState->sampleShadingEnable == VK_TRUE) {
if (!physical_device_features.sampleRateShading) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineMultisampleStateCreateInfo-sampleShadingEnable-00784",
"vkCreateGraphicsPipelines(): parameter "
"pCreateInfos[%d].pMultisampleState->sampleShadingEnable.",
i);
}
// TODO Add documentation issue about when minSampleShading must be in range and when it is ignored
// For now a "least noise" test *only* when sampleShadingEnable is VK_TRUE.
if (!in_inclusive_range(pCreateInfos[i].pMultisampleState->minSampleShading, 0.F, 1.0F)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineMultisampleStateCreateInfo-minSampleShading-00786",
"vkCreateGraphicsPipelines(): parameter pCreateInfos[%d].pMultisampleState->minSampleShading.", i);
}
}
}
bool uses_color_attachment = false;
bool uses_depthstencil_attachment = false;
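            // Depth/stencil and color blend state are only validated when the pipeline's subpass
            // actually uses an attachment of the corresponding type.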
{
const auto subpasses_uses_it = renderpasses_states.find(pCreateInfos[i].renderPass);
if (subpasses_uses_it != renderpasses_states.end()) {
const auto &subpasses_uses = subpasses_uses_it->second;
if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[i].subpass))
uses_color_attachment = true;
if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[i].subpass))
uses_depthstencil_attachment = true;
}
}
if (pCreateInfos[i].pDepthStencilState != nullptr && uses_depthstencil_attachment) {
skip |= validate_struct_pnext(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->pNext", ParameterName::IndexVector{i}), NULL,
pCreateInfos[i].pDepthStencilState->pNext, 0, NULL, GeneratedVulkanHeaderVersion,
"VUID-VkPipelineDepthStencilStateCreateInfo-pNext-pNext");
skip |= validate_reserved_flags(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->flags", ParameterName::IndexVector{i}),
pCreateInfos[i].pDepthStencilState->flags, "VUID-VkPipelineDepthStencilStateCreateInfo-flags-zerobitmask");
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->depthTestEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pDepthStencilState->depthTestEnable);
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->depthWriteEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pDepthStencilState->depthWriteEnable);
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->depthCompareOp", ParameterName::IndexVector{i}),
"VkCompareOp", AllVkCompareOpEnums, pCreateInfos[i].pDepthStencilState->depthCompareOp,
"VUID-VkPipelineDepthStencilStateCreateInfo-depthCompareOp-parameter");
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->depthBoundsTestEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pDepthStencilState->depthBoundsTestEnable);
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->stencilTestEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pDepthStencilState->stencilTestEnable);
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->front.failOp", ParameterName::IndexVector{i}),
"VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->front.failOp,
"VUID-VkStencilOpState-failOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->front.passOp", ParameterName::IndexVector{i}),
"VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->front.passOp,
"VUID-VkStencilOpState-passOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->front.depthFailOp", ParameterName::IndexVector{i}),
"VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->front.depthFailOp,
"VUID-VkStencilOpState-depthFailOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->front.compareOp", ParameterName::IndexVector{i}),
"VkCompareOp", AllVkCompareOpEnums, pCreateInfos[i].pDepthStencilState->front.compareOp,
"VUID-VkPipelineDepthStencilStateCreateInfo-depthCompareOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->back.failOp", ParameterName::IndexVector{i}),
"VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->back.failOp,
"VUID-VkStencilOpState-failOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->back.passOp", ParameterName::IndexVector{i}),
"VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->back.passOp,
"VUID-VkStencilOpState-passOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->back.depthFailOp", ParameterName::IndexVector{i}),
"VkStencilOp", AllVkStencilOpEnums, pCreateInfos[i].pDepthStencilState->back.depthFailOp,
"VUID-VkStencilOpState-depthFailOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pDepthStencilState->back.compareOp", ParameterName::IndexVector{i}),
"VkCompareOp", AllVkCompareOpEnums, pCreateInfos[i].pDepthStencilState->back.compareOp,
"VUID-VkPipelineDepthStencilStateCreateInfo-depthCompareOp-parameter");
if (pCreateInfos[i].pDepthStencilState->sType != VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_InvalidStructSType,
"vkCreateGraphicsPipelines: parameter pCreateInfos[%d].pDepthStencilState->sType must be "
"VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO",
i);
}
}
const VkStructureType allowed_structs_VkPipelineColorBlendStateCreateInfo[] = {
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT};
if (pCreateInfos[i].pColorBlendState != nullptr && uses_color_attachment) {
skip |= validate_struct_pnext(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pNext", ParameterName::IndexVector{i}),
"VkPipelineColorBlendAdvancedStateCreateInfoEXT", pCreateInfos[i].pColorBlendState->pNext,
ARRAY_SIZE(allowed_structs_VkPipelineColorBlendStateCreateInfo),
allowed_structs_VkPipelineColorBlendStateCreateInfo, GeneratedVulkanHeaderVersion,
"VUID-VkPipelineColorBlendStateCreateInfo-pNext-pNext");
skip |= validate_reserved_flags(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->flags", ParameterName::IndexVector{i}),
pCreateInfos[i].pColorBlendState->flags, "VUID-VkPipelineColorBlendStateCreateInfo-flags-zerobitmask");
skip |= validate_bool32(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->logicOpEnable", ParameterName::IndexVector{i}),
pCreateInfos[i].pColorBlendState->logicOpEnable);
skip |= validate_array(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->attachmentCount", ParameterName::IndexVector{i}),
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments", ParameterName::IndexVector{i}),
pCreateInfos[i].pColorBlendState->attachmentCount, &pCreateInfos[i].pColorBlendState->pAttachments, false,
true, kVUIDUndefined, kVUIDUndefined);
if (pCreateInfos[i].pColorBlendState->pAttachments != NULL) {
for (uint32_t attachmentIndex = 0; attachmentIndex < pCreateInfos[i].pColorBlendState->attachmentCount;
++attachmentIndex) {
skip |= validate_bool32("vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].blendEnable",
ParameterName::IndexVector{i, attachmentIndex}),
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].blendEnable);
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].srcColorBlendFactor",
ParameterName::IndexVector{i, attachmentIndex}),
"VkBlendFactor", AllVkBlendFactorEnums,
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].srcColorBlendFactor,
"VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].dstColorBlendFactor",
ParameterName::IndexVector{i, attachmentIndex}),
"VkBlendFactor", AllVkBlendFactorEnums,
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].dstColorBlendFactor,
"VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].colorBlendOp",
ParameterName::IndexVector{i, attachmentIndex}),
"VkBlendOp", AllVkBlendOpEnums,
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].colorBlendOp,
"VUID-VkPipelineColorBlendAttachmentState-colorBlendOp-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].srcAlphaBlendFactor",
ParameterName::IndexVector{i, attachmentIndex}),
"VkBlendFactor", AllVkBlendFactorEnums,
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].srcAlphaBlendFactor,
"VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].dstAlphaBlendFactor",
ParameterName::IndexVector{i, attachmentIndex}),
"VkBlendFactor", AllVkBlendFactorEnums,
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].dstAlphaBlendFactor,
"VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-parameter");
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].alphaBlendOp",
ParameterName::IndexVector{i, attachmentIndex}),
"VkBlendOp", AllVkBlendOpEnums,
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].alphaBlendOp,
"VUID-VkPipelineColorBlendAttachmentState-alphaBlendOp-parameter");
skip |=
validate_flags("vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->pAttachments[%i].colorWriteMask",
ParameterName::IndexVector{i, attachmentIndex}),
"VkColorComponentFlagBits", AllVkColorComponentFlagBits,
pCreateInfos[i].pColorBlendState->pAttachments[attachmentIndex].colorWriteMask,
false, false, "VUID-VkPipelineColorBlendAttachmentState-colorWriteMask-parameter");
}
}
if (pCreateInfos[i].pColorBlendState->sType != VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_InvalidStructSType,
"vkCreateGraphicsPipelines: parameter pCreateInfos[%d].pColorBlendState->sType must be "
"VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO",
i);
}
// If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value
if (pCreateInfos[i].pColorBlendState->logicOpEnable == VK_TRUE) {
skip |= validate_ranged_enum(
"vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pColorBlendState->logicOp", ParameterName::IndexVector{i}), "VkLogicOp",
AllVkLogicOpEnums, pCreateInfos[i].pColorBlendState->logicOp,
"VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00607");
}
}
}
if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
if (pCreateInfos[i].basePipelineIndex != -1) {
if (pCreateInfos[i].basePipelineHandle != VK_NULL_HANDLE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkGraphicsPipelineCreateInfo-flags-00724",
"vkCreateGraphicsPipelines parameter, pCreateInfos->basePipelineHandle, must be "
"VK_NULL_HANDLE if pCreateInfos->flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag "
"and pCreateInfos->basePipelineIndex is not -1.");
}
}
if (pCreateInfos[i].basePipelineHandle != VK_NULL_HANDLE) {
if (pCreateInfos[i].basePipelineIndex != -1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkGraphicsPipelineCreateInfo-flags-00725",
"vkCreateGraphicsPipelines parameter, pCreateInfos->basePipelineIndex, must be -1 if "
"pCreateInfos->flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag and "
"pCreateInfos->basePipelineHandle is not VK_NULL_HANDLE.");
}
}
}
if (pCreateInfos[i].pRasterizationState) {
if ((pCreateInfos[i].pRasterizationState->polygonMode != VK_POLYGON_MODE_FILL) &&
(physical_device_features.fillModeNonSolid == false)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_DeviceFeature,
"vkCreateGraphicsPipelines parameter, VkPolygonMode "
"pCreateInfos->pRasterizationState->polygonMode cannot be VK_POLYGON_MODE_POINT or "
"VK_POLYGON_MODE_LINE if VkPhysicalDeviceFeatures->fillModeNonSolid is false.");
}
if (!has_dynamic_line_width && !physical_device_features.wideLines &&
(pCreateInfos[i].pRasterizationState->lineWidth != 1.0f)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 0,
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00749",
"The line width state is static (pCreateInfos[%" PRIu32
"].pDynamicState->pDynamicStates does not contain VK_DYNAMIC_STATE_LINE_WIDTH) and "
"VkPhysicalDeviceFeatures::wideLines is disabled, but pCreateInfos[%" PRIu32
"].pRasterizationState->lineWidth (=%f) is not 1.0.",
i, i, pCreateInfos[i].pRasterizationState->lineWidth);
}
}
for (size_t j = 0; j < pCreateInfos[i].stageCount; j++) {
skip |= validate_string("vkCreateGraphicsPipelines",
ParameterName("pCreateInfos[%i].pStages[%i].pName", ParameterName::IndexVector{i, j}),
"VUID-VkGraphicsPipelineCreateInfo-pStages-parameter", pCreateInfos[i].pStages[j].pName);
}
}
}
return skip;
}
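// For vkCreateComputePipelines the only manual stateless check is the shader entry-point name.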
bool StatelessValidation::manual_PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipelines) {
bool skip = false;
for (uint32_t i = 0; i < createInfoCount; i++) {
skip |= validate_string("vkCreateComputePipelines",
ParameterName("pCreateInfos[%i].stage.pName", ParameterName::IndexVector{i}),
"VUID-VkPipelineShaderStageCreateInfo-pName-parameter", pCreateInfos[i].stage.pName);
}
return skip;
}
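// Manual checks for vkCreateSampler: anisotropy limits and feature requirements, the restrictions
// that apply when unnormalizedCoordinates is VK_TRUE, and enum/extension requirements that depend
// on other members of the create info.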
bool StatelessValidation::manual_PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
bool skip = false;
if (pCreateInfo != nullptr) {
const auto &features = physical_device_features;
const auto &limits = device_limits;
if (pCreateInfo->anisotropyEnable == VK_TRUE) {
if (!in_inclusive_range(pCreateInfo->maxAnisotropy, 1.0F, limits.maxSamplerAnisotropy)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-anisotropyEnable-01071",
"vkCreateSampler(): value of %s must be in range [1.0, %f] %s, but %f found.",
"pCreateInfo->maxAnisotropy", limits.maxSamplerAnisotropy,
"VkPhysicalDeviceLimits::maxSamplerAnistropy", pCreateInfo->maxAnisotropy);
}
            // Anisotropy cannot be enabled in the sampler unless it is enabled as a feature
if (features.samplerAnisotropy == VK_FALSE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-anisotropyEnable-01070",
"vkCreateSampler(): Anisotropic sampling feature is not enabled, %s must be VK_FALSE.",
"pCreateInfo->anisotropyEnable");
}
}
if (pCreateInfo->unnormalizedCoordinates == VK_TRUE) {
if (pCreateInfo->minFilter != pCreateInfo->magFilter) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072",
"vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
"pCreateInfo->minFilter (%s) and pCreateInfo->magFilter (%s) must be equal.",
string_VkFilter(pCreateInfo->minFilter), string_VkFilter(pCreateInfo->magFilter));
}
if (pCreateInfo->mipmapMode != VK_SAMPLER_MIPMAP_MODE_NEAREST) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01073",
"vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
"pCreateInfo->mipmapMode (%s) must be VK_SAMPLER_MIPMAP_MODE_NEAREST.",
string_VkSamplerMipmapMode(pCreateInfo->mipmapMode));
}
if (pCreateInfo->minLod != 0.0f || pCreateInfo->maxLod != 0.0f) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074",
"vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
"pCreateInfo->minLod (%f) and pCreateInfo->maxLod (%f) must both be zero.",
pCreateInfo->minLod, pCreateInfo->maxLod);
}
if ((pCreateInfo->addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE &&
pCreateInfo->addressModeU != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) ||
(pCreateInfo->addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE &&
pCreateInfo->addressModeV != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01075",
"vkCreateSampler(): when pCreateInfo->unnormalizedCoordinates is VK_TRUE, "
"pCreateInfo->addressModeU (%s) and pCreateInfo->addressModeV (%s) must both be "
"VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE or VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER.",
string_VkSamplerAddressMode(pCreateInfo->addressModeU),
string_VkSamplerAddressMode(pCreateInfo->addressModeV));
}
if (pCreateInfo->anisotropyEnable == VK_TRUE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076",
"vkCreateSampler(): pCreateInfo->anisotropyEnable and pCreateInfo->unnormalizedCoordinates must "
"not both be VK_TRUE.");
}
if (pCreateInfo->compareEnable == VK_TRUE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01077",
"vkCreateSampler(): pCreateInfo->compareEnable and pCreateInfo->unnormalizedCoordinates must "
"not both be VK_TRUE.");
}
}
// If compareEnable is VK_TRUE, compareOp must be a valid VkCompareOp value
if (pCreateInfo->compareEnable == VK_TRUE) {
skip |= validate_ranged_enum("vkCreateSampler", "pCreateInfo->compareOp", "VkCompareOp", AllVkCompareOpEnums,
pCreateInfo->compareOp, "VUID-VkSamplerCreateInfo-compareEnable-01080");
}
// If any of addressModeU, addressModeV or addressModeW are VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, borderColor must be a
// valid VkBorderColor value
if ((pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) ||
(pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) ||
(pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) {
skip |= validate_ranged_enum("vkCreateSampler", "pCreateInfo->borderColor", "VkBorderColor", AllVkBorderColorEnums,
pCreateInfo->borderColor, "VUID-VkSamplerCreateInfo-addressModeU-01078");
}
// If any of addressModeU, addressModeV or addressModeW are VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, the
// VK_KHR_sampler_mirror_clamp_to_edge extension must be enabled
if (!device_extensions.vk_khr_sampler_mirror_clamp_to_edge &&
((pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) ||
(pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) ||
(pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-addressModeU-01079",
"vkCreateSampler(): A VkSamplerAddressMode value is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE "
"but the VK_KHR_sampler_mirror_clamp_to_edge extension has not been enabled.");
}
// Checks for the IMG cubic filtering extension
if (device_extensions.vk_img_filter_cubic) {
if ((pCreateInfo->anisotropyEnable == VK_TRUE) &&
((pCreateInfo->minFilter == VK_FILTER_CUBIC_IMG) || (pCreateInfo->magFilter == VK_FILTER_CUBIC_IMG))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSamplerCreateInfo-magFilter-01081",
"vkCreateSampler(): Anisotropic sampling must not be VK_TRUE when either minFilter or magFilter "
"are VK_FILTER_CUBIC_IMG.");
}
}
}
return skip;
}
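// Stateless checks for vkCreateDescriptorSetLayout: immutable sampler handles must not be VK_NULL_HANDLE and
// each binding's stageFlags must be a valid combination of VkShaderStageFlagBits when descriptorCount is non-zero.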
bool StatelessValidation::manual_PreCallValidateCreateDescriptorSetLayout(VkDevice device,
const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) {
bool skip = false;
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
if ((pCreateInfo != nullptr) && (pCreateInfo->pBindings != nullptr)) {
for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
if (pCreateInfo->pBindings[i].descriptorCount != 0) {
// If descriptorType is VK_DESCRIPTOR_TYPE_SAMPLER or VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, and descriptorCount
// is not 0 and pImmutableSamplers is not NULL, pImmutableSamplers must be a pointer to an array of descriptorCount
// valid VkSampler handles
if (((pCreateInfo->pBindings[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) ||
(pCreateInfo->pBindings[i].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) &&
(pCreateInfo->pBindings[i].pImmutableSamplers != nullptr)) {
for (uint32_t descriptor_index = 0; descriptor_index < pCreateInfo->pBindings[i].descriptorCount;
++descriptor_index) {
if (pCreateInfo->pBindings[i].pImmutableSamplers[descriptor_index] == VK_NULL_HANDLE) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_RequiredParameter,
"vkCreateDescriptorSetLayout: required parameter "
"pCreateInfo->pBindings[%d].pImmutableSamplers[%d] specified as VK_NULL_HANDLE",
i, descriptor_index);
}
}
}
// If descriptorCount is not 0, stageFlags must be a valid combination of VkShaderStageFlagBits values
if ((pCreateInfo->pBindings[i].stageFlags != 0) &&
((pCreateInfo->pBindings[i].stageFlags & (~AllVkShaderStageFlagBits)) != 0)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkDescriptorSetLayoutBinding-descriptorCount-00283",
"vkCreateDescriptorSetLayout(): if pCreateInfo->pBindings[%d].descriptorCount is not 0, "
"pCreateInfo->pBindings[%d].stageFlags must be a valid combination of VkShaderStageFlagBits "
"values.",
i, i);
}
}
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool,
uint32_t descriptorSetCount,
const VkDescriptorSet *pDescriptorSets) {
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
// This is an array of handles, where the elements are allowed to be VK_NULL_HANDLE, and does not require any validation beyond
// validate_array()
return validate_array("vkFreeDescriptorSets", "descriptorSetCount", "pDescriptorSets", descriptorSetCount, &pDescriptorSets,
true, true, kVUIDUndefined, kVUIDUndefined);
}
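// Stateless checks for vkUpdateDescriptorSets: per-write descriptorCount and dstSet handle, the
// pImageInfo/pBufferInfo/pTexelBufferView array required by each descriptor type, and buffer offset alignment.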
bool StatelessValidation::manual_PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
bool skip = false;
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
if (pDescriptorWrites != NULL) {
for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
// descriptorCount must be greater than 0
if (pDescriptorWrites[i].descriptorCount == 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkWriteDescriptorSet-descriptorCount-arraylength",
"vkUpdateDescriptorSets(): parameter pDescriptorWrites[%d].descriptorCount must be greater than 0.", i);
}
// dstSet must be a valid VkDescriptorSet handle
skip |= validate_required_handle("vkUpdateDescriptorSets",
ParameterName("pDescriptorWrites[%i].dstSet", ParameterName::IndexVector{i}),
pDescriptorWrites[i].dstSet);
if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) {
// If descriptorType is VK_DESCRIPTOR_TYPE_SAMPLER, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
// VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE or VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
// pImageInfo must be a pointer to an array of descriptorCount valid VkDescriptorImageInfo structures
if (pDescriptorWrites[i].pImageInfo == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkWriteDescriptorSet-descriptorType-00322",
"vkUpdateDescriptorSets(): if pDescriptorWrites[%d].descriptorType is "
"VK_DESCRIPTOR_TYPE_SAMPLER, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "
"VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE or "
"VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, pDescriptorWrites[%d].pImageInfo must not be NULL.",
i, i);
} else if (pDescriptorWrites[i].descriptorType != VK_DESCRIPTOR_TYPE_SAMPLER) {
// If descriptorType is VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
// VK_DESCRIPTOR_TYPE_STORAGE_IMAGE or VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, the imageView and imageLayout
// members of any given element of pImageInfo must be a valid VkImageView and VkImageLayout, respectively
for (uint32_t descriptor_index = 0; descriptor_index < pDescriptorWrites[i].descriptorCount;
++descriptor_index) {
skip |= validate_required_handle("vkUpdateDescriptorSets",
ParameterName("pDescriptorWrites[%i].pImageInfo[%i].imageView",
ParameterName::IndexVector{i, descriptor_index}),
pDescriptorWrites[i].pImageInfo[descriptor_index].imageView);
skip |= validate_ranged_enum("vkUpdateDescriptorSets",
ParameterName("pDescriptorWrites[%i].pImageInfo[%i].imageLayout",
ParameterName::IndexVector{i, descriptor_index}),
"VkImageLayout", AllVkImageLayoutEnums,
pDescriptorWrites[i].pImageInfo[descriptor_index].imageLayout, kVUIDUndefined);
}
}
} else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
// If descriptorType is VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
// VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC or VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, pBufferInfo must be a
// pointer to an array of descriptorCount valid VkDescriptorBufferInfo structures
if (pDescriptorWrites[i].pBufferInfo == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkWriteDescriptorSet-descriptorType-00324",
"vkUpdateDescriptorSets(): if pDescriptorWrites[%d].descriptorType is "
"VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "
"VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC or VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, "
"pDescriptorWrites[%d].pBufferInfo must not be NULL.",
i, i);
} else {
for (uint32_t descriptorIndex = 0; descriptorIndex < pDescriptorWrites[i].descriptorCount; ++descriptorIndex) {
skip |= validate_required_handle("vkUpdateDescriptorSets",
ParameterName("pDescriptorWrites[%i].pBufferInfo[%i].buffer",
ParameterName::IndexVector{i, descriptorIndex}),
pDescriptorWrites[i].pBufferInfo[descriptorIndex].buffer);
}
}
} else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)) {
// If descriptorType is VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER or VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
// pTexelBufferView must be a pointer to an array of descriptorCount valid VkBufferView handles
if (pDescriptorWrites[i].pTexelBufferView == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkWriteDescriptorSet-descriptorType-00323",
"vkUpdateDescriptorSets(): if pDescriptorWrites[%d].descriptorType is "
"VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER or VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, "
"pDescriptorWrites[%d].pTexelBufferView must not be NULL.",
i, i);
} else {
for (uint32_t descriptor_index = 0; descriptor_index < pDescriptorWrites[i].descriptorCount;
++descriptor_index) {
skip |= validate_required_handle("vkUpdateDescriptorSets",
ParameterName("pDescriptorWrites[%i].pTexelBufferView[%i]",
ParameterName::IndexVector{i, descriptor_index}),
pDescriptorWrites[i].pTexelBufferView[descriptor_index]);
}
}
}
if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)) {
VkDeviceSize uniformAlignment = device_limits.minUniformBufferOffsetAlignment;
for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) {
if (pDescriptorWrites[i].pBufferInfo != NULL) {
if (SafeModulo(pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment) != 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
0, "VUID-VkWriteDescriptorSet-descriptorType-00327",
"vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (0x%" PRIxLEAST64
") must be a multiple of device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
i, j, pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment);
}
}
}
} else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
VkDeviceSize storageAlignment = device_limits.minStorageBufferOffsetAlignment;
for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) {
if (pDescriptorWrites[i].pBufferInfo != NULL) {
if (SafeModulo(pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment) != 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
0, "VUID-VkWriteDescriptorSet-descriptorType-00328",
"vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (0x%" PRIxLEAST64
") must be a multiple of device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
i, j, pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment);
}
}
}
}
}
}
return skip;
}
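// vkCreateRenderPass and vkCreateRenderPass2KHR share the same manual checks via CreateRenderPassGeneric().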
bool StatelessValidation::manual_PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkRenderPass *pRenderPass) {
return CreateRenderPassGeneric(device, pCreateInfo, pAllocator, pRenderPass, RENDER_PASS_VERSION_1);
}
bool StatelessValidation::manual_PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkRenderPass *pRenderPass) {
return CreateRenderPassGeneric(device, pCreateInfo, pAllocator, pRenderPass, RENDER_PASS_VERSION_2);
}
bool StatelessValidation::manual_PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool,
uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) {
bool skip = false;
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
// This is an array of handles, where the elements are allowed to be VK_NULL_HANDLE, and does not require any validation beyond
// validate_array()
skip |= validate_array("vkFreeCommandBuffers", "commandBufferCount", "pCommandBuffers", commandBufferCount, &pCommandBuffers,
true, true, kVUIDUndefined, kVUIDUndefined);
return skip;
}
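// Stateless checks for vkBeginCommandBuffer: pInheritanceInfo structure type and pNext chain, plus its
// occlusionQueryEnable, queryFlags, and pipelineStatistics members.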
bool StatelessValidation::manual_PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo) {
bool skip = false;
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
// TODO: pBeginInfo->pInheritanceInfo must not be NULL if commandBuffer is a secondary command buffer
skip |= validate_struct_type("vkBeginCommandBuffer", "pBeginInfo->pInheritanceInfo",
"VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO", pBeginInfo->pInheritanceInfo,
VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, false,
"VUID_vkBeginCommandBuffer-pBeginInfo-parameter", "VUID_VkCommandBufferBeginInfo-sType-sType");
if (pBeginInfo->pInheritanceInfo != NULL) {
skip |= validate_struct_pnext("vkBeginCommandBuffer", "pBeginInfo->pInheritanceInfo->pNext", NULL,
pBeginInfo->pInheritanceInfo->pNext, 0, NULL, GeneratedVulkanHeaderVersion,
"VUID-VkCommandBufferBeginInfo-pNext-pNext");
skip |= validate_bool32("vkBeginCommandBuffer", "pBeginInfo->pInheritanceInfo->occlusionQueryEnable",
pBeginInfo->pInheritanceInfo->occlusionQueryEnable);
// TODO: This only needs to be validated when the inherited queries feature is enabled
// skip |= validate_flags("vkBeginCommandBuffer", "pBeginInfo->pInheritanceInfo->queryFlags",
// "VkQueryControlFlagBits", AllVkQueryControlFlagBits, pBeginInfo->pInheritanceInfo->queryFlags, false);
// TODO: This must be 0 if the pipeline statistics queries feature is not enabled
skip |= validate_flags("vkBeginCommandBuffer", "pBeginInfo->pInheritanceInfo->pipelineStatistics",
"VkQueryPipelineStatisticFlagBits", AllVkQueryPipelineStatisticFlagBits,
pBeginInfo->pInheritanceInfo->pipelineStatistics, false, false, kVUIDUndefined);
}
if (pInfo != NULL) {
if ((physical_device_features.inheritedQueries == VK_FALSE) && (pInfo->occlusionQueryEnable != VK_FALSE)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkCommandBufferInheritanceInfo-occlusionQueryEnable-00056",
"Cannot set inherited occlusionQueryEnable in vkBeginCommandBuffer() when device does not support "
"inheritedQueries.");
}
if ((physical_device_features.inheritedQueries != VK_FALSE) && (pInfo->occlusionQueryEnable != VK_FALSE)) {
skip |= validate_flags("vkBeginCommandBuffer", "pBeginInfo->pInheritanceInfo->queryFlags", "VkQueryControlFlagBits",
AllVkQueryControlFlagBits, pInfo->queryFlags, false, false,
"VUID-VkCommandBufferInheritanceInfo-queryFlags-00057");
}
}
return skip;
}
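// vkCmdSetViewport: enforce the multiViewport feature, the maxViewports limit, and per-viewport bounds.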
bool StatelessValidation::manual_PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount, const VkViewport *pViewports) {
bool skip = false;
if (!physical_device_features.multiViewport) {
if (firstViewport != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-firstViewport-01224",
"vkCmdSetViewport: The multiViewport feature is disabled, but firstViewport (=%" PRIu32 ") is not 0.",
firstViewport);
}
if (viewportCount > 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-viewportCount-01225",
"vkCmdSetViewport: The multiViewport feature is disabled, but viewportCount (=%" PRIu32 ") is not 1.",
viewportCount);
}
} else { // multiViewport enabled
const uint64_t sum = static_cast<uint64_t>(firstViewport) + static_cast<uint64_t>(viewportCount);
if (sum > device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-firstViewport-01223",
"vkCmdSetViewport: firstViewport + viewportCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
firstViewport, viewportCount, sum, device_limits.maxViewports);
}
}
if (pViewports) {
for (uint32_t viewport_i = 0; viewport_i < viewportCount; ++viewport_i) {
const auto &viewport = pViewports[viewport_i]; // will crash on invalid ptr
const char fn_name[] = "vkCmdSetViewport";
const std::string param_name = "pViewports[" + std::to_string(viewport_i) + "]";
skip |= manual_PreCallValidateViewport(viewport, fn_name, param_name.c_str(),
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer));
}
}
return skip;
}
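// vkCmdSetScissor: enforce the multiViewport feature, the maxViewports limit, non-negative offsets, and
// overflow-safe offset + extent sums.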
bool StatelessValidation::manual_PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor,
uint32_t scissorCount, const VkRect2D *pScissors) {
bool skip = false;
if (!physical_device_features.multiViewport) {
if (firstScissor != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-firstScissor-00593",
"vkCmdSetScissor: The multiViewport feature is disabled, but firstScissor (=%" PRIu32 ") is not 0.",
firstScissor);
}
if (scissorCount > 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-scissorCount-00594",
"vkCmdSetScissor: The multiViewport feature is disabled, but scissorCount (=%" PRIu32 ") is not 1.",
scissorCount);
}
} else { // multiViewport enabled
const uint64_t sum = static_cast<uint64_t>(firstScissor) + static_cast<uint64_t>(scissorCount);
if (sum > device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-firstScissor-00592",
"vkCmdSetScissor: firstScissor + scissorCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
firstScissor, scissorCount, sum, device_limits.maxViewports);
}
}
if (pScissors) {
for (uint32_t scissor_i = 0; scissor_i < scissorCount; ++scissor_i) {
const auto &scissor = pScissors[scissor_i]; // will crash on invalid ptr
if (scissor.offset.x < 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-x-00595",
"vkCmdSetScissor: pScissors[%" PRIu32 "].offset.x (=%" PRIi32 ") is negative.", scissor_i,
scissor.offset.x);
}
if (scissor.offset.y < 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-x-00595",
"vkCmdSetScissor: pScissors[%" PRIu32 "].offset.y (=%" PRIi32 ") is negative.", scissor_i,
scissor.offset.y);
}
const int64_t x_sum = static_cast<int64_t>(scissor.offset.x) + static_cast<int64_t>(scissor.extent.width);
if (x_sum > INT32_MAX) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-offset-00596",
"vkCmdSetScissor: offset.x + extent.width (=%" PRIi32 " + %" PRIu32 " = %" PRIi64
") of pScissors[%" PRIu32 "] will overflow int32_t.",
scissor.offset.x, scissor.extent.width, x_sum, scissor_i);
}
const int64_t y_sum = static_cast<int64_t>(scissor.offset.y) + static_cast<int64_t>(scissor.extent.height);
if (y_sum > INT32_MAX) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-offset-00597",
"vkCmdSetScissor: offset.y + extent.height (=%" PRIi32 " + %" PRIu32 " = %" PRIi64
") of pScissors[%" PRIu32 "] will overflow int32_t.",
scissor.offset.y, scissor.extent.height, y_sum, scissor_i);
}
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
bool skip = false;
if (!physical_device_features.wideLines && (lineWidth != 1.0f)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-lineWidth-00788",
"VkPhysicalDeviceFeatures::wideLines is disabled, but lineWidth (=%f) is not 1.0.", lineWidth);
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
uint32_t firstVertex, uint32_t firstInstance) {
bool skip = false;
if (vertexCount == 0) {
// TODO: Verify against Valid Usage section. I don't see a non-zero vertexCount listed, may need to add that and make
// this an error or leave as is.
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_RequiredParameter, "vkCmdDraw parameter, uint32_t vertexCount, is 0");
}
if (instanceCount == 0) {
// TODO: Verify against Valid Usage section. I don't see a non-zero instanceCount listed, may need to add that and make
// this an error or leave as is.
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_RequiredParameter, "vkCmdDraw parameter, uint32_t instanceCount, is 0");
}
return skip;
}
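// Indirect draws with count greater than 1 require the multiDrawIndirect device feature.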
bool StatelessValidation::manual_PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t count, uint32_t stride) {
bool skip = false;
if (!physical_device_features.multiDrawIndirect && ((count > 1))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_DeviceFeature,
"CmdDrawIndirect(): Device feature multiDrawIndirect disabled: count must be 0 or 1 but is %d", count);
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer,
VkDeviceSize offset, uint32_t count, uint32_t stride) {
bool skip = false;
if (!physical_device_features.multiDrawIndirect && ((count > 1))) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_PVError_DeviceFeature,
"CmdDrawIndexedIndirect(): Device feature multiDrawIndirect disabled: count must be 0 or 1 but is %d", count);
}
return skip;
}
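// The copy/blit commands below check that each region's aspectMask contains only recognized aspect bits;
// the multi-plane aspects are legal only when VK_KHR_sampler_ycbcr_conversion is enabled.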
bool StatelessValidation::manual_PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
bool skip = false;
VkImageAspectFlags legal_aspect_flags =
VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT;
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
legal_aspect_flags |= (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (pRegions != nullptr) {
if ((pRegions->srcSubresource.aspectMask & legal_aspect_flags) == 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageSubresourceLayers-aspectMask-parameter",
"vkCmdCopyImage() parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator.");
}
if ((pRegions->dstSubresource.aspectMask & legal_aspect_flags) == 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageSubresourceLayers-aspectMask-parameter",
"vkCmdCopyImage() parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator.");
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
bool skip = false;
VkImageAspectFlags legal_aspect_flags =
VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT;
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
legal_aspect_flags |= (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (pRegions != nullptr) {
if ((pRegions->srcSubresource.aspectMask & legal_aspect_flags) == 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_UnrecognizedValue,
"vkCmdBlitImage() parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator");
}
if ((pRegions->dstSubresource.aspectMask & legal_aspect_flags) == 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_UnrecognizedValue,
"vkCmdBlitImage() parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator");
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
VkImage dstImage, VkImageLayout dstImageLayout,
uint32_t regionCount, const VkBufferImageCopy *pRegions) {
bool skip = false;
VkImageAspectFlags legal_aspect_flags =
VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT;
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
legal_aspect_flags |= (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (pRegions != nullptr) {
if ((pRegions->imageSubresource.aspectMask & legal_aspect_flags) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_UnrecognizedValue,
"vkCmdCopyBufferToImage() parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an "
"unrecognized enumerator");
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferImageCopy *pRegions) {
bool skip = false;
VkImageAspectFlags legal_aspect_flags =
VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT;
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
legal_aspect_flags |= (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (pRegions != nullptr) {
if ((pRegions->imageSubresource.aspectMask & legal_aspect_flags) == 0) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_PVError_UnrecognizedValue,
                            "vkCmdCopyImageToBuffer() parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an "
                            "unrecognized enumerator");
}
}
return skip;
}
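// vkCmdUpdateBuffer: dstOffset and dataSize must be multiples of 4, and dataSize must be greater than 0 and at most 65536.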
bool StatelessValidation::manual_PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) {
bool skip = false;
if (dstOffset & 3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdUpdateBuffer-dstOffset-00036",
"vkCmdUpdateBuffer() parameter, VkDeviceSize dstOffset (0x%" PRIxLEAST64 "), is not a multiple of 4.",
dstOffset);
}
    if ((dataSize == 0) || (dataSize > 65536)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdUpdateBuffer-dataSize-00037",
"vkCmdUpdateBuffer() parameter, VkDeviceSize dataSize (0x%" PRIxLEAST64
"), must be greater than zero and less than or equal to 65536.",
dataSize);
} else if (dataSize & 3) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdUpdateBuffer-dataSize-00038",
"vkCmdUpdateBuffer() parameter, VkDeviceSize dataSize (0x%" PRIxLEAST64 "), is not a multiple of 4.", dataSize);
}
return skip;
}
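// vkCmdFillBuffer: dstOffset must be a multiple of 4; size, unless it is VK_WHOLE_SIZE, must be a non-zero multiple of 4.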
bool StatelessValidation::manual_PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
bool skip = false;
if (dstOffset & 3) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdFillBuffer-dstOffset-00025",
"vkCmdFillBuffer() parameter, VkDeviceSize dstOffset (0x%" PRIxLEAST64 "), is not a multiple of 4.", dstOffset);
}
if (size != VK_WHOLE_SIZE) {
        if (size == 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdFillBuffer-size-00026",
"vkCmdFillBuffer() parameter, VkDeviceSize size (0x%" PRIxLEAST64 "), must be greater than zero.", size);
} else if (size & 3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdFillBuffer-size-00028",
"vkCmdFillBuffer() parameter, VkDeviceSize size (0x%" PRIxLEAST64 "), is not a multiple of 4.", size);
}
}
return skip;
}
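// Stateless checks for vkCreateSwapchainKHR: concurrent-sharing queue family parameters and imageArrayLayers > 0.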
bool StatelessValidation::manual_PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchain) {
bool skip = false;
const LogMiscParams log_misc{VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, VK_NULL_HANDLE, "vkCreateSwapchainKHR"};
if (pCreateInfo != nullptr) {
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) {
// If imageSharingMode is VK_SHARING_MODE_CONCURRENT, queueFamilyIndexCount must be greater than 1
if (pCreateInfo->queueFamilyIndexCount <= 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01278",
"vkCreateSwapchainKHR(): if pCreateInfo->imageSharingMode is VK_SHARING_MODE_CONCURRENT, "
"pCreateInfo->queueFamilyIndexCount must be greater than 1.");
}
// If imageSharingMode is VK_SHARING_MODE_CONCURRENT, pQueueFamilyIndices must be a pointer to an array of
// queueFamilyIndexCount uint32_t values
if (pCreateInfo->pQueueFamilyIndices == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01277",
"vkCreateSwapchainKHR(): if pCreateInfo->imageSharingMode is VK_SHARING_MODE_CONCURRENT, "
"pCreateInfo->pQueueFamilyIndices must be a pointer to an array of "
"pCreateInfo->queueFamilyIndexCount uint32_t values.");
} else {
skip |= ValidateQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateSwapchainKHR", "pCreateInfo->pQueueFamilyIndices",
kVUID_PVError_InvalidUsage, kVUID_PVError_InvalidUsage, false);
}
}
skip |= ValidateGreaterThanZero(pCreateInfo->imageArrayLayers, "pCreateInfo->imageArrayLayers",
"VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275", log_misc);
}
return skip;
}
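// vkQueuePresentKHR: validate the optional VkPresentRegionsKHR chain; its swapchainCount must match pPresentInfo's.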
bool StatelessValidation::manual_PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
bool skip = false;
if (pPresentInfo && pPresentInfo->pNext) {
const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
if (present_regions) {
// TODO: This and all other pNext extension dependencies should be added to code-generation
skip |= require_device_extension(device_extensions.vk_khr_incremental_present, "vkQueuePresentKHR",
VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME);
if (present_regions->swapchainCount != pPresentInfo->swapchainCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_PVError_InvalidUsage,
"QueuePresentKHR(): pPresentInfo->swapchainCount has a value of %i but VkPresentRegionsKHR "
"extension swapchainCount is %i. These values must be equal.",
pPresentInfo->swapchainCount, present_regions->swapchainCount);
}
skip |= validate_struct_pnext("QueuePresentKHR", "pCreateInfo->pNext->pNext", NULL, present_regions->pNext, 0, NULL,
GeneratedVulkanHeaderVersion, "VUID-VkPresentInfoKHR-pNext-pNext");
skip |= validate_array("QueuePresentKHR", "pCreateInfo->pNext->swapchainCount", "pCreateInfo->pNext->pRegions",
present_regions->swapchainCount, &present_regions->pRegions, true, false, kVUIDUndefined,
kVUIDUndefined);
for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
skip |= validate_array("QueuePresentKHR", "pCreateInfo->pNext->pRegions[].rectangleCount",
"pCreateInfo->pNext->pRegions[].pRectangles", present_regions->pRegions[i].rectangleCount,
&present_regions->pRegions[i].pRectangles, true, false, kVUIDUndefined, kVUIDUndefined);
}
}
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool StatelessValidation::manual_PreCallValidateCreateWin32SurfaceKHR(VkInstance instance,
const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface) {
bool skip = false;
if (pCreateInfo->hwnd == nullptr) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkWin32SurfaceCreateInfoKHR-hwnd-01308",
"vkCreateWin32SurfaceKHR(): hwnd must be a valid Win32 HWND but hwnd is NULL.");
}
return skip;
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool StatelessValidation::manual_PreCallValidateDebugMarkerSetObjectNameEXT(VkDevice device,
const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
if (pNameInfo->pObjectName) {
report_data->debugObjectNameMap->insert(
std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
} else {
report_data->debugObjectNameMap->erase(pNameInfo->object);
}
return false;
}
bool StatelessValidation::manual_PreCallValidateCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorPool *pDescriptorPool) {
bool skip = false;
if (pCreateInfo) {
        if (pCreateInfo->maxSets == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
VK_NULL_HANDLE, "VUID-VkDescriptorPoolCreateInfo-maxSets-00301",
"vkCreateDescriptorPool(): pCreateInfo->maxSets is not greater than 0.");
}
if (pCreateInfo->pPoolSizes) {
for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
                if (pCreateInfo->pPoolSizes[i].descriptorCount == 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, VK_NULL_HANDLE,
"VUID-VkDescriptorPoolSize-descriptorCount-00302",
"vkCreateDescriptorPool(): pCreateInfo->pPoolSizes[%" PRIu32 "].descriptorCount is not greater than 0.", i);
}
if (pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT &&
(pCreateInfo->pPoolSizes[i].descriptorCount % 4) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
VK_NULL_HANDLE, "VUID-VkDescriptorPoolSize-type-02218",
"vkCreateDescriptorPool(): pCreateInfo->pPoolSizes[%" PRIu32
"].type is VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT "
" and pCreateInfo->pPoolSizes[%" PRIu32 "].descriptorCount is not a multiple of 4.",
i, i);
}
}
}
}
return skip;
}
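// vkCmdDispatch: each group count must not exceed the corresponding maxComputeWorkGroupCount limit.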
bool StatelessValidation::manual_PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX,
uint32_t groupCountY, uint32_t groupCountZ) {
bool skip = false;
if (groupCountX > device_limits.maxComputeWorkGroupCount[0]) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatch-groupCountX-00386",
"vkCmdDispatch(): groupCountX (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[0] (%" PRIu32 ").",
groupCountX, device_limits.maxComputeWorkGroupCount[0]);
}
if (groupCountY > device_limits.maxComputeWorkGroupCount[1]) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatch-groupCountY-00387",
"vkCmdDispatch(): groupCountY (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[1] (%" PRIu32 ").",
groupCountY, device_limits.maxComputeWorkGroupCount[1]);
}
if (groupCountZ > device_limits.maxComputeWorkGroupCount[2]) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatch-groupCountZ-00388",
"vkCmdDispatch(): groupCountZ (%" PRIu32 ") exceeds device limit maxComputeWorkGroupCount[2] (%" PRIu32 ").",
groupCountZ, device_limits.maxComputeWorkGroupCount[2]);
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer,
VkDeviceSize offset) {
bool skip = false;
if ((offset % 4) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatchIndirect-offset-00406",
"vkCmdDispatchIndirect(): offset (%" PRIu64 ") must be a multiple of 4.", offset);
}
return skip;
}
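// vkCmdDispatchBaseKHR: per-dimension checks of baseGroup and baseGroup + groupCount against maxComputeWorkGroupCount.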
bool StatelessValidation::manual_PreCallValidateCmdDispatchBaseKHR(VkCommandBuffer commandBuffer, uint32_t baseGroupX,
uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX,
uint32_t groupCountY, uint32_t groupCountZ) {
bool skip = false;
// Paired if {} else if {} tests used to avoid any possible uint underflow
uint32_t limit = device_limits.maxComputeWorkGroupCount[0];
if (baseGroupX >= limit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatchBase-baseGroupX-00421",
"vkCmdDispatch(): baseGroupX (%" PRIu32
") equals or exceeds device limit maxComputeWorkGroupCount[0] (%" PRIu32 ").",
baseGroupX, limit);
} else if (groupCountX > (limit - baseGroupX)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatchBase-groupCountX-00424",
"vkCmdDispatchBaseKHR(): baseGroupX (%" PRIu32 ") + groupCountX (%" PRIu32
") exceeds device limit maxComputeWorkGroupCount[0] (%" PRIu32 ").",
baseGroupX, groupCountX, limit);
}
limit = device_limits.maxComputeWorkGroupCount[1];
if (baseGroupY >= limit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatchBase-baseGroupX-00422",
"vkCmdDispatch(): baseGroupY (%" PRIu32
") equals or exceeds device limit maxComputeWorkGroupCount[1] (%" PRIu32 ").",
baseGroupY, limit);
} else if (groupCountY > (limit - baseGroupY)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatchBase-groupCountY-00425",
"vkCmdDispatchBaseKHR(): baseGroupY (%" PRIu32 ") + groupCountY (%" PRIu32
") exceeds device limit maxComputeWorkGroupCount[1] (%" PRIu32 ").",
baseGroupY, groupCountY, limit);
}
limit = device_limits.maxComputeWorkGroupCount[2];
if (baseGroupZ >= limit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatchBase-baseGroupZ-00423",
"vkCmdDispatch(): baseGroupZ (%" PRIu32
") equals or exceeds device limit maxComputeWorkGroupCount[2] (%" PRIu32 ").",
baseGroupZ, limit);
} else if (groupCountZ > (limit - baseGroupZ)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDispatchBase-groupCountZ-00426",
"vkCmdDispatchBaseKHR(): baseGroupZ (%" PRIu32 ") + groupCountZ (%" PRIu32
") exceeds device limit maxComputeWorkGroupCount[2] (%" PRIu32 ").",
baseGroupZ, groupCountZ, limit);
}
return skip;
}
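// vkCmdSetExclusiveScissorNV: same multiViewport/maxViewports and scissor bounds rules as vkCmdSetScissor.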
bool StatelessValidation::manual_PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer,
uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount,
const VkRect2D *pExclusiveScissors) {
bool skip = false;
if (!physical_device_features.multiViewport) {
if (firstExclusiveScissor != 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02035",
"vkCmdSetExclusiveScissorNV: The multiViewport feature is disabled, but firstExclusiveScissor (=%" PRIu32
") is not 0.",
firstExclusiveScissor);
}
if (exclusiveScissorCount > 1) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-exclusiveScissorCount-02036",
"vkCmdSetExclusiveScissorNV: The multiViewport feature is disabled, but exclusiveScissorCount (=%" PRIu32
") is not 1.",
exclusiveScissorCount);
}
} else { // multiViewport enabled
const uint64_t sum = static_cast<uint64_t>(firstExclusiveScissor) + static_cast<uint64_t>(exclusiveScissorCount);
if (sum > device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02034",
"vkCmdSetExclusiveScissorNV: firstExclusiveScissor + exclusiveScissorCount (=%" PRIu32 " + %" PRIu32
" = %" PRIu64 ") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
firstExclusiveScissor, exclusiveScissorCount, sum, device_limits.maxViewports);
}
}
if (firstExclusiveScissor >= device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-firstExclusiveScissor-02033",
"vkCmdSetExclusiveScissorNV: firstExclusiveScissor (=%" PRIu32 ") must be less than maxViewports (=%" PRIu32
").",
firstExclusiveScissor, device_limits.maxViewports);
}
if (pExclusiveScissors) {
for (uint32_t scissor_i = 0; scissor_i < exclusiveScissorCount; ++scissor_i) {
const auto &scissor = pExclusiveScissors[scissor_i]; // will crash on invalid ptr
if (scissor.offset.x < 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-x-02037",
"vkCmdSetExclusiveScissorNV: pScissors[%" PRIu32 "].offset.x (=%" PRIi32 ") is negative.",
scissor_i, scissor.offset.x);
}
if (scissor.offset.y < 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-x-02037",
"vkCmdSetExclusiveScissorNV: pScissors[%" PRIu32 "].offset.y (=%" PRIi32 ") is negative.",
scissor_i, scissor.offset.y);
}
const int64_t x_sum = static_cast<int64_t>(scissor.offset.x) + static_cast<int64_t>(scissor.extent.width);
if (x_sum > INT32_MAX) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-offset-02038",
"vkCmdSetExclusiveScissorNV: offset.x + extent.width (=%" PRIi32 " + %" PRIu32 " = %" PRIi64
") of pScissors[%" PRIu32 "] will overflow int32_t.",
scissor.offset.x, scissor.extent.width, x_sum, scissor_i);
}
const int64_t y_sum = static_cast<int64_t>(scissor.offset.y) + static_cast<int64_t>(scissor.extent.height);
if (y_sum > INT32_MAX) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-offset-02039",
"vkCmdSetExclusiveScissorNV: offset.y + extent.height (=%" PRIi32 " + %" PRIu32 " = %" PRIi64
") of pScissors[%" PRIu32 "] will overflow int32_t.",
scissor.offset.y, scissor.extent.height, y_sum, scissor_i);
}
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdSetViewportShadingRatePaletteNV(
VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) {
bool skip = false;
if (!physical_device_features.multiViewport) {
if (firstViewport != 0) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02068",
"vkCmdSetViewportShadingRatePaletteNV: The multiViewport feature is disabled, but firstViewport (=%" PRIu32
") is not 0.",
firstViewport);
}
if (viewportCount > 1) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-viewportCount-02069",
"vkCmdSetViewportShadingRatePaletteNV: The multiViewport feature is disabled, but viewportCount (=%" PRIu32
") is not 1.",
viewportCount);
}
}
if (firstViewport >= device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02066",
"vkCmdSetViewportShadingRatePaletteNV: firstViewport (=%" PRIu32
") must be less than maxViewports (=%" PRIu32 ").",
firstViewport, device_limits.maxViewports);
}
const uint64_t sum = static_cast<uint64_t>(firstViewport) + static_cast<uint64_t>(viewportCount);
if (sum > device_limits.maxViewports) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02067",
"vkCmdSetViewportShadingRatePaletteNV: firstViewport + viewportCount (=%" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than VkPhysicalDeviceLimits::maxViewports (=%" PRIu32 ").",
firstViewport, viewportCount, sum, device_limits.maxViewports);
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdSetCoarseSampleOrderNV(VkCommandBuffer commandBuffer,
VkCoarseSampleOrderTypeNV sampleOrderType,
uint32_t customSampleOrderCount,
const VkCoarseSampleOrderCustomNV *pCustomSampleOrders) {
bool skip = false;
if (sampleOrderType != VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV && customSampleOrderCount != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetCoarseSampleOrderNV-sampleOrderType-02081",
"vkCmdSetCoarseSampleOrderNV: If sampleOrderType is not VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, "
"customSampleOrderCount must be 0.");
}
for (uint32_t order_i = 0; order_i < customSampleOrderCount; ++order_i) {
skip |= ValidateCoarseSampleOrderCustomNV(&pCustomSampleOrders[order_i]);
}
return skip;
}
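// NV mesh shading draw commands: taskCount limit, offset/stride alignment, and multiDrawIndirect requirements.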
bool StatelessValidation::manual_PreCallValidateCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount,
uint32_t firstTask) {
bool skip = false;
if (taskCount > phys_dev_ext_props.mesh_shader_props.maxDrawMeshTasksCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawMeshTasksNV-taskCount-02119",
"vkCmdDrawMeshTasksNV() parameter, uint32_t taskCount (0x%" PRIxLEAST32
"), must be less than or equal to VkPhysicalDeviceMeshShaderPropertiesNV::maxDrawMeshTasksCount (0x%" PRIxLEAST32 ").",
taskCount, phys_dev_ext_props.mesh_shader_props.maxDrawMeshTasksCount);
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer,
VkDeviceSize offset, uint32_t drawCount,
uint32_t stride) {
bool skip = false;
if (offset & 3) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawMeshTasksIndirectNV-offset-02145",
"vkCmdDrawMeshTasksIndirectNV() parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.", offset);
}
if (drawCount > 1 && ((stride & 3) || stride < sizeof(VkDrawMeshTasksIndirectCommandNV))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02146",
"vkCmdDrawMeshTasksIndirectNV() parameter, uint32_t stride (0x%" PRIxLEAST32
"), is not a multiple of 4 or smaller than sizeof (VkDrawMeshTasksIndirectCommandNV).",
stride);
}
if (!physical_device_features.multiDrawIndirect && ((drawCount > 1))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawMeshTasksIndirectNV-drawCount-02147",
"vkCmdDrawMeshTasksIndirectNV(): Device feature multiDrawIndirect disabled: count must be 0 or 1 but is %d",
drawCount);
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer,
VkDeviceSize offset, VkBuffer countBuffer,
VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
bool skip = false;
if (offset & 3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawMeshTasksIndirectCountNV-offset-02180",
"vkCmdDrawMeshTasksIndirectCountNV() parameter, VkDeviceSize offset (0x%" PRIxLEAST64
"), is not a multiple of 4.",
offset);
}
if (countBufferOffset & 3) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawMeshTasksIndirectCountNV-countBufferOffset-02181",
"vkCmdDrawMeshTasksIndirectCountNV() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64
"), is not a multiple of 4.",
countBufferOffset);
}
if ((stride & 3) || stride < sizeof(VkDrawMeshTasksIndirectCommandNV)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawMeshTasksIndirectCountNV-stride-02182",
"vkCmdDrawMeshTasksIndirectCountNV() parameter, uint32_t stride (0x%" PRIxLEAST32
"), is not a multiple of 4 or smaller than sizeof (VkDrawMeshTasksIndirectCommandNV).",
stride);
}
return skip;
}
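// vkCreateCommandPool: queueFamilyIndex must refer to a queue family that exists on this device.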
bool StatelessValidation::manual_PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkCommandPool *pCommandPool) {
return ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
"VUID-vkCreateCommandPool-queueFamilyIndex-01937");
}
bool StatelessValidation::manual_PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
bool skip = false;
// Validation for parameters excluded from the generated validation code due to a 'noautovalidity' tag in vk.xml
if (pCreateInfo != nullptr) {
// If queryType is VK_QUERY_TYPE_PIPELINE_STATISTICS, pipelineStatistics must be a valid combination of
// VkQueryPipelineStatisticFlagBits values
if ((pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) && (pCreateInfo->pipelineStatistics != 0) &&
((pCreateInfo->pipelineStatistics & (~AllVkQueryPipelineStatisticFlagBits)) != 0)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkQueryPoolCreateInfo-queryType-00792",
"vkCreateQueryPool(): if pCreateInfo->queryType is VK_QUERY_TYPE_PIPELINE_STATISTICS, "
"pCreateInfo->pipelineStatistics must be a valid combination of VkQueryPipelineStatisticFlagBits "
"values.");
}
}
return skip;
}
bool StatelessValidation::manual_PreCallValidateEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char *pLayerName, uint32_t *pPropertyCount,
VkExtensionProperties *pProperties) {
return validate_array("vkEnumerateDeviceExtensionProperties", "pPropertyCount", "pProperties", pPropertyCount, &pProperties,
true, false, false, kVUIDUndefined, "VUID-vkEnumerateDeviceExtensionProperties-pProperties-parameter");
}
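// Post-call hooks below record render pass attachment usage for later vkCreateGraphicsPipelines checks,
// and drop that state when the render pass is destroyed.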
void StatelessValidation::PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
RecordRenderPass(*pRenderPass, pCreateInfo);
}
void StatelessValidation::PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
// Track the state necessary for checking vkCreateGraphicsPipeline (subpass usage of depth and color attachments)
RecordRenderPass(*pRenderPass, pCreateInfo);
}
void StatelessValidation::PostCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) {
// Track the state necessary for checking vkCreateGraphicsPipeline (subpass usage of depth and color attachments)
renderpasses_states.erase(renderPass);
}
| 1 | 9,742 | Typical to report the value found as well as the valid bounds. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -151,7 +151,7 @@ var clerkCmd = &cobra.Command{
},
}
-func waitForCommit(client libgoal.Client, txid string) error {
+func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound uint64) error {
// Get current round information
stat, err := client.Status()
if err != nil { | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
"encoding/base64"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
"github.com/spf13/cobra"
)
var (
toAddress string
account string
amount uint64
txFilename string
rejectsFilename string
closeToAddress string
noProgramOutput bool
signProgram bool
programSource string
argB64Strings []string
disassemble bool
verbose bool
progByteFile string
msigParams string
logicSigFile string
timeStamp int64
protoVersion string
rekeyToAddress string
signerAddress string
rawOutput bool
)
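// init wires the clerk subcommands into the command tree and registers their command-line flags.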
func init() {
clerkCmd.AddCommand(sendCmd)
clerkCmd.AddCommand(rawsendCmd)
clerkCmd.AddCommand(inspectCmd)
clerkCmd.AddCommand(signCmd)
clerkCmd.AddCommand(groupCmd)
clerkCmd.AddCommand(splitCmd)
clerkCmd.AddCommand(compileCmd)
clerkCmd.AddCommand(dryrunCmd)
clerkCmd.AddCommand(dryrunRemoteCmd)
// Wallet to be used for the clerk operation
clerkCmd.PersistentFlags().StringVarP(&walletName, "wallet", "w", "", "Set the wallet to be used for the selected operation")
// send flags
sendCmd.Flags().StringVarP(&account, "from", "f", "", "Account address to send the money from (If not specified, uses default account)")
	sendCmd.Flags().StringVarP(&toAddress, "to", "t", "", "Address to send money to (required)")
sendCmd.Flags().Uint64VarP(&amount, "amount", "a", 0, "The amount to be transferred (required), in microAlgos")
sendCmd.Flags().StringVarP(&closeToAddress, "close-to", "c", "", "Close account and send remainder to this address")
sendCmd.Flags().StringVar(&rekeyToAddress, "rekey-to", "", "Rekey account to the given spending key/address. (Future transactions from this account will need to be signed with the new key.)")
sendCmd.Flags().StringVarP(&programSource, "from-program", "F", "", "Program source to use as account logic")
sendCmd.Flags().StringVarP(&progByteFile, "from-program-bytes", "P", "", "Program binary to use as account logic")
sendCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "base64 encoded args to pass to transaction logic")
sendCmd.Flags().StringVarP(&logicSigFile, "logic-sig", "L", "", "LogicSig to apply to transaction")
sendCmd.Flags().StringVar(&msigParams, "msig-params", "", "Multisig preimage parameters - [threshold] [Address 1] [Address 2] ...\nUsed to add the necessary fields in case the account was rekeyed to a multisig account")
sendCmd.MarkFlagRequired("to")
sendCmd.MarkFlagRequired("amount")
// Add common transaction flags
addTxnFlags(sendCmd)
// rawsend flags
rawsendCmd.Flags().StringVarP(&txFilename, "filename", "f", "", "Filename of file containing raw transactions")
rawsendCmd.Flags().StringVarP(&rejectsFilename, "rejects", "r", "", "Filename for writing rejects to (default is txFilename.rej)")
rawsendCmd.Flags().BoolVarP(&noWaitAfterSend, "no-wait", "N", false, "Don't wait for transactions to commit")
rawsendCmd.MarkFlagRequired("filename")
signCmd.Flags().StringVarP(&txFilename, "infile", "i", "", "Partially-signed transaction file to add signature to")
signCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename for writing the signed transaction")
signCmd.Flags().StringVarP(&signerAddress, "signer", "S", "", "Address of key to sign with, if different from transaction \"from\" address due to rekeying")
signCmd.Flags().StringVarP(&programSource, "program", "p", "", "Program source to use as account logic")
signCmd.Flags().StringVarP(&logicSigFile, "logic-sig", "L", "", "LogicSig to apply to transaction")
signCmd.Flags().StringSliceVar(&argB64Strings, "argb64", nil, "base64 encoded args to pass to transaction logic")
signCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "consensus protocol version id string")
signCmd.MarkFlagRequired("infile")
signCmd.MarkFlagRequired("outfile")
groupCmd.Flags().StringVarP(&txFilename, "infile", "i", "", "File storing transactions to be grouped")
groupCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename for writing the grouped transactions")
groupCmd.MarkFlagRequired("infile")
groupCmd.MarkFlagRequired("outfile")
splitCmd.Flags().StringVarP(&txFilename, "infile", "i", "", "File storing transactions to be split")
splitCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Base filename for writing the individual transactions; each transaction will be written to filename-N.ext")
splitCmd.MarkFlagRequired("infile")
splitCmd.MarkFlagRequired("outfile")
compileCmd.Flags().BoolVarP(&disassemble, "disassemble", "D", false, "disassemble a compiled program")
compileCmd.Flags().BoolVarP(&noProgramOutput, "no-out", "n", false, "don't write contract program binary")
compileCmd.Flags().BoolVarP(&signProgram, "sign", "s", false, "sign program, output is a binary signed LogicSig record")
compileCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename to write program bytes or signed LogicSig to")
compileCmd.Flags().StringVarP(&account, "account", "a", "", "Account address to sign the program (If not specified, uses default account)")
dryrunCmd.Flags().StringVarP(&txFilename, "txfile", "t", "", "transaction or transaction-group to test")
dryrunCmd.Flags().StringVarP(&protoVersion, "proto", "P", "", "consensus protocol version id string")
dryrunCmd.Flags().BoolVar(&dumpForDryrun, "dryrun-dump", false, "Dump in dryrun format acceptable by dryrun REST api instead of running")
dryrunCmd.Flags().Var(&dumpForDryrunFormat, "dryrun-dump-format", "Dryrun dump format: "+dumpForDryrunFormat.AllowedString())
dryrunCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename for writing dryrun state object")
dryrunCmd.MarkFlagRequired("txfile")
dryrunRemoteCmd.Flags().StringVarP(&txFilename, "dryrun-state", "D", "", "dryrun request object to run")
dryrunRemoteCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "print more info")
dryrunRemoteCmd.Flags().BoolVarP(&rawOutput, "raw", "r", false, "output raw response from algod")
dryrunRemoteCmd.MarkFlagRequired("dryrun-state")
}
var clerkCmd = &cobra.Command{
Use: "clerk",
Short: "Provides the tools to control transactions ",
Long: `Collection of commands to support the management of transaction information.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
//If no arguments passed, we should fallback to help
cmd.HelpFunc()(cmd, args)
},
}
func waitForCommit(client libgoal.Client, txid string) error {
// Get current round information
stat, err := client.Status()
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
for {
// Check if we know about the transaction yet
txn, err := client.PendingTransactionInformation(txid)
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
if txn.ConfirmedRound > 0 {
reportInfof(infoTxCommitted, txid, txn.ConfirmedRound)
break
}
if txn.PoolError != "" {
return fmt.Errorf(txPoolError, txid, txn.PoolError)
}
reportInfof(infoTxPending, txid, stat.LastRound)
stat, err = client.WaitForRound(stat.LastRound + 1)
if err != nil {
return fmt.Errorf(errorRequestFail, err)
}
}
return nil
}
func createSignedTransaction(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction) (stxn transactions.SignedTxn, err error) {
if signTx {
// Sign the transaction
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
stxn, err = client.SignTransactionWithWallet(wh, pw, tx)
if err != nil {
return
}
} else {
// Wrap in a transactions.SignedTxn with an empty sig.
// This way protocol.Encode will encode the transaction type
stxn, err = transactions.AssembleSignedTxn(tx, crypto.Signature{}, crypto.MultisigSig{})
if err != nil {
return
}
stxn = populateBlankMultisig(client, dataDir, walletName, stxn)
}
return
}
func writeTxnToFile(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction, filename string) error {
stxn, err := createSignedTransaction(client, signTx, dataDir, walletName, tx)
if err != nil {
return err
}
// Write the SignedTxn to the output file
return writeFile(filename, protocol.Encode(&stxn), 0600)
}
func getB64Args(args []string) [][]byte {
if len(args) == 0 {
return nil
}
programArgs := make([][]byte, len(args))
for i, argstr := range args {
if argstr == "" {
programArgs[i] = []byte{}
continue
}
var err error
programArgs[i], err = base64.StdEncoding.DecodeString(argstr)
if err != nil {
reportErrorf("arg[%d] decode error: %s", i, err)
}
}
return programArgs
}
func getProgramArgs() [][]byte {
return getB64Args(argB64Strings)
}
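// exampleProgramArgs is an illustrative sketch, not part of the original file.
// It shows how the values consumed by getB64Args above are expected to be
// produced: each --argb64 value is the standard base64 encoding of the raw
// bytes handed to the TEAL program. The sample arguments are made up.
func exampleProgramArgs() {
    raw := [][]byte{[]byte("first-arg"), {0x01, 0x02, 0x03}}
    encoded := make([]string, len(raw))
    for i, b := range raw {
        encoded[i] = base64.StdEncoding.EncodeToString(b)
    }
    // getB64Args decodes the flag values back into the original byte slices.
    decoded := getB64Args(encoded)
    fmt.Printf("round-tripped %d program args\n", len(decoded))
}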
func parseNoteField(cmd *cobra.Command) []byte {
if cmd.Flags().Changed("noteb64") {
noteBytes, err := base64.StdEncoding.DecodeString(noteBase64)
if err != nil {
reportErrorf(malformedNote, noteBase64, err)
}
return noteBytes
}
if cmd.Flags().Changed("note") {
return []byte(noteText)
}
// Make sure that back-to-back, similar transactions will have a different txid
noteBytes := make([]byte, 8)
crypto.RandBytes(noteBytes[:])
return noteBytes
}
func parseLease(cmd *cobra.Command) (leaseBytes [32]byte) {
// Parse lease field
if cmd.Flags().Changed("lease") {
leaseBytesRaw, err := base64.StdEncoding.DecodeString(lease)
if err != nil {
reportErrorf(malformedLease, lease, err)
}
if len(leaseBytesRaw) != 32 {
reportErrorf(malformedLease, lease, fmt.Errorf("lease length %d != 32", len(leaseBytesRaw)))
}
copy(leaseBytes[:], leaseBytesRaw)
}
return
}
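// exampleLease is an illustrative sketch, not part of the original file. It
// shows the format parseLease above expects: the --lease flag value must be
// the base64 encoding of exactly 32 bytes. The sample lease bytes are made up.
func exampleLease() {
    var lease32 [32]byte
    copy(lease32[:], []byte("some-unique-32-byte-lease-value!"))
    fmt.Printf("--lease %s\n", base64.StdEncoding.EncodeToString(lease32[:]))
}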
var sendCmd = &cobra.Command{
Use: "send",
Short: "Send money to an address",
Long: `Send money from one account to another. Note: by default, the money will be withdrawn from the default account. Creates a transaction sending amount tokens from fromAddr to toAddr. If the optional --fee is not provided, the transaction will use the recommended amount. If the optional --firstvalid and --lastvalid are provided, the transaction will only be valid from round firstValid to round lastValid. If broadcast of the transaction is successful, the transaction ID will be returned.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
// -s is invalid without -o
if outFilename == "" && sign {
reportErrorln(soFlagError)
}
// --msig-params is invalid without -o
if outFilename == "" && msigParams != "" {
reportErrorln(noOutputFileError)
}
checkTxValidityPeriodCmdFlags(cmd)
dataDir := ensureSingleDataDir()
accountList := makeAccountsList(dataDir)
var fromAddressResolved string
var program []byte = nil
var programArgs [][]byte = nil
var lsig transactions.LogicSig
var err error
if progByteFile != "" {
if programSource != "" || logicSigFile != "" {
reportErrorln("should at most one of --from-program/-F or --from-program-bytes/-P --logic-sig/-L")
}
program, err = readFile(progByteFile)
if err != nil {
reportErrorf("%s: %s", progByteFile, err)
}
} else if programSource != "" {
if logicSigFile != "" {
reportErrorln("should at most one of --from-program/-F or --from-program-bytes/-P --logic-sig/-L")
}
program = assembleFile(programSource)
} else if logicSigFile != "" {
lsigFromArgs(&lsig)
}
if program != nil {
ph := logic.HashProgram(program)
pha := basics.Address(ph)
fromAddressResolved = pha.String()
programArgs = getProgramArgs()
} else {
// Check if from was specified, else use default
if account == "" {
account = accountList.getDefaultAccount()
}
// Resolving friendly names
fromAddressResolved = accountList.getAddressByName(account)
}
toAddressResolved := accountList.getAddressByName(toAddress)
// Parse notes and lease fields
noteBytes := parseNoteField(cmd)
leaseBytes := parseLease(cmd)
// If closing an account, resolve that address as well
var closeToAddressResolved string
if closeToAddress != "" {
closeToAddressResolved = accountList.getAddressByName(closeToAddress)
}
// If rekeying, parse that address
// (we don't use accountList.getAddressByName because this address likely doesn't correspond to an account)
var rekeyTo basics.Address
if rekeyToAddress != "" {
var err error
rekeyTo, err = basics.UnmarshalChecksumAddress(rekeyToAddress)
if err != nil {
reportErrorf(err.Error())
}
}
client := ensureFullClient(dataDir)
firstValid, lastValid, err = client.ComputeValidityRounds(firstValid, lastValid, numValidRounds)
if err != nil {
reportErrorf(err.Error())
}
payment, err := client.ConstructPayment(
fromAddressResolved, toAddressResolved, fee, amount, noteBytes, closeToAddressResolved,
leaseBytes, basics.Round(firstValid), basics.Round(lastValid),
)
if err != nil {
reportErrorf(errorConstructingTX, err)
}
if !rekeyTo.IsZero() {
payment.RekeyTo = rekeyTo
}
var stx transactions.SignedTxn
if lsig.Logic != nil {
params, err := client.SuggestedParams()
if err != nil {
reportErrorf(errorNodeStatus, err)
}
proto := protocol.ConsensusVersion(params.ConsensusVersion)
uncheckedTxn := transactions.SignedTxn{
Txn: payment,
Lsig: lsig,
}
blockHeader := bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{
CurrentProtocol: proto,
},
}
groupCtx, err := verify.PrepareGroupContext([]transactions.SignedTxn{uncheckedTxn}, blockHeader)
if err == nil {
err = verify.LogicSigSanityCheck(&uncheckedTxn, 0, groupCtx)
}
if err != nil {
reportErrorf("%s: txn[0] error %s", outFilename, err)
}
stx = uncheckedTxn
} else if program != nil {
stx = transactions.SignedTxn{
Txn: payment,
Lsig: transactions.LogicSig{
Logic: program,
Args: programArgs,
},
}
} else {
signTx := sign || (outFilename == "")
stx, err = createSignedTransaction(client, signTx, dataDir, walletName, payment)
if err != nil {
reportErrorf(errorSigningTX, err)
}
}
// Handle the case where the user wants to send to an account that was rekeyed to a multisig account
if msigParams != "" {
// Decode params
params := strings.Split(msigParams, " ")
if len(params) < 3 {
reportErrorf(msigParseError, "Not enough arguments to create the multisig address.\nPlease make sure to specify the threshold and at least 2 addresses\n")
}
threshold, err := strconv.ParseUint(params[0], 10, 8)
if err != nil || threshold < 1 || threshold > 255 {
reportErrorf(msigParseError, "Failed to parse the threshold. Make sure it's a number between 1 and 255")
}
// Convert the addresses into public keys
pks := make([]crypto.PublicKey, len(params[1:]))
for i, addrStr := range params[1:] {
addr, err := basics.UnmarshalChecksumAddress(addrStr)
if err != nil {
reportErrorf(failDecodeAddressError, err)
}
pks[i] = crypto.PublicKey(addr)
}
addr, err := crypto.MultisigAddrGen(1, uint8(threshold), pks)
if err != nil {
reportErrorf(msigParseError, err)
}
// Generate the multisig and assign to the txn
stx.Msig = crypto.MultisigPreimageFromPKs(1, uint8(threshold), pks)
// Append the signer since it's a rekey txn
if basics.Address(addr) == stx.Txn.Sender {
reportWarnln(rekeySenderTargetSameError)
}
stx.AuthAddr = basics.Address(addr)
}
if outFilename == "" {
// Broadcast the tx
txid, err := client.BroadcastTransaction(stx)
if err != nil {
reportErrorf(errorBroadcastingTX, err)
}
// update information from Transaction
fee = stx.Txn.Fee.Raw
// Report tx details to user
reportInfof(infoTxIssued, amount, fromAddressResolved, toAddressResolved, txid, fee)
if !noWaitAfterSend {
err = waitForCommit(client, txid)
if err != nil {
reportErrorf(err.Error())
}
}
} else {
if dumpForDryrun {
// Write dryrun data to file
proto, _ := getProto(protoVersion)
data, err := libgoal.MakeDryrunStateBytes(client, stx, []transactions.SignedTxn{}, string(proto), dumpForDryrunFormat.String())
if err != nil {
reportErrorf(err.Error())
}
writeFile(outFilename, data, 0600)
} else {
err = writeFile(outFilename, protocol.Encode(&stx), 0600)
if err != nil {
reportErrorf(err.Error())
}
}
}
},
}
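// Illustrative invocations of the send command; the addresses and file name
// are placeholders, and the -o and -s flags are assumed to be registered by
// addTxnFlags, which is defined elsewhere:
//
//	goal clerk send -f FROMADDR -t TOADDR -a 1000000
//	goal clerk send -f FROMADDR -t TOADDR -a 1000000 -o unsigned.txn
//
// Without -o the payment is signed with the wallet and broadcast; with -o the
// transaction is written to the file (unsigned unless -s is given) so it can
// be signed later with `goal clerk sign`.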
var rawsendCmd = &cobra.Command{
Use: "rawsend",
Short: "Send raw transactions",
Long: `Send raw transactions. The transactions must be stored in a file, encoded using msgpack as transactions.SignedTxn. Multiple transactions can be concatenated together in a file.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
if rejectsFilename == "" {
rejectsFilename = txFilename + ".rej"
}
data, err := readFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dec := protocol.NewDecoderBytes(data)
client := ensureAlgodClient(ensureSingleDataDir())
txnIDs := make(map[transactions.Txid]transactions.SignedTxn)
var txns []transactions.SignedTxn
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
_, present := txnIDs[txn.ID()]
if present {
reportErrorf(txDupError, txn.ID().String(), txFilename)
}
txnIDs[txn.ID()] = txn
txns = append(txns, txn)
}
txgroups := bookkeeping.SignedTxnsToGroups(txns)
txnErrors := make(map[transactions.Txid]string)
pendingTxns := make(map[transactions.Txid]string)
for _, txgroup := range txgroups {
// Broadcast the transaction
err := client.BroadcastTransactionGroup(txgroup)
if err != nil {
for _, txn := range txgroup {
txnErrors[txn.ID()] = err.Error()
}
reportWarnf(errorBroadcastingTX, err)
continue
}
for _, txn := range txgroup {
txidStr := txn.ID().String()
reportInfof(infoRawTxIssued, txidStr)
pendingTxns[txn.ID()] = txidStr
}
}
if noWaitAfterSend {
return
}
// Get current round information
stat, err := client.Status()
if err != nil {
reportErrorf(errorRequestFail, err)
}
for txid, txidStr := range pendingTxns {
for {
// Check if we know about the transaction yet
txn, err := client.PendingTransactionInformation(txidStr)
if err != nil {
txnErrors[txid] = err.Error()
reportWarnf(errorRequestFail, err)
continue
}
if txn.ConfirmedRound > 0 {
reportInfof(infoTxCommitted, txidStr, txn.ConfirmedRound)
break
}
if txn.PoolError != "" {
txnErrors[txid] = txn.PoolError
reportWarnf(txPoolError, txidStr, txn.PoolError)
continue
}
reportInfof(infoTxPending, txidStr, stat.LastRound)
stat, err = client.WaitForRound(stat.LastRound + 1)
if err != nil {
reportErrorf(errorRequestFail, err)
}
}
}
if len(txnErrors) > 0 {
fmt.Printf("Encountered errors in sending %d transactions:\n", len(txnErrors))
var rejectsData []byte
// Loop over transactions in the same order as the original file,
// to preserve transaction groups.
for _, txn := range txns {
txid := txn.ID()
errmsg, ok := txnErrors[txid]
if !ok {
continue
}
fmt.Printf(" %s: %s\n", txid, errmsg)
rejectsData = append(rejectsData, protocol.Encode(&txn)...)
}
f, err := os.OpenFile(rejectsFilename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
reportErrorf(fileWriteError, rejectsFilename, err.Error())
}
_, err = f.Write(rejectsData)
if err != nil {
reportErrorf(fileWriteError, rejectsFilename, err.Error())
}
f.Close()
fmt.Printf("Rejected transactions written to %s\n", rejectsFilename)
os.Exit(1)
}
},
}
var inspectCmd = &cobra.Command{
Use: "inspect [input file 1] [input file 2]...",
Short: "Print a transaction file",
Long: `Loads a transaction file, attempts to decode the transaction, and displays the decoded information.`,
Run: func(cmd *cobra.Command, args []string) {
for _, txFilename := range args {
data, err := readFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dec := protocol.NewDecoderBytes(data)
count := 0
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
sti, err := inspectTxn(txn)
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
fmt.Printf("%s[%d]\n%s\n\n", txFilename, count, string(protocol.EncodeJSON(sti)))
count++
}
}
},
}
func lsigFromArgs(lsig *transactions.LogicSig) {
lsigBytes, err := readFile(logicSigFile)
if err != nil {
reportErrorf("%s: read failed, %s", logicSigFile, err)
}
err = protocol.Decode(lsigBytes, lsig)
if err != nil {
reportErrorf("%s: decode failed, %s", logicSigFile, err)
}
lsig.Args = getProgramArgs()
}
func getProto(versArg string) (protocol.ConsensusVersion, config.ConsensusParams) {
cvers := protocol.ConsensusCurrentVersion
if versArg != "" {
cvers = protocol.ConsensusVersion(versArg)
} else {
dataDir := maybeSingleDataDir()
if dataDir != "" {
client := ensureAlgodClient(dataDir)
params, err := client.SuggestedParams()
if err == nil {
cvers = protocol.ConsensusVersion(params.ConsensusVersion)
}
// else warning message?
}
// else warning message?
}
proto, ok := config.Consensus[cvers]
if !ok {
fmt.Fprintf(os.Stderr, "Invalid consensus version. Possible versions:\n")
for xvers := range config.Consensus {
fmt.Fprintf(os.Stderr, "\t%s\n", xvers)
}
os.Exit(1)
}
return cvers, proto
}
var signCmd = &cobra.Command{
Use: "sign -i [input file] -o [output file]",
Short: "Sign a transaction file",
Long: `Sign the passed transaction file, which may contain one or more transactions. If the infile and the outfile are the same, this overwrites the file with the new, signed data.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
data, err := readFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
var lsig transactions.LogicSig
var client libgoal.Client
var wh []byte
var pw []byte
if programSource != "" {
if logicSigFile != "" {
reportErrorln("goal clerk sign should have at most one of --program/-p or --logic-sig/-L")
}
lsig.Logic = assembleFile(programSource)
lsig.Args = getProgramArgs()
} else if logicSigFile != "" {
lsigFromArgs(&lsig)
}
if lsig.Logic == nil {
// sign the usual way
dataDir := ensureSingleDataDir()
client = ensureKmdClient(dataDir)
wh, pw = ensureWalletHandleMaybePassword(dataDir, walletName, true)
}
var outData []byte
dec := protocol.NewDecoderBytes(data)
// read the entire file and prepare in-memory copy of each signed transaction, with grouping.
txnGroups := make(map[crypto.Digest][]*transactions.SignedTxn)
var groupsOrder []crypto.Digest
txnIndex := make(map[*transactions.SignedTxn]int)
count := 0
for {
uncheckedTxn := new(transactions.SignedTxn)
err = dec.Decode(uncheckedTxn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
group := uncheckedTxn.Txn.Group
if group.IsZero() {
// create a dummy group.
randGroupBytes := crypto.Digest{}
crypto.RandBytes(randGroupBytes[:])
group = randGroupBytes
}
if _, hasGroup := txnGroups[group]; !hasGroup {
// add a new group as needed.
groupsOrder = append(groupsOrder, group)
}
txnGroups[group] = append(txnGroups[group], uncheckedTxn)
txnIndex[uncheckedTxn] = count
count++
}
consensusVersion, _ := getProto(protoVersion)
contextHdr := bookkeeping.BlockHeader{
UpgradeState: bookkeeping.UpgradeState{
CurrentProtocol: consensusVersion,
},
}
for _, group := range groupsOrder {
txnGroup := []transactions.SignedTxn{}
for _, txn := range txnGroups[group] {
txnGroup = append(txnGroup, *txn)
}
var groupCtx *verify.GroupContext
if lsig.Logic != nil {
groupCtx, err = verify.PrepareGroupContext(txnGroup, contextHdr)
if err != nil {
// this error has to be unsupported protocol
reportErrorf("%s: %v", txFilename, err)
}
}
for i, txn := range txnGroup {
var signedTxn transactions.SignedTxn
if lsig.Logic != nil {
txn.Lsig = lsig
err = verify.LogicSigSanityCheck(&txn, i, groupCtx)
if err != nil {
reportErrorf("%s: txn[%d] error %s", txFilename, txnIndex[txnGroups[group][i]], err)
}
signedTxn = txn
} else {
// sign the usual way
signedTxn, err = client.SignTransactionWithWalletAndSigner(wh, pw, signerAddress, txn.Txn)
if err != nil {
reportErrorf(errorSigningTX, err)
}
}
outData = append(outData, protocol.Encode(&signedTxn)...)
}
}
err = writeFile(outFilename, outData, 0600)
if err != nil {
reportErrorf(fileWriteError, outFilename, err)
}
},
}
var groupCmd = &cobra.Command{
Use: "group",
Short: "Group transactions together",
Long: `Form a transaction group. The input file must contain one or more unsigned transactions that will form a group. The output file will contain the same transactions, in order, with a group flag added to each transaction, which requires that the transactions be committed together. The group command retains the logic signature, if present, since the TEAL program may verify the group using a logic signature argument.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
data, err := readFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dec := protocol.NewDecoderBytes(data)
var stxns []transactions.SignedTxn
var group transactions.TxGroup
transactionIdx := 0
for {
var stxn transactions.SignedTxn
// we decode the file into a SignedTxn since we want to verify the absence of the signature as well as preserve the AuthAddr.
err = dec.Decode(&stxn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
if !stxn.Txn.Group.IsZero() {
reportErrorf("Transaction #%d with ID of %s is already part of a group.", transactionIdx, stxn.ID().String())
}
if (!stxn.Sig.Blank()) || (!stxn.Msig.Blank()) {
reportErrorf("Transaction #%d with ID of %s is already signed", transactionIdx, stxn.ID().String())
}
stxns = append(stxns, stxn)
group.TxGroupHashes = append(group.TxGroupHashes, crypto.HashObj(stxn.Txn))
transactionIdx++
}
var outData []byte
for _, stxn := range stxns {
stxn.Txn.Group = crypto.HashObj(group)
outData = append(outData, protocol.Encode(&stxn)...)
}
err = writeFile(outFilename, outData, 0600)
if err != nil {
reportErrorf(fileWriteError, outFilename, err)
}
},
}
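// exampleGroup is an illustrative sketch, not part of the original file. It
// restates the grouping step above: the group ID is the hash of a TxGroup
// listing the hash of every member transaction, and each member then records
// that ID in its Group field.
func exampleGroup(stxns []transactions.SignedTxn) []transactions.SignedTxn {
    var group transactions.TxGroup
    for _, stxn := range stxns {
        group.TxGroupHashes = append(group.TxGroupHashes, crypto.HashObj(stxn.Txn))
    }
    gid := crypto.HashObj(group)
    out := make([]transactions.SignedTxn, 0, len(stxns))
    for _, stxn := range stxns {
        stxn.Txn.Group = gid
        out = append(out, stxn)
    }
    return out
}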
var splitCmd = &cobra.Command{
Use: "split",
Short: "Split a file containing many transactions into one transaction per file",
Long: `Split a file containing many transactions. The input file must contain one or more transactions. These transactions will be written to individual files.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
data, err := readFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dec := protocol.NewDecoderBytes(data)
var txns []transactions.SignedTxn
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
txns = append(txns, txn)
}
outExt := filepath.Ext(outFilename)
outBase := outFilename[:len(outFilename)-len(outExt)]
for idx, txn := range txns {
fn := fmt.Sprintf("%s-%d%s", outBase, idx, outExt)
err = writeFile(fn, protocol.Encode(&txn), 0600)
if err != nil {
reportErrorf(fileWriteError, outFilename, err)
}
fmt.Printf("Wrote transaction %d to %s\n", idx, fn)
}
},
}
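// exampleSplitOutputName is an illustrative sketch, not part of the original
// file. It shows how splitCmd above derives per-transaction file names from
// --outfile: for "grouped.txn" and index 0 it yields "grouped-0.txn".
func exampleSplitOutputName(outFilename string, idx int) string {
    ext := filepath.Ext(outFilename)
    base := outFilename[:len(outFilename)-len(ext)]
    return fmt.Sprintf("%s-%d%s", base, idx, ext)
}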
func mustReadFile(fname string) []byte {
contents, err := readFile(fname)
if err != nil {
reportErrorf("%s: %s", fname, err)
}
return contents
}
func assembleFile(fname string) (program []byte) {
text, err := readFile(fname)
if err != nil {
reportErrorf("%s: %s", fname, err)
}
ops, err := logic.AssembleString(string(text))
if err != nil {
ops.ReportProblems(fname)
reportErrorf("%s: %s", fname, err)
}
_, params := getProto(protoVersion)
if ops.HasStatefulOps {
if len(ops.Program) > params.MaxAppProgramLen {
reportErrorf(tealAppSize, fname, len(ops.Program), params.MaxAppProgramLen)
}
} else {
if uint64(len(ops.Program)) > params.LogicSigMaxSize {
reportErrorf(tealLogicSigSize, fname, len(ops.Program), params.LogicSigMaxSize)
}
}
return ops.Program
}
func disassembleFile(fname, outname string) {
program, err := readFile(fname)
if err != nil {
reportErrorf("%s: %s", fname, err)
}
// try parsing it as a msgpack LogicSig
var lsig transactions.LogicSig
err = protocol.Decode(program, &lsig)
extra := ""
if err == nil {
// success, extract program to disassemble
program = lsig.Logic
if lsig.Sig != (crypto.Signature{}) || (!lsig.Msig.Blank()) || len(lsig.Args) > 0 {
nologic := lsig
nologic.Logic = nil
ilsig := lsigToInspect(nologic)
extra = "LogicSig: " + string(protocol.EncodeJSON(ilsig))
}
}
text, err := logic.Disassemble(program)
if err != nil {
reportErrorf("%s: %s", fname, err)
}
if extra != "" {
text = text + extra + "\n"
}
if outname == "" {
os.Stdout.Write([]byte(text))
} else {
err = writeFile(outname, []byte(text), 0666)
if err != nil {
reportErrorf("%s: %s", outname, err)
}
}
}
var compileCmd = &cobra.Command{
Use: "compile [input file 1] [input file 2]...",
Short: "Compile a contract program",
Long: "Reads a TEAL contract program and compiles it to binary output and contract address.",
Run: func(cmd *cobra.Command, args []string) {
for _, fname := range args {
if disassemble {
disassembleFile(fname, outFilename)
continue
}
program := assembleFile(fname)
outblob := program
outname := outFilename
if outname == "" {
if fname == stdinFileNameValue {
outname = stdoutFilenameValue
} else {
outname = fmt.Sprintf("%s.tok", fname)
}
}
if signProgram {
dataDir := ensureSingleDataDir()
accountList := makeAccountsList(dataDir)
client := ensureKmdClient(dataDir)
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
// Check if from was specified, else use default
if account == "" {
account = accountList.getDefaultAccount()
if account == "" {
reportErrorln("no default account set. set one with 'goal account -f' or specify an account with '-a'.")
}
fmt.Printf("will use default account: %v\n", account)
}
signingAddressResolved := accountList.getAddressByName(account)
signature, err := client.SignProgramWithWallet(wh, pw, signingAddressResolved, program)
if err != nil {
reportErrorf(errorSigningTX, err)
}
ls := transactions.LogicSig{Logic: program, Sig: signature}
outblob = protocol.Encode(&ls)
}
if !noProgramOutput {
err := writeFile(outname, outblob, 0666)
if err != nil {
reportErrorf("%s: %s", outname, err)
}
}
if !signProgram && outname != stdoutFilenameValue {
pd := logic.HashProgram(program)
addr := basics.Address(pd)
fmt.Printf("%s: %s\n", fname, addr.String())
}
}
},
}
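// Illustrative invocations of the compile command; the file and account names
// are placeholders, not from the original file:
//
//	goal clerk compile program.teal                  // writes program.teal.tok and prints the program's hash address
//	goal clerk compile -D program.teal.tok           // disassembles a compiled program to stdout
//	goal clerk compile -s -a SIGNERADDR -o prog.lsig program.teal
//
// The last form signs the program with the given account and writes a binary
// LogicSig record instead of the raw program bytes.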
var dryrunCmd = &cobra.Command{
Use: "dryrun",
Short: "Test a program offline",
Long: "Test a TEAL program offline under various conditions and verbosity.",
Run: func(cmd *cobra.Command, args []string) {
data, err := readFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dec := protocol.NewDecoderBytes(data)
stxns := make([]transactions.SignedTxn, 0, 10)
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
stxns = append(stxns, txn)
}
txgroup := make([]transactions.SignedTxn, len(stxns))
for i, st := range stxns {
txgroup[i] = st
}
proto, params := getProto(protoVersion)
if dumpForDryrun {
// Write dryrun data to file
dataDir := ensureSingleDataDir()
client := ensureFullClient(dataDir)
data, err := libgoal.MakeDryrunStateBytes(client, nil, txgroup, string(proto), dumpForDryrunFormat.String())
if err != nil {
reportErrorf(err.Error())
}
writeFile(outFilename, data, 0600)
return
}
if timeStamp <= 0 {
timeStamp = time.Now().Unix()
}
for i, txn := range txgroup {
if txn.Lsig.Blank() {
continue
}
if uint64(txn.Lsig.Len()) > params.LogicSigMaxSize {
reportErrorf("program size too large: %d > %d", len(txn.Lsig.Logic), params.LogicSigMaxSize)
}
ep := logic.EvalParams{Txn: &txn, Proto: ¶ms, GroupIndex: i, TxnGroup: txgroup}
cost, err := logic.Check(txn.Lsig.Logic, ep)
if err != nil {
reportErrorf("program failed Check: %s", err)
}
if uint64(cost) > params.LogicSigMaxCost {
reportErrorf("program cost too large: %d > %d", cost, params.LogicSigMaxCost)
}
sb := strings.Builder{}
ep = logic.EvalParams{
Txn: &txn,
GroupIndex: i,
Proto: ¶ms,
Trace: &sb,
TxnGroup: txgroup,
}
pass, err := logic.Eval(txn.Lsig.Logic, ep)
// TODO: optionally include `inspect` output here?
fmt.Fprintf(os.Stdout, "tx[%d] cost=%d trace:\n%s\n", i, cost, sb.String())
if pass {
fmt.Fprintf(os.Stdout, " - pass -\n")
} else {
fmt.Fprintf(os.Stdout, "REJECT\n")
}
if err != nil {
fmt.Fprintf(os.Stdout, "ERROR: %s\n", err.Error())
}
}
},
}
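// exampleEval is an illustrative sketch, not part of the original file. It
// condenses the offline evaluation flow used by dryrunCmd above: Check
// enforces the size and cost limits, then Eval runs the logic program against
// the transaction group, optionally writing a trace.
func exampleEval(txn transactions.SignedTxn, groupIndex int, txgroup []transactions.SignedTxn, params config.ConsensusParams) (bool, error) {
    sb := strings.Builder{}
    ep := logic.EvalParams{Txn: &txn, Proto: &params, GroupIndex: groupIndex, TxnGroup: txgroup, Trace: &sb}
    if _, err := logic.Check(txn.Lsig.Logic, ep); err != nil {
        return false, err
    }
    return logic.Eval(txn.Lsig.Logic, ep)
}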
var dryrunRemoteCmd = &cobra.Command{
Use: "dryrun-remote",
Short: "Test a program with algod's dryrun REST endpoint",
Long: "Test a TEAL program with algod's dryrun REST endpoint under various conditions and verbosity.",
Run: func(cmd *cobra.Command, args []string) {
data, err := readFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dataDir := ensureSingleDataDir()
client := ensureFullClient(dataDir)
resp, err := client.Dryrun(data)
if err != nil {
reportErrorf("dryrun-remote: %s", err.Error())
}
if rawOutput {
fmt.Fprint(os.Stdout, string(protocol.EncodeJSON(&resp)))
return
}
stackToString := func(stack []generatedV2.TealValue) string {
result := make([]string, len(stack))
for i, sv := range stack {
if sv.Type == uint64(basics.TealBytesType) {
result[i] = heuristicFormatStr(sv.Bytes)
} else {
result[i] = fmt.Sprintf("%d", sv.Uint)
}
}
return strings.Join(result, " ")
}
if len(resp.Txns) > 0 {
for i, txnResult := range resp.Txns {
var msgs []string
var trace []generatedV2.DryrunState
if txnResult.AppCallMessages != nil && len(*txnResult.AppCallMessages) > 0 {
msgs = *txnResult.AppCallMessages
if txnResult.AppCallTrace != nil {
trace = *txnResult.AppCallTrace
}
} else if txnResult.LogicSigMessages != nil && len(*txnResult.LogicSigMessages) > 0 {
msgs = *txnResult.LogicSigMessages
if txnResult.LogicSigTrace != nil {
trace = *txnResult.LogicSigTrace
}
}
fmt.Fprintf(os.Stdout, "tx[%d] messages:\n", i)
for _, msg := range msgs {
fmt.Fprintf(os.Stdout, "%s\n", msg)
}
if verbose && len(trace) > 0 {
fmt.Fprintf(os.Stdout, "tx[%d] trace:\n", i)
for _, item := range trace {
fmt.Fprintf(os.Stdout, "%4d (%04x): %s [%s]\n",
item.Line, item.Pc, txnResult.Disassembly[item.Line-1], stackToString(item.Stack))
}
}
}
}
},
}
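// Illustrative dryrun workflow; the file names are placeholders and the dump
// format depends on the --dryrun-dump-format default, so the extension is
// only a guess:
//
//	goal clerk dryrun -t sample.txn --dryrun-dump -o dryrun.dr
//	goal clerk dryrun-remote -D dryrun.dr -v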
| 1 | 42,380 | maybe we want 0 as a default value? | algorand-go-algorand | go |
@@ -193,7 +193,16 @@ func (api *Server) GetAccount(ctx context.Context, in *iotexapi.GetAccountReques
PendingNonce: pendingNonce,
NumActions: numActions,
}
- return &iotexapi.GetAccountResponse{AccountMeta: accountMeta}, nil
+ tipHeight := api.bc.TipHeight()
+ header, err := api.bc.BlockHeaderByHeight(tipHeight)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, err.Error())
+ }
+ hash := header.HashBlock()
+ return &iotexapi.GetAccountResponse{AccountMeta: accountMeta, BlockIdentifier: &iotextypes.BlockIdentifier{
+ Hash: hex.EncodeToString(hash[:]),
+ Height: tipHeight,
+ }}, nil
}
// GetActions returns actions | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"context"
"encoding/hex"
"math"
"math/big"
"net"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-election/committee"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/blockdao"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/version"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/systemlog"
)
var (
// ErrInternalServer indicates the internal server error
ErrInternalServer = errors.New("internal server error")
// ErrReceipt indicates the error of receipt
ErrReceipt = errors.New("invalid receipt")
// ErrAction indicates the error of action
ErrAction = errors.New("invalid action")
candidateNameLen = 12
)
// BroadcastOutbound sends a broadcast message to the whole network
type BroadcastOutbound func(ctx context.Context, chainID uint32, msg proto.Message) error
// Config represents the config to setup api
type Config struct {
broadcastHandler BroadcastOutbound
electionCommittee committee.Committee
}
// Option is the option to override the api config
type Option func(cfg *Config) error
// WithBroadcastOutbound is the option to broadcast msg outbound
func WithBroadcastOutbound(broadcastHandler BroadcastOutbound) Option {
return func(cfg *Config) error {
cfg.broadcastHandler = broadcastHandler
return nil
}
}
// WithNativeElection is the option to return native election data through API.
func WithNativeElection(committee committee.Committee) Option {
return func(cfg *Config) error {
cfg.electionCommittee = committee
return nil
}
}
// Server provides api for user to query blockchain data
type Server struct {
bc blockchain.Blockchain
sf factory.Factory
dao blockdao.BlockDAO
indexer blockindex.Indexer
systemLogIndexer *systemlog.Indexer
ap actpool.ActPool
gs *gasstation.GasStation
broadcastHandler BroadcastOutbound
cfg config.Config
registry *protocol.Registry
chainListener Listener
grpcServer *grpc.Server
hasActionIndex bool
electionCommittee committee.Committee
}
// NewServer creates a new server
func NewServer(
cfg config.Config,
chain blockchain.Blockchain,
sf factory.Factory,
dao blockdao.BlockDAO,
indexer blockindex.Indexer,
systemLogIndexer *systemlog.Indexer,
actPool actpool.ActPool,
registry *protocol.Registry,
opts ...Option,
) (*Server, error) {
apiCfg := Config{}
for _, opt := range opts {
if err := opt(&apiCfg); err != nil {
return nil, err
}
}
if cfg.API == (config.API{}) {
log.L().Warn("API server is not configured.")
cfg.API = config.Default.API
}
if cfg.API.RangeQueryLimit < uint64(cfg.API.TpsWindow) {
return nil, errors.New("range query upper limit cannot be less than tps window")
}
svr := &Server{
bc: chain,
sf: sf,
dao: dao,
indexer: indexer,
systemLogIndexer: systemLogIndexer,
ap: actPool,
broadcastHandler: apiCfg.broadcastHandler,
cfg: cfg,
registry: registry,
chainListener: NewChainListener(),
gs: gasstation.NewGasStation(chain, sf.SimulateExecution, dao, cfg.API),
electionCommittee: apiCfg.electionCommittee,
}
if _, ok := cfg.Plugins[config.GatewayPlugin]; ok {
svr.hasActionIndex = true
}
svr.grpcServer = grpc.NewServer(
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
)
iotexapi.RegisterAPIServiceServer(svr.grpcServer, svr)
grpc_prometheus.Register(svr.grpcServer)
reflection.Register(svr.grpcServer)
return svr, nil
}
// GetAccount returns the metadata of an account
func (api *Server) GetAccount(ctx context.Context, in *iotexapi.GetAccountRequest) (*iotexapi.GetAccountResponse, error) {
state, err := accountutil.AccountState(api.sf, in.Address)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
pendingNonce, err := api.ap.GetPendingNonce(in.Address)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if api.indexer == nil {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
addr, err := address.FromString(in.Address)
if err != nil {
return nil, err
}
numActions, err := api.indexer.GetActionCountByAddress(hash.BytesToHash160(addr.Bytes()))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
accountMeta := &iotextypes.AccountMeta{
Address: in.Address,
Balance: state.Balance.String(),
Nonce: state.Nonce,
PendingNonce: pendingNonce,
NumActions: numActions,
}
return &iotexapi.GetAccountResponse{AccountMeta: accountMeta}, nil
}
// GetActions returns actions
func (api *Server) GetActions(ctx context.Context, in *iotexapi.GetActionsRequest) (*iotexapi.GetActionsResponse, error) {
if (!api.hasActionIndex || api.indexer == nil) && (in.GetByHash() != nil || in.GetByAddr() != nil) {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
switch {
case in.GetByIndex() != nil:
request := in.GetByIndex()
return api.getActions(request.Start, request.Count)
case in.GetByHash() != nil:
request := in.GetByHash()
return api.getSingleAction(request.ActionHash, request.CheckPending)
case in.GetByAddr() != nil:
request := in.GetByAddr()
return api.getActionsByAddress(request.Address, request.Start, request.Count)
case in.GetUnconfirmedByAddr() != nil:
request := in.GetUnconfirmedByAddr()
return api.getUnconfirmedActionsByAddress(request.Address, request.Start, request.Count)
case in.GetByBlk() != nil:
request := in.GetByBlk()
return api.getActionsByBlock(request.BlkHash, request.Start, request.Count)
default:
return nil, status.Error(codes.NotFound, "invalid GetActionsRequest type")
}
}
// GetBlockMetas returns block metadata
func (api *Server) GetBlockMetas(ctx context.Context, in *iotexapi.GetBlockMetasRequest) (*iotexapi.GetBlockMetasResponse, error) {
switch {
case in.GetByIndex() != nil:
request := in.GetByIndex()
return api.getBlockMetas(request.Start, request.Count)
case in.GetByHash() != nil:
request := in.GetByHash()
return api.getBlockMeta(request.BlkHash)
default:
return nil, status.Error(codes.NotFound, "invalid GetBlockMetasRequest type")
}
}
// GetChainMeta returns blockchain metadata
func (api *Server) GetChainMeta(ctx context.Context, in *iotexapi.GetChainMetaRequest) (*iotexapi.GetChainMetaResponse, error) {
tipHeight := api.bc.TipHeight()
if tipHeight == 0 {
return &iotexapi.GetChainMetaResponse{
ChainMeta: &iotextypes.ChainMeta{
Epoch: &iotextypes.EpochData{},
},
}, nil
}
if api.indexer == nil {
// TODO: in case the indexer does not exist, consider returning a value like 0 instead of exiting
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
totalActions, err := api.indexer.GetTotalActions()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
blockLimit := int64(api.cfg.API.TpsWindow)
if blockLimit <= 0 {
return nil, status.Errorf(codes.Internal, "block limit is %d", blockLimit)
}
// avoid genesis block
if int64(tipHeight) < blockLimit {
blockLimit = int64(tipHeight)
}
r, err := api.getBlockMetas(tipHeight-uint64(blockLimit)+1, uint64(blockLimit))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blks := r.BlkMetas
if len(blks) == 0 {
return nil, status.Error(codes.NotFound, "get 0 blocks! not able to calculate aps")
}
var numActions int64
for _, blk := range blks {
numActions += blk.NumActions
}
t1 := time.Unix(blks[0].Timestamp.GetSeconds(), int64(blks[0].Timestamp.GetNanos()))
t2 := time.Unix(blks[len(blks)-1].Timestamp.GetSeconds(), int64(blks[len(blks)-1].Timestamp.GetNanos()))
// duration of time difference in milli-seconds
// TODO: use config.Genesis.BlockInterval after PR1289 merges
timeDiff := (t2.Sub(t1) + 10*time.Second) / time.Millisecond
tps := float32(numActions*1000) / float32(timeDiff)
chainMeta := &iotextypes.ChainMeta{
Height: tipHeight,
NumActions: int64(totalActions),
Tps: int64(math.Ceil(float64(tps))),
TpsFloat: tps,
}
rp := rolldpos.FindProtocol(api.registry)
if rp != nil {
epochNum := rp.GetEpochNum(tipHeight)
epochHeight := rp.GetEpochHeight(epochNum)
gravityChainStartHeight, err := api.getGravityChainStartHeight(epochHeight)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
chainMeta.Epoch = &iotextypes.EpochData{
Num: epochNum,
Height: epochHeight,
GravityChainStartHeight: gravityChainStartHeight,
}
}
return &iotexapi.GetChainMetaResponse{ChainMeta: chainMeta}, nil
}
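// exampleTps is an illustrative sketch, not part of the original file. It
// restates the TPS arithmetic in GetChainMeta above: the span between the
// first and last sampled blocks is padded by 10s (a stand-in for the block
// interval, per the TODO) and converted to milliseconds, so
// tps = numActions * 1000 / elapsedMillis.
func exampleTps(numActions int64, first, last time.Time) float32 {
    elapsedMillis := (last.Sub(first) + 10*time.Second) / time.Millisecond
    return float32(numActions*1000) / float32(elapsedMillis)
}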
// GetServerMeta gets the server metadata
func (api *Server) GetServerMeta(ctx context.Context,
in *iotexapi.GetServerMetaRequest) (*iotexapi.GetServerMetaResponse, error) {
return &iotexapi.GetServerMetaResponse{ServerMeta: &iotextypes.ServerMeta{
PackageVersion: version.PackageVersion,
PackageCommitID: version.PackageCommitID,
GitStatus: version.GitStatus,
GoVersion: version.GoVersion,
BuildTime: version.BuildTime,
}}, nil
}
// SendAction is the API to send an action to blockchain.
func (api *Server) SendAction(ctx context.Context, in *iotexapi.SendActionRequest) (*iotexapi.SendActionResponse, error) {
log.L().Debug("receive send action request")
var selp action.SealedEnvelope
var err error
if err = selp.LoadProto(in.Action); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
// Add to local actpool
ctx = protocol.WithRegistry(ctx, api.registry)
if err = api.ap.Add(ctx, selp); err != nil {
log.L().Debug(err.Error())
var desc string
switch errors.Cause(err) {
case action.ErrBalance:
desc = "Invalid balance"
case action.ErrInsufficientBalanceForGas:
desc = "Insufficient balance for gas"
case action.ErrNonce:
desc = "Invalid nonce"
case action.ErrAddress:
desc = "Blacklisted address"
case action.ErrActPool:
desc = "Invalid actpool"
case action.ErrGasPrice:
desc = "Invalid gas price"
default:
desc = "Unknown"
}
st := status.New(codes.Internal, err.Error())
v := &errdetails.BadRequest_FieldViolation{
Field: "Action rejected",
Description: desc,
}
br := &errdetails.BadRequest{}
br.FieldViolations = append(br.FieldViolations, v)
st, err := st.WithDetails(br)
if err != nil {
log.S().Panicf("Unexpected error attaching metadata: %v", err)
}
return nil, st.Err()
}
// If there is no error putting into local actpool,
// Broadcast it to the network
if err = api.broadcastHandler(context.Background(), api.bc.ChainID(), in.Action); err != nil {
log.L().Warn("Failed to broadcast SendAction request.", zap.Error(err))
}
hash := selp.Hash()
return &iotexapi.SendActionResponse{ActionHash: hex.EncodeToString(hash[:])}, nil
}
// GetReceiptByAction gets receipt with corresponding action hash
func (api *Server) GetReceiptByAction(ctx context.Context, in *iotexapi.GetReceiptByActionRequest) (*iotexapi.GetReceiptByActionResponse, error) {
if !api.hasActionIndex || api.indexer == nil {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
actHash, err := hash.HexStringToHash256(in.ActionHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
receipt, err := api.GetReceiptByActionHash(actHash)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blkHash, err := api.getBlockHashByActionHash(actHash)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
return &iotexapi.GetReceiptByActionResponse{
ReceiptInfo: &iotexapi.ReceiptInfo{
Receipt: receipt.ConvertToReceiptPb(),
BlkHash: hex.EncodeToString(blkHash[:]),
},
}, nil
}
// ReadContract reads the state in a contract address specified by the slot
func (api *Server) ReadContract(ctx context.Context, in *iotexapi.ReadContractRequest) (*iotexapi.ReadContractResponse, error) {
log.L().Debug("receive read smart contract request")
sc := &action.Execution{}
if err := sc.LoadProto(in.Execution); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
state, err := accountutil.AccountState(api.sf, in.CallerAddress)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
sc, _ = action.NewExecution(
sc.Contract(),
state.Nonce+1,
sc.Amount(),
api.cfg.Genesis.BlockGasLimit,
big.NewInt(0),
sc.Data(),
)
callerAddr, err := address.FromString(in.CallerAddress)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
ctx, err = api.bc.Context()
if err != nil {
return nil, err
}
retval, receipt, err := api.sf.SimulateExecution(ctx, callerAddr, sc, api.dao.GetBlockHash)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.ReadContractResponse{
Data: hex.EncodeToString(retval),
Receipt: receipt.ConvertToReceiptPb(),
}, nil
}
// ReadState reads state on blockchain
func (api *Server) ReadState(ctx context.Context, in *iotexapi.ReadStateRequest) (*iotexapi.ReadStateResponse, error) {
p, ok := api.registry.Find(string(in.ProtocolID))
if !ok {
return nil, status.Errorf(codes.Internal, "protocol %s isn't registered", string(in.ProtocolID))
}
data, err := api.readState(ctx, p, in.GetHeight(), in.MethodName, in.Arguments...)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
out := iotexapi.ReadStateResponse{
Data: data,
}
return &out, nil
}
// SuggestGasPrice suggests gas price
func (api *Server) SuggestGasPrice(ctx context.Context, in *iotexapi.SuggestGasPriceRequest) (*iotexapi.SuggestGasPriceResponse, error) {
suggestPrice, err := api.gs.SuggestGasPrice()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.SuggestGasPriceResponse{GasPrice: suggestPrice}, nil
}
// EstimateGasForAction estimates gas for action
func (api *Server) EstimateGasForAction(ctx context.Context, in *iotexapi.EstimateGasForActionRequest) (*iotexapi.EstimateGasForActionResponse, error) {
estimateGas, err := api.gs.EstimateGasForAction(in.Action)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.EstimateGasForActionResponse{Gas: estimateGas}, nil
}
// EstimateActionGasConsumption estimates gas consumption for an action without a signature
func (api *Server) EstimateActionGasConsumption(ctx context.Context, in *iotexapi.EstimateActionGasConsumptionRequest) (respone *iotexapi.EstimateActionGasConsumptionResponse, err error) {
respone = &iotexapi.EstimateActionGasConsumptionResponse{}
switch {
case in.GetExecution() != nil:
request := in.GetExecution()
return api.estimateActionGasConsumptionForExecution(request, in.GetCallerAddress())
case in.GetTransfer() != nil:
respone.Gas = uint64(len(in.GetTransfer().Payload))*action.TransferPayloadGas + action.TransferBaseIntrinsicGas
case in.GetStakeCreate() != nil:
respone.Gas = uint64(len(in.GetStakeCreate().Payload))*action.CreateStakePayloadGas + action.CreateStakeBaseIntrinsicGas
case in.GetStakeUnstake() != nil:
respone.Gas = uint64(len(in.GetStakeUnstake().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas
case in.GetStakeWithdraw() != nil:
respone.Gas = uint64(len(in.GetStakeWithdraw().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas
case in.GetStakeAddDeposit() != nil:
respone.Gas = uint64(len(in.GetStakeAddDeposit().Payload))*action.DepositToStakePayloadGas + action.DepositToStakeBaseIntrinsicGas
case in.GetStakeRestake() != nil:
respone.Gas = uint64(len(in.GetStakeRestake().Payload))*action.RestakePayloadGas + action.RestakeBaseIntrinsicGas
case in.GetStakeChangeCandidate() != nil:
respone.Gas = uint64(len(in.GetStakeChangeCandidate().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas
case in.GetStakeTransferOwnership() != nil:
respone.Gas = uint64(len(in.GetStakeTransferOwnership().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas
case in.GetCandidateRegister() != nil:
respone.Gas = uint64(len(in.GetCandidateRegister().Payload))*action.CandidateRegisterPayloadGas + action.CandidateRegisterBaseIntrinsicGas
case in.GetCandidateUpdate() != nil:
respone.Gas = action.CandidateUpdateBaseIntrinsicGas
default:
return nil, status.Error(codes.InvalidArgument, "invalid argument")
}
return
}
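// exampleTransferGas is an illustrative sketch, not part of the original file.
// It restates the transfer estimate above:
// gas = len(payload) * TransferPayloadGas + TransferBaseIntrinsicGas.
func exampleTransferGas(payload []byte) uint64 {
    return uint64(len(payload))*action.TransferPayloadGas + action.TransferBaseIntrinsicGas
}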
// GetEpochMeta gets epoch metadata
func (api *Server) GetEpochMeta(
ctx context.Context,
in *iotexapi.GetEpochMetaRequest,
) (*iotexapi.GetEpochMetaResponse, error) {
rp := rolldpos.FindProtocol(api.registry)
if rp == nil {
return &iotexapi.GetEpochMetaResponse{}, nil
}
if in.EpochNumber < 1 {
return nil, status.Error(codes.InvalidArgument, "epoch number cannot be less than one")
}
epochHeight := rp.GetEpochHeight(in.EpochNumber)
gravityChainStartHeight, err := api.getGravityChainStartHeight(epochHeight)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
epochData := &iotextypes.EpochData{
Num: in.EpochNumber,
Height: epochHeight,
GravityChainStartHeight: gravityChainStartHeight,
}
pp := poll.FindProtocol(api.registry)
if pp == nil {
return nil, status.Error(codes.Internal, "poll protocol is not registered")
}
methodName := []byte("ActiveBlockProducersByEpoch")
arguments := [][]byte{[]byte(strconv.FormatUint(in.EpochNumber, 10))}
height := strconv.FormatUint(epochHeight, 10)
data, err := api.readState(context.Background(), pp, height, methodName, arguments...)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
var activeConsensusBlockProducers state.CandidateList
if err := activeConsensusBlockProducers.Deserialize(data); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
numBlks, produce, err := api.getProductivityByEpoch(rp, in.EpochNumber, api.bc.TipHeight(), activeConsensusBlockProducers)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
methodName = []byte("BlockProducersByEpoch")
data, err = api.readState(context.Background(), pp, height, methodName, arguments...)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
var BlockProducers state.CandidateList
if err := BlockProducers.Deserialize(data); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
var blockProducersInfo []*iotexapi.BlockProducerInfo
for _, bp := range BlockProducers {
var active bool
var blockProduction uint64
if production, ok := produce[bp.Address]; ok {
active = true
blockProduction = production
}
blockProducersInfo = append(blockProducersInfo, &iotexapi.BlockProducerInfo{
Address: bp.Address,
Votes: bp.Votes.String(),
Active: active,
Production: blockProduction,
})
}
return &iotexapi.GetEpochMetaResponse{
EpochData: epochData,
TotalBlocks: numBlks,
BlockProducersInfo: blockProducersInfo,
}, nil
}
// GetRawBlocks gets raw block data
func (api *Server) GetRawBlocks(
ctx context.Context,
in *iotexapi.GetRawBlocksRequest,
) (*iotexapi.GetRawBlocksResponse, error) {
if in.Count == 0 || in.Count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
tipHeight := api.bc.TipHeight()
if in.StartHeight > tipHeight {
return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height")
}
var res []*iotexapi.BlockInfo
for height := int(in.StartHeight); height <= int(tipHeight); height++ {
if uint64(len(res)) >= in.Count {
break
}
blk, err := api.dao.GetBlockByHeight(uint64(height))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
var receiptsPb []*iotextypes.Receipt
if in.WithReceipts {
receipts, err := api.dao.GetReceipts(uint64(height))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
for _, receipt := range receipts {
receiptsPb = append(receiptsPb, receipt.ConvertToReceiptPb())
}
}
res = append(res, &iotexapi.BlockInfo{
Block: blk.ConvertToBlockPb(),
Receipts: receiptsPb,
})
}
return &iotexapi.GetRawBlocksResponse{Blocks: res}, nil
}
// GetLogs get logs filtered by contract address and topics
func (api *Server) GetLogs(
ctx context.Context,
in *iotexapi.GetLogsRequest,
) (*iotexapi.GetLogsResponse, error) {
switch {
case in.GetByBlock() != nil:
req := in.GetByBlock()
h, err := api.dao.GetBlockHeight(hash.BytesToHash256(req.BlockHash))
if err != nil {
return nil, status.Error(codes.InvalidArgument, "invalid block hash")
}
filter, ok := NewLogFilter(in.Filter, nil, nil).(*LogFilter)
if !ok {
return nil, status.Error(codes.Internal, "cannot convert to *LogFilter")
}
logs, err := api.getLogsInBlock(filter, h, 1)
return &iotexapi.GetLogsResponse{Logs: logs}, err
case in.GetByRange() != nil:
req := in.GetByRange()
if req.FromBlock > api.bc.TipHeight() {
return nil, status.Error(codes.InvalidArgument, "start block > tip height")
}
filter, ok := NewLogFilter(in.Filter, nil, nil).(*LogFilter)
if !ok {
return nil, status.Error(codes.Internal, "cannot convert to *LogFilter")
}
logs, err := api.getLogsInBlock(filter, req.FromBlock, req.Count)
return &iotexapi.GetLogsResponse{Logs: logs}, err
default:
return nil, status.Error(codes.InvalidArgument, "invalid GetLogsRequest type")
}
}
// StreamBlocks streams blocks
func (api *Server) StreamBlocks(in *iotexapi.StreamBlocksRequest, stream iotexapi.APIService_StreamBlocksServer) error {
errChan := make(chan error)
if err := api.chainListener.AddResponder(NewBlockListener(stream, errChan)); err != nil {
return status.Error(codes.Internal, err.Error())
}
for {
select {
case err := <-errChan:
if err != nil {
err = status.Error(codes.Aborted, err.Error())
}
return err
}
}
}
// StreamLogs streams logs that match the filter condition
func (api *Server) StreamLogs(in *iotexapi.StreamLogsRequest, stream iotexapi.APIService_StreamLogsServer) error {
errChan := make(chan error)
// register the log filter so it will match logs in new blocks
if err := api.chainListener.AddResponder(NewLogFilter(in.Filter, stream, errChan)); err != nil {
return status.Error(codes.Internal, err.Error())
}
for {
select {
case err := <-errChan:
if err != nil {
err = status.Error(codes.Aborted, err.Error())
}
return err
}
}
}
// GetElectionBuckets returns the native election buckets.
func (api *Server) GetElectionBuckets(
ctx context.Context,
in *iotexapi.GetElectionBucketsRequest,
) (*iotexapi.GetElectionBucketsResponse, error) {
if api.electionCommittee == nil {
return nil, status.Error(codes.Unavailable, "Native election not supported")
}
buckets, err := api.electionCommittee.NativeBucketsByEpoch(in.GetEpochNum())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
re := make([]*iotextypes.ElectionBucket, len(buckets))
for i, b := range buckets {
startTime, err := ptypes.TimestampProto(b.StartTime())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
re[i] = &iotextypes.ElectionBucket{
Voter: b.Voter(),
Candidate: b.Candidate(),
Amount: b.Amount().Bytes(),
StartTime: startTime,
Duration: ptypes.DurationProto(b.Duration()),
Decay: b.Decay(),
}
}
return &iotexapi.GetElectionBucketsResponse{Buckets: re}, nil
}
// GetReceiptByActionHash returns receipt by action hash
func (api *Server) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) {
if !api.hasActionIndex || api.indexer == nil {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
actIndex, err := api.indexer.GetActionIndex(h[:])
if err != nil {
return nil, err
}
return api.dao.GetReceiptByActionHash(h, actIndex.BlockHeight())
}
// GetActionByActionHash returns action by action hash
func (api *Server) GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) {
if !api.hasActionIndex || api.indexer == nil {
return action.SealedEnvelope{}, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
selp, _, _, err := api.getActionByActionHash(h)
return selp, err
}
// GetEvmTransfersByActionHash returns evm transfers by action hash
func (api *Server) GetEvmTransfersByActionHash(ctx context.Context, in *iotexapi.GetEvmTransfersByActionHashRequest) (*iotexapi.GetEvmTransfersByActionHashResponse, error) {
if !api.hasActionIndex || api.systemLogIndexer == nil {
return nil, status.Error(codes.Unavailable, "evm transfer index not supported")
}
actHash, err := hash.HexStringToHash256(in.ActionHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
transfers, err := api.systemLogIndexer.GetEvmTransfersByActionHash(actHash)
if err != nil {
if errors.Cause(err) == db.ErrNotExist {
return nil, status.Error(codes.NotFound, "no such action with evm transfer")
}
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.GetEvmTransfersByActionHashResponse{ActionEvmTransfers: transfers}, nil
}
// GetEvmTransfersByBlockHeight returns evm transfers by block height
func (api *Server) GetEvmTransfersByBlockHeight(ctx context.Context, in *iotexapi.GetEvmTransfersByBlockHeightRequest) (*iotexapi.GetEvmTransfersByBlockHeightResponse, error) {
if !api.hasActionIndex || api.systemLogIndexer == nil {
return nil, status.Error(codes.Unavailable, "evm transfer index not supported")
}
if in.BlockHeight < 1 {
return nil, status.Errorf(codes.InvalidArgument, "invalid block height = %d", in.BlockHeight)
}
transfers, err := api.systemLogIndexer.GetEvmTransfersByBlockHeight(in.BlockHeight)
if err != nil {
if errors.Cause(err) == db.ErrNotExist {
return nil, status.Error(codes.NotFound, "no such block with evm transfer")
}
if strings.Contains(err.Error(), systemlog.ErrHeightNotReached.Error()) {
return nil, status.Errorf(codes.InvalidArgument, "height = %d is higher than current height", in.BlockHeight)
}
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.GetEvmTransfersByBlockHeightResponse{BlockEvmTransfers: transfers}, nil
}
// Start starts the API server
func (api *Server) Start() error {
portStr := ":" + strconv.Itoa(api.cfg.API.Port)
lis, err := net.Listen("tcp", portStr)
if err != nil {
log.L().Error("API server failed to listen.", zap.Error(err))
return errors.Wrap(err, "API server failed to listen")
}
log.L().Info("API server is listening.", zap.String("addr", lis.Addr().String()))
go func() {
if err := api.grpcServer.Serve(lis); err != nil {
log.L().Fatal("Node failed to serve.", zap.Error(err))
}
}()
if err := api.bc.AddSubscriber(api.chainListener); err != nil {
return errors.Wrap(err, "failed to subscribe to block creations")
}
if err := api.chainListener.Start(); err != nil {
return errors.Wrap(err, "failed to start blockchain listener")
}
return nil
}
// Stop stops the API server
func (api *Server) Stop() error {
api.grpcServer.Stop()
if err := api.bc.RemoveSubscriber(api.chainListener); err != nil {
return errors.Wrap(err, "failed to unsubscribe blockchain listener")
}
return api.chainListener.Stop()
}
func (api *Server) readState(ctx context.Context, p protocol.Protocol, height string, methodName []byte, arguments ...[]byte) ([]byte, error) {
// TODO: need to complete the context
tipHeight := api.bc.TipHeight()
ctx = protocol.WithBlockCtx(ctx, protocol.BlockCtx{
BlockHeight: tipHeight,
})
ctx = protocol.WithBlockchainCtx(
protocol.WithRegistry(ctx, api.registry),
protocol.BlockchainCtx{
Genesis: api.cfg.Genesis,
},
)
rp := rolldpos.FindProtocol(api.registry)
if rp == nil {
return nil, errors.New("rolldpos is not registered")
}
tipEpochNum := rp.GetEpochNum(tipHeight)
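	// Requests pinned to a height in an earlier epoch are served from a history state reader anchored at that epoch's start height.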
if height != "" {
inputHeight, err := strconv.ParseUint(height, 0, 64)
if err != nil {
return nil, err
}
inputEpochNum := rp.GetEpochNum(inputHeight)
if inputEpochNum < tipEpochNum {
// old data, wrap to history state reader
return p.ReadState(ctx, factory.NewHistoryStateReader(api.sf, rp.GetEpochHeight(inputEpochNum)), methodName, arguments...)
}
}
// TODO: need to distinguish user error and system error
return p.ReadState(ctx, api.sf, methodName, arguments...)
}
func (api *Server) getActionsFromIndex(totalActions, start, count uint64) (*iotexapi.GetActionsResponse, error) {
var actionInfo []*iotexapi.ActionInfo
hashes, err := api.indexer.GetActionHashFromIndex(start, count)
if err != nil {
return nil, status.Error(codes.Unavailable, err.Error())
}
for i := range hashes {
act, err := api.getAction(hash.BytesToHash256(hashes[i]), false)
if err != nil {
return nil, status.Error(codes.Unavailable, err.Error())
}
actionInfo = append(actionInfo, act)
}
return &iotexapi.GetActionsResponse{
Total: totalActions,
ActionInfo: actionInfo,
}, nil
}
// getActions returns actions within the requested range
func (api *Server) getActions(start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
totalActions, err := api.indexer.GetTotalActions()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if start >= totalActions {
return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
}
if totalActions == uint64(0) || count == 0 {
return &iotexapi.GetActionsResponse{}, nil
}
if start+count > totalActions {
count = totalActions - start
}
if api.hasActionIndex {
return api.getActionsFromIndex(totalActions, start, count)
}
	// Searching actions in reverse order saves time when querying the most recent ones
reverseStart := totalActions - (start + count)
if totalActions < start+count {
reverseStart = uint64(0)
count = totalActions - start
}
var res []*iotexapi.ActionInfo
var hit bool
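	// Scan blocks from the tip downward; reverseStart is the number of newest actions to skip before reaching the requested window, and hit flips once the first overlapping block is found.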
for height := api.bc.TipHeight(); height >= 1 && count > 0; height-- {
blk, err := api.dao.GetBlockByHeight(height)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
if !hit && reverseStart >= uint64(len(blk.Actions)) {
reverseStart -= uint64(len(blk.Actions))
continue
}
// now reverseStart < len(blk.Actions), we are going to fetch actions from this block
hit = true
act := api.reverseActionsInBlock(blk, reverseStart, count)
res = append(act, res...)
count -= uint64(len(act))
reverseStart = 0
}
return &iotexapi.GetActionsResponse{
Total: totalActions,
ActionInfo: res,
}, nil
}
// getSingleAction returns action by action hash
func (api *Server) getSingleAction(actionHash string, checkPending bool) (*iotexapi.GetActionsResponse, error) {
actHash, err := hash.HexStringToHash256(actionHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
act, err := api.getAction(actHash, checkPending)
if err != nil {
return nil, status.Error(codes.Unavailable, err.Error())
}
return &iotexapi.GetActionsResponse{
Total: 1,
ActionInfo: []*iotexapi.ActionInfo{act},
}, nil
}
// getActionsByAddress returns all actions associated with an address
func (api *Server) getActionsByAddress(addrStr string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
actions, err := api.indexer.GetActionsByAddress(hash.BytesToHash160(addr.Bytes()), start, count)
if err != nil && (errors.Cause(err) == db.ErrBucketNotExist || errors.Cause(err) == db.ErrNotExist) {
		// no actions associated with this address; return an empty response
return &iotexapi.GetActionsResponse{}, nil
}
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
res := &iotexapi.GetActionsResponse{Total: uint64(len(actions))}
for i := range actions {
act, err := api.getAction(hash.BytesToHash256(actions[i]), false)
if err != nil {
continue
}
res.ActionInfo = append(res.ActionInfo, act)
}
return res, nil
}
// getBlockHashByActionHash returns block hash by action hash
func (api *Server) getBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) {
actIndex, err := api.indexer.GetActionIndex(h[:])
if err != nil {
return hash.ZeroHash256, err
}
return api.dao.GetBlockHash(actIndex.BlockHeight())
}
// getActionByActionHash returns action by action hash
func (api *Server) getActionByActionHash(h hash.Hash256) (action.SealedEnvelope, hash.Hash256, uint64, error) {
actIndex, err := api.indexer.GetActionIndex(h[:])
if err != nil {
return action.SealedEnvelope{}, hash.ZeroHash256, 0, err
}
blk, err := api.dao.GetBlockByHeight(actIndex.BlockHeight())
if err != nil {
return action.SealedEnvelope{}, hash.ZeroHash256, 0, err
}
selp, err := api.dao.GetActionByActionHash(h, actIndex.BlockHeight())
return selp, blk.HashBlock(), actIndex.BlockHeight(), err
}
// getUnconfirmedActionsByAddress returns all unconfirmed actions in actpool associated with an address
func (api *Server) getUnconfirmedActionsByAddress(address string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
selps := api.ap.GetUnconfirmedActs(address)
if len(selps) == 0 {
return &iotexapi.GetActionsResponse{}, nil
}
if start >= uint64(len(selps)) {
return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
}
var res []*iotexapi.ActionInfo
for i := start; i < uint64(len(selps)) && i < start+count; i++ {
act, err := api.pendingAction(selps[i])
if err != nil {
continue
}
res = append(res, act)
}
return &iotexapi.GetActionsResponse{
Total: uint64(len(selps)),
ActionInfo: res,
}, nil
}
// getActionsByBlock returns all actions in a block
func (api *Server) getActionsByBlock(blkHash string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
hash, err := hash.HexStringToHash256(blkHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
blk, err := api.dao.GetBlock(hash)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
if len(blk.Actions) == 0 {
return &iotexapi.GetActionsResponse{}, nil
}
if start >= uint64(len(blk.Actions)) {
return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
}
res := api.actionsInBlock(blk, start, count)
return &iotexapi.GetActionsResponse{
Total: uint64(len(blk.Actions)),
ActionInfo: res,
}, nil
}
// getBlockMetas returns block metas for blocks within the height range
func (api *Server) getBlockMetas(start uint64, count uint64) (*iotexapi.GetBlockMetasResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
tipHeight := api.bc.TipHeight()
if start > tipHeight {
return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height")
}
var res []*iotextypes.BlockMeta
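	// Prefer the lightweight block index when building metas; fall back to loading the full block if the index entry is missing.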
for height := start; height <= tipHeight && count > 0; height++ {
blockMeta, err := api.getBlockMetasByHeader(height)
if errors.Cause(err) == db.ErrNotExist {
blockMeta, err = api.getBlockMetasByBlock(height)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
res = append(res, blockMeta)
count--
}
return &iotexapi.GetBlockMetasResponse{
Total: tipHeight,
BlkMetas: res,
}, nil
}
// getBlockMeta returns the block meta for the given block hash
func (api *Server) getBlockMeta(blkHash string) (*iotexapi.GetBlockMetasResponse, error) {
hash, err := hash.HexStringToHash256(blkHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
blockMeta, err := api.getBlockMetaByHeader(hash)
if errors.Cause(err) == db.ErrNotExist {
blockMeta, err = api.getBlockMetaByBlock(hash)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
return &iotexapi.GetBlockMetasResponse{
Total: 1,
BlkMetas: []*iotextypes.BlockMeta{blockMeta},
}, nil
}
// putBlockMetaUpgradeByBlock fills in numActions and transferAmount on the block meta using the full block
func (api *Server) putBlockMetaUpgradeByBlock(blk *block.Block, blockMeta *iotextypes.BlockMeta) *iotextypes.BlockMeta {
blockMeta.NumActions = int64(len(blk.Actions))
blockMeta.TransferAmount = blk.CalculateTransferAmount().String()
return blockMeta
}
// putBlockMetaUpgradeByHeader fills in numActions and transferAmount on the block meta using the block index at the given height
func (api *Server) putBlockMetaUpgradeByHeader(height uint64, blockMeta *iotextypes.BlockMeta) (*iotextypes.BlockMeta, error) {
index, err := api.indexer.GetBlockIndex(height)
if err != nil {
return nil, errors.Wrapf(err, "missing block index at height %d", height)
}
blockMeta.NumActions = int64(index.NumAction())
blockMeta.TransferAmount = index.TsfAmount().String()
return blockMeta, nil
}
// getBlockMetasByHeader builds a block meta from the block header at the given height
func (api *Server) getBlockMetasByHeader(height uint64) (*iotextypes.BlockMeta, error) {
header, err := api.bc.BlockHeaderByHeight(height)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(header)
blockMeta, err = api.putBlockMetaUpgradeByHeader(header.Height(), blockMeta)
if err != nil {
return nil, err
}
return blockMeta, nil
}
// getBlockMetasByBlock builds a block meta from the full block at the given height
func (api *Server) getBlockMetasByBlock(height uint64) (*iotextypes.BlockMeta, error) {
blk, err := api.dao.GetBlockByHeight(height)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(blk)
blockMeta = api.putBlockMetaUpgradeByBlock(blk, blockMeta)
return blockMeta, nil
}
// getBlockMetaByHeader builds a block meta from the block header with the given hash
func (api *Server) getBlockMetaByHeader(h hash.Hash256) (*iotextypes.BlockMeta, error) {
header, err := api.dao.Header(h)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(header)
blockMeta, err = api.putBlockMetaUpgradeByHeader(header.Height(), blockMeta)
if err != nil {
return nil, err
}
return blockMeta, nil
}
// getBlockMetaByBlock builds a block meta from the full block with the given hash
func (api *Server) getBlockMetaByBlock(h hash.Hash256) (*iotextypes.BlockMeta, error) {
blk, err := api.dao.GetBlock(h)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(blk)
blockMeta = api.putBlockMetaUpgradeByBlock(blk, blockMeta)
return blockMeta, nil
}
// getCommonBlockMeta builds the common block meta fields from either a block header or a full block
func (api *Server) getCommonBlockMeta(common interface{}) *iotextypes.BlockMeta {
header, ok := common.(*block.Header)
if !ok {
blk := common.(*block.Block)
header = &blk.Header
}
hash := header.HashBlock()
height := header.Height()
ts, _ := ptypes.TimestampProto(header.Timestamp())
producerAddress := header.ProducerAddress()
txRoot := header.TxRoot()
receiptRoot := header.ReceiptRoot()
deltaStateDigest := header.DeltaStateDigest()
logsBloom := header.LogsBloomfilter()
blockMeta := &iotextypes.BlockMeta{
Hash: hex.EncodeToString(hash[:]),
Height: height,
Timestamp: ts,
ProducerAddress: producerAddress,
TxRoot: hex.EncodeToString(txRoot[:]),
ReceiptRoot: hex.EncodeToString(receiptRoot[:]),
DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]),
}
if logsBloom != nil {
blockMeta.LogsBloom = hex.EncodeToString(logsBloom.Bytes())
}
return blockMeta
}
func (api *Server) getGravityChainStartHeight(epochHeight uint64) (uint64, error) {
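	// Without a registered poll protocol, fall back to the epoch height itself; otherwise ask the protocol for the gravity chain start height.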
gravityChainStartHeight := epochHeight
if pp := poll.FindProtocol(api.registry); pp != nil {
methodName := []byte("GetGravityChainStartHeight")
arguments := [][]byte{[]byte(strconv.FormatUint(epochHeight, 10))}
data, err := api.readState(context.Background(), pp, "", methodName, arguments...)
if err != nil {
return 0, err
}
if len(data) == 0 {
return 0, nil
}
if gravityChainStartHeight, err = strconv.ParseUint(string(data), 10, 64); err != nil {
return 0, err
}
}
return gravityChainStartHeight, nil
}
func (api *Server) committedAction(selp action.SealedEnvelope, blkHash hash.Hash256, blkHeight uint64) (
*iotexapi.ActionInfo, error) {
actHash := selp.Hash()
header, err := api.dao.Header(blkHash)
if err != nil {
return nil, err
}
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
receipt, err := api.dao.GetReceiptByActionHash(actHash, blkHeight)
if err != nil {
return nil, err
}
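	// Gas fee charged for the action = gas price * gas consumed recorded in the receipt.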
gas := new(big.Int)
gas = gas.Mul(selp.GasPrice(), big.NewInt(int64(receipt.GasConsumed)))
return &iotexapi.ActionInfo{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: hex.EncodeToString(blkHash[:]),
BlkHeight: header.Height(),
Sender: sender.String(),
GasFee: gas.String(),
Timestamp: header.BlockHeaderCoreProto().Timestamp,
}, nil
}
func (api *Server) pendingAction(selp action.SealedEnvelope) (*iotexapi.ActionInfo, error) {
actHash := selp.Hash()
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
return &iotexapi.ActionInfo{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: hex.EncodeToString(hash.ZeroHash256[:]),
BlkHeight: 0,
Sender: sender.String(),
Timestamp: nil,
}, nil
}
func (api *Server) getAction(actHash hash.Hash256, checkPending bool) (*iotexapi.ActionInfo, error) {
selp, blkHash, blkHeight, err := api.getActionByActionHash(actHash)
if err == nil {
return api.committedAction(selp, blkHash, blkHeight)
}
// Try to fetch pending action from actpool
if checkPending {
selp, err = api.ap.GetActionByHash(actHash)
}
if err != nil {
return nil, err
}
return api.pendingAction(selp)
}
func (api *Server) actionsInBlock(blk *block.Block, start, count uint64) []*iotexapi.ActionInfo {
h := blk.HashBlock()
blkHash := hex.EncodeToString(h[:])
blkHeight := blk.Height()
ts := blk.Header.BlockHeaderCoreProto().Timestamp
var res []*iotexapi.ActionInfo
for i := start; i < uint64(len(blk.Actions)) && i < start+count; i++ {
selp := blk.Actions[i]
actHash := selp.Hash()
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
res = append(res, &iotexapi.ActionInfo{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: blkHash,
BlkHeight: blkHeight,
Sender: sender.String(),
Timestamp: ts,
})
}
return res
}
func (api *Server) reverseActionsInBlock(blk *block.Block, reverseStart, count uint64) []*iotexapi.ActionInfo {
h := blk.HashBlock()
blkHash := hex.EncodeToString(h[:])
blkHeight := blk.Height()
ts := blk.Header.BlockHeaderCoreProto().Timestamp
var res []*iotexapi.ActionInfo
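	// Walk the block's actions from newest to oldest (skipping the reverseStart newest ones) and prepend each entry so the returned slice stays in ascending block order.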
for i := reverseStart; i < uint64(len(blk.Actions)) && i < reverseStart+count; i++ {
ri := uint64(len(blk.Actions)) - 1 - i
selp := blk.Actions[ri]
actHash := selp.Hash()
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
res = append([]*iotexapi.ActionInfo{
{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: blkHash,
BlkHeight: blkHeight,
Sender: sender.String(),
Timestamp: ts,
},
}, res...)
}
return res
}
func (api *Server) getLogsInBlock(filter *LogFilter, start, count uint64) ([]*iotextypes.Log, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
// filter logs within start --> end
var logs []*iotextypes.Log
end := start + count - 1
if end > api.bc.TipHeight() {
end = api.bc.TipHeight()
}
for i := start; i <= end; i++ {
receipts, err := api.dao.GetReceipts(i)
if err != nil {
return logs, status.Error(codes.InvalidArgument, err.Error())
}
logs = append(logs, filter.MatchLogs(receipts)...)
}
return logs, nil
}
// TODO: Since GasConsumed on the receipt may not be enough for the gas limit, we use binary search for the gas estimate. Need a better way to address it later.
func (api *Server) estimateActionGasConsumptionForExecution(exec *iotextypes.Execution, sender string) (*iotexapi.EstimateActionGasConsumptionResponse, error) {
sc := &action.Execution{}
if err := sc.LoadProto(exec); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
state, err := accountutil.AccountState(api.sf, sender)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
nonce := state.Nonce + 1
callerAddr, err := address.FromString(sender)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
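	// Rebuild the execution with the sender's next nonce and the full block gas limit, then simulate it to measure gas consumption.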
sc, _ = action.NewExecution(
sc.Contract(),
nonce,
sc.Amount(),
api.cfg.Genesis.BlockGasLimit,
big.NewInt(0),
sc.Data(),
)
ctx, err := api.bc.Context()
if err != nil {
return nil, err
}
_, receipt, err := api.sf.SimulateExecution(ctx, callerAddr, sc, api.dao.GetBlockHash)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if receipt.Status != uint64(iotextypes.ReceiptStatus_Success) {
return nil, status.Error(codes.Internal, "execution simulation gets failure status")
}
estimatedGas := receipt.GasConsumed
enough, err := api.isGasLimitEnough(callerAddr, sc, nonce, estimatedGas)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
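	// The consumed gas alone may be too small to use as a gas limit, so probe larger limits between the estimate and the block gas limit until the simulation succeeds.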
if !enough {
low, high := estimatedGas, api.cfg.Genesis.BlockGasLimit
estimatedGas = high
for low <= high {
mid := (low + high) / 2
enough, err = api.isGasLimitEnough(callerAddr, sc, nonce, mid)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if enough {
estimatedGas = mid
break
}
low = mid + 1
}
}
return &iotexapi.EstimateActionGasConsumptionResponse{
Gas: estimatedGas,
}, nil
}
func (api *Server) estimateActionGasConsumptionForTransfer(transfer *iotextypes.Transfer) (*iotexapi.EstimateActionGasConsumptionResponse, error) {
payloadSize := uint64(len(transfer.Payload))
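	// Transfer gas = per-byte payload gas times the payload size, plus the base intrinsic gas.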
return &iotexapi.EstimateActionGasConsumptionResponse{
Gas: payloadSize*action.TransferPayloadGas + action.TransferBaseIntrinsicGas,
}, nil
}
func (api *Server) isGasLimitEnough(
caller address.Address,
sc *action.Execution,
nonce uint64,
gasLimit uint64,
) (bool, error) {
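	// Re-simulate the execution with the candidate gas limit; a success receipt status means the limit is sufficient.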
sc, _ = action.NewExecution(
sc.Contract(),
nonce,
sc.Amount(),
gasLimit,
big.NewInt(0),
sc.Data(),
)
ctx, err := api.bc.Context()
if err != nil {
return false, err
}
_, receipt, err := api.sf.SimulateExecution(ctx, caller, sc, api.dao.GetBlockHash)
if err != nil {
return false, err
}
return receipt.Status == uint64(iotextypes.ReceiptStatus_Success), nil
}
func (api *Server) getProductivityByEpoch(
rp *rolldpos.Protocol,
epochNum uint64,
tipHeight uint64,
abps state.CandidateList,
) (uint64, map[string]uint64, error) {
num, produce, err := rp.ProductivityByEpoch(epochNum, tipHeight, func(start uint64, end uint64) (map[string]uint64, error) {
return blockchain.Productivity(api.bc, start, end)
})
if err != nil {
return 0, nil, status.Error(codes.NotFound, err.Error())
}
	// check if there is any active block producer who didn't produce any block
for _, abp := range abps {
if _, ok := produce[abp.Address]; !ok {
produce[abp.Address] = 0
}
}
return num, produce, nil
}
| 1 | 21,961 | can we get height from sf at very beginning of the function and use that as height of the block? | iotexproject-iotex-core | go |
@@ -0,0 +1,6 @@
+namespace Microsoft.AspNet.Server.Kestrel
+{
+ public class StandardsPoliceCompileModule : Microsoft.StandardsPolice.StandardsPoliceCompileModule
+ {
+ }
+} | 1 | 1 | 5,722 | Does this have to be public and in the primary namespace? | aspnet-KestrelHttpServer | .cs |
|
@@ -1206,6 +1206,15 @@ func (fbo *folderBranchOps) initMDLocked(
return err
}
+ fbo.headLock.Lock(lState)
+ defer fbo.headLock.Unlock(lState)
+ // Some other thread got here first, so give up and let it go
+ // before we push anything to the servers.
+ if fbo.head != (ImmutableRootMetadata{}) {
+ fbo.log.CDebugf(ctx, "Head was already set, aborting")
+ return nil
+ }
+
if err = PutBlockCheckQuota(ctx, fbo.config.BlockServer(),
fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
md.GetTlfHandle().GetCanonicalName()); err != nil { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
// mdReqType indicates whether an operation makes MD modifications or not
type mdReqType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReqType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
// A write request.
mdWrite
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
// MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
// 512K blocks by default, block changes embedded max == 8K.
// Block size was chosen somewhat arbitrarily by trying to
// minimize the overall size of the history written by a user when
// appending 1KB writes to a file, up to a 1GB total file. Here
// is the output of a simple script that approximates that
// calculation:
//
// Total history size for 0065536-byte blocks: 1134341128192 bytes
// Total history size for 0131072-byte blocks: 618945052672 bytes
// Total history size for 0262144-byte blocks: 412786622464 bytes
// Total history size for 0524288-byte blocks: 412786622464 bytes
// Total history size for 1048576-byte blocks: 618945052672 bytes
// Total history size for 2097152-byte blocks: 1134341128192 bytes
// Total history size for 4194304-byte blocks: 2216672886784 bytes
MaxBlockSizeBytesDefault = 512 << 10
// Maximum number of blocks that can be sent in parallel
maxParallelBlockPuts = 100
// Maximum number of blocks that can be fetched in parallel
maxParallelBlockGets = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Time between checks for dirty files to flush, in case Sync is
// never called.
secondsBetweenBackgroundFlushes = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
// If it's been more than this long since our last update, check
// the current head before downloading all of the new revisions.
fastForwardTimeThresh = 15 * time.Minute
// If there are more than this many new revisions, fast forward
// rather than downloading them all.
fastForwardRevThresh = 50
)
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead fboMutexLevel = 2
fboBlock fboMutexLevel = 3
)
func (o fboMutexLevel) String() string {
switch o {
case fboMDWriter:
return "mdWriterLock"
case fboHead:
return "headLock"
case fboBlock:
return "blockLock"
default:
return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
}
func fboMutexLevelToString(o mutexLevel) string {
return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
func makeFBOLockState() *lockState {
return makeLevelState(fboMutexLevelToString)
}
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
func (bl *blockLock) Lock(lState *lockState) {
bl.leveledRWMutex.Lock(lState)
bl.locked = true
}
func (bl *blockLock) Unlock(lState *lockState) {
bl.locked = false
bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
if !bl.locked {
bl.RUnlock(lState)
defer bl.RLock(lState)
}
f(lState)
}
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch. The goal with our locking strategy is maximize
// concurrent access whenever possible. See design/state_machine.md
// for more details. There are three important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, and the MD copied to a
// goroutine-local variable, and then it can be released.
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute force thing for now: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid BranchID // protected by mdWriterLock
bType branchType
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
// protects access to head and latestMergedRevision.
headLock leveledRWMutex
head ImmutableRootMetadata
// latestMergedRevision tracks the latest heard merged revision on server
latestMergedRevision MetadataRevision
blocks folderBlockOps
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
// - Operations that takes mdWriterLock always needs the
// most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) must exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log logger.Logger
deferLog logger.Logger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
// rekeyWithPromptTimer tracks a timed function that will try to
// rekey with a paper key prompt, if enough time has passed.
// Protected by mdWriterLock
rekeyWithPromptTimer *time.Timer
editHistory *TlfEditHistory
branchChanges kbfssync.RepeatedWaitGroup
mdFlushes kbfssync.RepeatedWaitGroup
}
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
nodeCache := newNodeCacheStandard(fb)
// make logger
branchSuffix := ""
if fb.Branch != MasterBranch {
branchSuffix = " " + string(fb.Branch)
}
tlfStringFull := fb.Tlf.String()
// Shorten the TLF ID for the module name. 8 characters should be
// unique enough for a local node.
log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
branchSuffix))
// But print it out once in full, just in case.
log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull)
observers := newObserverList()
mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
forceSyncChan := make(chan struct{})
fbo := &folderBranchOps{
config: config,
folderBranch: fb,
bid: BranchID{},
bType: bType,
observers: observers,
status: newFolderBranchStatusKeeper(config, nodeCache),
mdWriterLock: mdWriterLock,
headLock: headLock,
blocks: folderBlockOps{
config: config,
log: log,
folderBranch: fb,
observers: observers,
forceSyncChan: forceSyncChan,
blockLock: blockLock{
leveledRWMutex: blockLockMu,
},
dirtyFiles: make(map[BlockPointer]*dirtyFile),
unrefCache: make(map[BlockRef]*syncInfo),
deCache: make(map[BlockRef]DirEntry),
nodeCache: nodeCache,
},
nodeCache: nodeCache,
log: log,
deferLog: log.CloneWithAddedDepth(1),
shutdownChan: make(chan struct{}),
updatePauseChan: make(chan (<-chan struct{})),
forceSyncChan: forceSyncChan,
}
fbo.cr = NewConflictResolver(config, fbo)
fbo.fbm = newFolderBlockManager(config, fb, fbo)
fbo.editHistory = NewTlfEditHistory(config, fbo, log)
if config.DoBackgroundFlushes() {
go fbo.backgroundFlusher(secondsBetweenBackgroundFlushes * time.Second)
}
return fbo
}
// markForReIdentifyIfNeeded checks whether this tlf is identified and mark
// it for lazy reidentification if it exceeds time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
fbo.identifyDone = false
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.
func (fbo *folderBranchOps) Shutdown() error {
if fbo.config.CheckStateOnShutdown() {
ctx := context.TODO()
lState := makeFBOLockState()
if fbo.blocks.GetState(lState) == dirtyState {
fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
} else if !fbo.isMasterBranch(lState) {
fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
} else {
// Make sure we're up to date first
if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil {
return err
}
// Check the state for consistency before shutting down.
sc := NewStateChecker(fbo.config)
if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
return err
}
}
}
close(fbo.shutdownChan)
fbo.cr.Shutdown()
fbo.fbm.shutdown()
fbo.editHistory.Shutdown()
// Wait for the update goroutine to finish, so that we don't have
// any races with logging during test reporting.
if fbo.updateDoneChan != nil {
<-fbo.updateDoneChan
}
return nil
}
func (fbo *folderBranchOps) id() tlf.ID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBranchOps) branch() BranchName {
return fbo.folderBranch.Branch
}
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
// no-op
}
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) AddFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("AddFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
favorites *Favorites, created bool) (err error) {
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == (ImmutableRootMetadata{}) {
return OpsCantHandleFavorite{"Can't add a favorite without a handle"}
}
return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created)
}
func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context,
favorites *Favorites, handle *TlfHandle, created bool) (err error) {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't favorite while not logged in
return nil
}
favorites.AddAsync(ctx, handle.toFavToAdd(created))
return nil
}
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
favorites *Favorites) error {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't unfavorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == (ImmutableRootMetadata{}) {
// This can happen when identifies fail and the head is never set.
return OpsCantHandleFavorite{"Can't delete a favorite without a handle"}
}
h := head.GetTlfHandle()
return favorites.Delete(ctx, h.ToFavorite())
}
func (fbo *folderBranchOps) getHead(lState *lockState) ImmutableRootMetadata {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.head
}
// isMasterBranch should not be called if mdWriterLock is already taken.
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) {
fbo.mdWriterLock.AssertLocked(lState)
fbo.bid = bid
if bid == NullBranchID {
fbo.status.setCRSummary(nil, nil)
}
}
var errNoFlushedRevisions = errors.New("No flushed MDs yet")
// getJournalPredecessorRevision returns the revision that precedes
// the current journal head if journaling enabled and there are
// unflushed MD updates; otherwise it returns
// MetadataRevisionUninitialized. If there aren't any flushed MD
// revisions, it returns errNoFlushedRevisions.
func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) (
MetadataRevision, error) {
jServer, err := GetJournalServer(fbo.config)
if err != nil {
// Journaling is disabled entirely.
return MetadataRevisionUninitialized, nil
}
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
// Journaling is disabled for this TLF, so use the local head.
// TODO: JournalStatus could return other errors (likely
// file/disk corruption) that indicate a real problem, so it
// might be nice to type those errors so we can distinguish
// them.
return MetadataRevisionUninitialized, nil
}
if jStatus.BranchID != NullBranchID.String() {
return MetadataRevisionUninitialized,
errors.New("Cannot find most recent merged revision while staged")
}
if jStatus.RevisionStart == MetadataRevisionUninitialized {
// The journal is empty, so the local head must be the most recent.
return MetadataRevisionUninitialized, nil
} else if jStatus.RevisionStart == MetadataRevisionInitial {
// Nothing has been flushed to the servers yet, so don't
// return anything.
return MetadataRevisionUninitialized, errNoFlushedRevisions
}
return jStatus.RevisionStart - 1, nil
}
func (fbo *folderBranchOps) setHeadLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
isFirstHead := fbo.head == ImmutableRootMetadata{}
wasReadable := false
if !isFirstHead {
wasReadable = fbo.head.IsReadable()
if fbo.head.mdID == md.mdID {
panic(fmt.Errorf("Re-putting the same MD: %s", md.mdID))
}
}
fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision())
err := fbo.config.MDCache().Put(md)
if err != nil {
return err
}
// If this is the first time the MD is being set, and we are
// operating on unmerged data, initialize the state properly and
// kick off conflict resolution.
if isFirstHead && md.MergedStatus() == Unmerged {
fbo.setBranchIDLocked(lState, md.BID())
// Use uninitialized for the merged branch; the unmerged
// revision is enough to trigger conflict resolution.
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
} else if md.MergedStatus() == Merged {
journalEnabled := TLFJournalEnabled(fbo.config, fbo.id())
var key kbfscrypto.VerifyingKey
if journalEnabled {
if isFirstHead {
// If journaling is on, and this is the first head
// we're setting, we have to make sure we use the
// server's notion of the latest MD, not the one
// potentially coming from our journal. If there are
// no flushed revisions, it's not a hard error, and we
// just leave the latest merged revision
// uninitialized.
journalPred, err := fbo.getJournalPredecessorRevision(ctx)
switch err {
case nil:
// journalPred will be
// MetadataRevisionUninitialized when the journal
// is empty.
if journalPred >= MetadataRevisionInitial {
fbo.setLatestMergedRevisionLocked(
ctx, lState, journalPred, false)
} else {
fbo.setLatestMergedRevisionLocked(ctx, lState,
md.Revision(), false)
}
case errNoFlushedRevisions:
// The server has no revisions, so leave the
// latest merged revision uninitialized.
default:
return err
}
} else {
// If this isn't the first head, then this is either
// an update from the server, or an update just
// written by the client. But since journaling is on,
// then latter case will be handled by onMDFlush when
// the update is properly flushed to the server. So
// ignore updates written by this device.
key, err = fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
if key != md.LastModifyingWriterVerifyingKey() {
fbo.setLatestMergedRevisionLocked(
ctx, lState, md.Revision(), false)
}
}
} else {
// This is a merged revision, and journaling is disabled,
// so it's definitely the latest revision on the server as
// well.
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
}
// Make sure that any unembedded block changes have been swapped
// back in.
if md.data.Changes.Info.BlockPointer != zeroPtr &&
len(md.data.Changes.Ops) == 0 {
return errors.New("Must swap in block changes before setting head")
}
fbo.head = md
fbo.status.setRootMetadata(md)
if isFirstHead {
// Start registering for updates right away, using this MD
// as a starting point. For now only the master branch can
// get updates
if fbo.branch() == MasterBranch {
fbo.updateDoneChan = make(chan struct{})
go fbo.registerAndWaitForUpdates()
}
}
if !wasReadable && md.IsReadable() {
// Let any listeners know that this folder is now readable,
// which may indicate that a rekey successfully took place.
fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
md.GetTlfHandle(), md.TlfID().IsPublic()))
}
return nil
}
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched not due to a user action, i.e. via a Rekey
// notification, and we don't have a TLF name to check against.
func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setNewInitialHeadLocked is for when we're creating a brand-new TLF.
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
}
if md.Revision() != MetadataRevisionInitial {
return fmt.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision())
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata, rebased bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// This can happen in tests via SyncFromServerForTesting().
return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
}
if !rebased {
err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
if err != nil {
return err
}
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// Newer handles should be equal or more resolved over time.
//
// TODO: In some cases, they shouldn't, e.g. if we're on an
// unmerged branch. Add checks for this.
resolvesTo, partialResolvedOldHandle, err :=
oldHandle.ResolvesTo(
ctx, fbo.config.Codec(), fbo.config.KBPKI(),
*newHandle)
if err != nil {
return err
}
oldName := oldHandle.GetCanonicalName()
newName := newHandle.GetCanonicalName()
if !resolvesTo {
return IncompatibleHandleError{
oldName,
partialResolvedOldHandle.GetCanonicalName(),
newName,
}
}
err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
if oldName != newName {
fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
oldName, newName)
// If the handle has changed, send out a notification.
fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
// Also the folder should be re-identified given the
// newly-resolved assertions.
func() {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
fbo.identifyDone = false
}()
}
return nil
}
// setHeadPredecessorLocked is for when we're unstaging updates.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return errors.New("Unexpected nil head in setHeadPredecessorLocked")
}
if fbo.head.Revision() <= MetadataRevisionInitial {
return fmt.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
}
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadPredecessorLocked")
}
err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
if err != nil {
return err
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// The two handles must be the same, since no rekeying is done
// while unmerged.
eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
if err != nil {
return err
}
if !eq {
return fmt.Errorf(
"head handle %v unexpectedly not equal to new handle = %v",
oldHandle, newHandle)
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
}
if md.MergedStatus() != Merged {
return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
}
err := fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
// Since the CR head goes directly to the server, we can safely
// set the latest merged revision here. (Normally self-commits
// don't update the latest merged revision since all non-CR
// updates go through the journal.)
if TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
return nil
}
func (fbo *folderBranchOps) identifyOnce(
ctx context.Context, md ReadOnlyRootMetadata) error {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone {
return nil
}
h := md.GetTlfHandle()
fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
kbpki := fbo.config.KBPKI()
err := identifyHandle(ctx, kbpki, kbpki, h)
if err != nil {
fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
// For now, if the identify fails, let the
// next function to hit this code path retry.
return err
}
ei := getExtendedIdentify(ctx)
if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() &&
len(ei.getTlfBreakAndClose().Breaks) > 0 {
fbo.log.CDebugf(ctx,
"Identify finished with no error but broken proof warnings")
} else {
fbo.log.CDebugf(ctx, "Identify finished successfully")
fbo.identifyDone = true
fbo.identifyTime = fbo.config.Clock().Now()
}
return nil
}
// if rtype == mdWrite || mdRekey, then mdWriterLock must be taken
func (fbo *folderBranchOps) getMDLocked(
ctx context.Context, lState *lockState, rtype mdReqType) (
md ImmutableRootMetadata, err error) {
defer func() {
if err != nil || rtype == mdReadNoIdentify || rtype == mdRekey {
return
}
err = fbo.identifyOnce(ctx, md.ReadOnly())
}()
md = fbo.getHead(lState)
if md != (ImmutableRootMetadata{}) {
return md, nil
}
// Unless we're in mdWrite or mdRekey mode, we can't safely fetch
// the new MD without causing races, so bail.
if rtype != mdWrite && rtype != mdRekey {
return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
}
// We go down this code path either due to a rekey
// notification for an unseen TLF, or in some tests.
//
// TODO: Make tests not take this code path, and keep track of
// the fact that MDs coming from rekey notifications are
// untrusted.
fbo.mdWriterLock.AssertLocked(lState)
// Not in cache, fetch from server and add to cache. First, see
// if this device has any unmerged commits -- take the latest one.
mdops := fbo.config.MDOps()
// get the head of the unmerged branch for this device (if any)
md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID)
if err != nil {
return ImmutableRootMetadata{}, err
}
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedMD == (ImmutableRootMetadata{}) {
return ImmutableRootMetadata{}, fmt.Errorf("Got nil RMD for %s", fbo.id())
}
if md == (ImmutableRootMetadata{}) {
// There are no unmerged MDs for this device, so just use the current head.
md = mergedMD
} else {
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// We don't need to do this for merged head
// because the setHeadLocked() already does
// that anyway.
fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
}()
}
if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
return ImmutableRootMetadata{}, fmt.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setInitialHeadUntrustedLocked(ctx, lState, md)
if err != nil {
return ImmutableRootMetadata{}, err
}
return md, nil
}
func (fbo *folderBranchOps) getMDForReadHelper(
ctx context.Context, lState *lockState, rtype mdReqType) (ImmutableRootMetadata, error) {
md, err := fbo.getMDLocked(ctx, lState, rtype)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.TlfID().IsPublic() {
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.GetTlfHandle().IsReader(uid) {
return ImmutableRootMetadata{}, NewReadAccessError(md.GetTlfHandle(), username, md.GetTlfHandle().GetCanonicalPath())
}
}
return md, nil
}
// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server. This could
// be different from the current local head if journaling is on. If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
ImmutableRootMetadata, error) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedRev == MetadataRevisionUninitialized {
// No unflushed journal entries, so use the local head.
lState := makeFBOLockState()
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
// Otherwise, use the specified revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
mergedRev, Merged)
if err != nil {
return ImmutableRootMetadata{}, err
}
fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
return rmd, nil
}
func (fbo *folderBranchOps) getMDForReadNoIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
// getMDForWriteLocked returns a new RootMetadata object with an
// incremented version number for modification. If the returned object
// is put to the MDServer (via MDOps), mdWriterLock must be held until
// then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getMDForWriteLocked(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getMDForWriteLockedForFilename(ctx, lState, "")
}
func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdWrite)
if err != nil {
return nil, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
if !md.GetTlfHandle().IsWriter(uid) {
return nil, NewWriteAccessError(md.GetTlfHandle(), username, filename)
}
// Make a new successor of the current MD to hold the coming
// writes. The caller must pass this into
// syncBlockAndCheckEmbedLocked or the changes will be lost.
newMd, err := md.MakeSuccessor(ctx, fbo.config, md.mdID, true)
if err != nil {
return nil, err
}
return newMd, nil
}
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
ctx context.Context, lState *lockState) (
rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
wasRekeySet bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdRekey)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
handle := md.GetTlfHandle()
// The caller must be a reader or writer (this checks both).
if !handle.IsReader(uid) {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(md.GetTlfHandle(), username)
}
newMd, err := md.MakeSuccessor(ctx, fbo.config, md.mdID, handle.IsWriter(uid))
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
// readers shouldn't modify writer metadata
if !handle.IsWriter(uid) && !newMd.IsWriterMetadataCopiedSet() {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(handle, username)
}
return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil
}
func (fbo *folderBranchOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
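// maybeUnembedAndPutBlocks unembeds the MD's block changes into
// separate blocks and puts them to the block server, but only when
// the block splitter says the changes are too large to remain
// embedded in the MD itself; otherwise it returns a nil
// blockPutState.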
func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context,
md *RootMetadata) (*blockPutState, error) {
if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
return nil, nil
}
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
bps := newBlockPutState(1)
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return nil, err
}
if len(ptrsToDelete) > 0 {
return nil, fmt.Errorf("Unexpected pointers to delete after "+
"unembedding block changes in gc op: %v", ptrsToDelete)
}
return bps, nil
}
// ResetRootBlock creates a new empty dir block and sets the given
// metadata's root block to it.
func ResetRootBlock(ctx context.Context, config Config,
currentUID keybase1.UID, rmd *RootMetadata) (
Block, BlockInfo, ReadyBlockData, error) {
newDblock := NewDirBlock()
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, config.BlockCache(), config.BlockOps(), config.Crypto(),
rmd.ReadOnly(), newDblock, currentUID)
if err != nil {
return nil, BlockInfo{}, ReadyBlockData{}, err
}
now := config.Clock().Now().UnixNano()
rmd.data.Dir = DirEntry{
BlockInfo: info,
EntryInfo: EntryInfo{
Type: Dir,
Size: uint64(plainSize),
Mtime: now,
Ctime: now,
},
}
prevDiskUsage := rmd.DiskUsage()
rmd.SetDiskUsage(0)
// Redundant, since this is called only for brand-new or
// successor RMDs, but leave in to be defensive.
rmd.ClearBlockChanges()
co := newCreateOpForRootDir()
rmd.AddOp(co)
rmd.AddRefBlock(rmd.data.Dir.BlockInfo)
// Set unref bytes to the previous disk usage, so that the
// accounting works out.
rmd.AddUnrefBytes(prevDiskUsage)
return newDblock, info, readyBlockData, nil
}
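// initMDLocked initializes a brand-new TLF: it rekeys the folder
// (for private TLFs), creates an empty root directory block, puts
// the new blocks and MD to the servers, and sets the initial head.
// mdWriterLock must be held by the caller.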
func (fbo *folderBranchOps) initMDLocked(
ctx context.Context, lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
handle := md.GetTlfHandle()
// make sure we're a writer before rekeying or putting any blocks.
if !handle.IsWriter(uid) {
return NewWriteAccessError(handle, username, handle.GetCanonicalPath())
}
var expectedKeyGen KeyGen
var tlfCryptKey *kbfscrypto.TLFCryptKey
if md.TlfID().IsPublic() {
expectedKeyGen = PublicKeyGen
} else {
var rekeyDone bool
// create a new set of keys for this metadata
rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
if err != nil {
return err
}
if !rekeyDone {
return fmt.Errorf("Initial rekey unexpectedly not done for private TLF %v", md.TlfID())
}
expectedKeyGen = FirstValidKeyGen
}
keyGen := md.LatestKeyGeneration()
if keyGen != expectedKeyGen {
return InvalidKeyGenerationError{md.TlfID(), keyGen}
}
// create a dblock since one doesn't exist yet
newDblock, info, readyBlockData, err :=
ResetRootBlock(ctx, fbo.config, uid, md)
if err != nil {
return err
}
if err = PutBlockCheckQuota(ctx, fbo.config.BlockServer(),
fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
md.GetTlfHandle().GetCanonicalName()); err != nil {
return err
}
if err = fbo.config.BlockCache().Put(
info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil {
return err
}
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
return err
}
md.loadCachedBlockChanges(bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fmt.Errorf(
"%v: Unexpected MD ID during new MD initialization: %v",
md.TlfID(), fbo.head.mdID)
}
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
err = fbo.setNewInitialHeadLocked(ctx, lState, MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now()))
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) {
return nil, tlf.ID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (tlf.ID, error) {
return tlf.ID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}
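// checkNode returns an error if the given node does not belong to
// the folder-branch handled by this folderBranchOps instance.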
func (fbo *folderBranchOps) checkNode(node Node) error {
fb := node.GetFolderBranch()
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return nil
}
// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
ctx context.Context, md ImmutableRootMetadata) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
md.Revision(), md.MergedStatus())
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v", err)
}()
if md.data.Dir.Type != Dir {
// Not initialized.
return fmt.Errorf("MD with revision=%d not initialized", md.Revision())
}
// Return early if the head is already set. This avoids taking
// mdWriterLock for no reason, and it also avoids any side effects
// (e.g., calling `identifyOnce` and downloading the merged
// head) if head is already set.
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
"need to set initial head again", md.Revision(), md.MergedStatus())
return nil
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{md.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, md.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if md.MergedStatus() == Unmerged {
mdops := fbo.config.MDOps()
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return err
}
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState,
mergedMD.Revision(), false)
}()
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Only update the head the first time; later it will be
// updated either directly via writes or through the
// background update processor.
if fbo.head == (ImmutableRootMetadata{}) {
err = fbo.setInitialHeadTrustedLocked(ctx, lState, md)
if err != nil {
return err
}
}
return nil
})
}
// SetInitialHeadToNew creates a brand-new ImmutableRootMetadata
// object and sets the head to that.
func (fbo *folderBranchOps) SetInitialHeadToNew(
ctx context.Context, id tlf.ID, handle *TlfHandle) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadToNew")
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v", err)
}()
rmd, err := makeInitialRootMetadata(
fbo.config.MetadataVersion(), id, handle)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{rmd.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, rmd.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.initMDLocked(ctx, lState, rmd)
})
}
// execMDReadNoIdentifyThenMDWrite first tries to execute the
// passed-in method in mdReadNoIdentify mode. If it fails with an
// MDWriteNeededInRequest error, it re-executes the method as in
// mdWrite mode. The passed-in method must note whether or not this
// is an mdWrite call.
//
// This must only be used by getRootNode().
func (fbo *folderBranchOps) execMDReadNoIdentifyThenMDWrite(
lState *lockState, f func(*lockState, mdReqType) error) error {
err := f(lState, mdReadNoIdentify)
// Redo as an MD write request if needed
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
err = f(lState, mdWrite)
}
return err
}
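// getRootNode returns a Node for the root directory of the TLF,
// creating it in the node cache if necessary, along with its entry
// info and handle.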
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
node Node, ei EntryInfo, handle *TlfHandle, err error) {
fbo.log.CDebugf(ctx, "getRootNode")
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
// node may still be nil if we're unwinding
// from a panic.
fbo.deferLog.CDebugf(ctx, "Done: %v", node)
}
}()
lState := makeFBOLockState()
var md ImmutableRootMetadata
err = fbo.execMDReadNoIdentifyThenMDWrite(lState,
func(lState *lockState, rtype mdReqType) error {
md, err = fbo.getMDLocked(ctx, lState, rtype)
return err
})
if err != nil {
return nil, EntryInfo{}, nil, err
}
// we may be an unkeyed client
if err := isReadableOrError(ctx, fbo.config, md.ReadOnly()); err != nil {
return nil, EntryInfo{}, nil, err
}
handle = md.GetTlfHandle()
node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(handle.GetCanonicalName()), nil)
if err != nil {
return nil, EntryInfo{}, nil, err
}
return node, md.Data().Dir.EntryInfo, handle, nil
}
type makeNewBlock func() Block
// pathFromNodeHelper() shouldn't be called except by the helper
// functions below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
fbo.log.CDebugf(ctx, "GetDirChildren %p", dir.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done GetDirChildren: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return nil, err
}
err = runUnlessCanceled(ctx, func() error {
var err error
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node
// has been unlinked. Probably we have fast-forwarded, and
// missed all the updates deleting the children in this
// directory. In that case, just return an empty set of
// children so we don't return an incorrect set from the
// cache.
if md.data.Dir.BlockPointer != dirPath.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Returning an empty children set for "+
"unlinked directory %v", dirPath.tailPointer())
return nil
}
children, err = fbo.blocks.GetDirtyDirChildren(
ctx, lState, md.ReadOnly(), dirPath)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return children, nil
}
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
node Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Lookup %p %s", dir.GetID(), name)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
node, de, err = fbo.blocks.Lookup(ctx, lState, md.ReadOnly(), dir, name)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, EntryInfo{}, err
}
return node, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
de DirEntry, err error) {
err = fbo.checkNode(node)
if err != nil {
return DirEntry{}, err
}
lState := makeFBOLockState()
nodePath, err := fbo.pathFromNodeForRead(node)
if err != nil {
return DirEntry{}, err
}
var md ImmutableRootMetadata
if nodePath.hasValidParent() {
md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
} else {
// If nodePath has no valid parent, it's just the TLF
// root, so we don't need an identify in this case.
md, err = fbo.getMDForReadNoIdentify(ctx, lState)
}
if err != nil {
return DirEntry{}, err
}
if nodePath.hasValidParent() {
de, err = fbo.blocks.GetDirtyEntry(
ctx, lState, md.ReadOnly(), nodePath)
if err != nil {
return DirEntry{}, err
}
} else {
// nodePath is just the root.
de = md.data.Dir
}
return de, nil
}
var zeroPtr BlockPointer
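// blockState describes a single block that is ready to be put to the
// block server, along with an optional callback to run once the put
// completes.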
type blockState struct {
blockPtr BlockPointer
block Block
readyBlockData ReadyBlockData
syncedCb func() error
}
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Stat %p", node.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return EntryInfo{}, err
}
return de.EntryInfo, nil
}
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
ei NodeMetadata, err error) {
fbo.log.CDebugf(ctx, "GetNodeMetadata %p", node.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
var res NodeMetadata
if err != nil {
return res, err
}
res.BlockInfo = de.BlockInfo
uid := de.Writer
if uid == keybase1.UID("") {
uid = de.Creator
}
res.LastWriterUnverified, err =
fbo.config.KBPKI().GetNormalizedUsername(ctx, uid)
if err != nil {
return res, err
}
return res, nil
}
// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
blockStates []blockState
}
func newBlockPutState(length int) *blockPutState {
bps := &blockPutState{}
bps.blockStates = make([]blockState, 0, length)
return bps
}
// addNewBlock tracks a new block that will be put. If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error). Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(blockPtr BlockPointer, block Block,
readyBlockData ReadyBlockData, syncedCb func() error) {
bps.blockStates = append(bps.blockStates,
blockState{blockPtr, block, readyBlockData, syncedCb})
}
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
bps.blockStates = append(bps.blockStates, other.blockStates...)
}
func (bps *blockPutState) DeepCopy() *blockPutState {
newBps := &blockPutState{}
newBps.blockStates = make([]blockState, len(bps.blockStates))
copy(newBps.blockStates, bps.blockStates)
return newBps
}
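// readyBlockMultiple readies the given block via ReadyBlock and adds
// the result to bps for a later batched put, returning the new block
// info and the block's plaintext size.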
func (fbo *folderBranchOps) readyBlockMultiple(ctx context.Context,
kmd KeyMetadata, currBlock Block, uid keybase1.UID,
bps *blockPutState) (info BlockInfo, plainSize int, err error) {
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, fbo.config.BlockCache(), fbo.config.BlockOps(),
fbo.config.Crypto(), kmd, currBlock, uid)
if err != nil {
return
}
bps.addNewBlock(info.BlockPointer, currBlock, readyBlockData, nil)
return
}
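// unembedBlockChanges serializes the given block changes into one or
// more file blocks (adding an indirect top block when they don't fit
// in a single block), queues those blocks in bps, and charges the new
// bytes to the MD's ref bytes and disk usage.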
func (fbo *folderBranchOps) unembedBlockChanges(
ctx context.Context, bps *blockPutState, md *RootMetadata,
changes *BlockChanges, uid keybase1.UID) error {
buf, err := fbo.config.Codec().Encode(changes)
if err != nil {
return err
}
block := NewFileBlock().(*FileBlock)
copied := fbo.config.BlockSplitter().CopyUntilSplit(block, false, buf, 0)
info, _, err := fbo.readyBlockMultiple(ctx, md.ReadOnly(), block, uid, bps)
if err != nil {
return err
}
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
// Everything fits in one block.
toCopy := int64(len(buf))
if copied >= toCopy {
changes.Info = info
md.data.cachedChanges = *changes
changes.Ops = nil
return nil
}
// Otherwise make a top block and split up the remaining buffer.
topBlock := NewFileBlock().(*FileBlock)
topBlock.IsInd = true
topBlock.IPtrs = append(topBlock.IPtrs, IndirectFilePtr{
BlockInfo: info,
Off: 0,
})
copiedSize := copied
for copiedSize < toCopy {
block := NewFileBlock().(*FileBlock)
currOff := copiedSize
copied := fbo.config.BlockSplitter().CopyUntilSplit(block, false,
buf[currOff:], 0)
copiedSize += copied
info, _, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), block, uid, bps)
if err != nil {
return err
}
topBlock.IPtrs = append(topBlock.IPtrs, IndirectFilePtr{
BlockInfo: info,
Off: currOff,
})
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
}
info, _, err = fbo.readyBlockMultiple(
ctx, md.ReadOnly(), topBlock, uid, bps)
if err != nil {
return err
}
changes.Info = info
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
md.data.cachedChanges = *changes
changes.Ops = nil
return nil
}
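// localBcache caches directory blocks that have been modified
// locally (e.g., by earlier syncBlock calls) but not yet readied,
// keyed by block pointer.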
type localBcache map[BlockPointer]*DirBlock
// syncBlock updates, and readies, the blocks along the path for the
// given write, up to the root of the tree or stopAt (if specified).
// When it updates the root of the tree, it also modifies the given
// head object with a new revision number and root block ID. It first
// checks the provided lbc for blocks that may have been modified by
// previous syncBlock calls or the FS calls themselves. It returns
// the updated path to the changed directory, the new or updated
// directory entry created as part of the call, and a summary of all
// the blocks that now must be put to the block server.
//
// This function is safe to use unlocked, but may modify MD to have
// the same revision number as another one. All functions in this file
// must call syncBlockLocked instead, which holds mdWriterLock and
// thus serializes the revision numbers. Conflict resolution may call
// syncBlockForConflictResolution, which doesn't hold the lock, since
// it already handles conflicts correctly.
//
// entryType must not be Sym.
//
// TODO: deal with multiple nodes for indirect blocks
func (fbo *folderBranchOps) syncBlock(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
// now ready each dblock and write the DirEntry for the next one
// in the path
currBlock := newBlock
currName := name
newPath := path{
FolderBranch: dir.FolderBranch,
path: make([]pathNode, 0, len(dir.path)),
}
bps := newBlockPutState(len(dir.path))
refPath := dir.ChildPathNoPtr(name)
var newDe DirEntry
doSetTime := true
now := fbo.nowUnixNano()
for len(newPath.path) < len(dir.path)+1 {
info, plainSize, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), currBlock, uid, bps)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// prepend to path and setup next one
newPath.path = append([]pathNode{{info.BlockPointer, currName}},
newPath.path...)
// get the parent block
prevIdx := len(dir.path) - len(newPath.path)
var prevDblock *DirBlock
var de DirEntry
var nextName string
nextDoSetTime := false
if prevIdx < 0 {
// root dir, update the MD instead
de = md.data.Dir
} else {
prevDir := path{
FolderBranch: dir.FolderBranch,
path: dir.path[:prevIdx+1],
}
// First, check the localBcache, which could contain
// blocks that were modified across multiple calls to
// syncBlock.
var ok bool
prevDblock, ok = lbc[prevDir.tailPointer()]
if !ok {
// If the block isn't in the local bcache, we
// have to fetch it, possibly from the
// network. Directory blocks are only ever
// modified while holding mdWriterLock, so it's
// safe to fetch them one at a time.
prevDblock, err = fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(),
prevDir, blockWrite)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
// modify the direntry for currName; make one
// if it doesn't exist (which should only
// happen the first time around).
//
// TODO: Pull the creation out of here and
// into createEntryLocked().
if de, ok = prevDblock.Children[currName]; !ok {
// If this isn't the first time
// around, we have an error.
if len(newPath.path) > 1 {
return path{}, DirEntry{}, nil, NoSuchNameError{currName}
}
// If this is a file, the size should be 0. (TODO:
// Ensure this.) If this is a directory, the size will
// be filled in below. The times will be filled in
// below as well, since we should only be creating a
// new directory entry when doSetTime is true.
de = DirEntry{
EntryInfo: EntryInfo{
Type: entryType,
Size: 0,
},
}
// If we're creating a new directory entry, the
// parent's times must be set as well.
nextDoSetTime = true
}
currBlock = prevDblock
nextName = prevDir.tailName()
}
if de.Type == Dir {
// TODO: When we use indirect dir blocks,
// we'll have to calculate the size some other
// way.
de.Size = uint64(plainSize)
}
if prevIdx < 0 {
md.AddUpdate(md.data.Dir.BlockInfo, info)
} else if prevDe, ok := prevDblock.Children[currName]; ok {
md.AddUpdate(prevDe.BlockInfo, info)
} else {
// this is a new block
md.AddRefBlock(info)
}
if len(refPath.path) > 1 {
refPath = *refPath.parentPath()
}
de.BlockInfo = info
if doSetTime {
if mtime {
de.Mtime = now
}
if ctime {
de.Ctime = now
}
}
if !newDe.IsInitialized() {
newDe = de
}
if prevIdx < 0 {
md.data.Dir = de
} else {
prevDblock.Children[currName] = de
}
currName = nextName
// Stop before we get to the common ancestor; it will be taken care of
// on the next sync call
if prevIdx >= 0 && dir.path[prevIdx].BlockPointer == stopAt {
// Put this back into the cache as dirty -- the next
// syncBlock call will ready it.
dblock, ok := currBlock.(*DirBlock)
if !ok {
return path{}, DirEntry{}, nil, BadDataError{stopAt.ID}
}
lbc[stopAt] = dblock
break
}
doSetTime = nextDoSetTime
}
return newPath, newDe, bps, nil
}
// syncBlockLocked calls syncBlock under mdWriterLock.
func (fbo *folderBranchOps) syncBlockLocked(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.syncBlock(ctx, lState, uid, md, newBlock, dir, name,
entryType, mtime, ctime, stopAt, lbc)
}
// syncBlockForConflictResolution calls syncBlock unlocked, since
// conflict resolution can handle MD revision number conflicts
// correctly.
func (fbo *folderBranchOps) syncBlockForConflictResolution(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
return fbo.syncBlock(
ctx, lState, uid, md, newBlock, dir,
name, entryType, mtime, ctime, stopAt, lbc)
}
// entryType must not be Sym.
func (fbo *folderBranchOps) syncBlockAndCheckEmbedLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, lbc localBcache) (
path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return path{}, DirEntry{}, nil, err
}
newPath, newDe, bps, err := fbo.syncBlockLocked(
ctx, lState, uid, md, newBlock, dir, name, entryType, mtime,
ctime, stopAt, lbc)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// Do the block changes need their own blocks? Unembed only if
// this is the final call to this function with this MD.
if stopAt == zeroPtr {
bsplit := fbo.config.BlockSplitter()
if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) {
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes,
uid)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
}
return newPath, newDe, bps, nil
}
// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider other errors recoverable, e.g. ones that arise from
// present but corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
return isRecoverableBlockError(err)
}
func isRetriableError(err error, retries int) bool {
_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
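// finalizeBlocks caches every brand-new block from bps as a
// transient entry, skipping blocks that merely incref existing data.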
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
if bps == nil {
return nil
}
bcache := fbo.config.BlockCache()
for _, blockState := range bps.blockStates {
newPtr := blockState.blockPtr
// only cache this block if we made a brand new block, not if
// we just incref'd some other block.
if !newPtr.IsFirstRef() {
continue
}
if err := bcache.Put(newPtr, fbo.id(), blockState.block,
TransientEntry); err != nil {
return err
}
}
return nil
}
// Returns true if the passed error indicates a revision conflict.
func isRevisionConflict(err error) bool {
if err == nil {
return false
}
_, isConflictRevision := err.(MDServerErrorConflictRevision)
_, isConflictPrevRoot := err.(MDServerErrorConflictPrevRoot)
_, isConflictDiskUsage := err.(MDServerErrorConflictDiskUsage)
_, isConditionFailed := err.(MDServerErrorConditionFailed)
_, isConflictFolderMapping := err.(MDServerErrorConflictFolderMapping)
_, isJournal := err.(MDJournalConflictError)
return isConflictRevision || isConflictPrevRoot ||
isConflictDiskUsage || isConditionFailed ||
isConflictFolderMapping || isJournal
}
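// finalizeMDWriteLocked puts the given MD to the MD server, falling
// back to an unmerged put (and kicking off conflict resolution) on a
// revision conflict, then finalizes the new blocks, updates the local
// head, and notifies observers of the batched changes. mdWriterLock
// must be held by the caller.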
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
mdops := fbo.config.MDOps()
doUnmergedPut := true
mergedRev := MetadataRevisionUninitialized
oldPrevRoot := md.PrevRoot()
var mdID MdID
// This puts on a delay on any cancellations arriving to ctx. It is intended
// to work sort of like a critical section, except that there isn't an
// explicit call to exit the critical section. The cancellation, if any, is
// triggered after a timeout (i.e.
// fbo.config.DelayedCancellationGracePeriod()).
//
// The purpose of trying to avoid cancellation once we start MD write is to
// avoid having an unpredictable perceived MD state. That is, when
// runUnlessCanceled returns Canceled on cancellation, application receives
// an EINTR, and would assume the operation didn't succeed. But the MD write
// continues, and there's a chance the write will succeed, meaning the
// operation succeeds. This contradicts the application's perception
// through error code and can lead to horrible situations. An easily caught
// situation is when application calls Create with O_EXCL set, gets an EINTR
// while MD write succeeds, retries and gets an EEXIST error. If users hit
// Ctrl-C, this might not be a big deal. However, it also happens for other
// interrupts. For applications that use signals to communicate, e.g.
// SIGALRM and SIGUSR1, this can happen pretty often, which renders those applications broken.
if err = EnableDelayedCancellationWithGracePeriod(
ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
return err
}
// we don't explicitly clean up (by using a defer) CancellationDelayer here
// because sometimes fuse makes another call using the same ctx. For example, in
// fuse's Create call handler, a dir.Create is followed by an Attr call. If
// we do a deferred cleanup here, if an interrupt has been received, it can
// cause ctx to be canceled before Attr call finishes, which causes FUSE to
// return EINTR for the Create request. But at this point, the request may
// have already succeeded. Returning EINTR makes the application think the
// file was not created successfully.
if fbo.isMasterBranchLocked(lState) {
// only do a normal Put if we're not already staged.
mdID, err = mdops.Put(ctx, md)
if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
fbo.log.CDebugf(ctx, "Conflict: %v", err)
mergedRev = md.Revision()
if excl == WithExcl {
// If this was caused by an exclusive create, we shouldn't do an
// UnmergedPut, but rather try to get newest update from server, and
// retry afterwards.
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
return ExclOnUnmergedError{}
}
} else if err != nil {
return err
}
} else if excl == WithExcl {
return ExclOnUnmergedError{}
}
if doUnmergedPut {
// We're out of date, and this is not an exclusive write, so put it as an
// unmerged MD.
mdID, err = mdops.PutUnmerged(ctx, md)
if isRevisionConflict(err) {
// Self-conflicts are retried in `doMDWriteWithRetry`.
err = UnmergedSelfConflictError{err}
}
if err != nil {
return err
}
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), mergedRev)
} else {
fbo.setBranchIDLocked(lState, NullBranchID)
if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
// Queue this folder for rekey if the bit was set and it's not a copy.
// This is for the case where we're coming out of conflict resolution.
// So why don't we do this in finalizeResolution? Well, we do but we don't
// want to block on a rekey so we queue it. Because of that it may fail
// due to a conflict with some subsequent write. By also handling it here
// we'll always retry if we notice we haven't been successful in clearing
// the bit yet. Note that I haven't actually seen this happen but it seems
// theoretically possible.
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
}
md.loadCachedBlockChanges(bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
fbo.notifyBatchLocked(ctx, lState, irmd)
return nil
}
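// waitForJournalLocked waits for the TLF journal (if any) to flush
// completely, and returns an error if unflushed revisions remain or
// the last flush failed. mdWriterLock must be held by the caller.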
func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context,
lState *lockState, jServer *JournalServer) error {
fbo.mdWriterLock.AssertLocked(lState)
if !TLFJournalEnabled(fbo.config, fbo.id()) {
// Nothing to do.
return nil
}
if err := jServer.Wait(ctx, fbo.id()); err != nil {
return err
}
// Make sure everything flushed successfully. Since we're holding
// the writer lock, no other revisions could have snuck in.
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
return err
}
if jStatus.RevisionEnd != MetadataRevisionUninitialized {
return fmt.Errorf("Couldn't flush all MD revisions; current "+
"revision end for the journal is %d", jStatus.RevisionEnd)
}
if jStatus.LastFlushErr != "" {
return fmt.Errorf("Couldn't flush the journal: %s",
jStatus.LastFlushErr)
}
return nil
}
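// finalizeMDRekeyWriteLocked pushes the given rekey MD straight to
// the MD server (flushing and bypassing the journal if one is
// enabled) and updates the local head on success. A revision
// conflict re-enqueues the folder for rekey instead.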
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata,
lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
oldPrevRoot := md.PrevRoot()
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so wait for the journal to flush and then push
// straight to the server. TODO: we're holding the writer lock
// while flushing the journal here (just like for exclusive
// writes), which may end up blocking incoming writes for a long
// time. Rekeys are pretty rare, but if this becomes an issue
// maybe we should consider letting these hit the journal and
// scrubbing them when converting it to a branch.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
return err
}
mdOps = jServer.delegateMDOps
}
mdID, err := mdOps.Put(ctx, md)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
}
if isConflict {
// drop this block. we've probably collided with someone also
// trying to rekey the same folder but that's not necessarily
// the case. we'll queue another rekey just in case. it should
// be safe as it's idempotent. we don't want any rekeys present
// in unmerged history or that will just make a mess.
fbo.config.RekeyQueue().Enqueue(md.TlfID())
return RekeyConflictError{err}
}
fbo.setBranchIDLocked(lState, NullBranchID)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
md.loadCachedBlockChanges(nil)
var key kbfscrypto.VerifyingKey
if md.IsWriterMetadataCopiedSet() {
key = lastWriterVerifyingKey
} else {
var err error
key, err = fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
return fbo.setHeadSuccessorLocked(ctx, lState,
MakeImmutableRootMetadata(md, key, mdID, fbo.config.Clock().Now()),
rebased)
}
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) (
err error) {
lState := makeFBOLockState()
// Lock the folder so we can get an internally-consistent MD
// revision number.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
if md.MergedStatus() == Unmerged {
return UnexpectedUnmergedPutError{}
}
md.AddOp(gco)
md.SetLastGCRevision(gco.LatestRev)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
oldPrevRoot := md.PrevRoot()
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
// Don't allow garbage collection to put us into a conflicting
// state; just wait for the next period.
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
md.loadCachedBlockChanges(bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
fbo.notifyBatchLocked(ctx, lState, irmd)
return nil
}
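// syncBlockAndFinalizeLocked readies and puts the blocks for a
// single change rooted at dir/name, then finalizes the corresponding
// MD write. mdWriterLock must be held by the caller.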
func (fbo *folderBranchOps) syncBlockAndFinalizeLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, excl Excl) (de DirEntry, err error) {
fbo.mdWriterLock.AssertLocked(lState)
_, de, bps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newBlock, dir, name, entryType, mtime,
ctime, zeroPtr, nil)
if err != nil {
return DirEntry{}, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return DirEntry{}, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl)
if err != nil {
return DirEntry{}, err
}
return de, nil
}
func checkDisallowedPrefixes(name string) error {
for _, prefix := range disallowedPrefixes {
if strings.HasPrefix(name, prefix) {
return DisallowedPrefixError{name, prefix}
}
}
return nil
}
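// checkNewDirSize returns an error if adding an entry named newName
// would push the directory at dirPath past the configured maximum
// directory size.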
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata,
dirPath path, newName string) error {
// Check that the directory isn't past capacity already.
var currSize uint64
if dirPath.hasValidParent() {
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
if err != nil {
return err
}
currSize = de.Size
} else {
// dirPath is just the root.
currSize = md.data.Dir.Size
}
// Just an approximation since it doesn't include the size of the
// directory entry itself, but that's ok -- at worst it'll be an
// off-by-one-entry error, and since there's a maximum name length
// we can't get in too much trouble.
if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
return DirTooBigError{dirPath, currSize + uint64(len(newName)),
fbo.config.MaxDirBytes()}
}
return nil
}
// PathType returns path type
func (fbo *folderBranchOps) PathType() PathType {
if fbo.folderBranch.Tlf.IsPublic() {
return PublicPathType
}
return PrivatePathType
}
// canonicalPath returns full canonical path for dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return "", err
}
return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}
// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked(
ctx context.Context, lState *lockState, dir Node, name string,
entryType EntryType, excl Excl) (Node, DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(name); err != nil {
return nil, DirEntry{}, err
}
if uint32(len(name)) > fbo.config.MaxNameBytes() {
return nil, DirEntry{},
NameTooLongError{name, fbo.config.MaxNameBytes()}
}
filename, err := fbo.canonicalPath(ctx, dir, name)
if err != nil {
return nil, DirEntry{}, err
}
// verify we have permission to write
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return nil, DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return nil, DirEntry{}, err
}
// does name already exist?
if _, ok := dblock.Children[name]; ok {
return nil, DirEntry{}, NameExistsError{name}
}
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), dirPath, name); err != nil {
return nil, DirEntry{}, err
}
co, err := newCreateOp(name, dirPath.tailPointer(), entryType)
if err != nil {
return nil, DirEntry{}, err
}
md.AddOp(co)
// create new data block
var newBlock Block
// XXX: for now, put a unique ID in every new block, to make sure it
// has a unique block ID. This may not be needed once we have encryption.
if entryType == Dir {
newBlock = &DirBlock{
Children: make(map[string]DirEntry),
}
} else {
newBlock = &FileBlock{}
}
de, err := fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, newBlock, dirPath, name, entryType,
true, true, zeroPtr, excl)
if err != nil {
return nil, DirEntry{}, err
}
node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
return node, de, nil
}
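// doMDWriteWithRetry runs fn under mdWriterLock, releasing the lock
// and retrying whenever fn fails with a retriable error (waiting for
// conflict resolution or forcing a sync of the newest unmerged head,
// as appropriate), up to the maximum number of retries.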
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
lState *lockState, fn func(lState *lockState) error) error {
doUnlock := false
defer func() {
if doUnlock {
fbo.mdWriterLock.Unlock(lState)
}
}()
for i := 0; ; i++ {
fbo.mdWriterLock.Lock(lState)
doUnlock = true
// Make sure we haven't been canceled before doing anything
// too serious.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := fn(lState)
if isRetriableError(err, i) {
fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
// Release the lock to give someone else a chance
doUnlock = false
fbo.mdWriterLock.Unlock(lState)
if _, ok := err.(ExclOnUnmergedError); ok {
if err = fbo.cr.Wait(ctx); err != nil {
return err
}
} else if _, ok := err.(UnmergedSelfConflictError); ok {
// We can only get here if we are already on an
// unmerged branch and an errored PutUnmerged did make
// it to the mdserver. Let's force sync, with a fresh
// context so the observer doesn't ignore the updates
// (but tie the cancels together).
newCtx := fbo.ctxWithFBOID(context.Background())
newCtx, cancel := context.WithCancel(newCtx)
defer cancel()
go func() {
select {
case <-ctx.Done():
cancel()
case <-newCtx.Done():
}
}()
fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+
"(%v); forcing a sync", err)
err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState)
if err != nil {
return err
}
cancel()
}
continue
} else if err != nil {
return err
}
return nil
}
}
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
ctx context.Context, fn func(lState *lockState) error) error {
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
return fbo.doMDWriteWithRetry(ctx, lState, fn)
})
}
func (fbo *folderBranchOps) CreateDir(
ctx context.Context, dir Node, path string) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateDir %p %s", dir.GetID(), path)
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID())
}
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl)
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) CreateFile(
ctx context.Context, dir Node, path string, isExec bool, excl Excl) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateFile %p %s isExec=%v Excl=%s",
dir.GetID(), path, isExec, excl)
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID())
}
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var entryType EntryType
if isExec {
entryType = Exec
} else {
entryType = File
}
// If journaling is turned on, an exclusive create may end up on a
// conflict branch.
if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.")
excl = NoExcl
}
if excl == WithExcl {
if err = fbo.cr.Wait(ctx); err != nil {
return nil, EntryInfo{}, err
}
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl)
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) createLinkLocked(
ctx context.Context, lState *lockState, dir Node, fromName string,
toPath string) (DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(fromName); err != nil {
return DirEntry{}, err
}
if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
return DirEntry{},
NameTooLongError{fromName, fbo.config.MaxNameBytes()}
}
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return DirEntry{}, err
}
// TODO: validate inputs
// does name already exist?
if _, ok := dblock.Children[fromName]; ok {
return DirEntry{}, NameExistsError{fromName}
}
if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(),
dirPath, fromName); err != nil {
return DirEntry{}, err
}
co, err := newCreateOp(fromName, dirPath.tailPointer(), Sym)
if err != nil {
return DirEntry{}, err
}
md.AddOp(co)
// Create a direntry for the link, and then sync
now := fbo.nowUnixNano()
dblock.Children[fromName] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
Size: uint64(len(toPath)),
SymPath: toPath,
Mtime: now,
Ctime: now,
},
}
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *dirPath.parentPath(),
dirPath.tailName(), Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return DirEntry{}, err
}
return dblock.Children[fromName], nil
}
func (fbo *folderBranchOps) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateLink %p %s -> %s",
dir.GetID(), fromName, toPath)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return EntryInfo{}, err
}
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set ei directly, as that can cause a race when
// the Create is canceled.
de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return EntryInfo{}, err
}
return retEntryInfo, nil
}
// unrefEntry modifies md to unreference all relevant blocks for the
// given entry.
func (fbo *folderBranchOps) unrefEntry(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, de DirEntry,
name string) error {
md.AddUnrefBlock(de.BlockInfo)
// construct a path for the child so we can unlink with it.
childPath := dir.ChildPath(name, de.BlockPointer)
// If this is an indirect block, we need to delete all of its
// children as well. NOTE: non-empty directories can't be
// removed, so no need to check for indirect directory blocks
// here.
if de.Type == File || de.Type == Exec {
blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
ctx, lState, md.ReadOnly(), childPath)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
}
for _, blockInfo := range blockInfos {
md.AddUnrefBlock(blockInfo)
}
}
return nil
}
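// removeEntryLocked removes the named entry from the directory at
// dir: it records an rm op, unreferences the entry's blocks, deletes
// it from the directory block, and syncs the parent directory.
// mdWriterLock must be held by the caller.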
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, name string) error {
fbo.mdWriterLock.AssertLocked(lState)
pblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dir, blockWrite)
if err != nil {
return err
}
// make sure the entry exists
de, ok := pblock.Children[name]
if !ok {
return NoSuchNameError{name}
}
ro, err := newRmOp(name, dir.tailPointer())
if err != nil {
return err
}
md.AddOp(ro)
err = fbo.unrefEntry(ctx, lState, md, dir, de, name)
if err != nil {
return err
}
// the actual unlink
delete(pblock.Children, name)
// sync the parent directory
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, pblock, *dir.parentPath(), dir.tailName(),
Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
lState *lockState, dir Node, dirName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
pblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return err
}
de, ok := pblock.Children[dirName]
if !ok {
return NoSuchNameError{dirName}
}
// construct a path for the child so we can check for an empty dir
childPath := dirPath.ChildPath(dirName, de.BlockPointer)
childBlock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), childPath, blockRead)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
} else if len(childBlock.Children) > 0 {
return DirNotEmptyError{dirName}
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, dirName)
}
func (fbo *folderBranchOps) RemoveDir(
ctx context.Context, dir Node, dirName string) (err error) {
fbo.log.CDebugf(ctx, "RemoveDir %p %s", dir.GetID(), dirName)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.removeDirLocked(ctx, lState, dir, dirName)
})
}
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
name string) (err error) {
fbo.log.CDebugf(ctx, "RemoveEntry %p %s", dir.GetID(), name)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, name)
})
}
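// renameLocked moves oldParent/oldName to newParent/newName within
// the same TLF, unreferencing any entry being overwritten, syncing
// both parent paths up to their common ancestor, and finalizing the
// MD write. mdWriterLock must be held by the caller.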
func (fbo *folderBranchOps) renameLocked(
ctx context.Context, lState *lockState, oldParent path,
oldName string, newParent path, newName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
oldPBlock, newPBlock, newDe, lbc, err := fbo.blocks.PrepRename(
ctx, lState, md, oldParent, oldName, newParent, newName)
if err != nil {
return err
}
// does name exist?
if de, ok := newPBlock.Children[newName]; ok {
// Usually higher-level programs check these, but just in case.
if de.Type == Dir && newDe.Type != Dir {
return NotDirError{newParent.ChildPathNoPtr(newName)}
} else if de.Type != Dir && newDe.Type == Dir {
return NotFileError{newParent.ChildPathNoPtr(newName)}
}
if de.Type == Dir {
// The directory must be empty.
oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState,
md.ReadOnly(), de.BlockPointer, newParent.Branch,
newParent.ChildPathNoPtr(newName))
if err != nil {
return err
}
if len(oldTargetDir.Children) != 0 {
fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+
" (%s/%s) not allowed.", newParent, newName)
return DirNotEmptyError{newName}
}
}
// Delete the old block pointed to by this direntry.
err := fbo.unrefEntry(ctx, lState, md, newParent, de, newName)
if err != nil {
return err
}
}
// only the ctime changes
newDe.Ctime = fbo.nowUnixNano()
newPBlock.Children[newName] = newDe
delete(oldPBlock.Children, oldName)
// find the common ancestor
var i int
found := false
// the root block will always be the same, so start at number 1
for i = 1; i < len(oldParent.path) && i < len(newParent.path); i++ {
if oldParent.path[i].ID != newParent.path[i].ID {
found = true
i--
break
}
}
if !found {
// if we couldn't find one, then the common ancestor is the
// last node in the shorter path
if len(oldParent.path) < len(newParent.path) {
i = len(oldParent.path) - 1
} else {
i = len(newParent.path) - 1
}
}
commonAncestor := oldParent.path[i].BlockPointer
oldIsCommon := oldParent.tailPointer() == commonAncestor
newIsCommon := newParent.tailPointer() == commonAncestor
newOldPath := path{FolderBranch: oldParent.FolderBranch}
var oldBps *blockPutState
if oldIsCommon {
if newIsCommon {
// if old and new are both the common ancestor, there is
// nothing to do (syncBlock will take care of everything)
} else {
// If the old one is common and the new one is
// not, then the last
// syncBlockAndCheckEmbedLocked call will need
// to access the old one.
lbc[oldParent.tailPointer()] = oldPBlock
}
} else {
if newIsCommon {
// If the new one is common, then the first
// syncBlockAndCheckEmbedLocked call will need to access
// it.
lbc[newParent.tailPointer()] = newPBlock
}
// The old one is not the common ancestor, so we need to sync it.
// TODO: optimize by pushing blocks from both paths in parallel
newOldPath, _, oldBps, err = fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, oldPBlock, *oldParent.parentPath(), oldParent.tailName(),
Dir, true, true, commonAncestor, lbc)
if err != nil {
return err
}
}
newNewPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newPBlock, *newParent.parentPath(), newParent.tailName(),
Dir, true, true, zeroPtr, lbc)
if err != nil {
return err
}
// newOldPath is really just a prefix now. A copy is necessary as an
// append could cause the new path to contain nodes from the old path.
newOldPath.path = append(make([]pathNode, i+1, i+1), newOldPath.path...)
copy(newOldPath.path[:i+1], newNewPath.path[:i+1])
// merge and finalize the blockPutStates
if oldBps != nil {
newBps.mergeOtherBps(oldBps)
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), newBps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(),
fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *newBps)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, newBps, NoExcl)
}
func (fbo *folderBranchOps) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) (err error) {
fbo.log.CDebugf(ctx, "Rename %p/%s -> %p/%s", oldParent.GetID(),
oldName, newParent.GetID(), newName)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(newParent)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
if err != nil {
return err
}
newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
if err != nil {
return err
}
// only works for paths within the same topdir
if oldParentPath.FolderBranch != newParentPath.FolderBranch {
return RenameAcrossDirsError{}
}
return fbo.renameLocked(ctx, lState, oldParentPath, oldName,
newParentPath, newName)
})
}
func (fbo *folderBranchOps) Read(
ctx context.Context, file Node, dest []byte, off int64) (
n int64, err error) {
fbo.log.CDebugf(ctx, "Read %p %d %d", file.GetID(), len(dest), off)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return 0, err
}
filePath, err := fbo.pathFromNodeForRead(file)
if err != nil {
return 0, err
}
{
// It seems git isn't handling EINTR from some of its read calls (likely
// fread), which causes it to get corrupted data (which leads to coredumps
// later) when a read system call on pack files gets interrupted. This
// enables delayed cancellation for Read if the file path contains `.git`.
//
// TODO: get a patch in git, wait for sufficiently long time for people to
// upgrade, and remove this.
		// Allow turning this feature off by env var, to make life easier when
		// we try to fix git.
if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet {
for _, n := range filePath.path {
if n.Name == ".git" {
EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod())
break
}
}
}
}
// Don't let the goroutine below write directly to the return
// variable, since if the context is canceled the goroutine might
// outlast this function call, and end up in a read/write race
// with the caller.
var bytesRead int64
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// verify we have permission to read
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
bytesRead, err = fbo.blocks.Read(
ctx, lState, md.ReadOnly(), filePath, dest, off)
return err
})
if err != nil {
return 0, err
}
return bytesRead, nil
}
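// Write writes data to the given file at offset off, marking the
// node dirty until the next sync.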
func (fbo *folderBranchOps) Write(
ctx context.Context, file Node, data []byte, off int64) (err error) {
fbo.log.CDebugf(ctx, "Write %p %d %d", file.GetID(), len(data), off)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), file, data, off)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
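// Truncate changes the size of the given file, marking the node
// dirty until the next sync.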
func (fbo *folderBranchOps) Truncate(
ctx context.Context, file Node, size uint64) (err error) {
fbo.log.CDebugf(ctx, "Truncate %p %d", file.GetID(), size)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Truncate(
ctx, lState, md.ReadOnly(), file, size)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) setExLocked(
ctx context.Context, lState *lockState, file path,
ex bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
	// If the entry is a symlink or a directory, do nothing (to match
	// ext4 behavior for symlinks).
if de.Type == Sym || de.Type == Dir {
fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
return nil
}
if ex && (de.Type == File) {
de.Type = Exec
} else if !ex && (de.Type == Exec) {
de.Type = File
} else {
// Treating this as a no-op, without updating the ctime, is a
// POSIX violation, but it's an important optimization to keep
// permissions-preserving rsyncs fast.
fbo.log.CDebugf(ctx, "Ignoring no-op setex")
return nil
}
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
exAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this setex.
if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
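// SetEx sets or clears the executable bit on the given file.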
func (fbo *folderBranchOps) SetEx(
ctx context.Context, file Node, ex bool) (err error) {
fbo.log.CDebugf(ctx, "SetEx %p %t", file.GetID(), ex)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setExLocked(ctx, lState, filePath, ex)
})
}
func (fbo *folderBranchOps) setMtimeLocked(
ctx context.Context, lState *lockState, file path,
mtime *time.Time) error {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
de.Mtime = mtime.UnixNano()
// setting the mtime counts as changing the file MD, so must set ctime too
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
mtimeAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this
// setmtime.
if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
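// SetMtime sets the modification time of the given file; a nil mtime
// is ignored.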
func (fbo *folderBranchOps) SetMtime(
ctx context.Context, file Node, mtime *time.Time) (err error) {
fbo.log.CDebugf(ctx, "SetMtime %p %v", file.GetID(), mtime)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if mtime == nil {
// Can happen on some OSes (e.g. OSX) when trying to set the atime only
return nil
}
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setMtimeLocked(ctx, lState, filePath, mtime)
})
}
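// syncLocked flushes all dirty blocks of the given file to the
// servers and finalizes a new MD revision. It reports whether the
// file is still dirty. mdWriterLock must be taken by the caller.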
func (fbo *folderBranchOps) syncLocked(ctx context.Context,
lState *lockState, file path) (stillDirty bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
// if the cache for this file isn't dirty, we're done
if !fbo.blocks.IsDirty(lState, file) {
return false, nil
}
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who sync clean files on close
// would get an error.
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return true, err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this sync.
if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
file.tailPointer())
// Removing the cached info here is a little sketchy,
// since there's no guarantee that this sync comes
// from closing the file, and we still want to serve
// stat calls accurately if the user still has an open
// handle to this file. TODO: Hook this in with the
// node cache GC logic to be perfectly accurate.
return true, fbo.blocks.ClearCacheInfo(lState, file)
}
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return true, err
}
// notify the daemon that a write is being performed
fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
// Filled in by doBlockPuts below.
var blocksToRemove []BlockPointer
fblock, bps, lbc, syncState, err :=
fbo.blocks.StartSync(ctx, lState, md, uid, file)
defer func() {
fbo.blocks.CleanupSyncState(
ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
}()
if err != nil {
return true, err
}
newPath, _, newBps, err :=
fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, fblock, *file.parentPath(),
file.tailName(), File, true, true, zeroPtr, lbc)
if err != nil {
return true, err
}
bps.mergeOtherBps(newBps)
// Note: We explicitly don't call fbo.fbm.cleanUpBlockState here
// when there's an error, because it's possible some of the blocks
// will be reused in a future attempt at this same sync, and we
// don't want them cleaned up in that case. Instead, the
// FinishSync call below will take care of that.
blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return true, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl)
if err != nil {
return true, err
}
// At this point, all reads through the old path (i.e., file)
// see writes that happened since StartSync, whereas all reads
// through the new path (newPath) don't.
//
// TODO: This isn't completely correct, since reads that
// happen after a write should always see the new data.
//
// After FinishSync succeeds, then reads through both the old
// and the new paths will see the writes that happened during
// the sync.
return fbo.blocks.FinishSync(ctx, lState, file, newPath,
md.ReadOnly(), syncState, fbo.fbm)
}
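// Sync flushes any outstanding writes for the given file to the
// servers.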
func (fbo *folderBranchOps) Sync(ctx context.Context, file Node) (err error) {
fbo.log.CDebugf(ctx, "Sync %p", file.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return
}
var stillDirty bool
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
stillDirty, err = fbo.syncLocked(ctx, lState, filePath)
return err
})
if err != nil {
return err
}
if !stillDirty {
fbo.status.rmDirtyNode(file)
}
return nil
}
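// FolderStatus returns the status of the given folder-branch, along
// with a channel that signals status updates.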
func (fbo *folderBranchOps) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
fbo.log.CDebugf(ctx, "Status")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return FolderBranchStatus{}, nil,
WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.status.getStatus(ctx, &fbo.blocks)
}
func (fbo *folderBranchOps) Status(
ctx context.Context) (
fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
// It's the caller's responsibility to make sure
// RegisterForChanges isn't called twice for the same Observer
fbo.observers.add(obs)
return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
fbo.observers.remove(obs)
return nil
}
// notifyBatchLocked sends out a notification for the most recent op
// in md.
func (fbo *folderBranchOps) notifyBatchLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) {
fbo.headLock.AssertLocked(lState)
lastOp := md.data.Changes.Ops[len(md.data.Changes.Ops)-1]
fbo.notifyOneOpLocked(ctx, lState, lastOp, md)
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
}
// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
// Record which pointers are new to this update, and thus worth
// searching.
newPtrs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, update := range op.allUpdates() {
newPtrs[update.Ref] = true
}
for _, ref := range op.Refs() {
newPtrs[ref] = true
}
}
nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
[]BlockPointer{ptr}, newPtrs, md, md.data.Dir.BlockPointer)
if err != nil {
return nil, err
}
n, ok := nodeMap[ptr]
if !ok {
return nil, NodeNotFoundError{ptr}
}
return n, nil
}
func (fbo *folderBranchOps) unlinkFromCache(op op, oldDir BlockPointer,
node Node, name string) error {
// The entry could be under any one of the unref'd blocks, and
// it's safe to perform this when the pointer isn't real, so just
// try them all to avoid the overhead of looking up the right
// pointer in the old version of the block.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
childPath := p.ChildPathNoPtr(name)
// revert the parent pointer
childPath.path[len(childPath.path)-2].BlockPointer = oldDir
for _, ptr := range op.Unrefs() {
childPath.path[len(childPath.path)-1].BlockPointer = ptr
fbo.nodeCache.Unlink(ptr.Ref(), childPath)
}
return nil
}
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
lState *lockState, op op, md ImmutableRootMetadata) {
fbo.headLock.AssertLocked(lState)
fbo.blocks.UpdatePointers(lState, op)
var changes []NodeChange
switch realOp := op.(type) {
default:
return
case *createOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %p",
realOp.NewName, node.GetID())
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.NewName},
})
case *rmOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %p",
realOp.OldName, node.GetID())
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.OldName},
})
// If this node exists, then the child node might exist too,
// and we need to unlink it in the node cache.
err := fbo.unlinkFromCache(op, realOp.Dir.Unref, node, realOp.OldName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
case *renameOp:
oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref())
if oldNode != nil {
changes = append(changes, NodeChange{
Node: oldNode,
DirUpdated: []string{realOp.OldName},
})
}
var newNode Node
if realOp.NewDir.Ref != zeroPtr {
newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref())
if newNode != nil {
changes = append(changes, NodeChange{
Node: newNode,
DirUpdated: []string{realOp.NewName},
})
}
} else {
newNode = oldNode
if oldNode != nil {
// Add another name to the existing NodeChange.
changes[len(changes)-1].DirUpdated =
append(changes[len(changes)-1].DirUpdated, realOp.NewName)
}
}
if oldNode != nil {
var newNodeID NodeID
if newNode != nil {
newNodeID = newNode.GetID()
}
fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%p to %s/%p",
realOp.Renamed, realOp.OldName, oldNode.GetID(), realOp.NewName,
newNodeID)
if newNode == nil {
if childNode :=
fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil {
// if the childNode exists, we still have to update
// its path to go through the new node. That means
// creating nodes for all the intervening paths.
// Unfortunately we don't have enough information to
// know what the newPath is; we have to guess it from
// the updates.
var err error
newNode, err =
fbo.searchForNode(ctx, realOp.NewDir.Ref, md.ReadOnly())
if newNode == nil {
fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
err)
}
}
}
if newNode != nil {
// If new node exists as well, unlink any previously
// existing entry and move the node.
var unrefPtr BlockPointer
if oldNode != newNode {
unrefPtr = realOp.NewDir.Unref
} else {
unrefPtr = realOp.OldDir.Unref
}
err := fbo.unlinkFromCache(op, unrefPtr, newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
err = fbo.nodeCache.Move(realOp.Renamed.Ref(), newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't move node in cache: %v", err)
return
}
}
}
case *syncOp:
node := fbo.nodeCache.Get(realOp.File.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %p",
len(realOp.Writes), node.GetID())
changes = append(changes, NodeChange{
Node: node,
FileUpdated: realOp.Writes,
})
case *setAttrOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %p",
realOp.Attr, realOp.Name, node.GetID())
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return
}
childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
ctx, lState, md.ReadOnly(), p, realOp)
if err != nil {
// TODO: Log error?
return
}
if childNode == nil {
return
}
changes = append(changes, NodeChange{
Node: childNode,
})
case *GCOp:
// Unreferenced blocks in a GCOp mean that we shouldn't cache
// them anymore
bcache := fbo.config.BlockCache()
for _, ptr := range realOp.Unrefs() {
if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
fbo.log.CDebugf(ctx,
"Couldn't delete transient entry for %v: %v", ptr, err)
}
}
case *resolutionOp:
// If there are any unrefs of blocks that have a node, this is an
// implied rmOp (see KBFS-1424).
reverseUpdates := make(map[BlockPointer]BlockPointer)
for _, unref := range op.Unrefs() {
// TODO: I will add logic here to unlink and invalidate any
// corresponding unref'd nodes.
node := fbo.nodeCache.Get(unref.Ref())
if node == nil {
// TODO: even if we don't have the node that was
// unreferenced, we might have its parent, and that
// parent might need an invalidation.
continue
}
// If there is a node, unlink and invalidate.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
continue
}
if !p.hasValidParent() {
fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
continue
}
parentPath := p.parentPath()
parentNode := fbo.nodeCache.Get(parentPath.tailPointer().Ref())
if parentNode != nil {
changes = append(changes, NodeChange{
Node: parentNode,
DirUpdated: []string{p.tailName()},
})
}
fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %p",
p.tailPointer(), node.GetID())
// Revert the path back to the original BlockPointers,
// before the updates were applied.
if len(reverseUpdates) == 0 {
for _, update := range op.allUpdates() {
reverseUpdates[update.Ref] = update.Unref
}
}
for i, pNode := range p.path {
if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
p.path[i].BlockPointer = oldPtr
}
}
fbo.nodeCache.Unlink(p.tailPointer().Ref(), p)
}
if len(changes) == 0 {
return
}
}
fbo.observers.batchChanges(ctx, changes)
}
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) MetadataRevision {
fbo.headLock.AssertAnyLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head.Revision()
}
return MetadataRevisionUninitialized
}
func (fbo *folderBranchOps) getCurrMDRevision(
lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.getCurrMDRevisionLocked(lState)
}
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// If there's anything in the journal, don't apply these MDs.
// Wait for CR to happen.
if fbo.isMasterBranchLocked(lState) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return err
}
if mergedRev != MetadataRevisionUninitialized {
fbo.log.CDebugf(ctx,
"Ignoring fetched revisions while MDs are in journal")
return nil
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// if we have staged changes, ignore all updates until conflict
// resolution kicks in. TODO: cache these for future use.
if !fbo.isMasterBranchLocked(lState) {
if len(rmds) > 0 {
latestMerged := rmds[len(rmds)-1]
// If we're running a journal, don't trust our own updates
// here because they might have come from our own journal
// before the conflict was detected. Assume we'll hear
// about the conflict via callbacks from the journal.
if TLFJournalEnabled(fbo.config, fbo.id()) {
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
if key == latestMerged.LastModifyingWriterVerifyingKey() {
return UnmergedError{}
}
}
// setHeadLocked takes care of merged case
fbo.setLatestMergedRevisionLocked(
ctx, lState, latestMerged.Revision(), false)
unmergedRev := MetadataRevisionUninitialized
if fbo.head != (ImmutableRootMetadata{}) {
unmergedRev = fbo.head.Revision()
}
fbo.cr.Resolve(unmergedRev, latestMerged.Revision())
}
return UnmergedError{}
}
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return errors.New("Ignoring MD updates while writes are dirty")
}
appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
for _, rmd := range rmds {
// check that we're applying the expected MD revision
if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
// Already caught up!
continue
}
if err := isReadableOrError(ctx, fbo.config, rmd.ReadOnly()); err != nil {
return err
}
err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
if err != nil {
return err
}
// No new operations in these.
if rmd.IsWriterMetadataCopiedSet() {
continue
}
for _, op := range rmd.data.Changes.Ops {
fbo.notifyOneOpLocked(ctx, lState, op, rmd)
}
appliedRevs = append(appliedRevs, rmd)
}
if len(appliedRevs) > 0 {
fbo.editHistory.UpdateHistory(ctx, appliedRevs)
}
return nil
}
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// go backwards through the updates
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
// on undo, it's ok to re-apply the current revision since you
// need to invert all of its ops.
//
// This duplicates a check in
// fbo.setHeadPredecessorLocked. TODO: Remove this
// duplication.
if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
return MDUpdateInvertError{rmd.Revision(),
fbo.getCurrMDRevisionLocked(lState)}
}
// TODO: Check that the revisions are equal only for
// the first iteration.
if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
}
// iterate the ops in reverse and invert each one
ops := rmd.data.Changes.Ops
for j := len(ops) - 1; j >= 0; j-- {
io, err := invertOpForLocalNotifications(ops[j])
if err != nil {
fbo.log.CWarningf(ctx,
"got error %v when invert op %v; "+
"skipping. Open file handles "+
"may now be in an invalid "+
"state, which can be fixed by "+
"either closing them all or "+
"restarting KBFS.",
err, ops[j])
continue
}
fbo.notifyOneOpLocked(ctx, lState, io, rmd)
}
}
// TODO: update the edit history?
return nil
}
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.latestMergedRevision
}
// The caller must hold fbo.headLock.
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev MetadataRevision, allowBackward bool) {
fbo.headLock.AssertLocked(lState)
if rev == MetadataRevisionUninitialized {
panic("Cannot set latest merged revision to an uninitialized value")
}
if fbo.latestMergedRevision < rev || allowBackward {
fbo.latestMergedRevision = rev
fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
} else {
fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
}
}
// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
lState *lockState, applyFunc applyMDUpdatesFunc) error {
// first look up all MD revisions newer than my current head
start := fbo.getLatestMergedRevision(lState) + 1
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start)
if err != nil {
return err
}
err = applyFunc(ctx, lState, rmds)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
lState *lockState) error {
fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
// We can only ever be at most one revision behind, so fetch the
// latest unmerged revision and apply it as a successor.
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
if err != nil {
return err
}
if md == (ImmutableRootMetadata{}) {
// There is no unmerged revision, oops!
return errors.New("Couldn't find an unmerged head")
}
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if fbo.bid != bid {
// The branches switched (apparently CR completed), so just
// try again.
fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
return nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
return err
}
fbo.notifyBatchLocked(ctx, lState, md)
if err := fbo.config.MDCache().Put(md); err != nil {
return err
}
return nil
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
// TLF's current unmerged branch, between the
// merge point for the branch and the current head. The returned MDs
// are the same instances that are stored in the MD cache, so they
// should be modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
// acquire mdWriterLock to read the current branch ID.
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
bid, fbo.getCurrMDRevision(lState))
}
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
fbo.bid, fbo.getCurrMDRevision(lState))
}
// Returns a list of block pointers that were created during the
// staged era.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) ([]BlockPointer, error) {
fbo.mdWriterLock.AssertLocked(lState)
currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return nil, err
}
err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
if err != nil {
return nil, err
}
// We have arrived at the branch point. The new root is
// the previous revision from the current head. Find it
// and apply. TODO: somehow fake the current head into
// being currHead-1, so that future calls to
// applyMDUpdates will fetch this along with the rest of
// the updates.
fbo.setBranchIDLocked(lState, NullBranchID)
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
currHead, Merged)
if err != nil {
return nil, err
}
err = func() error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
return nil
}()
if err != nil {
return nil, err
}
// Return all new refs
var unmergedPtrs []BlockPointer
for _, rmd := range unmergedRmds {
for _, op := range rmd.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr != zeroPtr {
unmergedPtrs = append(unmergedPtrs, ptr)
}
}
for _, update := range op.allUpdates() {
if update.Ref != zeroPtr {
unmergedPtrs = append(unmergedPtrs, update.Ref)
}
}
}
}
return unmergedPtrs, nil
}
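// unstageLocked undoes all unmerged updates, prunes the branch on
// the server, catches back up with the merged branch, and records
// the newly-unreferenced blocks in a resolutionOp. mdWriterLock must
// be taken by the caller.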
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
lState *lockState) error {
fbo.mdWriterLock.AssertLocked(lState)
// fetch all of my unstaged updates, and undo them one at a time
bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return err
}
	// let the server know we no longer need this branch
if !wasMasterBranch {
err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
if err != nil {
return err
}
}
// now go forward in time, if possible
err = fbo.getAndApplyMDUpdates(ctx, lState,
fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
// Finally, create a resolutionOp with the newly-unref'd pointers.
resOp := newResolutionOp()
for _, ptr := range unmergedPtrs {
resOp.AddUnrefBlock(ptr)
}
md.AddOp(resOp)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl)
}
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "UnstageForTesting")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
if fbo.isMasterBranch(lState) {
// no-op
return nil
}
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
		// Launch unstaging in a new goroutine, because we don't want to
		// use the provided context: upper layers might ignore our
		// notifications if we do. But we still want to wait for that
		// context to cancel.
c := make(chan error, 1)
freshCtx, cancel := fbo.newCtxWithFBOID()
defer cancel()
fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
go func() {
lState := makeFBOLockState()
c <- fbo.doMDWriteWithRetry(ctx, lState,
func(lState *lockState) error {
return fbo.unstageLocked(freshCtx, lState)
})
}()
select {
case err := <-c:
return err
case <-ctx.Done():
return ctx.Err()
}
})
}
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
lState *lockState, promptPaper bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
if !fbo.isMasterBranchLocked(lState) {
return errors.New("can't rekey while staged")
}
head := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) {
// If we already have a cached revision, make sure we're
// up-to-date with the latest revision before inspecting the
// metadata, since Rekey doesn't let us go into CR mode, and
// we don't actually get folder update notifications when the
// rekey bit is set, just a "folder needs rekey" update.
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdatesLocked); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); !ok ||
applyErr.rev != applyErr.curr {
return err
}
}
}
md, lastWriterVerifyingKey, rekeyWasSet, err :=
fbo.getMDForRekeyWriteLocked(ctx, lState)
if err != nil {
return err
}
if fbo.rekeyWithPromptTimer != nil {
if !promptPaper {
fbo.log.CDebugf(ctx, "rekeyWithPrompt superseded before it fires.")
} else if !md.IsRekeySet() {
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
// If the rekey bit isn't set, then some other device
// already took care of our request, and we can stop
// early. Note that if this FBO never registered for
// updates, then we might not yet have seen the update, in
// which case we'll still try to rekey but it will fail as
// a conflict.
fbo.log.CDebugf(ctx, "rekeyWithPrompt not needed because the "+
"rekey bit was already unset.")
return nil
}
}
rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
Rekey(ctx, md, promptPaper)
stillNeedsRekey := false
switch err.(type) {
case nil:
// TODO: implement a "forced" option that rekeys even when the
// devices haven't changed?
if !rekeyDone {
fbo.log.CDebugf(ctx, "No rekey necessary")
return nil
}
// Clear the rekey bit if any.
md.clearRekeyBit()
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
// Readers can't clear the last revision, because:
// 1) They don't have access to the writer metadata, so can't clear the
// block changes.
// 2) Readers need the MetadataFlagWriterMetadataCopied bit set for
// MDServer to authorize the write.
// Without this check, MDServer returns an Unauthorized error.
if md.GetTlfHandle().IsWriter(uid) {
md.clearLastRevision()
}
case RekeyIncompleteError:
if !rekeyDone && rekeyWasSet {
// The rekey bit was already set, and there's nothing else
		// we can do, so don't put any new revisions.
fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
return nil
}
// Rekey incomplete, fallthrough without early exit, to ensure
// we write the metadata with any potential changes
fbo.log.CDebugf(ctx,
"Rekeyed reader devices, but still need writer rekey")
case NeedOtherRekeyError:
stillNeedsRekey = true
case NeedSelfRekeyError:
stillNeedsRekey = true
default:
if err == context.DeadlineExceeded {
fbo.log.CDebugf(ctx, "Paper key prompt timed out")
// Reschedule the prompt in the timeout case.
stillNeedsRekey = true
} else {
return err
}
}
if stillNeedsRekey {
fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
// If we didn't have read access, then we don't have any
// unlocked paper keys. Wait for some time, and then if we
// still aren't rekeyed, try again but this time prompt the
// user for any known paper keys. We do this even if the
// rekey bit is already set, since we may have restarted since
// the previous rekey attempt, before prompting for the paper
// key. Only schedule this as a one-time event, since direct
// folder accesses from the user will also cause a
// rekeyWithPrompt.
//
// Only ever set the timer once.
if fbo.rekeyWithPromptTimer == nil {
d := fbo.config.RekeyWithPromptWaitTime()
fbo.log.CDebugf(ctx, "Scheduling a rekeyWithPrompt in %s", d)
fbo.rekeyWithPromptTimer = time.AfterFunc(d, fbo.rekeyWithPrompt)
}
if rekeyWasSet {
// Devices not yet keyed shouldn't set the rekey bit again
fbo.log.CDebugf(ctx, "Rekey bit already set")
return nil
}
// This device hasn't been keyed yet, fall through to set the rekey bit
}
// add an empty operation to satisfy assumptions elsewhere
md.AddOp(newRekeyOp())
// we still let readers push a new md block that we validate against reader
// permissions
err = fbo.finalizeMDRekeyWriteLocked(
ctx, lState, md, lastWriterVerifyingKey)
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
keyGen := md.LatestKeyGeneration()
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
// send rekey finish notification
handle := md.GetTlfHandle()
fbo.config.Reporter().Notify(ctx,
rekeyNotification(ctx, fbo.config, handle, true))
if !stillNeedsRekey && fbo.rekeyWithPromptTimer != nil {
fbo.log.CDebugf(ctx, "Scheduled rekey timer no longer needed")
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
}
return nil
}
func (fbo *folderBranchOps) rekeyWithPrompt() {
var err error
ctx := ctxWithRandomIDReplayable(
context.Background(), CtxRekeyIDKey, CtxRekeyOpID, fbo.log)
// Only give the user limited time to enter their paper key, so we
// don't wait around forever.
d := fbo.config.RekeyWithPromptWaitTime()
ctx, cancel := context.WithTimeout(ctx, d)
defer cancel()
if ctx, err = NewContextWithCancellationDelayer(ctx); err != nil {
panic(err)
}
fbo.log.CDebugf(ctx, "rekeyWithPrompt")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, true)
})
}
// Rekey rekeys the given folder.
func (fbo *folderBranchOps) Rekey(ctx context.Context, tlf tlf.ID) (err error) {
fbo.log.CDebugf(ctx, "Rekey")
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v", err)
}()
fb := FolderBranch{tlf, MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, false)
})
}
func (fbo *folderBranchOps) SyncFromServerForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
// A journal flush before CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
// Loop until we're fully updated on the master branch.
for {
if !fbo.isMasterBranch(lState) {
if err := fbo.cr.Wait(ctx); err != nil {
return err
}
// If we are still staged after the wait, then we have a problem.
if !fbo.isMasterBranch(lState) {
return fmt.Errorf("Conflict resolution didn't take us out of " +
"staging.")
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) > 0 {
for _, ref := range dirtyRefs {
fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
}
return errors.New("can't sync from server while dirty")
}
// A journal flush after CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdates); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); ok {
if applyErr.rev == applyErr.curr {
fbo.log.CDebugf(ctx, "Already up-to-date with server")
return nil
}
}
if _, isUnmerged := err.(UnmergedError); isUnmerged {
continue
}
return err
}
break
}
// Wait for all the asynchronous block archiving and quota
// reclamation to hit the block server.
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
if err := fbo.editHistory.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
return err
}
// A second journal flush if needed, to clear out any
// archive/remove calls caused by the above operations.
return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}
// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int
const (
// CtxFBOIDKey is the type of the tag for unique operation IDs
// within folderBranchOps.
CtxFBOIDKey CtxFBOTagKey = iota
)
// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
return ctxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
// No need to call NewContextReplayable since ctxWithFBOID calls
// ctxWithRandomIDReplayable, which attaches replayably.
ctx := fbo.ctxWithFBOID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
ctx, err := NewContextWithCancellationDelayer(ctx)
if err != nil {
panic(err)
}
return ctx, cancelFunc
}
// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbo.shutdownChan:
return ShutdownHappenedError{}
}
}
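// maybeFastForward jumps the local head directly to the server's
// current head when enough time and revisions have passed, instead
// of fetching and applying every intermediate revision.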
func (fbo *folderBranchOps) maybeFastForward(ctx context.Context,
lState *lockState, lastUpdate time.Time, currUpdate time.Time) (
fastForwardDone bool, err error) {
// Has it been long enough to try fast-forwarding?
if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) ||
!fbo.isMasterBranch(lState) {
return false, nil
}
fbo.log.CDebugf(ctx, "Checking head for possible "+
"fast-forwarding (last update time=%s)", lastUpdate)
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id())
if err != nil {
return false, err
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// If the journal has anything in it, don't fast-forward since we
// haven't finished flushing yet. If there was really a remote
// update on the server, we'll end up in CR eventually.
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return false, err
}
if mergedRev != MetadataRevisionUninitialized {
return false, nil
}
if !fbo.isMasterBranchLocked(lState) {
// Don't update if we're staged.
return false, nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh {
// Might as well fetch all the revisions.
return false, nil
}
fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d",
fbo.latestMergedRevision, currHead.Revision())
changes, err := fbo.blocks.FastForwardAllNodes(
ctx, lState, currHead.ReadOnly())
if err != nil {
return false, err
}
err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/)
if err != nil {
return false, err
}
// Invalidate all the affected nodes.
fbo.observers.batchChanges(ctx, changes)
// Reset the edit history. TODO: notify any listeners that we've
// done this.
fbo.editHistory.Shutdown()
fbo.editHistory = NewTlfEditHistory(fbo.config, fbo, fbo.log)
return true, nil
}
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
defer close(fbo.updateDoneChan)
childDone := make(chan struct{})
var lastUpdate time.Time
err := fbo.runUnlessShutdown(func(ctx context.Context) error {
defer close(childDone)
// If we fail to register for or process updates, try again
// with an exponential backoff, so we don't overwhelm the
// server or ourselves with too many attempts in a hopeless
// situation.
expBackoff := backoff.NewExponentialBackOff()
// Never give up hope until we shut down
expBackoff.MaxElapsedTime = 0
// Register and wait in a loop unless we hit an unrecoverable error
for {
err := backoff.RetryNotifyWithContext(ctx, func() error {
				// Replace the FBOID tag with a fresh ID for every attempt
newCtx := fbo.ctxWithFBOID(ctx)
updateChan, err := fbo.registerForUpdates(newCtx)
if err != nil {
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
}
currUpdate, err := fbo.waitForAndProcessUpdates(
newCtx, lastUpdate, updateChan)
if _, ok := err.(UnmergedError); ok {
// skip the back-off timer and continue directly to next
// registerForUpdates
return nil
}
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
if err == nil {
lastUpdate = currUpdate
}
return err
}
},
expBackoff,
func(err error, nextTime time.Duration) {
fbo.log.CDebugf(ctx,
"Retrying registerForUpdates in %s due to err: %v",
nextTime, err)
})
if err != nil {
return err
}
}
})
if err != nil && err != context.Canceled {
fbo.log.CWarningf(context.Background(),
"registerAndWaitForUpdates failed unexpectedly with an error: %v",
err)
}
<-childDone
}
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
updateChan <-chan error, err error) {
lState := makeFBOLockState()
currRev := fbo.getLatestMergedRevision(lState)
fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d)", currRev)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
// RegisterForUpdate will itself retry on connectivity issues
return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev)
}
func (fbo *folderBranchOps) waitForAndProcessUpdates(
ctx context.Context, lastUpdate time.Time,
updateChan <-chan error) (currUpdate time.Time, err error) {
// successful registration; now, wait for an update or a shutdown
fbo.log.CDebugf(ctx, "Waiting for updates")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
lState := makeFBOLockState()
for {
select {
case err := <-updateChan:
fbo.log.CDebugf(ctx, "Got an update: %v", err)
if err != nil {
return time.Time{}, err
}
// Getting and applying the updates requires holding
// locks, so make sure it doesn't take too long.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
defer cancel()
currUpdate := fbo.config.Clock().Now()
ffDone, err :=
fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate)
if err != nil {
return time.Time{}, err
}
if ffDone {
return currUpdate, nil
}
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates)
if err != nil {
fbo.log.CDebugf(ctx, "Got an error while applying "+
"updates: %v", err)
return time.Time{}, err
}
return currUpdate, nil
case unpause := <-fbo.updatePauseChan:
fbo.log.CInfof(ctx, "Updates paused")
// wait to be unpaused
select {
case <-unpause:
fbo.log.CInfof(ctx, "Updates unpaused")
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
}
}
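// backgroundFlusher periodically syncs any dirty files in the
// background. It panics if repeated attempts make no progress.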
func (fbo *folderBranchOps) backgroundFlusher(betweenFlushes time.Duration) {
ticker := time.NewTicker(betweenFlushes)
defer ticker.Stop()
lState := makeFBOLockState()
var prevDirtyRefMap map[BlockRef]bool
sameDirtyRefCount := 0
for {
doSelect := true
if fbo.blocks.GetState(lState) == dirtyState &&
fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
// We have dirty files, and the system has a full buffer,
// so don't bother waiting for a signal, just get right to
// the main attraction.
doSelect = false
}
if doSelect {
select {
case <-ticker.C:
case <-fbo.forceSyncChan:
case <-fbo.shutdownChan:
return
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) == 0 {
sameDirtyRefCount = 0
continue
}
// Make sure we are making some progress
currDirtyRefMap := make(map[BlockRef]bool)
for _, ref := range dirtyRefs {
currDirtyRefMap[ref] = true
}
if reflect.DeepEqual(currDirtyRefMap, prevDirtyRefMap) {
sameDirtyRefCount++
} else {
sameDirtyRefCount = 0
}
if sameDirtyRefCount >= 10 {
panic(fmt.Sprintf("Making no Sync progress on dirty refs: %v",
dirtyRefs))
}
prevDirtyRefMap = currDirtyRefMap
fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
// Denote that these are coming from a background
// goroutine, not directly from any user.
ctx = NewContextReplayable(ctx,
func(ctx context.Context) context.Context {
return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
})
// Just in case network access or a bug gets stuck for a
// long time, time out the sync eventually.
longCtx, longCancel :=
context.WithTimeout(ctx, backgroundTaskTimeout)
defer longCancel()
// Make sure this loop doesn't starve user requests for
// too long. But use the longer-timeout version in the
// actual Sync command, to avoid unnecessary errors.
shortCtx, shortCancel := context.WithTimeout(ctx, 1*time.Second)
defer shortCancel()
for _, ref := range dirtyRefs {
select {
case <-shortCtx.Done():
fbo.log.CDebugf(ctx,
"Stopping background sync early due to timeout")
return nil
default:
}
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
err := fbo.Sync(longCtx, node)
if err != nil {
// Just log the warning and keep trying to
// sync the rest of the dirty files.
p := fbo.nodeCache.PathFromNode(node)
fbo.log.CWarningf(ctx, "Couldn't sync dirty file with "+
"ref=%v, nodeID=%p, and path=%v: %v",
ref, node.GetID(), p, err)
}
}
return nil
})
}
}
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Lock(lState)
}
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Unlock(lState)
}
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []BlockID) error {
fbo.mdWriterLock.AssertLocked(lState)
// Put the blocks into the cache so that, even if we fail below,
// future attempts may reuse the blocks.
err := fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
mdID, err := fbo.config.MDOps().ResolveBranch(ctx, fbo.id(), fbo.bid,
blocksToDelete, md)
doUnmergedPut := isRevisionConflict(err)
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
return err
}
if err != nil {
return err
}
// Queue a rekey if the bit was set.
if md.IsRekeySet() {
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
md.loadCachedBlockChanges(bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Set the head to the new MD.
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
"successful put: %v", err)
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
// notifyOneOp for every fixed-up merged op.
for _, op := range newOps {
fbo.notifyOneOpLocked(ctx, lState, op, irmd)
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd})
return nil
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict. It also sends
// out the given newOps notifications locally. This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []BlockID) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.finalizeResolutionLocked(
ctx, lState, md, bps, newOps, blocksToDelete)
}
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
lState *lockState) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
fbo.bid)
return fbo.unstageLocked(ctx, lState)
}
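// handleTLFBranchChange switches this folder onto the new journal
// branch and kicks off conflict resolution.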
func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context,
newBID BranchID) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID)
if !fbo.isMasterBranchLocked(lState) {
if fbo.bid == newBID {
fbo.log.CDebugf(ctx, "Already on branch %s", newBID)
return
}
panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s",
newBID, fbo.bid))
}
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID)
if err != nil {
fbo.log.CWarningf(ctx,
"No unmerged head on journal branch change (bid=%s)", newBID)
return
}
if md == (ImmutableRootMetadata{}) || md.MergedStatus() != Unmerged ||
md.BID() != newBID {
// This can happen if CR got kicked off in some other way and
// completed before we took the lock to process this
// notification.
fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d",
md, newBID)
return
}
// Everything we thought we knew about quota reclamation is now
// called into question.
fbo.fbm.clearLastQRData()
// Kick off conflict resolution and set the head to the correct branch.
fbo.setBranchIDLocked(lState, newBID)
fbo.cr.BeginNewBranch()
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/)
if err != nil {
fbo.log.CWarningf(ctx,
"Could not set head on journal branch change: %v", err)
return
}
}
func (fbo *folderBranchOps) onTLFBranchChange(newBID BranchID) {
fbo.branchChanges.Add(1)
go func() {
defer fbo.branchChanges.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
// This only happens on a `PruneBranch` call, in which case we
// would have already updated fbo's local view of the branch/head.
if newBID == NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring branch change back to master")
return
}
fbo.handleTLFBranchChange(ctx, newBID)
}()
}
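// handleMDFlush records the newly-flushed merged revision and
// archives its unreferenced blocks if the revision is archivable.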
func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid BranchID,
rev MetadataRevision) {
fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev)
lState := makeFBOLockState()
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false)
}()
// Get that revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
rev, Merged)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v",
rev, err)
return
}
if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil {
fbo.log.CDebugf(
ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err)
return
}
fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly())
}
func (fbo *folderBranchOps) onMDFlush(bid BranchID, rev MetadataRevision) {
fbo.mdFlushes.Add(1)
go func() {
defer fbo.mdFlushes.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
if bid != NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+
"revision %d", bid, rev)
return
}
fbo.handleMDFlush(ctx, bid, rev)
}()
}
// GetUpdateHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
fbo.log.CDebugf(ctx, "GetUpdateHistory")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
}
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
MetadataRevisionInitial)
if err != nil {
return TLFUpdateHistory{}, err
}
if len(rmds) > 0 {
rmd := rmds[len(rmds)-1]
history.ID = rmd.TlfID().String()
history.Name = rmd.GetTlfHandle().GetCanonicalPath()
}
history.Updates = make([]UpdateSummary, 0, len(rmds))
writerNames := make(map[keybase1.UID]string)
for _, rmd := range rmds {
writer, ok := writerNames[rmd.LastModifyingWriter()]
if !ok {
name, err := fbo.config.KBPKI().
GetNormalizedUsername(ctx, rmd.LastModifyingWriter())
if err != nil {
return TLFUpdateHistory{}, err
}
writer = string(name)
writerNames[rmd.LastModifyingWriter()] = writer
}
updateSummary := UpdateSummary{
Revision: rmd.Revision(),
Date: time.Unix(0, rmd.data.Dir.Mtime),
Writer: writer,
LiveBytes: rmd.DiskUsage(),
Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
}
for _, op := range rmd.data.Changes.Ops {
opSummary := OpSummary{
Op: op.String(),
Refs: make([]string, 0, len(op.Refs())),
Unrefs: make([]string, 0, len(op.Unrefs())),
Updates: make(map[string]string),
}
for _, ptr := range op.Refs() {
opSummary.Refs = append(opSummary.Refs, ptr.String())
}
for _, ptr := range op.Unrefs() {
opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
}
for _, update := range op.allUpdates() {
opSummary.Updates[update.Unref.String()] = update.Ref.String()
}
updateSummary.Ops = append(updateSummary.Ops, opSummary)
}
history.Updates = append(history.Updates, updateSummary)
}
return history, nil
}
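// Illustrative usage (a sketch, not part of the original file): a caller holding a
// KBFSOps implementation (here assumed to be kbfsOps) can fetch the history and
// inspect the newest update.
//
//   history, err := kbfsOps.GetUpdateHistory(ctx, folderBranch)
//   if err == nil && len(history.Updates) > 0 {
//       last := history.Updates[len(history.Updates)-1]
//       fmt.Printf("revision %d written by %s\n", last.Revision, last.Writer)
//   }
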
// GetEditHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetEditHistory(ctx context.Context,
folderBranch FolderBranch) (edits TlfWriterEdits, err error) {
fbo.log.CDebugf(ctx, "GetEditHistory")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return nil, WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
head, err := fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
if err != nil {
return nil, err
}
return fbo.editHistory.GetComplete(ctx, head)
}
// PushStatusChange forces a new status to be fetched by status listeners.
func (fbo *folderBranchOps) PushStatusChange() {
fbo.config.KBFSOps().PushStatusChange()
}
// PushConnectionStatusChange pushes human readable connection status changes.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus)
}
| 1 | 14,613 | I'm a bit confused. Doesn't `mdWriterLock` already synchronizes calls to this function? | keybase-kbfs | go |
@@ -120,7 +120,13 @@ final class DailyMotionProvider extends BaseVideoProvider
public function getReferenceUrl(MediaInterface $media): string
{
- return sprintf('http://www.dailymotion.com/video/%s', $media->getProviderReference());
+ $providerReference = $media->getProviderReference();
+
+ if (null === $providerReference) {
+ return '';
+ }
+
+ return sprintf('http://www.dailymotion.com/video/%s', $providerReference);
}
protected function doTransform(MediaInterface $media): void | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <thomas.rabaix@sonata-project.org>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Provider;
use Sonata\MediaBundle\Model\MediaInterface;
use Symfony\Component\HttpFoundation\RedirectResponse;
use Symfony\Component\HttpFoundation\Response;
final class DailyMotionProvider extends BaseVideoProvider
{
public function getHelperProperties(MediaInterface $media, string $format, array $options = []): array
{
// documentation : http://www.dailymotion.com/en/doc/api/player
$defaults = [
// Values: 0 or 1. Default is 0. Determines if the player loads related videos when
// the current video begins playback.
'related' => 0,
// Values: 0 or 1. Default is 1. Determines if the player allows explicit content to
// be played. This parameter may be added to embed code by platforms which do not
// want explicit content to be posted by their users.
'explicit' => 0,
// Values: 0 or 1. Default is 0. Determines if the video will begin playing
// automatically when the player loads.
'autoPlay' => 0,
// Values: 0 or 1. Default is 0. Determines if the video will begin muted.
'autoMute' => 0,
            // Values: 0 or 1. Default is 0. Determines if the video will be unmuted on mouse over.
            // Of course, it only works if the player has autoMute=1.
'unmuteOnMouseOver' => 0,
            // Values: a number of seconds. Default is 0. Determines if the video will begin
            // playing at a given time.
'start' => 0,
// Values: 0 or 1. Default is 0. Enable the Javascript API by setting this parameter
// to 1. For more information and instructions on using the Javascript API, see the
// JavaScript API documentation.
'enableApi' => 0,
// Values: 0 or 1. Default is 0. Determines if the player should display controls
// or not during video playback.
'chromeless' => 0,
            // Values: 0 or 1. Default is 0. Determines if the video should be expanded to fit
            // the whole player's size.
'expendVideo' => 0,
'color2' => null,
// Player color changes may be set using color codes. A color is described by its
// hexadecimal value (eg: FF0000 for red).
'foreground' => null,
'background' => null,
'highlight' => null,
];
$player_parameters = array_merge($defaults, $options['player_parameters'] ?? []);
$box = $this->getBoxHelperProperties($media, $format, $options);
$params = [
'player_parameters' => http_build_query($player_parameters),
'allowFullScreen' => $options['allowFullScreen'] ?? 'true',
'allowScriptAccess' => $options['allowScriptAccess'] ?? 'always',
'width' => $box->getWidth(),
'height' => $box->getHeight(),
];
return $params;
}
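    // Illustrative usage (a sketch, not part of the original class): requesting an
    // autoplaying player through the "player_parameters" option. $provider and
    // $media are assumptions for the example.
    //
    //     $properties = $provider->getHelperProperties($media, 'reference', [
    //         'player_parameters' => ['autoPlay' => 1],
    //     ]);
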
public function getProviderMetadata(): MetadataInterface
{
return new Metadata($this->getName(), $this->getName().'.description', 'bundles/sonatamedia/dailymotion-icon.png', 'SonataMediaBundle');
}
public function updateMetadata(MediaInterface $media, bool $force = false): void
{
$url = sprintf('http://www.dailymotion.com/services/oembed?url=%s&format=json', $this->getReferenceUrl($media));
try {
$metadata = $this->getMetadata($media, $url);
} catch (\RuntimeException $e) {
$media->setEnabled(false);
$media->setProviderStatus(MediaInterface::STATUS_ERROR);
return;
}
$media->setProviderMetadata($metadata);
if ($force) {
$media->setName($metadata['title']);
$media->setAuthorName($metadata['author_name']);
}
$media->setHeight($metadata['height']);
$media->setWidth($metadata['width']);
}
public function getDownloadResponse(MediaInterface $media, string $format, string $mode, array $headers = []): Response
{
return new RedirectResponse($this->getReferenceUrl($media), 302, $headers);
}
public function getReferenceUrl(MediaInterface $media): string
{
return sprintf('http://www.dailymotion.com/video/%s', $media->getProviderReference());
}
protected function doTransform(MediaInterface $media): void
{
$this->fixBinaryContent($media);
if (null === $media->getBinaryContent()) {
return;
}
$media->setProviderName($this->name);
$media->setProviderStatus(MediaInterface::STATUS_OK);
$media->setProviderReference($media->getBinaryContent());
$this->updateMetadata($media, true);
}
private function fixBinaryContent(MediaInterface $media): void
{
if (null === $media->getBinaryContent()) {
return;
}
if (1 === preg_match('{^(?:https?://)?www.dailymotion.com/video/(?<video_id>[0-9a-zA-Z]*)}', $media->getBinaryContent(), $matches)) {
$media->setBinaryContent($matches['video_id']);
}
}
}
| 1 | 12,406 | Does it make sense to generate a dailyMotion url without the video reference? It does not to me, but can we throw an exception here? | sonata-project-SonataMediaBundle | php |
@@ -24,10 +24,11 @@ import (
"sort"
"strings"
- stringutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/string"
- "github.com/GoogleCloudPlatform/compute-image-tools/daisy"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
+
+ stringutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/string"
+ "github.com/GoogleCloudPlatform/compute-image-tools/daisy"
)
const ( | 1 | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"regexp"
"sort"
"strings"
stringutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/string"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
)
const (
// BuildIDOSEnvVarName is the os env var name to get build id
BuildIDOSEnvVarName = "BUILD_ID"
translateFailedPrefix = "TranslateFailed"
)
// TranslationSettings includes information that needs to be added to a disk or image after it is imported,
// for a particular OS and version.
type TranslationSettings struct {
// GcloudOsFlag is the user-facing string corresponding to this OS, version, and licensing mode.
// It is passed as a value of the `--os` flag.
GcloudOsFlag string
// LicenseURI is the GCP Compute license corresponding to this OS, version, and licensing mode:
// https://cloud.google.com/compute/docs/reference/rest/v1/licenses
LicenseURI string
// WorkflowPath is the path to a Daisy json workflow, relative to the
// `daisy_workflows/image_import` directory.
WorkflowPath string
}
var (
supportedOS = []TranslationSettings{
// Enterprise Linux
{
GcloudOsFlag: "centos-7",
WorkflowPath: "enterprise_linux/translate_centos_7.wf.json",
LicenseURI: "projects/centos-cloud/global/licenses/centos-7",
}, {
GcloudOsFlag: "centos-8",
WorkflowPath: "enterprise_linux/translate_centos_8.wf.json",
LicenseURI: "projects/centos-cloud/global/licenses/centos-8",
}, {
GcloudOsFlag: "rhel-6",
WorkflowPath: "enterprise_linux/translate_rhel_6_licensed.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-6-server",
}, {
GcloudOsFlag: "rhel-6-byol",
WorkflowPath: "enterprise_linux/translate_rhel_6_byol.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-6-byol",
}, {
GcloudOsFlag: "rhel-7",
WorkflowPath: "enterprise_linux/translate_rhel_7_licensed.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-7-server",
}, {
GcloudOsFlag: "rhel-7-byol",
WorkflowPath: "enterprise_linux/translate_rhel_7_byol.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-7-byol",
}, {
GcloudOsFlag: "rhel-8",
WorkflowPath: "enterprise_linux/translate_rhel_8_licensed.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-8-server",
}, {
GcloudOsFlag: "rhel-8-byol",
WorkflowPath: "enterprise_linux/translate_rhel_8_byol.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-8-byos",
},
// SUSE
{
GcloudOsFlag: "opensuse-15",
WorkflowPath: "suse/translate_opensuse_15.wf.json",
LicenseURI: "projects/opensuse-cloud/global/licenses/opensuse-leap-42",
}, {
GcloudOsFlag: "sles-12",
WorkflowPath: "suse/translate_sles_12.wf.json",
LicenseURI: "projects/suse-cloud/global/licenses/sles-12",
}, {
GcloudOsFlag: "sles-12-byol",
WorkflowPath: "suse/translate_sles_12_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-12-byos",
}, {
GcloudOsFlag: "sles-sap-12",
WorkflowPath: "suse/translate_sles_sap_12.wf.json",
LicenseURI: "projects/suse-sap-cloud/global/licenses/sles-sap-12",
}, {
GcloudOsFlag: "sles-sap-12-byol",
WorkflowPath: "suse/translate_sles_sap_12_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-sap-12-byos",
}, {
GcloudOsFlag: "sles-15",
WorkflowPath: "suse/translate_sles_15.wf.json",
LicenseURI: "projects/suse-cloud/global/licenses/sles-15",
}, {
GcloudOsFlag: "sles-15-byol",
WorkflowPath: "suse/translate_sles_15_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-15-byos",
}, {
GcloudOsFlag: "sles-sap-15",
WorkflowPath: "suse/translate_sles_sap_15.wf.json",
LicenseURI: "projects/suse-sap-cloud/global/licenses/sles-sap-15",
}, {
GcloudOsFlag: "sles-sap-15-byol",
WorkflowPath: "suse/translate_sles_sap_15_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-sap-15-byos",
},
// Debian
{
GcloudOsFlag: "debian-8",
WorkflowPath: "debian/translate_debian_8.wf.json",
LicenseURI: "projects/debian-cloud/global/licenses/debian-8-jessie",
}, {
GcloudOsFlag: "debian-9",
WorkflowPath: "debian/translate_debian_9.wf.json",
LicenseURI: "projects/debian-cloud/global/licenses/debian-9-stretch",
},
// Ubuntu
{
GcloudOsFlag: "ubuntu-1404",
WorkflowPath: "ubuntu/translate_ubuntu_1404.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-1404-trusty",
}, {
GcloudOsFlag: "ubuntu-1604",
WorkflowPath: "ubuntu/translate_ubuntu_1604.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-1604-xenial",
}, {
GcloudOsFlag: "ubuntu-1804",
WorkflowPath: "ubuntu/translate_ubuntu_1804.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-1804-lts",
}, {
GcloudOsFlag: "ubuntu-2004",
WorkflowPath: "ubuntu/translate_ubuntu_2004.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-2004-lts",
},
// Windows
{
GcloudOsFlag: "windows-7-x64-byol",
WorkflowPath: "windows/translate_windows_7_x64_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-7-x64-byol",
}, {
GcloudOsFlag: "windows-7-x86-byol",
WorkflowPath: "windows/translate_windows_7_x86_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-7-x86-byol",
}, {
GcloudOsFlag: "windows-8-x64-byol",
WorkflowPath: "windows/translate_windows_8_x64_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-8-x64-byol",
}, {
GcloudOsFlag: "windows-8-x86-byol",
WorkflowPath: "windows/translate_windows_8_x86_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-8-x86-byol",
}, {
GcloudOsFlag: "windows-10-x64-byol",
WorkflowPath: "windows/translate_windows_10_x64_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-10-x64-byol",
}, {
GcloudOsFlag: "windows-10-x86-byol",
WorkflowPath: "windows/translate_windows_10_x86_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-10-x86-byol",
}, {
GcloudOsFlag: "windows-2008r2",
WorkflowPath: "windows/translate_windows_2008_r2.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2008-r2-dc",
}, {
GcloudOsFlag: "windows-2008r2-byol",
WorkflowPath: "windows/translate_windows_2008_r2_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2008-r2-byol",
}, {
GcloudOsFlag: "windows-2012",
WorkflowPath: "windows/translate_windows_2012.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-dc",
}, {
GcloudOsFlag: "windows-2012-byol",
WorkflowPath: "windows/translate_windows_2012_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-byol",
}, {
GcloudOsFlag: "windows-2012r2",
WorkflowPath: "windows/translate_windows_2012_r2.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-r2-dc",
}, {
GcloudOsFlag: "windows-2012r2-byol",
WorkflowPath: "windows/translate_windows_2012_r2_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-r2-byol",
}, {
GcloudOsFlag: "windows-2016",
WorkflowPath: "windows/translate_windows_2016.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2016-dc",
}, {
GcloudOsFlag: "windows-2016-byol",
WorkflowPath: "windows/translate_windows_2016_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2016-byol",
}, {
GcloudOsFlag: "windows-2019",
WorkflowPath: "windows/translate_windows_2019.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2019-dc",
}, {
GcloudOsFlag: "windows-2019-byol",
WorkflowPath: "windows/translate_windows_2019_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2019-byol",
},
}
// legacyIDs maps a legacy identifier to its replacement.
legacyIDs = map[string]string{
"windows-7-byol": "windows-7-x64-byol",
"windows-8-1-x64-byol": "windows-8-x64-byol",
"windows-10-byol": "windows-10-x64-byol",
}
privacyRegex = regexp.MustCompile(`\[Privacy\->.*?<\-Privacy\]`)
privacyTagRegex = regexp.MustCompile(`(\[Privacy\->)|(<\-Privacy\])`)
debianWorkerRegex = regexp.MustCompile("projects/compute-image-tools/global/images/family/debian-\\d+-worker")
)
// GetSortedOSIDs returns the supported OS identifiers, sorted.
func GetSortedOSIDs() []string {
choices := make([]string, 0, len(supportedOS))
for _, k := range supportedOS {
choices = append(choices, k.GcloudOsFlag)
}
sort.Strings(choices)
return choices
}
// ValidateOS validates that osID is supported by Daisy image import
func ValidateOS(osID string) error {
_, err := GetTranslationSettings(osID)
return err
}
// GetTranslationSettings returns parameters required for translating a particular OS, version,
// and licensing mode to run on GCE.
//
// An error is returned if the OS, version, and licensing mode is not supported for import.
func GetTranslationSettings(osID string) (spec TranslationSettings, err error) {
if osID == "" {
return spec, errors.New("osID is empty")
}
if replacement := legacyIDs[osID]; replacement != "" {
osID = replacement
}
for _, choice := range supportedOS {
if choice.GcloudOsFlag == osID {
return choice, nil
}
}
allowedValuesMsg := fmt.Sprintf("Allowed values: %v", GetSortedOSIDs())
return spec, daisy.Errf("os `%v` is invalid. "+allowedValuesMsg, osID)
}
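// Illustrative usage (a sketch, not part of the original file): resolving the
// settings for a value passed via the --os flag.
//
//   settings, err := GetTranslationSettings("ubuntu-1804")
//   if err != nil {
//       return err
//   }
//   // settings.WorkflowPath == "ubuntu/translate_ubuntu_1804.wf.json"
//   // settings.LicenseURI ends in ".../licenses/ubuntu-1804-lts"
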
// UpdateAllInstanceNoExternalIP updates all Create Instance steps in the workflow to operate
// when no external IP access is allowed by the VPC Daisy workflow is running in.
func UpdateAllInstanceNoExternalIP(workflow *daisy.Workflow, noExternalIP bool) {
if !noExternalIP {
return
}
workflow.IterateWorkflowSteps(func(step *daisy.Step) {
if step.CreateInstances != nil {
for _, instance := range step.CreateInstances.Instances {
if instance.Instance.NetworkInterfaces == nil {
continue
}
for _, networkInterface := range instance.Instance.NetworkInterfaces {
networkInterface.AccessConfigs = []*compute.AccessConfig{}
}
}
for _, instance := range step.CreateInstances.InstancesBeta {
if instance.Instance.NetworkInterfaces == nil {
continue
}
for _, networkInterface := range instance.Instance.NetworkInterfaces {
networkInterface.AccessConfigs = []*computeBeta.AccessConfig{}
}
}
}
})
}
// UpdateToUEFICompatible marks workflow resources (disks and images) to be UEFI
// compatible by adding "UEFI_COMPATIBLE" to GuestOSFeatures. Debian workers
// are excluded until UEFI becomes the default boot method.
func UpdateToUEFICompatible(workflow *daisy.Workflow) {
workflow.IterateWorkflowSteps(func(step *daisy.Step) {
if step.CreateDisks != nil {
for _, disk := range *step.CreateDisks {
// for the time being, don't run Debian worker in UEFI mode
if debianWorkerRegex.MatchString(disk.SourceImage) {
continue
}
// also, don't run Windows bootstrap worker in UEFI mode
if strings.Contains(disk.SourceImage, "projects/windows-cloud/global/images/family/windows-2019-core") && strings.Contains(disk.Name, "disk-bootstrap") {
continue
}
disk.Disk.GuestOsFeatures = daisy.CombineGuestOSFeatures(disk.Disk.GuestOsFeatures, "UEFI_COMPATIBLE")
}
}
if step.CreateImages != nil {
for _, image := range step.CreateImages.Images {
image.GuestOsFeatures = stringutils.CombineStringSlices(image.GuestOsFeatures, "UEFI_COMPATIBLE")
image.Image.GuestOsFeatures = daisy.CombineGuestOSFeatures(image.Image.GuestOsFeatures, "UEFI_COMPATIBLE")
}
for _, image := range step.CreateImages.ImagesBeta {
image.GuestOsFeatures = stringutils.CombineStringSlices(image.GuestOsFeatures, "UEFI_COMPATIBLE")
image.Image.GuestOsFeatures = daisy.CombineGuestOSFeaturesBeta(image.Image.GuestOsFeatures, "UEFI_COMPATIBLE")
}
}
})
}
// RemovePrivacyLogInfo removes privacy log information.
func RemovePrivacyLogInfo(message string) string {
	// Since translation scripts vary and it is hard to predict their output, we have to hide the
	// details and only keep "TranslateFailed".
if strings.Contains(message, translateFailedPrefix) {
return translateFailedPrefix
}
// All import/export bash scripts enclose privacy info inside "[Privacy-> XXX <-Privacy]". Let's
// remove it for privacy.
message = privacyRegex.ReplaceAllString(message, "")
return message
}
// RemovePrivacyLogTag removes privacy log tag.
func RemovePrivacyLogTag(message string) string {
// All import/export bash scripts enclose privacy info inside a pair of tag "[Privacy->XXX<-Privacy]".
// Let's remove the tag to improve the readability.
message = privacyTagRegex.ReplaceAllString(message, "")
return message
}
// PostProcessDErrorForNetworkFlag determines whether to show more hints for network flag
func PostProcessDErrorForNetworkFlag(action string, err error, network string, w *daisy.Workflow) {
if derr, ok := err.(daisy.DError); ok {
if derr.CausedByErrType("networkResourceDoesNotExist") && network == "" {
w.LogWorkflowInfo("A VPC network is required for running %v,"+
" and the default VPC network does not exist in your project. You will need to"+
" specify a VPC network with the --network flag. For more information about"+
" VPC networks, see https://cloud.google.com/vpc.", action)
}
}
}
// RunWorkflowWithCancelSignal runs Daisy workflow with accepting Ctrl-C signal
func RunWorkflowWithCancelSignal(ctx context.Context, w *daisy.Workflow) error {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func(w *daisy.Workflow) {
select {
case <-c:
w.LogWorkflowInfo("\nCtrl-C caught, sending cancel signal to %q...\n", w.Name)
close(w.Cancel)
case <-w.Cancel:
}
}(w)
return w.Run(ctx)
}
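// Illustrative usage (a sketch, not part of the original file): wf is an assumed,
// already-populated *daisy.Workflow; pressing Ctrl-C during the run closes
// wf.Cancel so the workflow can shut down cleanly.
//
//   if err := RunWorkflowWithCancelSignal(context.Background(), wf); err != nil {
//       return err
//   }
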
// NewStep creates a new step for the workflow along with dependencies.
func NewStep(w *daisy.Workflow, name string, dependencies ...*daisy.Step) (*daisy.Step, error) {
s, err := w.NewStep(name)
if err != nil {
return nil, err
}
err = w.AddDependency(s, dependencies...)
return s, err
}
// GetResourceID gets resource id from its URI. Definition of resource ID:
// https://cloud.google.com/apis/design/resource_names#resource_id
func GetResourceID(resourceURI string) string {
dm := strings.Split(resourceURI, "/")
return dm[len(dm)-1]
}
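// For example (illustrative only):
//   GetResourceID("projects/p/zones/us-central1-b/disks/disk-1") returns "disk-1".
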
// GetDeviceURI gets a URI for a device based on its attributes. A device is a disk
// attached to a instance.
func GetDeviceURI(project, zone, name string) string {
return fmt.Sprintf("projects/%v/zones/%v/devices/%v", project, zone, name)
}
// GetDiskURI gets a URI for a disk based on its attributes. Introduction
// to a disk resource: https://cloud.google.com/compute/docs/reference/rest/v1/disks
func GetDiskURI(project, zone, name string) string {
return fmt.Sprintf("projects/%v/zones/%v/disks/%v", project, zone, name)
}
// GetInstanceURI gets a URI for a instance based on its attributes. Introduction
// to a instance resource: https://cloud.google.com/compute/docs/reference/rest/v1/instances
func GetInstanceURI(project, zone, name string) string {
return fmt.Sprintf("projects/%v/zones/%v/instances/%v", project, zone, name)
}
| 1 | 13,356 | Why the separation of imports here? | GoogleCloudPlatform-compute-image-tools | go |
@@ -4,14 +4,14 @@ import (
"context"
"github.com/influxdata/flux/ast"
- "github.com/influxdata/flux/dependencies"
+
"github.com/influxdata/flux/interpreter"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
-func Eval(ctx context.Context, deps dependencies.Interface, itrp *interpreter.Interpreter, scope values.Scope, importer interpreter.Importer, src string) ([]interpreter.SideEffect, error) {
+func Eval(ctx context.Context, itrp *interpreter.Interpreter, scope values.Scope, importer interpreter.Importer, src string) ([]interpreter.SideEffect, error) {
pkg := parser.ParseSource(src)
if ast.Check(pkg) > 0 {
return nil, ast.GetError(pkg) | 1 | package interptest
import (
"context"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/dependencies"
"github.com/influxdata/flux/interpreter"
"github.com/influxdata/flux/parser"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
func Eval(ctx context.Context, deps dependencies.Interface, itrp *interpreter.Interpreter, scope values.Scope, importer interpreter.Importer, src string) ([]interpreter.SideEffect, error) {
pkg := parser.ParseSource(src)
if ast.Check(pkg) > 0 {
return nil, ast.GetError(pkg)
}
node, err := semantic.New(pkg)
if err != nil {
return nil, err
}
return itrp.Eval(ctx, deps, node, scope, importer)
}
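// Illustrative usage (a sketch, not part of the original file): evaluating a
// one-line Flux program in a test. The interpreter, scope, importer and deps are
// assumed to come from the test fixture.
//
//   sideEffects, err := Eval(ctx, deps, itrp, scope, importer, `x = 1 + 1`)
//   if err != nil {
//       t.Fatal(err)
//   }
//   _ = sideEffects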
| 1 | 11,864 | Random extra newline. | influxdata-flux | go |
@@ -0,0 +1,18 @@
+/* global dom */
+
+/**
+ * Get all descendents that are focusable from a given node
+ * @method getFocusableElements
+ * @memberof axe.commons.dom
+ * @instance
+ * @param {Object} virtualNode The virtualNode to assess
+ * @return {Boolean}
+ */
+dom.getFocusableElements = function getFocusableElements(virtualNode) {
+ const descendents = axe.utils.querySelectorAll(virtualNode, '*');
+ const focusableElms = descendents.filter(({ actualNode: el }) => {
+ const isElFocusable = dom.isFocusable(el);
+ return isElFocusable;
+ });
+ return focusableElms;
+}; | 1 | 1 | 13,439 | No need for this variable. | dequelabs-axe-core | js |
|
@@ -79,6 +79,15 @@ def authentication_required(url, authenticator, abort_on):
return answer
+def _format_msg(msg: str) -> str:
+ """Convert message to HTML suitable for rendering."""
+ ret = msg
+ ret = html.escape(ret)
+ ret = ret.strip()
+ ret = ret.replace('\n', '<br />')
+ return ret
+
+
def javascript_confirm(url, js_msg, abort_on):
"""Display a javascript confirm prompt."""
log.js.debug("confirm: {}".format(js_msg)) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Various utilities shared between webpage/webview subclasses."""
import os
import sys
import html
import netrc
from typing import Callable, Mapping, List
import tempfile
from PyQt5.QtCore import QUrl
from qutebrowser.config import config
from qutebrowser.utils import (usertypes, message, log, objreg, jinja, utils,
qtutils)
from qutebrowser.mainwindow import mainwindow
from qutebrowser.misc import guiprocess
class CallSuper(Exception):
"""Raised when the caller should call the superclass instead."""
def custom_headers(url):
"""Get the combined custom headers."""
headers = {}
dnt_config = config.instance.get('content.headers.do_not_track', url=url)
if dnt_config is not None:
dnt = b'1' if dnt_config else b'0'
headers[b'DNT'] = dnt
conf_headers = config.instance.get('content.headers.custom', url=url)
for header, value in conf_headers.items():
headers[header.encode('ascii')] = value.encode('ascii')
accept_language = config.instance.get('content.headers.accept_language',
url=url)
if accept_language is not None:
headers[b'Accept-Language'] = accept_language.encode('ascii')
return sorted(headers.items())
def authentication_required(url, authenticator, abort_on):
"""Ask a prompt for an authentication question."""
realm = authenticator.realm()
if realm:
msg = '<b>{}</b> says:<br/>{}'.format(
html.escape(url.toDisplayString()), html.escape(realm))
else:
msg = '<b>{}</b> needs authentication'.format(
html.escape(url.toDisplayString()))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask(title="Authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=abort_on, url=urlstr)
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
return answer
def javascript_confirm(url, js_msg, abort_on):
"""Display a javascript confirm prompt."""
log.js.debug("confirm: {}".format(js_msg))
if config.val.content.javascript.modal_dialog:
raise CallSuper
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()),
html.escape(js_msg))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
ans = message.ask('Javascript confirm', msg,
mode=usertypes.PromptMode.yesno,
abort_on=abort_on, url=urlstr)
return bool(ans)
def javascript_prompt(url, js_msg, default, abort_on):
"""Display a javascript prompt."""
log.js.debug("prompt: {}".format(js_msg))
if config.val.content.javascript.modal_dialog:
raise CallSuper
if not config.val.content.javascript.prompt:
return (False, "")
msg = '<b>{}</b> asks:<br/>{}'.format(html.escape(url.toDisplayString()),
html.escape(js_msg))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask('Javascript prompt', msg,
mode=usertypes.PromptMode.text,
default=default,
abort_on=abort_on, url=urlstr)
if answer is None:
return (False, "")
else:
return (True, answer)
def javascript_alert(url, js_msg, abort_on):
"""Display a javascript alert."""
log.js.debug("alert: {}".format(js_msg))
if config.val.content.javascript.modal_dialog:
raise CallSuper
if not config.val.content.javascript.alert:
return
msg = 'From <b>{}</b>:<br/>{}'.format(html.escape(url.toDisplayString()),
html.escape(js_msg))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
message.ask('Javascript alert', msg, mode=usertypes.PromptMode.alert,
abort_on=abort_on, url=urlstr)
# Needs to line up with the values allowed for the
# content.javascript.log setting.
_JS_LOGMAP: Mapping[str, Callable[[str], None]] = {
'none': lambda arg: None,
'debug': log.js.debug,
'info': log.js.info,
'warning': log.js.warning,
'error': log.js.error,
}
def javascript_log_message(level, source, line, msg):
"""Display a JavaScript log message."""
logstring = "[{}:{}] {}".format(source, line, msg)
logger = _JS_LOGMAP[config.cache['content.javascript.log'][level.name]]
logger(logstring)
def ignore_certificate_errors(url, errors, abort_on):
"""Display a certificate error question.
Args:
url: The URL the errors happened in
errors: A list of QSslErrors or QWebEngineCertificateErrors
Return:
True if the error should be ignored, False otherwise.
"""
ssl_strict = config.instance.get('content.ssl_strict', url=url)
log.network.debug("Certificate errors {!r}, strict {}".format(
errors, ssl_strict))
for error in errors:
assert error.is_overridable(), repr(error)
if ssl_strict == 'ask':
err_template = jinja.environment.from_string("""
Errors while loading <b>{{url.toDisplayString()}}</b>:<br/>
<ul>
{% for err in errors %}
<li>{{err}}</li>
{% endfor %}
</ul>
""".strip())
msg = err_template.render(url=url, errors=errors)
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
ignore = message.ask(title="Certificate errors - continue?", text=msg,
mode=usertypes.PromptMode.yesno, default=False,
abort_on=abort_on, url=urlstr)
if ignore is None:
# prompt aborted
ignore = False
return ignore
elif ssl_strict is False:
log.network.debug("ssl_strict is False, only warning about errors")
for err in errors:
# FIXME we might want to use warn here (non-fatal error)
# https://github.com/qutebrowser/qutebrowser/issues/114
message.error('Certificate error: {}'.format(err))
return True
elif ssl_strict is True:
return False
else:
raise ValueError("Invalid ssl_strict value {!r}".format(ssl_strict))
raise utils.Unreachable
def feature_permission(url, option, msg, yes_action, no_action, abort_on,
blocking=False):
"""Handle a feature permission request.
Args:
url: The URL the request was done for.
option: An option name to check.
msg: A string like "show notifications"
yes_action: A callable to call if the request was approved
no_action: A callable to call if the request was denied
abort_on: A list of signals which interrupt the question.
blocking: If True, ask a blocking question.
Return:
The Question object if a question was asked (and blocking=False),
None otherwise.
"""
config_val = config.instance.get(option, url=url)
if config_val == 'ask':
if url.isValid():
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
text = "Allow the website at <b>{}</b> to {}?".format(
html.escape(url.toDisplayString()), msg)
else:
urlstr = None
option = None # For message.ask/confirm_async
text = "Allow the website to {}?".format(msg)
if blocking:
answer = message.ask(abort_on=abort_on, title='Permission request',
text=text, url=urlstr, option=option,
mode=usertypes.PromptMode.yesno)
if answer:
yes_action()
else:
no_action()
return None
else:
return message.confirm_async(
yes_action=yes_action, no_action=no_action,
cancel_action=no_action, abort_on=abort_on,
title='Permission request', text=text, url=urlstr,
option=option)
elif config_val:
yes_action()
return None
else:
no_action()
return None
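# Illustrative usage (a sketch, not part of the original module): asking whether a
# page may use geolocation. The `grant_cb`/`deny_cb` callbacks and `tab` are
# assumptions for the example.
#
#     feature_permission(
#         url, 'content.geolocation', 'access your location',
#         yes_action=grant_cb, no_action=deny_cb,
#         abort_on=[tab.abort_questions])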
def get_tab(win_id, target):
"""Get a tab widget for the given usertypes.ClickTarget.
Args:
win_id: The window ID to open new tabs in
target: A usertypes.ClickTarget
"""
if target == usertypes.ClickTarget.tab:
bg_tab = False
elif target == usertypes.ClickTarget.tab_bg:
bg_tab = True
elif target == usertypes.ClickTarget.window:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
window = mainwindow.MainWindow(private=tabbed_browser.is_private)
window.show()
win_id = window.win_id
bg_tab = False
else:
raise ValueError("Invalid ClickTarget {}".format(target))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
return tabbed_browser.tabopen(url=None, background=bg_tab)
def get_user_stylesheet(searching=False):
"""Get the combined user-stylesheet."""
css = ''
stylesheets = config.val.content.user_stylesheets
for filename in stylesheets:
with open(filename, 'r', encoding='utf-8') as f:
css += f.read()
setting = config.val.scrolling.bar
if setting == 'overlay' and utils.is_mac:
setting = 'when-searching'
if setting == 'never' or setting == 'when-searching' and not searching:
css += '\nhtml > ::-webkit-scrollbar { width: 0px; height: 0px; }'
return css
def netrc_authentication(url, authenticator):
"""Perform authorization using netrc.
Args:
url: The URL the request was done for.
authenticator: QAuthenticator object used to set credentials provided.
Return:
True if netrc found credentials for the URL.
False otherwise.
"""
if 'HOME' not in os.environ:
        # netrc will raise an OSError if 'HOME' isn't available in
        # os.environ. We don't want to log that, so we prevent it
        # altogether.
return False
user = None
password = None
authenticators = None
try:
net = netrc.netrc(config.val.content.netrc_file)
if url.port() != -1:
authenticators = net.authenticators(
"{}:{}".format(url.host(), url.port()))
if not authenticators:
authenticators = net.authenticators(url.host())
if authenticators:
user, _account, password = authenticators
except FileNotFoundError:
log.misc.debug("No .netrc file found")
except OSError as e:
log.misc.exception("Unable to read the netrc file: {}".format(e))
except netrc.NetrcParseError as e:
log.misc.exception("Error when parsing the netrc file: {}".format(e))
if user is None:
return False
authenticator.setUser(user)
authenticator.setPassword(password)
return True
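# Example ~/.netrc entry this helper would pick up (illustrative only):
#
#     machine example.com
#     login alice
#     password s3cr3t
#
# If the URL carries an explicit port, the "host:port" key is tried first, as in
# the code above.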
def choose_file(multiple: bool) -> List[str]:
"""Select file(s) for uploading, using external command defined in config.
Args:
multiple: Should selecting multiple files be allowed.
Return:
A list of selected file paths, or empty list if no file is selected.
If multiple is False, the return value will have at most 1 item.
"""
handle = tempfile.NamedTemporaryFile(prefix='qutebrowser-fileselect-', delete=False)
handle.close()
tmpfilename = handle.name
with utils.cleanup_file(tmpfilename):
if multiple:
command = config.val.fileselect.multiple_files.command
else:
command = config.val.fileselect.single_file.command
proc = guiprocess.GUIProcess(what='choose-file')
proc.start(command[0],
[arg.replace('{}', tmpfilename) for arg in command[1:]])
loop = qtutils.EventLoop()
proc.finished.connect(lambda _code, _status: loop.exit())
loop.exec()
with open(tmpfilename, mode='r', encoding=sys.getfilesystemencoding()) as f:
selected_files = f.read().splitlines()
if not multiple:
if len(selected_files) > 1:
message.warning("More than one file chosen, using only the first")
return selected_files[:1]
return selected_files
| 1 | 25,809 | Why not use `msg` directly below? | qutebrowser-qutebrowser | py |
@@ -29,9 +29,8 @@ import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
-import static org.apache.parquet.schema.Type.Repetition.REPEATED;
-
public class ParquetTypeVisitor<T> {
+ @SuppressWarnings({"checkstyle:IllegalType", "checkstyle:VisibilityModifier"})
protected LinkedList<String> fieldNames = Lists.newLinkedList();
public static <T> T visit(Type type, ParquetTypeVisitor<T> visitor) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.parquet;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import java.util.LinkedList;
import java.util.List;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import static org.apache.parquet.schema.Type.Repetition.REPEATED;
public class ParquetTypeVisitor<T> {
protected LinkedList<String> fieldNames = Lists.newLinkedList();
public static <T> T visit(Type type, ParquetTypeVisitor<T> visitor) {
if (type instanceof MessageType) {
return visitor.message((MessageType) type,
visitFields(type.asGroupType(), visitor));
} else if (type.isPrimitive()) {
return visitor.primitive(type.asPrimitiveType());
} else {
// if not a primitive, the typeId must be a group
GroupType group = type.asGroupType();
OriginalType annotation = group.getOriginalType();
if (annotation != null) {
switch (annotation) {
case LIST:
Preconditions.checkArgument(!group.isRepetition(REPEATED),
"Invalid list: top-level group is repeated: " + group);
Preconditions.checkArgument(group.getFieldCount() == 1,
"Invalid list: does not contain single repeated field: " + group);
GroupType repeatedElement = group.getFields().get(0).asGroupType();
Preconditions.checkArgument(repeatedElement.isRepetition(REPEATED),
"Invalid list: inner group is not repeated");
Preconditions.checkArgument(repeatedElement.getFieldCount() <= 1,
"Invalid list: repeated group is not a single field: " + group);
visitor.fieldNames.push(repeatedElement.getName());
try {
T elementResult = null;
if (repeatedElement.getFieldCount() > 0) {
elementResult = visitField(repeatedElement.getType(0), visitor);
}
return visitor.list(group, elementResult);
} finally {
visitor.fieldNames.pop();
}
case MAP:
Preconditions.checkArgument(!group.isRepetition(REPEATED),
"Invalid map: top-level group is repeated: " + group);
Preconditions.checkArgument(group.getFieldCount() == 1,
"Invalid map: does not contain single repeated field: " + group);
GroupType repeatedKeyValue = group.getType(0).asGroupType();
Preconditions.checkArgument(repeatedKeyValue.isRepetition(REPEATED),
"Invalid map: inner group is not repeated");
Preconditions.checkArgument(repeatedKeyValue.getFieldCount() <= 2,
"Invalid map: repeated group does not have 2 fields");
visitor.fieldNames.push(repeatedKeyValue.getName());
try {
T keyResult = null;
T valueResult = null;
switch (repeatedKeyValue.getFieldCount()) {
case 2:
// if there are 2 fields, both key and value are projected
keyResult = visitField(repeatedKeyValue.getType(0), visitor);
valueResult = visitField(repeatedKeyValue.getType(1), visitor);
case 1:
// if there is just one, use the name to determine what it is
Type keyOrValue = repeatedKeyValue.getType(0);
if (keyOrValue.getName().equalsIgnoreCase("key")) {
keyResult = visitField(keyOrValue, visitor);
// value result remains null
} else {
valueResult = visitField(keyOrValue, visitor);
// key result remains null
}
default:
// both results will remain null
}
return visitor.map(group, keyResult, valueResult);
} finally {
visitor.fieldNames.pop();
}
default:
}
}
return visitor.struct(group, visitFields(group, visitor));
}
}
private static <T> T visitField(Type field, ParquetTypeVisitor<T> visitor) {
visitor.fieldNames.push(field.getName());
try {
return visit(field, visitor);
} finally {
visitor.fieldNames.pop();
}
}
private static <T> List<T> visitFields(GroupType group, ParquetTypeVisitor<T> visitor) {
List<T> results = Lists.newArrayListWithExpectedSize(group.getFieldCount());
for (Type field : group.getFields()) {
results.add(visitField(field, visitor));
}
return results;
}
public T message(MessageType message, List<T> fields) {
return null;
}
public T struct(GroupType struct, List<T> fields) {
return null;
}
public T list(GroupType array, T element) {
return null;
}
public T map(GroupType map, T key, T value) {
return null;
}
public T primitive(PrimitiveType primitive) {
return null;
}
}
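// Illustrative usage (a sketch, not part of the original file): a subclass that
// reports every primitive column it reaches. `schema` is an assumed Parquet
// MessageType; `fieldNames` holds the path to the node currently being visited.
//
//   ParquetTypeVisitor.visit(schema, new ParquetTypeVisitor<Void>() {
//     @Override
//     public Void primitive(PrimitiveType primitive) {
//       System.out.println(primitive.getName() + " (depth " + fieldNames.size() + ")");
//       return null;
//     }
//   });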
| 1 | 15,950 | I think other places changed to use `Deque` instead of suppressing the warning. | apache-iceberg | java |
@@ -4,8 +4,11 @@
#include "engine/guidance/toolkit.hpp"
#include <boost/assert.hpp>
+#include <boost/range/algorithm_ext/erase.hpp>
+
#include <iostream>
-#include <vector>
+#include <cstddef>
+#include <utility>
using TurnInstruction = osrm::extractor::guidance::TurnInstruction;
using TurnType = osrm::extractor::guidance::TurnType; | 1 | #include "engine/guidance/post_processing.hpp"
#include "extractor/guidance/turn_instruction.hpp"
#include "engine/guidance/toolkit.hpp"
#include <boost/assert.hpp>
#include <iostream>
#include <vector>
using TurnInstruction = osrm::extractor::guidance::TurnInstruction;
using TurnType = osrm::extractor::guidance::TurnType;
using DirectionModifier = osrm::extractor::guidance::DirectionModifier;
namespace osrm
{
namespace engine
{
namespace guidance
{
namespace detail
{
bool canMergeTrivially(const PathData &destination, const PathData &source)
{
return destination.exit == 0 && destination.name_id == source.name_id &&
destination.travel_mode == source.travel_mode && isSilent(source.turn_instruction);
}
PathData forwardInto(PathData destination, const PathData &source)
{
// Merge a turn into a silent turn
// Overwrites turn instruction and increases exit NR
destination.exit = source.exit;
return destination;
}
PathData accumulateInto(PathData destination, const PathData &source)
{
// Merge a turn into a silent turn
// Overwrites turn instruction and increases exit NR
BOOST_ASSERT(canMergeTrivially(destination, source));
destination.exit = source.exit + 1;
return destination;
}
PathData mergeInto(PathData destination, const PathData &source)
{
if (source.turn_instruction == TurnInstruction::NO_TURN())
{
BOOST_ASSERT(canMergeTrivially(destination, source));
return detail::forwardInto(destination, source);
}
if (source.turn_instruction.type == TurnType::Suppressed)
{
return detail::forwardInto(destination, source);
}
if (source.turn_instruction.type == TurnType::StayOnRoundabout)
{
return detail::forwardInto(destination, source);
}
if (entersRoundabout(source.turn_instruction))
{
return detail::forwardInto(destination, source);
}
return destination;
}
} // namespace detail
void print(const std::vector<std::vector<PathData>> &leg_data)
{
std::cout << "Path\n";
int legnr = 0;
for (const auto &leg : leg_data)
{
std::cout << "\tLeg: " << ++legnr << "\n";
int segment = 0;
for (const auto &data : leg)
{
const auto type = static_cast<int>(data.turn_instruction.type);
const auto modifier = static_cast<int>(data.turn_instruction.direction_modifier);
std::cout << "\t\t[" << ++segment << "]: " << type << " " << modifier
<< " exit: " << data.exit << "\n";
}
}
std::cout << std::endl;
}
std::vector<std::vector<PathData>> postProcess(std::vector<std::vector<PathData>> leg_data)
{
if (leg_data.empty())
return leg_data;
#define PRINT_DEBUG 0
unsigned carry_exit = 0;
#if PRINT_DEBUG
std::cout << "[POSTPROCESSING ITERATION]" << std::endl;
std::cout << "Input\n";
print(leg_data);
#endif
// Count Street Exits forward
bool on_roundabout = false;
for (auto &path_data : leg_data)
{
if (not path_data.empty())
path_data[0].exit = carry_exit;
for (std::size_t data_index = 0; data_index + 1 < path_data.size(); ++data_index)
{
if (entersRoundabout(path_data[data_index].turn_instruction))
{
path_data[data_index].exit += 1;
on_roundabout = true;
}
if (isSilent(path_data[data_index].turn_instruction) &&
path_data[data_index].turn_instruction != TurnInstruction::NO_TURN())
{
path_data[data_index].exit += 1;
}
if (leavesRoundabout(path_data[data_index].turn_instruction))
{
if (!on_roundabout)
{
BOOST_ASSERT(leg_data[0][0].turn_instruction.type ==
TurnInstruction::NO_TURN());
if (path_data[data_index].turn_instruction.type == TurnType::ExitRoundabout)
leg_data[0][0].turn_instruction.type = TurnType::EnterRoundabout;
if (path_data[data_index].turn_instruction.type == TurnType::ExitRotary)
leg_data[0][0].turn_instruction.type = TurnType::EnterRotary;
path_data[data_index].exit += 1;
}
on_roundabout = false;
}
if (path_data[data_index].turn_instruction.type == TurnType::EnterRoundaboutAtExit)
{
path_data[data_index].exit += 1;
path_data[data_index].turn_instruction.type = TurnType::EnterRoundabout;
}
else if (path_data[data_index].turn_instruction.type == TurnType::EnterRotaryAtExit)
{
path_data[data_index].exit += 1;
path_data[data_index].turn_instruction.type = TurnType::EnterRotary;
}
if (isSilent(path_data[data_index].turn_instruction) ||
entersRoundabout(path_data[data_index].turn_instruction))
{
path_data[data_index + 1] =
detail::mergeInto(path_data[data_index + 1], path_data[data_index]);
}
carry_exit = path_data[data_index].exit;
}
}
#if PRINT_DEBUG
std::cout << "Merged\n";
print(leg_data);
#endif
on_roundabout = false;
// Move Roundabout exit numbers to front
for (auto rev_itr = leg_data.rbegin(); rev_itr != leg_data.rend(); ++rev_itr)
{
auto &path_data = *rev_itr;
for (std::size_t data_index = path_data.size(); data_index > 1; --data_index)
{
if (entersRoundabout(path_data[data_index - 1].turn_instruction))
{
if (!on_roundabout && !leavesRoundabout(path_data[data_index - 1].turn_instruction))
path_data[data_index - 1].exit = 0;
on_roundabout = false;
}
if (on_roundabout)
{
path_data[data_index - 2].exit = path_data[data_index - 1].exit;
}
if (leavesRoundabout(path_data[data_index - 1].turn_instruction) &&
!entersRoundabout(path_data[data_index - 1].turn_instruction))
{
path_data[data_index - 2].exit = path_data[data_index - 1].exit;
on_roundabout = true;
}
}
auto prev_leg = std::next(rev_itr);
if (!path_data.empty() && prev_leg != leg_data.rend())
{
if (on_roundabout && path_data[0].exit)
prev_leg->back().exit = path_data[0].exit;
}
}
#if PRINT_DEBUG
std::cout << "Move To Front\n";
print(leg_data);
#endif
// silence silent turns for good
for (auto &path_data : leg_data)
{
for (auto &data : path_data)
{
if (isSilent(data.turn_instruction) || (leavesRoundabout(data.turn_instruction) &&
!entersRoundabout(data.turn_instruction)))
{
data.turn_instruction = TurnInstruction::NO_TURN();
data.exit = 0;
}
}
}
return leg_data;
}
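// Illustrative summary (not part of the original source): postProcess makes three
// passes over the leg data -- exits are first counted forward across silent turns,
// each roundabout's final exit count is then moved back onto its entry
// instruction, and the remaining silent turns and plain roundabout exits are
// finally reset to NO_TURN().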
} // namespace guidance
} // namespace engine
} // namespace osrm
| 1 | 15,658 | missing: utility, cstddef also: vector is already included in the header (interface requires vectors as in/out types) | Project-OSRM-osrm-backend | cpp |
@@ -16,6 +16,9 @@ module.exports = {
'html',
'text-summary',
],
+ globals: {
+ fetchMock: '',
+ },
rootDir: '../../',
transform: {
'^.+\\.[jt]sx?$': '<rootDir>/node_modules/@wordpress/scripts/config/babel-transform', | 1 | const { preset } = require( '@wordpress/scripts/config/jest-unit.config' );
module.exports = {
preset,
collectCoverage: true,
collectCoverageFrom: [
'assets/**/**.js',
],
coverageDirectory: 'coverage',
coveragePathIgnorePatterns: [
'<rootDir>/build/',
'<rootDir>/node_modules/',
'<rootDir>/assets/js/googlesitekit-(.*)\.js',
],
coverageReporters: [
'html',
'text-summary',
],
rootDir: '../../',
transform: {
'^.+\\.[jt]sx?$': '<rootDir>/node_modules/@wordpress/scripts/config/babel-transform',
},
setupFiles: [
'<rootDir>/tests/js/setup-globals',
'<rootDir>/tests/js/setup-mocks',
'jest-localstorage-mock',
],
setupFilesAfterEnv: [
'<rootDir>/tests/js/jest-matchers',
'<rootDir>/tests/js/setup-before-after',
],
testMatch: [
'<rootDir>/assets/**/__tests__/**/*.js',
'<rootDir>/assets/**/test/*.js',
'<rootDir>/assets/**/?(*.)test.js',
],
testPathIgnorePatterns: [
'<rootDir>/.git',
'<rootDir>/node_modules',
'<rootDir>/build',
],
// Matches aliases in webpack.config.js.
moduleNameMapper: {
// New (JSR) modules.
'^googlesitekit-(.+)$': '<rootDir>assets/js/googlesitekit-$1',
},
};
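// Illustrative note (not part of the original config): with the mapping above,
// `import API from 'googlesitekit-api'` resolves to
// `<rootDir>assets/js/googlesitekit-api` during tests, mirroring the webpack alias.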
| 1 | 29,220 | I'm not sure this is necessary in the config here, as Jest would be setting the global `fetchMock = ''`. See below. | google-site-kit-wp | js |
@@ -57,7 +57,7 @@ public abstract class Message implements Part, Body {
final int MULTIPLIER = 31;
int result = 1;
- result = MULTIPLIER * result + mFolder.getName().hashCode();
+ result = MULTIPLIER * result + (mFolder != null ? mFolder.getName().hashCode() : 0);
result = MULTIPLIER * result + mUid.hashCode();
return result;
} | 1 |
package com.fsck.k9.mail;
import java.io.IOException;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Set;
import android.support.annotation.NonNull;
import com.fsck.k9.mail.filter.CountingOutputStream;
import com.fsck.k9.mail.filter.EOLConvertingOutputStream;
import timber.log.Timber;
public abstract class Message implements Part, Body {
public enum RecipientType {
TO, CC, BCC, X_ORIGINAL_TO, DELIVERED_TO, X_ENVELOPE_TO
}
protected String mUid;
private Set<Flag> mFlags = EnumSet.noneOf(Flag.class);
private Date mInternalDate;
protected Folder mFolder;
public boolean olderThan(Date earliestDate) {
if (earliestDate == null) {
return false;
}
Date myDate = getSentDate();
if (myDate == null) {
myDate = getInternalDate();
}
if (myDate != null) {
return myDate.before(earliestDate);
}
return false;
}
@Override
public boolean equals(Object o) {
if (o == null || !(o instanceof Message)) {
return false;
}
Message other = (Message)o;
return (getUid().equals(other.getUid())
&& getFolder().getName().equals(other.getFolder().getName()));
}
@Override
public int hashCode() {
final int MULTIPLIER = 31;
int result = 1;
result = MULTIPLIER * result + mFolder.getName().hashCode();
result = MULTIPLIER * result + mUid.hashCode();
return result;
}
public String getUid() {
return mUid;
}
public void setUid(String uid) {
this.mUid = uid;
}
public Folder getFolder() {
return mFolder;
}
public abstract String getSubject();
public abstract void setSubject(String subject);
public Date getInternalDate() {
return mInternalDate;
}
public void setInternalDate(Date internalDate) {
this.mInternalDate = internalDate;
}
public abstract Date getSentDate();
public abstract void setSentDate(Date sentDate, boolean hideTimeZone);
public abstract Address[] getRecipients(RecipientType type);
public abstract void setRecipients(RecipientType type, Address[] addresses);
public void setRecipient(RecipientType type, Address address) {
setRecipients(type, new Address[] {
address
});
}
public abstract Address[] getFrom();
public abstract void setFrom(Address from);
public abstract Address[] getSender();
public abstract void setSender(Address sender);
public abstract Address[] getReplyTo();
public abstract void setReplyTo(Address[] from);
public abstract String getMessageId();
public abstract void setInReplyTo(String inReplyTo);
public abstract String[] getReferences();
public abstract void setReferences(String references);
@Override
public abstract Body getBody();
@Override
public abstract void addHeader(String name, String value);
@Override
public abstract void addRawHeader(String name, String raw);
@Override
public abstract void setHeader(String name, String value);
@NonNull
@Override
public abstract String[] getHeader(String name);
public abstract Set<String> getHeaderNames();
@Override
public abstract void removeHeader(String name);
@Override
public abstract void setBody(Body body);
public abstract boolean hasAttachments();
public abstract long getSize();
public void delete(String trashFolderName) throws MessagingException {}
/*
* TODO Refactor Flags at some point to be able to store user defined flags.
*/
public Set<Flag> getFlags() {
return Collections.unmodifiableSet(mFlags);
}
/**
* @param flag
* Flag to set. Never <code>null</code>.
* @param set
* If <code>true</code>, the flag is added. If <code>false</code>
* , the flag is removed.
* @throws MessagingException
*/
public void setFlag(Flag flag, boolean set) throws MessagingException {
if (set) {
mFlags.add(flag);
} else {
mFlags.remove(flag);
}
}
/**
     * This method calls setFlag(Flag, boolean) for each flag in the set.
     * @param flags Flags to add or remove.
     * @param set If <code>true</code>, the flags are added. If <code>false</code>, they are removed.
*/
public void setFlags(final Set<Flag> flags, boolean set) throws MessagingException {
for (Flag flag : flags) {
setFlag(flag, set);
}
}
public boolean isSet(Flag flag) {
return mFlags.contains(flag);
}
public void destroy() throws MessagingException {}
@Override
public abstract void setEncoding(String encoding) throws MessagingException;
public abstract void setCharset(String charset) throws MessagingException;
public long calculateSize() {
try {
CountingOutputStream out = new CountingOutputStream();
EOLConvertingOutputStream eolOut = new EOLConvertingOutputStream(out);
writeTo(eolOut);
eolOut.flush();
return out.getCount();
} catch (IOException e) {
Timber.e(e, "Failed to calculate a message size");
} catch (MessagingException e) {
Timber.e(e, "Failed to calculate a message size");
}
return 0;
}
/**
* Copy the contents of this object into another {@code Message} object.
*
* @param destination The {@code Message} object to receive the contents of this instance.
*/
protected void copy(Message destination) {
destination.mUid = mUid;
destination.mInternalDate = mInternalDate;
destination.mFolder = mFolder;
// mFlags contents can change during the object lifetime, so copy the Set
destination.mFlags = EnumSet.copyOf(mFlags);
}
/**
* Creates a new {@code Message} object with the same content as this object.
*
* <p>
* <strong>Note:</strong>
* This method was introduced as a hack to prevent {@code ConcurrentModificationException}s. It
* shouldn't be used unless absolutely necessary. See the comment in
* {@link com.fsck.k9.activity.MessageView.Listener#loadMessageForViewHeadersAvailable(com.fsck.k9.Account, String, String, Message)}
* for more information.
* </p>
*/
@Override
public abstract Message clone();
}
| 1 | 16,166 | Can `mFolder == null` happen during regular operations? Or is it only so we can be lazy in tests? | k9mail-k-9 | java |
@@ -0,0 +1,8 @@
+require "migrate"
+
+class AddPreferredEmailFormat < ActiveRecord::Migration
+ def change
+ create_enumeration :email_format_enum, %w(text_only multipart)
+ add_column :users, :preferred_email_format, :email_format_enum, :null => false, :default => "multipart"
+ end
+end | 1 | 1 | 10,673 | I'd suggest using the actual MIME types here - so `text/plain` and `multipart/alternative` or does that cause problems with what postgres allows for enumeration names? If so then maybe just replace the slash with an underscore? | openstreetmap-openstreetmap-website | rb |
|
@@ -619,9 +619,9 @@ def package_put(owner, package_name, package_hash):
if team_access is None:
raise ApiException(
requests.codes.forbidden,
- ("%(user)s/%(pkg)s is private. To share it with the team, " +
- "run `quilt access add %(user)s/%(pkg)s team`.") %
- dict(user=owner, pkg=package_name)
+ ("%(team)s:%(user)s/%(pkg)s is private. To share it with the team, " +
+ "run `quilt access add %(team)s:%(user)s/%(pkg)s team`.") %
+ dict(team=app.config['TEAM_NAME'].lower(), user=owner, pkg=package_name)
)
# Insert an instance if it doesn't already exist. | 1 | # Copyright (c) 2017 Quilt Data, Inc. All rights reserved.
"""
API routes.
"""
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from functools import wraps
import json
import time
from urllib.parse import urlencode
import boto3
from flask import abort, g, redirect, render_template, request, Response
from flask_cors import CORS
from flask_json import as_json, jsonify
import httpagentparser
from jsonschema import Draft4Validator, ValidationError
from oauthlib.oauth2 import OAuth2Error
import re
import requests
from requests_oauthlib import OAuth2Session
import sqlalchemy as sa
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import undefer
import stripe
from . import app, db
from .analytics import MIXPANEL_EVENT, mp
from .const import PaymentPlan, PUBLIC, TEAM, VALID_NAME_RE, VALID_EMAIL_RE
from .core import decode_node, find_object_hashes, hash_contents, FileNode, GroupNode, RootNode
from .models import (Access, Customer, Event, Instance, Invitation, Log, Package,
S3Blob, Tag, Version)
from .schemas import LOG_SCHEMA, PACKAGE_SCHEMA
QUILT_CDN = 'https://cdn.quiltdata.com/'
DEPLOYMENT_ID = app.config['DEPLOYMENT_ID']
OAUTH_ACCESS_TOKEN_URL = app.config['OAUTH']['access_token_url']
OAUTH_AUTHORIZE_URL = app.config['OAUTH']['authorize_url']
OAUTH_CLIENT_ID = app.config['OAUTH']['client_id']
OAUTH_CLIENT_SECRET = app.config['OAUTH']['client_secret']
OAUTH_REDIRECT_URL = app.config['OAUTH']['redirect_url']
OAUTH_USER_API = app.config['OAUTH']['user_api']
OAUTH_PROFILE_API = app.config['OAUTH']['profile_api']
OAUTH_HAVE_REFRESH_TOKEN = app.config['OAUTH']['have_refresh_token']
CATALOG_URL = app.config['CATALOG_URL']
CATALOG_REDIRECT_URL = '%s/oauth_callback' % CATALOG_URL
QUILT_AUTH_URL = app.config['QUILT_AUTH_URL']
AUTHORIZATION_HEADER = 'Authorization'
INVITE_SEND_URL = app.config['INVITE_SEND_URL']
PACKAGE_BUCKET_NAME = app.config['PACKAGE_BUCKET_NAME']
PACKAGE_URL_EXPIRATION = app.config['PACKAGE_URL_EXPIRATION']
ALLOW_ANONYMOUS_ACCESS = app.config['ALLOW_ANONYMOUS_ACCESS']
ALLOW_TEAM_ACCESS = app.config['ALLOW_TEAM_ACCESS']
ENABLE_USER_ENDPOINTS = app.config['ENABLE_USER_ENDPOINTS']
S3_HEAD_OBJECT = 'head_object'
S3_GET_OBJECT = 'get_object'
S3_PUT_OBJECT = 'put_object'
OBJ_DIR = 'objs'
# Limit the JSON metadata to 100MB.
# This is mostly a sanity check; it's already limited by app.config['MAX_CONTENT_LENGTH'].
MAX_METADATA_SIZE = 100 * 1024 * 1024
PREVIEW_MAX_CHILDREN = 10
PREVIEW_MAX_DEPTH = 4
s3_client = boto3.client(
's3',
endpoint_url=app.config.get('S3_ENDPOINT'),
aws_access_key_id=app.config.get('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=app.config.get('AWS_SECRET_ACCESS_KEY')
)
stripe.api_key = app.config['STRIPE_SECRET_KEY']
HAVE_PAYMENTS = bool(stripe.api_key)
class QuiltCli(httpagentparser.Browser):
look_for = 'quilt-cli'
version_markers = [('/', '')]
httpagentparser.detectorshub.register(QuiltCli())
class PythonPlatform(httpagentparser.DetectorBase):
def __init__(self, name):
super().__init__()
self.name = name
self.look_for = name
info_type = 'python_platform'
version_markers = [('/', '')]
for python_name in ['CPython', 'Jython', 'PyPy']:
httpagentparser.detectorshub.register(PythonPlatform(python_name))
### Web routes ###
def _create_session(next=''):
return OAuth2Session(
client_id=OAUTH_CLIENT_ID,
redirect_uri=OAUTH_REDIRECT_URL,
state=json.dumps(dict(next=next))
)
@app.route('/healthcheck')
def healthcheck():
"""ELB health check; just needs to return a 200 status code."""
return Response("ok", content_type='text/plain')
ROBOTS_TXT = '''
User-agent: *
Disallow: /
'''.lstrip()
@app.route('/robots.txt')
def robots():
"""Disallow crawlers; there's nothing useful for them here."""
return Response(ROBOTS_TXT, mimetype='text/plain')
def _valid_catalog_redirect(next):
return next is None or next.startswith(CATALOG_REDIRECT_URL)
def _validate_username(username):
if not VALID_NAME_RE.fullmatch(username):
raise ApiException(
requests.codes.bad,
"""
Username is not valid. Usernames must start with a letter or underscore, and
contain only alphanumeric characters and underscores thereafter.
""")
@app.route('/login')
def login():
next = request.args.get('next')
if not _valid_catalog_redirect(next):
return render_template('oauth_fail.html', error="Invalid redirect", QUILT_CDN=QUILT_CDN)
session = _create_session(next=next)
url, state = session.authorization_url(url=OAUTH_AUTHORIZE_URL)
return redirect(url)
@app.route('/oauth_callback')
def oauth_callback():
# TODO: Check `state`? Do we need CSRF protection here?
try:
state = json.loads(request.args.get('state', '{}'))
except ValueError:
abort(requests.codes.bad_request)
if not isinstance(state, dict):
abort(requests.codes.bad_request)
next = state.get('next')
if not _valid_catalog_redirect(next):
abort(requests.codes.bad_request)
common_tmpl_args = dict(
QUILT_CDN=QUILT_CDN,
CATALOG_URL=CATALOG_URL,
)
error = request.args.get('error')
if error is not None:
return render_template('oauth_fail.html', error=error, **common_tmpl_args)
code = request.args.get('code')
if code is None:
abort(requests.codes.bad_request)
session = _create_session()
try:
resp = session.fetch_token(
token_url=OAUTH_ACCESS_TOKEN_URL,
code=code,
client_secret=OAUTH_CLIENT_SECRET
)
if next:
return redirect('%s#%s' % (next, urlencode(resp)))
else:
token = resp['refresh_token' if OAUTH_HAVE_REFRESH_TOKEN else 'access_token']
return render_template('oauth_success.html', code=token, **common_tmpl_args)
except OAuth2Error as ex:
return render_template('oauth_fail.html', error=ex.error, **common_tmpl_args)
@app.route('/api/token', methods=['POST'])
@as_json
def token():
refresh_token = request.values.get('refresh_token')
if refresh_token is None:
abort(requests.codes.bad_request)
if not OAUTH_HAVE_REFRESH_TOKEN:
return dict(
refresh_token='',
access_token=refresh_token,
expires_at=float('inf')
)
session = _create_session()
try:
resp = session.refresh_token(
token_url=OAUTH_ACCESS_TOKEN_URL,
client_id=OAUTH_CLIENT_ID, # Why??? The session object already has it!
client_secret=OAUTH_CLIENT_SECRET,
refresh_token=refresh_token
)
except OAuth2Error as ex:
return dict(error=ex.error)
return dict(
refresh_token=resp['refresh_token'],
access_token=resp['access_token'],
expires_at=resp['expires_at']
)
### API routes ###
# Allow CORS requests to API routes.
# The "*" origin is more secure than specific origins because it blocks cookies.
# Cache the settings for a day to avoid pre-flight requests.
CORS(app, resources={"/api/*": {"origins": "*", "max_age": timedelta(days=1)}})
class Auth:
"""
Info about the user making the API request.
"""
def __init__(self, user, email, is_logged_in, is_admin):
self.user = user
self.email = email
self.is_logged_in = is_logged_in
self.is_admin = is_admin
class ApiException(Exception):
"""
Base class for API exceptions.
"""
def __init__(self, status_code, message):
super().__init__()
self.status_code = status_code
self.message = message
class PackageNotFoundException(ApiException):
"""
API exception for missing packages.
"""
def __init__(self, owner, package, logged_in=True):
message = "Package %s/%s does not exist" % (owner, package)
if not logged_in:
message = "%s (do you need to log in?)" % message
super().__init__(requests.codes.not_found, message)
@app.errorhandler(ApiException)
def handle_api_exception(error):
"""
Converts an API exception into an error response.
"""
_mp_track(
type="exception",
status_code=error.status_code,
message=error.message,
)
response = jsonify(dict(
message=error.message
))
response.status_code = error.status_code
return response
def api(require_login=True, schema=None, enabled=True, require_admin=False):
"""
Decorator for API requests.
Handles auth and adds the username as the first argument.
"""
if require_admin:
require_login=True
if schema is not None:
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema)
else:
validator = None
def innerdec(f):
@wraps(f)
def wrapper(*args, **kwargs):
g.auth = Auth(user=None, email=None, is_logged_in=False, is_admin=False)
user_agent_str = request.headers.get('user-agent', '')
g.user_agent = httpagentparser.detect(user_agent_str, fill_none=True)
if not enabled:
raise ApiException(requests.codes.bad_request,
"This endpoint is not enabled.")
if validator is not None:
try:
validator.validate(request.get_json(cache=True))
except ValidationError as ex:
raise ApiException(requests.codes.bad_request, ex.message)
auth = request.headers.get(AUTHORIZATION_HEADER)
g.auth_header = auth
if auth is None:
if require_login or not ALLOW_ANONYMOUS_ACCESS:
raise ApiException(requests.codes.unauthorized, "Not logged in")
else:
headers = {
AUTHORIZATION_HEADER: auth
}
try:
resp = requests.get(OAUTH_USER_API, headers=headers)
resp.raise_for_status()
data = resp.json()
# TODO(dima): Generalize this.
user = data.get('current_user', data.get('login'))
assert user
email = data['email']
is_admin = data.get('is_staff', False)
g.auth = Auth(user=user, email=email, is_logged_in=True, is_admin=is_admin)
except requests.HTTPError as ex:
if resp.status_code == requests.codes.unauthorized:
raise ApiException(
requests.codes.unauthorized,
"Invalid credentials"
)
else:
raise ApiException(requests.codes.server_error, "Server error")
except (ConnectionError, requests.RequestException) as ex:
raise ApiException(requests.codes.server_error, "Server error")
if require_admin and not g.auth.is_admin:
raise ApiException(
requests.codes.forbidden,
"Must be authenticated as an admin to use this endpoint."
)
return f(*args, **kwargs)
return wrapper
return innerdec
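# Build the visibility filter for the requester: public packages (when anonymous access
# is enabled), the user's own packages, and team packages (when team access is enabled).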
def _access_filter(auth):
query = []
if ALLOW_ANONYMOUS_ACCESS:
query.append(PUBLIC)
if auth.is_logged_in:
assert auth.user not in [None, PUBLIC, TEAM] # Sanity check
query.append(auth.user)
if ALLOW_TEAM_ACCESS:
query.append(TEAM)
return Access.user.in_(query)
def _get_package(auth, owner, package_name):
"""
Helper for looking up a package and checking permissions.
Only useful for *_list functions; all others should use more efficient queries.
"""
package = (
Package.query
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(_access_filter(auth))
.one_or_none()
)
if package is None:
raise PackageNotFoundException(owner, package_name, auth.is_logged_in)
return package
def _get_instance(auth, owner, package_name, package_hash):
instance = (
Instance.query
.filter_by(hash=package_hash)
.options(undefer('contents')) # Contents is deferred by default.
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.join(Package.access)
.filter(_access_filter(auth))
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Package hash does not exist"
)
return instance
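# Report a usage event to Mixpanel, tagged with the requester, the source (CLI vs. web),
# and client/platform details parsed from the User-Agent header.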
def _mp_track(**kwargs):
if g.user_agent['browser']['name'] == 'QuiltCli':
source = 'cli'
else:
source = 'web'
# Use the user ID if the user is logged in; otherwise, let MP use the IP address.
distinct_id = g.auth.user
# Try to get the ELB's forwarded IP, and fall back to the actual IP (in dev).
ip_addr = request.headers.get('x-forwarded-for', request.remote_addr)
# Set common attributes sent with each event. kwargs cannot override these.
all_args = dict(
kwargs,
time=time.time(),
ip=ip_addr,
user=g.auth.user,
source=source,
browser_name=g.user_agent['browser']['name'],
browser_version=g.user_agent['browser']['version'],
platform_name=g.user_agent['platform']['name'],
platform_version=g.user_agent['platform']['version'],
python_name=g.user_agent.get('python_platform', {}).get('name'),
python_version=g.user_agent.get('python_platform', {}).get('version'),
deployment_id=DEPLOYMENT_ID,
)
mp.track(distinct_id, MIXPANEL_EVENT, all_args)
def _generate_presigned_url(method, owner, blob_hash):
return s3_client.generate_presigned_url(
method,
Params=dict(
Bucket=PACKAGE_BUCKET_NAME,
Key='%s/%s/%s' % (OBJ_DIR, owner, blob_hash)
),
ExpiresIn=PACKAGE_URL_EXPIRATION
)
def _get_or_create_customer():
assert HAVE_PAYMENTS, "Payments are not enabled"
assert g.auth.user
db_customer = Customer.query.filter_by(id=g.auth.user).one_or_none()
if db_customer is None:
try:
# Insert a placeholder with no Stripe ID just to lock the row.
db_customer = Customer(id=g.auth.user)
db.session.add(db_customer)
db.session.flush()
except IntegrityError:
# Someone else just created it, so look it up.
db.session.rollback()
db_customer = Customer.query.filter_by(id=g.auth.user).one()
else:
# Create a new customer.
plan = PaymentPlan.FREE.value
customer = stripe.Customer.create(
email=g.auth.email,
description=g.auth.user,
)
stripe.Subscription.create(
customer=customer.id,
plan=plan,
)
db_customer.stripe_customer_id = customer.id
db.session.commit()
customer = stripe.Customer.retrieve(db_customer.stripe_customer_id)
assert customer.subscriptions.total_count == 1
return customer
def _get_customer_plan(customer):
return PaymentPlan(customer.subscriptions.data[0].plan.id)
@app.route('/api/blob/<owner>/<blob_hash>', methods=['GET'])
@api()
@as_json
def blob_get(owner, blob_hash):
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the owner can upload objects.")
return dict(
head=_generate_presigned_url(S3_HEAD_OBJECT, owner, blob_hash),
get=_generate_presigned_url(S3_GET_OBJECT, owner, blob_hash),
put=_generate_presigned_url(S3_PUT_OBJECT, owner, blob_hash),
)
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['PUT'])
@api(schema=PACKAGE_SCHEMA)
@as_json
def package_put(owner, package_name, package_hash):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the package owner can push packages.")
if not VALID_NAME_RE.match(package_name):
raise ApiException(requests.codes.bad_request, "Invalid package name")
# TODO: Description.
data = json.loads(request.data.decode('utf-8'), object_hook=decode_node)
dry_run = data.get('dry_run', False)
public = data.get('is_public', data.get('public', False))
team = data.get('is_team', False)
contents = data['contents']
sizes = data.get('sizes', {})
if public and not ALLOW_ANONYMOUS_ACCESS:
raise ApiException(requests.codes.forbidden, "Public access not allowed")
if team and not ALLOW_TEAM_ACCESS:
raise ApiException(requests.codes.forbidden, "Team access not allowed")
if hash_contents(contents) != package_hash:
raise ApiException(requests.codes.bad_request, "Wrong contents hash")
all_hashes = set(find_object_hashes(contents))
# Old clients don't send sizes. But if sizes are present, make sure they match the hashes.
if sizes and set(sizes) != all_hashes:
raise ApiException(requests.codes.bad_request, "Sizes don't match the hashes")
# Insert a package if it doesn't already exist.
# TODO: Separate endpoint for just creating a package with no versions?
package = (
Package.query
.with_for_update()
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if package is None:
# Check for case-insensitive matches, and reject the push.
package_ci = (
Package.query
.filter(
sa.and_(
sa.func.lower(Package.owner) == sa.func.lower(owner),
sa.func.lower(Package.name) == sa.func.lower(package_name)
)
)
.one_or_none()
)
if package_ci is not None:
raise ApiException(
requests.codes.forbidden,
"Package already exists: %s/%s" % (package_ci.owner, package_ci.name)
)
if HAVE_PAYMENTS and not public:
customer = _get_or_create_customer()
plan = _get_customer_plan(customer)
if plan == PaymentPlan.FREE:
raise ApiException(
requests.codes.payment_required,
("Insufficient permissions. Run `quilt push --public %s/%s` to make " +
"this package public, or upgrade your service plan to create " +
"private packages: https://quiltdata.com/profile.") %
(owner, package_name)
)
package = Package(owner=owner, name=package_name)
db.session.add(package)
owner_access = Access(package=package, user=owner)
db.session.add(owner_access)
if public:
public_access = Access(package=package, user=PUBLIC)
db.session.add(public_access)
if team:
team_access = Access(package=package, user=TEAM)
db.session.add(team_access)
else:
if public:
public_access = (
Access.query
.filter(sa.and_(
Access.package == package,
Access.user == PUBLIC
))
.one_or_none()
)
if public_access is None:
raise ApiException(
requests.codes.forbidden,
("%(user)s/%(pkg)s is private. To make it public, " +
"run `quilt access add %(user)s/%(pkg)s public`.") %
dict(user=owner, pkg=package_name)
)
if team:
team_access = (
Access.query
.filter(sa.and_(
Access.package == package,
Access.user == TEAM
))
.one_or_none()
)
if team_access is None:
raise ApiException(
requests.codes.forbidden,
("%(user)s/%(pkg)s is private. To share it with the team, " +
"run `quilt access add %(user)s/%(pkg)s team`.") %
dict(user=owner, pkg=package_name)
)
# Insert an instance if it doesn't already exist.
instance = (
Instance.query
.with_for_update()
.filter_by(package=package, hash=package_hash)
.one_or_none()
)
# No more error checking at this point, so return from dry-run early.
if dry_run:
db.session.rollback()
# List of signed URLs is potentially huge, so stream it.
def _generate():
yield '{"upload_urls":{'
for idx, blob_hash in enumerate(all_hashes):
comma = ('' if idx == 0 else ',')
value = dict(
head=_generate_presigned_url(S3_HEAD_OBJECT, owner, blob_hash),
put=_generate_presigned_url(S3_PUT_OBJECT, owner, blob_hash)
)
yield '%s%s:%s' % (comma, json.dumps(blob_hash), json.dumps(value))
yield '}}'
return Response(_generate(), content_type='application/json')
if instance is None:
instance = Instance(
package=package,
contents=contents,
hash=package_hash,
created_by=g.auth.user,
updated_by=g.auth.user
)
# Add all the hashes that don't exist yet.
blobs = (
S3Blob.query
.with_for_update()
.filter(
sa.and_(
S3Blob.owner == owner,
S3Blob.hash.in_(all_hashes)
)
)
.all()
) if all_hashes else []
blob_by_hash = { blob.hash: blob for blob in blobs }
for blob_hash in all_hashes:
blob_size = sizes.get(blob_hash)
blob = blob_by_hash.get(blob_hash)
if blob is None:
blob = S3Blob(owner=owner, hash=blob_hash, size=blob_size)
instance.blobs.append(blob)
else:
# Just update the contents dictionary.
# Nothing else could've changed without invalidating the hash.
instance.contents = contents
instance.updated_by = g.auth.user
db.session.add(instance)
# Insert a log.
log = Log(
package=package,
instance=instance,
author=owner,
)
db.session.add(log)
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.PUSH,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
extra=dict(
public=public
)
)
db.session.add(event)
db.session.commit()
_mp_track(
type="push",
package_owner=owner,
package_name=package_name,
public=public,
)
return dict(
package_url='%s/package/%s/%s' % (CATALOG_URL, owner, package_name)
)
@app.route('/api/package/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_get(owner, package_name, package_hash):
subpath = request.args.get('subpath')
instance = _get_instance(g.auth, owner, package_name, package_hash)
assert isinstance(instance.contents, RootNode)
subnode = instance.contents
for component in subpath.split('/') if subpath else []:
try:
subnode = subnode.children[component]
except (AttributeError, KeyError):
raise ApiException(requests.codes.not_found, "Invalid subpath: %r" % component)
all_hashes = set(find_object_hashes(subnode))
blobs = (
S3Blob.query
.filter(
sa.and_(
S3Blob.owner == owner,
S3Blob.hash.in_(all_hashes)
)
)
.all()
) if all_hashes else []
urls = {
blob_hash: _generate_presigned_url(S3_GET_OBJECT, owner, blob_hash)
for blob_hash in all_hashes
}
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.INSTALL,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
extra=dict(
subpath=subpath
)
)
db.session.add(event)
db.session.commit()
_mp_track(
type="install",
package_owner=owner,
package_name=package_name,
subpath=subpath,
)
return dict(
contents=instance.contents,
urls=urls,
sizes={blob.hash: blob.size for blob in blobs},
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
)
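# Recursively summarize a package tree for previews: at most PREVIEW_MAX_CHILDREN entries
# per group and PREVIEW_MAX_DEPTH levels, with longer listings truncated by a '...' entry.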
def _generate_preview(node, max_depth=PREVIEW_MAX_DEPTH):
if isinstance(node, GroupNode):
max_children = PREVIEW_MAX_CHILDREN if max_depth else 0
children_preview = [
(name, _generate_preview(child, max_depth - 1))
for name, child in sorted(node.children.items())[:max_children]
]
if len(node.children) > max_children:
children_preview.append(('...', None))
return children_preview
else:
return None
@app.route('/api/package_preview/<owner>/<package_name>/<package_hash>', methods=['GET'])
@api(require_login=False)
@as_json
def package_preview(owner, package_name, package_hash):
instance = _get_instance(g.auth, owner, package_name, package_hash)
assert isinstance(instance.contents, RootNode)
readme = instance.contents.children.get('README')
if isinstance(readme, FileNode):
assert len(readme.hashes) == 1
readme_url = _generate_presigned_url(S3_GET_OBJECT, owner, readme.hashes[0])
else:
readme_url = None
contents_preview = _generate_preview(instance.contents)
# Insert an event.
event = Event(
type=Event.Type.PREVIEW,
user=g.auth.user,
package_owner=owner,
package_name=package_name,
package_hash=package_hash,
)
db.session.add(event)
db.session.commit()
_mp_track(
type="preview",
package_owner=owner,
package_name=package_name,
)
return dict(
preview=contents_preview,
readme_url=readme_url,
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
)
@app.route('/api/package/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def package_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
instances = (
Instance.query
.filter_by(package=package)
)
return dict(
hashes=[instance.hash for instance in instances]
)
@app.route('/api/package/<owner>/<package_name>/', methods=['DELETE'])
@api()
@as_json
def package_delete(owner, package_name):
if g.auth.user != owner:
raise ApiException(requests.codes.forbidden,
"Only the package owner can delete packages.")
package = _get_package(g.auth, owner, package_name)
db.session.delete(package)
# Insert an event.
event = Event(
user=g.auth.user,
type=Event.Type.DELETE,
package_owner=owner,
package_name=package_name,
)
db.session.add(event)
db.session.commit()
return dict()
@app.route('/api/package/<owner>/', methods=['GET'])
@api(require_login=False)
@as_json
def user_packages(owner):
packages = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC),
sa.func.bool_or(Access.user == TEAM)
)
.filter_by(owner=owner)
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id)
.order_by(Package.name)
.all()
)
return dict(
packages=[
dict(
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages
]
)
@app.route('/api/admin/package_list/<owner>/', methods=['GET'])
@api(require_login=True, require_admin=True)
@as_json
def list_user_packages(owner):
packages = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC),
sa.func.bool_or(Access.user == TEAM)
)
.filter_by(owner=owner)
.join(Package.access)
.group_by(Package.id)
.order_by(Package.name)
.all()
)
return dict(
packages=[
dict(
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages
]
)
@app.route('/api/log/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def logs_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
logs = (
db.session.query(Log, Instance)
.filter_by(package=package)
.join(Log.instance)
# Sort chronologically, but rely on IDs in case of duplicate created times.
.order_by(Log.created, Log.id)
)
return dict(
logs=[dict(
hash=instance.hash,
created=log.created.timestamp(),
author=log.author
) for log, instance in logs]
)
VERSION_SCHEMA = {
'type': 'object',
'properties': {
'hash': {
'type': 'string'
}
},
'required': ['hash']
}
def normalize_version(version):
try:
version = Version.normalize(version)
except ValueError:
raise ApiException(requests.codes.bad_request, "Malformed version")
return version
@app.route('/api/version/<owner>/<package_name>/<package_version>', methods=['PUT'])
@api(schema=VERSION_SCHEMA)
@as_json
def version_put(owner, package_name, package_version):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can create versions"
)
user_version = package_version
package_version = normalize_version(package_version)
data = request.get_json()
package_hash = data['hash']
instance = (
Instance.query
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if instance is None:
raise ApiException(requests.codes.not_found, "Package hash does not exist")
version = Version(
package_id=instance.package_id,
version=package_version,
user_version=user_version,
instance=instance
)
try:
db.session.add(version)
db.session.commit()
except IntegrityError:
raise ApiException(requests.codes.conflict, "Version already exists")
return dict()
@app.route('/api/version/<owner>/<package_name>/<package_version>', methods=['GET'])
@api(require_login=False)
@as_json
def version_get(owner, package_name, package_version):
package_version = normalize_version(package_version)
package = _get_package(g.auth, owner, package_name)
instance = (
Instance.query
.join(Instance.versions)
.filter_by(package=package, version=package_version)
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Version %s does not exist" % package_version
)
_mp_track(
type="get_hash",
package_owner=owner,
package_name=package_name,
package_version=package_version,
)
return dict(
hash=instance.hash,
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
)
@app.route('/api/version/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def version_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
versions = (
db.session.query(Version, Instance)
.filter_by(package=package)
.join(Version.instance)
.all()
)
sorted_versions = sorted(versions, key=lambda row: row.Version.sort_key())
return dict(
versions=[
dict(
version=version.user_version,
hash=instance.hash
) for version, instance in sorted_versions
]
)
TAG_SCHEMA = {
'type': 'object',
'properties': {
'hash': {
'type': 'string'
}
},
'required': ['hash']
}
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['PUT'])
@api(schema=TAG_SCHEMA)
@as_json
def tag_put(owner, package_name, package_tag):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can modify tags"
)
data = request.get_json()
package_hash = data['hash']
instance = (
Instance.query
.filter_by(hash=package_hash)
.join(Instance.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if instance is None:
raise ApiException(requests.codes.not_found, "Package hash does not exist")
# Update an existing tag or create a new one.
tag = (
Tag.query
.with_for_update()
.filter_by(package_id=instance.package_id, tag=package_tag)
.one_or_none()
)
if tag is None:
tag = Tag(
package_id=instance.package_id,
tag=package_tag,
instance=instance
)
db.session.add(tag)
else:
tag.instance = instance
db.session.commit()
return dict()
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['GET'])
@api(require_login=False)
@as_json
def tag_get(owner, package_name, package_tag):
package = _get_package(g.auth, owner, package_name)
instance = (
Instance.query
.join(Instance.tags)
.filter_by(package=package, tag=package_tag)
.one_or_none()
)
if instance is None:
raise ApiException(
requests.codes.not_found,
"Tag %r does not exist" % package_tag
)
_mp_track(
type="get_hash",
package_owner=owner,
package_name=package_name,
package_tag=package_tag,
)
return dict(
hash=instance.hash,
created_by=instance.created_by,
created_at=instance.created_at.timestamp(),
updated_by=instance.updated_by,
updated_at=instance.updated_at.timestamp(),
)
@app.route('/api/tag/<owner>/<package_name>/<package_tag>', methods=['DELETE'])
@api()
@as_json
def tag_delete(owner, package_name, package_tag):
# TODO: Write access for collaborators.
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can delete tags"
)
tag = (
Tag.query
.with_for_update()
.filter_by(tag=package_tag)
.join(Tag.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if tag is None:
raise ApiException(
requests.codes.not_found,
"Package %s/%s tag %r does not exist" % (owner, package_name, package_tag)
)
db.session.delete(tag)
db.session.commit()
return dict()
@app.route('/api/tag/<owner>/<package_name>/', methods=['GET'])
@api(require_login=False)
@as_json
def tag_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
tags = (
db.session.query(Tag, Instance)
.filter_by(package=package)
.order_by(Tag.tag)
.join(Tag.instance)
.all()
)
return dict(
tags=[
dict(
tag=tag.tag,
hash=instance.hash
) for tag, instance in tags
]
)
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['PUT'])
@api()
@as_json
def access_put(owner, package_name, user):
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can grant access"
)
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header
}
package = (
Package.query
.with_for_update()
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if package is None:
raise PackageNotFoundException(owner, package_name)
if VALID_EMAIL_RE.match(user):
email = user.lower()
invitation = Invitation(package=package, email=email)
db.session.add(invitation)
db.session.commit()
# Call to Auth to send invitation email
resp = requests.post(INVITE_SEND_URL,
headers=auth_headers,
data=dict(email=email,
owner=g.auth.user,
package=package.name,
client_id=OAUTH_CLIENT_ID,
client_secret=OAUTH_CLIENT_SECRET,
callback_url=OAUTH_REDIRECT_URL))
if resp.status_code == requests.codes.unauthorized:
raise ApiException(
requests.codes.unauthorized,
"Invalid credentials"
)
elif resp.status_code != requests.codes.ok:
raise ApiException(requests.codes.server_error, "Server error")
return dict()
else:
_validate_username(user)
if user == PUBLIC:
if not ALLOW_ANONYMOUS_ACCESS:
raise ApiException(requests.codes.forbidden, "Public access not allowed")
elif user == TEAM:
if not ALLOW_TEAM_ACCESS:
raise ApiException(requests.codes.forbidden, "Team access not allowed")
else:
resp = requests.get(OAUTH_PROFILE_API % user,
headers=auth_headers)
if resp.status_code == requests.codes.not_found:
raise ApiException(
requests.codes.not_found,
"User %s does not exist" % user
)
elif resp.status_code != requests.codes.ok:
raise ApiException(
requests.codes.server_error,
"Unknown error"
)
try:
access = Access(package=package, user=user)
db.session.add(access)
db.session.commit()
except IntegrityError:
raise ApiException(requests.codes.conflict, "The user already has access")
return dict()
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['GET'])
@api()
@as_json
def access_get(owner, package_name, user):
_validate_username(user)
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can view access"
)
access = (
db.session.query(Access)
.filter_by(user=user)
.join(Access.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if access is None:
raise PackageNotFoundException(owner, package_name)
return dict()
@app.route('/api/access/<owner>/<package_name>/<user>', methods=['DELETE'])
@api()
@as_json
def access_delete(owner, package_name, user):
_validate_username(user)
if g.auth.user != owner:
raise ApiException(
requests.codes.forbidden,
"Only the package owner can revoke access"
)
if user == owner:
raise ApiException(
requests.codes.forbidden,
"Cannot revoke the owner's access"
)
if HAVE_PAYMENTS and user == PUBLIC:
customer = _get_or_create_customer()
plan = _get_customer_plan(customer)
if plan == PaymentPlan.FREE:
raise ApiException(
requests.codes.payment_required,
"Insufficient permissions. " +
"Upgrade your plan to create private packages: https://quiltdata.com/profile."
)
access = (
Access.query
.with_for_update()
.filter_by(user=user)
.join(Access.package)
.filter_by(owner=owner, name=package_name)
.one_or_none()
)
if access is None:
raise PackageNotFoundException(owner, package_name)
db.session.delete(access)
db.session.commit()
return dict()
@app.route('/api/access/<owner>/<package_name>/', methods=['GET'])
@api()
@as_json
def access_list(owner, package_name):
accesses = (
Access.query
.join(Access.package)
.filter_by(owner=owner, name=package_name)
)
can_access = [access.user for access in accesses]
is_collaborator = g.auth.user in can_access
is_public = ALLOW_ANONYMOUS_ACCESS and (PUBLIC in can_access)
is_team = ALLOW_TEAM_ACCESS and (TEAM in can_access)
if is_public or is_team or is_collaborator:
return dict(users=can_access)
else:
raise PackageNotFoundException(owner, package_name)
@app.route('/api/recent_packages/', methods=['GET'])
@api(require_login=False)
@as_json
def recent_packages():
try:
count = int(request.args.get('count', ''))
except ValueError:
count = 10
if ALLOW_ANONYMOUS_ACCESS:
max_visibility = PUBLIC
elif ALLOW_TEAM_ACCESS:
max_visibility = TEAM
else:
# Shouldn't really happen, but let's handle this case.
raise ApiException(requests.codes.forbidden, "Not allowed")
results = (
db.session.query(Package, sa.func.max(Instance.updated_at))
.join(Package.access)
.filter_by(user=max_visibility)
.join(Package.instances)
.group_by(Package.id)
.order_by(sa.func.max(Instance.updated_at).desc())
.limit(count)
.all()
)
return dict(
packages=[
dict(
owner=package.owner,
name=package.name,
updated_at=updated_at
) for package, updated_at in results
]
)
@app.route('/api/search/', methods=['GET'])
@api(require_login=False)
@as_json
def search():
query = request.args.get('q', '')
keywords = query.split()
if len(keywords) > 5:
# Let's not overload the DB with crazy queries.
raise ApiException(requests.codes.bad_request, "Too many search terms (max is 5)")
filter_list = [
sa.func.strpos(
sa.func.lower(sa.func.concat(Package.owner, '/', Package.name)),
sa.func.lower(keyword)
) > 0
for keyword in keywords
]
results = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC),
sa.func.bool_or(Access.user == TEAM)
)
.filter(sa.and_(*filter_list))
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id)
.order_by(
sa.func.lower(Package.owner),
sa.func.lower(Package.name)
)
.all()
)
return dict(
packages=[
dict(
owner=package.owner,
name=package.name,
is_public=is_public,
is_team=is_team,
) for package, is_public, is_team in results
]
)
@app.route('/api/profile', methods=['GET'])
@api()
@as_json
def profile():
if HAVE_PAYMENTS:
customer = _get_or_create_customer()
plan = _get_customer_plan(customer).value
have_cc = customer.sources.total_count > 0
else:
plan = None
have_cc = None
# Check for outstanding package sharing invitations
invitations = (
db.session.query(Invitation, Package)
.filter_by(email=g.auth.email.lower())
.join(Invitation.package)
)
for invitation, package in invitations:
access = Access(package=package, user=g.auth.user)
db.session.add(access)
db.session.delete(invitation)
if invitations:
db.session.commit()
# We want to show only the packages owned by or explicitly shared with the user -
# but also show whether they're public, in case a package is both public and shared with the user.
# So do a "GROUP BY" to get the public info, then "HAVING" to filter out packages that aren't shared.
packages = (
db.session.query(
Package,
sa.func.bool_or(Access.user == PUBLIC),
sa.func.bool_or(Access.user == TEAM)
)
.join(Package.access)
.filter(_access_filter(g.auth))
.group_by(Package.id)
.order_by(
sa.func.lower(Package.owner),
sa.func.lower(Package.name)
)
.having(sa.func.bool_or(Access.user == g.auth.user))
.all()
)
return dict(
packages=dict(
own=[
dict(
owner=package.owner,
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages if package.owner == g.auth.user
],
shared=[
dict(
owner=package.owner,
name=package.name,
is_public=is_public,
is_team=is_team,
)
for package, is_public, is_team in packages if package.owner != g.auth.user
],
),
plan=plan,
have_credit_card=have_cc,
)
@app.route('/api/payments/update_plan', methods=['POST'])
@api()
@as_json
def payments_update_plan():
if not HAVE_PAYMENTS:
raise ApiException(requests.codes.not_found, "Payments not enabled")
plan = request.values.get('plan')
try:
plan = PaymentPlan(plan)
except ValueError:
raise ApiException(requests.codes.bad_request, "Invalid plan: %r" % plan)
if plan not in (PaymentPlan.FREE, PaymentPlan.INDIVIDUAL, PaymentPlan.BUSINESS_ADMIN):
# Cannot switch to the BUSINESS_MEMBER plan manually.
raise ApiException(requests.codes.forbidden, "Not allowed to switch to plan: %r" % plan)
stripe_token = request.values.get('token')
customer = _get_or_create_customer()
if _get_customer_plan(customer) == PaymentPlan.BUSINESS_MEMBER:
raise ApiException(
requests.codes.forbidden,
"Not allowed to leave Business plan; contact your admin."
)
if stripe_token is not None:
customer.source = stripe_token
try:
customer.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.bad_request, str(ex))
assert customer.sources.total_count
if plan != PaymentPlan.FREE and not customer.sources.total_count:
# No payment info.
raise ApiException(
requests.codes.payment_required,
"Payment information required to upgrade to %r" % plan.value
)
subscription = customer.subscriptions.data[0]
subscription.plan = plan.value
try:
subscription.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.server_error, str(ex))
return dict(
plan=plan.value
)
@app.route('/api/payments/update_payment', methods=['POST'])
@api()
@as_json
def payments_update_payment():
if not HAVE_PAYMENTS:
raise ApiException(requests.codes.not_found, "Payments not enabled")
stripe_token = request.values.get('token')
if not stripe_token:
raise ApiException(requests.codes.bad_request, "Missing token")
customer = _get_or_create_customer()
customer.source = stripe_token
try:
customer.save()
except stripe.InvalidRequestError as ex:
raise ApiException(requests.codes.bad_request, str(ex))
return dict()
@app.route('/api/invite/', methods=['GET'])
@api(require_login=False)
@as_json
def invitation_user_list():
invitations = (
db.session.query(Invitation, Package)
.filter_by(email=g.auth.email.lower())
.join(Invitation.package)
.all()
)
return dict(invitations=[dict(invitation_id=invite.id,
owner=package.owner,
package=package.name,
email=invite.email,
invited_at=invite.invited_at)
for invite, package in invitations])
@app.route('/api/invite/<owner>/<package_name>/', methods=['GET'])
@api()
@as_json
def invitation_package_list(owner, package_name):
package = _get_package(g.auth, owner, package_name)
invitations = (
Invitation.query
.filter_by(package_id=package.id)
)
return dict(invitations=[dict(invitation_id=invite.id,
owner=package.owner,
package=package.name,
email=invite.email,
invited_at=invite.invited_at)
for invite in invitations])
@app.route('/api/log', methods=['POST'])
@api(require_login=False, schema=LOG_SCHEMA)
@as_json
def client_log():
data = request.get_json()
for event in data:
_mp_track(**event)
return dict()
@app.route('/api/users/list', methods=['GET'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def list_users():
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header
}
user_list_api = "%s/accounts/users" % QUILT_AUTH_URL
resp = requests.get(user_list_api, headers=auth_headers)
if resp.status_code == requests.codes.not_found:
raise ApiException(
requests.codes.not_found,
"Cannot list users"
)
elif resp.status_code != requests.codes.ok:
raise ApiException(
requests.codes.server_error,
"Unknown error"
)
return resp.json()
@app.route('/api/users/list_detailed', methods=['GET'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def list_users_detailed():
package_counts_query = (
db.session.query(Package.owner, sa.func.count(Package.owner))
.group_by(Package.owner)
)
package_counts = dict(package_counts_query)
events = (
db.session.query(Event.user, Event.type, sa.func.count(Event.type))
.group_by(Event.user, Event.type)
)
event_results = defaultdict(int)
for event_user, event_type, event_count in events:
event_results[(event_user, event_type)] = event_count
# replicate code from list_users since endpoints aren't callable from each other
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header
}
user_list_api = "%s/accounts/users" % QUILT_AUTH_URL
users = requests.get(user_list_api, headers=auth_headers).json()
results = {
user['username'] : {
'packages' : package_counts.get(user['username'], 0),
'installs' : event_results[(user['username'], Event.Type.INSTALL)],
'previews' : event_results[(user['username'], Event.Type.PREVIEW)],
'pushes' : event_results[(user['username'], Event.Type.PUSH)],
'deletes' : event_results[(user['username'], Event.Type.DELETE)],
'status' : 'active' if user['is_active'] else 'disabled',
'last_seen' : user['last_login']
}
for user in users['results']
}
return {'users' : results}
@app.route('/api/users/create', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def create_user():
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header,
"Content-Type": "application/json",
"Accept": "application/json",
}
request_data = request.get_json()
user_create_api = '%s/accounts/users/' % QUILT_AUTH_URL
username = request_data.get('username')
_validate_username(username)
resp = requests.post(user_create_api, headers=auth_headers,
data=json.dumps({
"username": username,
"first_name": "",
"last_name": "",
"email": request_data.get('email'),
"is_superuser": False,
"is_staff": False,
"is_active": True,
"last_login": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
}))
if resp.status_code == requests.codes.not_found:
raise ApiException(
requests.codes.not_found,
"Cannot create user"
)
if resp.status_code == requests.codes.bad:
if resp.text == '{"email":["Enter a valid email address."]}':
raise ApiException(
requests.codes.bad,
"Please enter a valid email address."
)
raise ApiException(
requests.codes.bad,
"Bad request. Maybe there's already a user with the username you provided?"
)
elif resp.status_code != requests.codes.created:
raise ApiException(
requests.codes.server_error,
"Unknown error"
)
return resp.json()
@app.route('/api/users/disable', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def disable_user():
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header,
"Content-Type": "application/json",
"Accept": "application/json",
}
user_modify_api = '%s/accounts/users/' % QUILT_AUTH_URL
data = request.get_json()
username = data.get('username')
_validate_username(username)
resp = requests.patch("%s%s/" % (user_modify_api, username) , headers=auth_headers,
data=json.dumps({
'is_active' : False
}))
if resp.status_code == requests.codes.not_found:
raise ApiException(
resp.status_code,
"User to disable not found."
)
if resp.status_code != requests.codes.ok:
raise ApiException(
requests.codes.server_error,
"Unknown error"
)
return resp.json()
@app.route('/api/users/enable', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def enable_user():
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header,
"Content-Type": "application/json",
"Accept": "application/json",
}
user_modify_api = '%s/accounts/users/' % QUILT_AUTH_URL
data = request.get_json()
username = data.get('username')
_validate_username(username)
resp = requests.patch("%s%s/" % (user_modify_api, username) , headers=auth_headers,
data=json.dumps({
'is_active' : True
}))
if resp.status_code == requests.codes.not_found:
raise ApiException(
resp.status_code,
"User to enable not found."
)
if resp.status_code != requests.codes.ok:
raise ApiException(
requests.codes.server_error,
"Unknown error"
)
return resp.json()
# This endpoint is disabled pending a rework of authentication
@app.route('/api/users/delete', methods=['POST'])
@api(enabled=False, require_admin=True)
@as_json
def delete_user():
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header,
"Content-Type": "application/json",
"Accept": "application/json",
}
user_modify_api = '%s/accounts/users/' % QUILT_AUTH_URL
data = request.get_json()
username = data.get('username')
_validate_username(username)
resp = requests.delete("%s%s/" % (user_modify_api, username), headers=auth_headers)
if resp.status_code == requests.codes.not_found:
raise ApiException(
resp.status_code,
"User to delete not found."
)
if resp.status_code != requests.codes.ok:
raise ApiException(
resp.status_code,
"Unknown error"
)
return resp.json()
@app.route('/api/audit/<owner>/<package_name>/')
@api(require_admin=True)
@as_json
def audit_package(owner, package_name):
events = (
Event.query
.filter_by(package_owner=owner, package_name=package_name)
)
return dict(
events=[dict(
created=event.created.timestamp(),
user=event.user,
type=Event.Type(event.type).name,
package_owner=event.package_owner,
package_name=event.package_name,
package_hash=event.package_hash,
extra=event.extra,
) for event in events]
)
@app.route('/api/audit/<user>/')
@api(require_admin=True)
@as_json
def audit_user(user):
events = (
Event.query
.filter_by(user=user)
)
return dict(
events=[dict(
created=event.created.timestamp(),
user=event.user,
type=Event.Type(event.type).name,
package_owner=event.package_owner,
package_name=event.package_name,
package_hash=event.package_hash,
extra=event.extra,
) for event in events]
)
@app.route('/api/admin/package_summary')
@api(require_admin=True)
@as_json
def package_summary():
events = (
db.session.query(Event.package_owner, Event.package_name, Event.type,
sa.func.count(Event.type), sa.func.max(Event.created))
.group_by(Event.package_owner, Event.package_name, Event.type)
)
event_results = defaultdict(lambda: {'count':0})
packages = set()
for event_owner, event_package, event_type, event_count, latest in events:
package = "{owner}/{pkg}".format(owner=event_owner, pkg=event_package)
event_results[(package, event_type)] = {'latest':latest.timestamp(), 'count':event_count}
packages.add(package)
results = {
package : {
'installs' : event_results[(package, Event.Type.INSTALL)],
'previews' : event_results[(package, Event.Type.PREVIEW)],
'pushes' : event_results[(package, Event.Type.PUSH)],
'deletes' : event_results[(package, Event.Type.DELETE)]
} for package in packages
}
return {'packages' : results}
@app.route('/api/users/reset_password', methods=['POST'])
@api(enabled=ENABLE_USER_ENDPOINTS, require_admin=True)
@as_json
def reset_password():
auth_headers = {
AUTHORIZATION_HEADER: g.auth_header,
"Content-Type": "application/json",
"Accept": "application/json",
}
password_reset_api = '%s/accounts/users/' % QUILT_AUTH_URL
data = request.get_json()
username = data.get('username')
_validate_username(username)
resp = requests.post("%s%s/reset_pass/" % (password_reset_api, username), headers=auth_headers)
if resp.status_code == requests.codes.not_found:
raise ApiException(
resp.status_code,
"User not found."
)
if resp.status_code != requests.codes.ok:
raise ApiException(
requests.codes.server_error,
"Unknown error"
)
return resp.json()
| 1 | 16,163 | Ohh. `TeamName` is actually a "friendly" name displayed in the Catalog - not the name used in the CLI. So I guess we'll need a new variable here. (That is, this is going to be mainly a `quilt.yaml` change. You won't need the `.lower()`, though.) | quiltdata-quilt | py |
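Following up on the comment above: since `TEAM_NAME` is the catalog-facing display name, the CLI hint in the error text presumably needs a separate, machine-friendly identifier. The standalone sketch below only illustrates what the message would look like with such an id substituted; `team_id` is an assumed value (in practice it would come from a new `quilt.yaml` / app-config key that this diff does not define), and, per the comment, no `.lower()` normalization is applied.

```python
# Standalone illustration only. `team_id` stands in for a hypothetical CLI-facing
# team identifier; it is not an existing config key in this repository.

def team_private_message(team_id, owner, package_name):
    """Build the 'package is private' hint using a machine-friendly team id."""
    return (
        "%(team)s:%(user)s/%(pkg)s is private. To share it with the team, "
        "run `quilt access add %(team)s:%(user)s/%(pkg)s team`."
    ) % dict(team=team_id, user=owner, pkg=package_name)


if __name__ == '__main__':
    # Prints: acme:alice/sales_data is private. To share it with the team,
    # run `quilt access add acme:alice/sales_data team`.
    print(team_private_message('acme', 'alice', 'sales_data'))
```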