file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---
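Each row below pairs a `file_name` with a `prefix`/`suffix`/`middle` split of one source file, i.e. a fill-in-the-middle (FIM) triple. A minimal sketch of splicing a row back into the original file text, assuming rows load as dicts keyed by the column names above (the helper name `reassemble` is hypothetical):

```python
def reassemble(row: dict) -> str:
    """Rebuild the original source text from a FIM-split row."""
    # Key names follow the table header above; adjust if your loader differs.
    return row["prefix"] + row["middle"] + row["suffix"]

# Toy example (hypothetical row, not from this dataset):
row = {
    "file_name": "hello.py",
    "prefix": "def greet():\n    ",
    "middle": "print('hi')",
    "suffix": "\n",
}
assert reassemble(row) == "def greet():\n    print('hi')\n"
```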
consumer.go | package meter
import (
"encoding/json"
"github.com/pkg/errors"
)
// consumer is a component that processes responses and failures
type consumer interface {
processResponse(bg *background) next
processFailure(bg *background, err error) next
}
// defaultConsumerType continues the test no matter what happens
type defaultConsumerType struct{}
func (d defaultConsumerType) processResponse(bg *background) next {
return nextContinue
}
func (d defaultConsumerType) processFailure(_ *background, _ error) next {
return nextContinue
}
var defaultConsumer = &defaultConsumerType{}
type failDecision int
const (
abortOnFail failDecision = iota
ignoreOnFail
)
type dynamicConsumer struct {
// check is composed against the response; an error from it is treated as a failure
check composable
success composable
fail composable
template jsonRule
decision failDecision
}
func (d *dynamicConsumer) processResponse(bg *background) next {
return d.process(bg, KeyResponse)
}
func (d *dynamicConsumer) process(bg *background, key string) next {
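// validate the payload against the JSON template first, then run the
// check composable; any error from either is routed through processFailure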
if d.template != nil {
if err := compareTemplate(d.template, bg, bg.getLocalEnv(key)); err != nil {
return d.processFailure(bg, err)
}
}
if d.check != nil {
_, err := d.check.compose(bg)
if err != nil {
return d.processFailure(bg, err)
}
}
d.processSuccess(bg)
return nextContinue
}
func (d *dynamicConsumer) decideFailure(bg *background, err error) next {
//if bg.inDebug() {
// glog.Errorf("failed: %+v", err)
//} else {
// glog.Errorf("failed: %v", err)
//}
switch d.decision {
case abortOnFail:
return nextAbortPlan
case ignoreOnFail:
return nextContinue
}
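// any other decision value aborts the entire run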
return nextAbortAll
}
func (d *dynamicConsumer) processSuccess(bg *background) {
if d.success != nil {
_, _ = d.success.compose(bg)
}
}
func (d *dynamicConsumer) processFailure(bg *background, err error) next {
err = errors.Wrap(err, "process failure")
// move the error into the failure env so that failure processing runs without an error set
bg.setLocalEnv(KeyFailure, err.Error())
bg.setError(nil)
if d.fail != nil {
_, _ = d.fail.compose(bg)
}
n := d.decideFailure(bg, err)
bg.setError(err)
return n
}
func makeDynamicConsumer(check, success, fail interface{}, template json.RawMessage, failAction failDecision) (*dynamicConsumer, error) {
d := &dynamicConsumer{}
d.decision = failAction
var err error
d.check, _, err = makeComposable(check)
if err != nil {
return nil, err
}
d.success, _, err = makeComposable(success, optIgnoreError())
if err != nil {
return nil, err
}
d.fail, _, err = makeComposable(fail, optIgnoreError())
if err != nil {
return nil, err
}
d.template, err = makeJsonTemplate(template)
if err != nil {
return nil, err
}
return d, nil
}
|
ims_token.go | /*******************************************************************************
* IBM Confidential
* OCO Source Materials
* IBM Cloud Container Service, 5737-D43
* (C) Copyright IBM Corp. 2018, 2019 All Rights Reserved.
* The source code for this program is not published or otherwise divested of
* its trade secrets, irrespective of what has been deposited with
* the U.S. Copyright Office.
******************************************************************************/
package auth
import (
"strconv"
"go.uber.org/zap"
"github.com/IBM/ibmcloud-storage-volume-lib/provider/local"
"github.com/IBM/ibmcloud-storage-volume-lib/volume-providers/iam"
"github.com/IBM/ibmcloud-storage-volume-lib/lib/provider"
)
const (
// IMSToken is an IMS user ID and token
IMSToken = provider.AuthType("IMS_TOKEN")
// IAMAccessToken ...
IAMAccessToken = provider.AuthType("IAM_ACCESS_TOKEN")
)
// ForRefreshToken ...
func (ccf *ContextCredentialsFactory) ForRefreshToken(refreshToken string, logger *zap.Logger) (provider.ContextCredentials, error) {
accessToken, err := ccf.tokenExchangeService.ExchangeRefreshTokenForAccessToken(refreshToken, logger)
if err != nil {
// Must preserve provider error code in the ErrorProviderAccountTemporarilyLocked case
logger.Error("Unable to retrieve access token from refresh token", local.ZapError(err))
return provider.ContextCredentials{}, err
}
imsToken, err := ccf.tokenExchangeService.ExchangeAccessTokenForIMSToken(*accessToken, logger)
if err != nil {
// Must preserve provider error code in the ErrorProviderAccountTemporarilyLocked case
logger.Error("Unable to retrieve IAM token from access token", local.ZapError(err))
return provider.ContextCredentials{}, err
}
return forIMSToken("", imsToken), nil
}
// ForIAMAPIKey ...
func (ccf *ContextCredentialsFactory) ForIAMAPIKey(iamAccountID, apiKey string, logger *zap.Logger) (provider.ContextCredentials, error) {
imsToken, err := ccf.tokenExchangeService.ExchangeIAMAPIKeyForIMSToken(apiKey, logger)
if err != nil {
// Must preserve provider error code in the ErrorProviderAccountTemporarilyLocked case
logger.Error("Unable to retrieve IMS credentials from IAM API key", local.ZapError(err))
return provider.ContextCredentials{}, err
}
return forIMSToken(iamAccountID, imsToken), nil
}
// ForIAMAccessToken ...
func (ccf *ContextCredentialsFactory) ForIAMAccessToken(apiKey string, logger *zap.Logger) (provider.ContextCredentials, error) {
iamAccessToken, err := ccf.tokenExchangeService.ExchangeIAMAPIKeyForAccessToken(apiKey, logger)
if err != nil {
logger.Error("Unable to retrieve IAM access toekn from IAM API key", local.ZapError(err))
return provider.ContextCredentials{}, err
}
iamAccountID, err := ccf.tokenExchangeService.GetIAMAccountIDFromAccessToken(iam.AccessToken{Token: iamAccessToken.Token}, logger)
if err != nil {
logger.Error("Unable to retrieve IAM access toekn from IAM API key", local.ZapError(err))
return provider.ContextCredentials{}, err
}
return forIAMAccessToken(iamAccountID, iamAccessToken), nil
}
// forIMSToken ...
func forIMSToken(iamAccountID string, imsToken *iam.IMSToken) provider.ContextCredentials {
return provider.ContextCredentials{
AuthType: IMSToken,
IAMAccountID: iamAccountID,
UserID: strconv.Itoa(imsToken.UserID),
Credential: imsToken.Token,
}
}
// forIAMAccessToken ...
func forIAMAccessToken(iamAccountID string, iamAccessToken *iam.AccessToken) provider.ContextCredentials {
return provider.ContextCredentials{
AuthType: IAMAccessToken,
IAMAccountID: iamAccountID,
Credential: iamAccessToken.Token,
}
}
|
fake_api.py | # coding: utf-8
"""
Swagger Petstore */ ' \" =end -- \\r\\n \\n \\r
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ */ ' \" =end --
OpenAPI spec version: 1.0.0 */ ' \" =end -- \\r\\n \\n \\r
Contact: apiteam@swagger.io */ ' \" =end -- \\r\\n \\n \\r
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FakeApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def test_code_inject____end__rn_n_r(self, **kwargs):
"""
To test code injection */ ' \" =end -- \\r\\n \\n \\r
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.test_code_inject____end__rn_n_r(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str test_code_inject____end____rn_n_r: To test code injection */ ' \" =end -- \\r\\n \\n \\r
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.test_code_inject____end__rn_n_r_with_http_info(**kwargs)
else:
(data) = self.test_code_inject____end__rn_n_r_with_http_info(**kwargs)
return data
def test_code_inject____end__rn_n_r_with_http_info(self, **kwargs):
"""
To test code injection */ ' \" =end -- \\r\\n \\n \\r
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.test_code_inject____end__rn_n_r_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str test_code_inject____end____rn_n_r: To test code injection */ ' \" =end -- \\r\\n \\n \\r
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['test_code_inject____end____rn_n_r']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_code_inject____end__rn_n_r" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/fake'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'test_code_inject____end____rn_n_r' in params:
form_params.append(('test code inject */ \' " =end -- \r\n \n \r', params['test_code_inject____end____rn_n_r']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', '*/ \" =end -- '])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', '*/ \" =end -- '])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
base.rs | use crate::back::metadata::create_compressed_metadata_file;
use crate::back::write::{
compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::{par_iter, ParallelIterator};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, EntryFnType, OutputType};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_target::abi::{Align, VariantIdx};
use std::convert::TryFrom;
use std::ops::{Deref, DerefMut};
use std::time::{Duration, Instant};
use itertools::Itertools;
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
match op {
hir::BinOpKind::Eq => IntPredicate::IntEQ,
hir::BinOpKind::Ne => IntPredicate::IntNE,
hir::BinOpKind::Lt => {
if signed {
IntPredicate::IntSLT
} else {
IntPredicate::IntULT
}
}
hir::BinOpKind::Le => {
if signed {
IntPredicate::IntSLE
} else {
IntPredicate::IntULE
}
}
hir::BinOpKind::Gt => {
if signed {
IntPredicate::IntSGT
} else {
IntPredicate::IntUGT
}
}
hir::BinOpKind::Ge => {
if signed {
IntPredicate::IntSGE
} else {
IntPredicate::IntUGE
}
}
op => bug!(
"comparison_op_to_icmp_predicate: expected comparison operator, \
found {:?}",
op
),
}
}
pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
match op {
hir::BinOpKind::Eq => RealPredicate::RealOEQ,
hir::BinOpKind::Ne => RealPredicate::RealUNE,
hir::BinOpKind::Lt => RealPredicate::RealOLT,
hir::BinOpKind::Le => RealPredicate::RealOLE,
hir::BinOpKind::Gt => RealPredicate::RealOGT,
hir::BinOpKind::Ge => RealPredicate::RealOGE,
op => {
bug!(
"comparison_op_to_fcmp_predicate: expected comparison operator, \
found {:?}",
op
);
}
}
}
pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
lhs: Bx::Value,
rhs: Bx::Value,
t: Ty<'tcx>,
ret_ty: Bx::Type,
op: hir::BinOpKind,
) -> Bx::Value {
let signed = match t.kind() {
ty::Float(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
let cmp = bx.fcmp(cmp, lhs, rhs);
return bx.sext(cmp, ret_ty);
}
ty::Uint(_) => false,
ty::Int(_) => true,
_ => bug!("compare_simd_types: invalid SIMD type"),
};
let cmp = bin_op_to_icmp_predicate(op, signed);
let cmp = bx.icmp(cmp, lhs, rhs);
// LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
// to get the correctly sized type. This will compile to a single instruction
// once the IR is converted to assembly if the SIMD instruction is supported
// by the target architecture.
bx.sext(cmp, ret_ty)
}
/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
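/// For example (illustrative): unsizing `[T; N]` to `[T]` yields the constant
/// length `N`, while unsizing to a trait object yields a vtable pointer.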
pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<Bx::Value>,
) -> Bx::Value {
let cx = bx.cx();
let (source, target) =
cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
match (source.kind(), target.kind()) {
(&ty::Array(_, len), &ty::Slice(_)) => {
cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
}
(&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
let old_info =
old_info.expect("unsized_info: missing old info for trait upcasting coercion");
if data_a.principal_def_id() == data_b.principal_def_id() {
return old_info;
}
// trait upcasting coercion
let vptr_entry_idx =
cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
if let Some(entry_idx) = vptr_entry_idx {
let ptr_ty = cx.type_i8p();
let ptr_align = cx.tcx().data_layout.pointer_align.abi;
let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
let gep = bx.inbounds_gep(
ptr_ty,
llvtable,
&[bx.const_usize(u64::try_from(entry_idx).unwrap())],
);
let new_vptr = bx.load(ptr_ty, gep, ptr_align);
bx.nonnull_metadata(new_vptr);
// Vtable loads are invariant.
bx.set_invariant_load(new_vptr);
new_vptr
} else {
old_info
}
}
(_, &ty::Dynamic(ref data, ..)) => {
let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
cx.layout_of(cx.tcx().mk_mut_ptr(target)),
1,
true,
);
cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
src: Bx::Value,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>,
old_info: Option<Bx::Value>, | | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_layout = bx.cx().layout_of(src_ty);
let dst_layout = bx.cx().layout_of(dst_ty);
if src_ty == dst_ty {
return (src, old_info.unwrap());
}
let mut result = None;
for i in 0..src_layout.fields.count() {
let src_f = src_layout.field(bx.cx(), i);
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
if src_f.is_zst() {
continue;
}
assert_eq!(src_layout.size, src_f.size);
let dst_f = dst_layout.field(bx.cx(), i);
assert_ne!(src_f.ty, dst_f.ty);
assert_eq!(result, None);
result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
}
let (lldata, llextra) = result.unwrap();
let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
(bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
}
_ => bug!("unsize_ptr: called on bad types"),
}
}
/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
src: PlaceRef<'tcx, Bx::Value>,
dst: PlaceRef<'tcx, Bx::Value>,
) {
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
match (src_ty.kind(), dst_ty.kind()) {
(&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
let (base, info) = match bx.load_operand(src).val {
OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
OperandValue::Ref(..) => bug!(),
};
OperandValue::Pair(base, info).store(bx, dst);
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
let src_f = src.project_field(bx, i);
let dst_f = dst.project_field(bx, i);
if dst_f.layout.is_zst() {
continue;
}
if src_f.layout.ty == dst_f.layout.ty {
memcpy_ty(
bx,
dst_f.llval,
dst_f.align,
src_f.llval,
src_f.align,
src_f.layout,
MemFlags::empty(),
);
} else {
coerce_unsized_into(bx, src_f, dst_f);
}
}
}
_ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty,),
}
}
pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
cast_shift_rhs(bx, op, lhs, rhs)
}
fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
// Shifts may have any size int on the rhs
if op.is_shift() {
let mut rhs_llty = bx.cx().val_ty(rhs);
let mut lhs_llty = bx.cx().val_ty(lhs);
if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
rhs_llty = bx.cx().element_type(rhs_llty)
}
if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
lhs_llty = bx.cx().element_type(lhs_llty)
}
let rhs_sz = bx.cx().int_width(rhs_llty);
let lhs_sz = bx.cx().int_width(lhs_llty);
if lhs_sz < rhs_sz {
bx.trunc(rhs, lhs_llty)
} else if lhs_sz > rhs_sz {
// FIXME (#1877): If in the future shifting by negative
// values is no longer undefined then this is wrong.
bx.zext(rhs, lhs_llty)
} else {
rhs
}
} else {
rhs
}
}
/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
sess.target.is_like_msvc
}
pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
dst: Bx::Value,
dst_align: Align,
src: Bx::Value,
src_align: Align,
layout: TyAndLayout<'tcx>,
flags: MemFlags,
) {
let size = layout.size.bytes();
if size == 0 {
return;
}
bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
instance: Instance<'tcx>,
) {
// this is an info! to allow collecting monomorphization statistics
// and to allow finding the last function before LLVM aborts from
// release builds.
info!("codegen_instance({})", instance);
mir::codegen_mir::<Bx>(cx, instance);
}
/// Creates the `main` function which will initialize the rust runtime and call
/// users main function.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
let main_is_local = main_def_id.is_local();
let instance = Instance::mono(cx.tcx(), main_def_id);
if main_is_local {
// We want to create the wrapper in the same codegen unit as Rust's main
// function.
if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
return None;
}
} else if !cx.codegen_unit().is_primary() {
// We want to create the wrapper only when the codegen unit is the primary one
return None;
}
let main_llfn = cx.get_fn_addr(instance);
let use_start_lang_item = EntryFnType::Start != entry_type;
let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, use_start_lang_item);
return Some(entry_fn);
fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
rust_main: Bx::Value,
rust_main_def_id: DefId,
use_start_lang_item: bool,
) -> Bx::Function {
// The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
// depending on whether the target needs `argc` and `argv` to be passed in.
let llfty = if cx.sess().target.main_needs_argc_argv {
cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
} else {
cx.type_func(&[], cx.type_int())
};
let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
// Given that `main()` has no arguments,
// then its return type cannot have
// late-bound regions, since late-bound
// regions must appear in the argument
// listing.
let main_ret_ty = cx.tcx().normalize_erasing_regions(
ty::ParamEnv::reveal_all(),
main_ret_ty.no_bound_vars().unwrap(),
);
let Some(llfn) = cx.declare_c_main(llfty) else {
// FIXME: We should be smart and show a better diagnostic here.
let span = cx.tcx().def_span(rust_main_def_id);
cx.sess()
.struct_span_err(span, "entry symbol `main` declared multiple times")
.help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
.emit();
cx.sess().abort_if_errors();
bug!();
};
// `main` should respect same config for frame pointer elimination as rest of code
cx.set_frame_pointer_type(llfn);
cx.apply_target_cpu_attr(llfn);
let llbb = Bx::append_block(&cx, llfn, "top");
let mut bx = Bx::build(&cx, llbb);
bx.insert_reference_to_gdb_debug_scripts_section_global();
let isize_ty = cx.type_isize();
let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
let (start_fn, start_ty, args) = if use_start_lang_item {
let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
let start_fn = cx.get_fn_addr(
ty::Instance::resolve(
cx.tcx(),
ty::ParamEnv::reveal_all(),
start_def_id,
cx.tcx().intern_substs(&[main_ret_ty.into()]),
)
.unwrap()
.unwrap(),
);
let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
(start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
} else {
debug!("using user-defined start fn");
let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
(rust_main, start_ty, vec![arg_argc, arg_argv])
};
let result = bx.call(start_ty, start_fn, &args, None);
let cast = bx.intcast(result, cx.type_int(), true);
bx.ret(cast);
llfn
}
}
/// Obtain the `argc` and `argv` values to pass to the rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
if cx.sess().target.main_needs_argc_argv {
// Params from native `main()` used as args for rust start function
let param_argc = bx.get_param(0);
let param_argv = bx.get_param(1);
let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
let arg_argv = param_argv;
(arg_argc, arg_argv)
} else {
// The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
let arg_argc = bx.const_int(cx.type_int(), 0);
let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
(arg_argc, arg_argv)
}
}
pub fn codegen_crate<B: ExtraBackendMethods>(
backend: B,
tcx: TyCtxt<'_>,
target_cpu: String,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> OngoingCodegen<B> {
// Skip crate items and just output metadata in -Z no-codegen mode.
if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);
ongoing_codegen.codegen_finished(tcx);
ongoing_codegen.check_for_errors(tcx.sess);
return ongoing_codegen;
}
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
// Run the monomorphization collector and partition the collected items into
// codegen units.
let codegen_units = tcx.collect_and_partition_mono_items(()).1;
// Force all codegen_unit queries so they are already either red or green
// when compile_codegen_unit accesses them. We are not able to re-execute
// the codegen_unit query from just the DepNode, so an unknown color would
// lead to having to re-execute compile_codegen_unit, possibly
// unnecessarily.
if tcx.dep_graph.is_fully_enabled() {
for cgu in codegen_units {
tcx.ensure().codegen_unit(cgu.name());
}
}
let metadata_module = if need_metadata_module {
// Emit compressed metadata object.
let metadata_cgu_name =
cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
tcx.sess.time("write_compressed_metadata", || {
let file_name =
tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
let data = create_compressed_metadata_file(
tcx.sess,
&metadata,
&exported_symbols::metadata_symbol_name(tcx),
);
if let Err(err) = std::fs::write(&file_name, data) {
tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
}
Some(CompiledModule {
name: metadata_cgu_name,
kind: ModuleKind::Metadata,
object: Some(file_name),
dwarf_object: None,
bytecode: None,
})
})
} else {
None
};
let ongoing_codegen = start_async_codegen(
backend.clone(),
tcx,
target_cpu,
metadata,
metadata_module,
codegen_units.len(),
);
let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
// Codegen an allocator shim, if necessary.
//
// If the crate doesn't have an `allocator_kind` set then there's definitely
// no shim to generate. Otherwise we also check our dependency graph for all
// our output crate types. If anything there looks like its a `Dynamic`
// linkage, then it's already got an allocator shim and we'll be using that
// one instead. If nothing exists then it's our job to generate the
// allocator!
let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
use rustc_middle::middle::dependency_format::Linkage;
list.iter().any(|&linkage| linkage == Linkage::Dynamic)
});
let allocator_module = if any_dynamic_crate {
None
} else if let Some(kind) = tcx.allocator_kind(()) {
let llmod_id =
cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
let mut module_llvm = backend.new_metadata(tcx, &llmod_id);
tcx.sess.time("write_allocator_module", || {
backend.codegen_allocator(
tcx,
&mut module_llvm,
&llmod_id,
kind,
tcx.lang_items().oom().is_some(),
)
});
Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
} else {
None
};
if let Some(allocator_module) = allocator_module {
ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
}
// For better throughput during parallel processing by LLVM, we used to sort
// CGUs largest to smallest. This would lead to better thread utilization
// by, for example, preventing a large CGU from being processed last and
// having only one LLVM thread working while the rest remained idle.
//
// However, this strategy would lead to high memory usage, as it meant the
// LLVM-IR for all of the largest CGUs would be resident in memory at once.
//
// Instead, we can compromise by ordering CGUs such that the largest and
// smallest are first, second largest and smallest are next, etc. If there
// are large size variations, this can reduce memory usage significantly.
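// For example (illustrative): with size estimates [1, 2, 3, 4, 5, 6], sorting
// ascending and interleaving the reversed second half [6, 5, 4] with the
// first half [1, 2, 3] yields the order [6, 1, 5, 2, 4, 3].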
let codegen_units: Vec<_> = {
let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());
let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
second_half.iter().rev().interleave(first_half).copied().collect()
};
// The non-parallel compiler can only translate codegen units to LLVM IR
// on a single thread, leading to a staircase effect where the N LLVM
// threads have to wait on the single codegen threads to generate work
// for them. The parallel compiler does not have this restriction, so
// we can pre-load the LLVM queue in parallel before handing off
// coordination to the OnGoingCodegen scheduler.
//
// This likely is a temporary measure. Once we don't have to support the
// non-parallel compiler anymore, we can compile CGUs end-to-end in
// parallel and get rid of the complicated scheduling logic.
#[cfg(parallel_compiler)]
let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
tcx.sess.time("compile_first_CGU_batch", || {
// Try to find one CGU to compile per thread.
let cgus: Vec<_> = cgu_reuse
.iter()
.enumerate()
.filter(|&(_, reuse)| reuse == &CguReuse::No)
.take(tcx.sess.threads())
.collect();
// Compile the found CGUs in parallel.
let start_time = Instant::now();
let pre_compiled_cgus = par_iter(cgus)
.map(|(i, _)| {
let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
(i, module)
})
.collect();
(pre_compiled_cgus, start_time.elapsed())
})
};
#[cfg(not(parallel_compiler))]
let pre_compile_cgus = |_: &[CguReuse]| (FxHashMap::default(), Duration::new(0, 0));
let mut cgu_reuse = Vec::new();
let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
let mut total_codegen_time = Duration::new(0, 0);
let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
for (i, cgu) in codegen_units.iter().enumerate() {
ongoing_codegen.wait_for_signal_to_codegen_item();
ongoing_codegen.check_for_errors(tcx.sess);
// Do some setup work in the first iteration
if pre_compiled_cgus.is_none() {
// Calculate the CGU reuse
cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
});
// Pre compile some CGUs
let (compiled_cgus, codegen_time) = pre_compile_cgus(&cgu_reuse);
pre_compiled_cgus = Some(compiled_cgus);
total_codegen_time += codegen_time;
}
let cgu_reuse = cgu_reuse[i];
tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
match cgu_reuse {
CguReuse::No => {
let (module, cost) =
if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
cgu
} else {
let start_time = Instant::now();
let module = backend.compile_codegen_unit(tcx, cgu.name());
total_codegen_time += start_time.elapsed();
module
};
// This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
// guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
// compilation hang on post-monomorphization errors.
tcx.sess.abort_if_errors();
submit_codegened_module_to_llvm(
&backend,
&ongoing_codegen.coordinator_send,
module,
cost,
);
false
}
CguReuse::PreLto => {
submit_pre_lto_module_to_llvm(
&backend,
tcx,
&ongoing_codegen.coordinator_send,
CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
},
);
true
}
CguReuse::PostLto => {
submit_post_lto_module_to_llvm(
&backend,
&ongoing_codegen.coordinator_send,
CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
},
);
true
}
};
}
ongoing_codegen.codegen_finished(tcx);
// Since the main thread is sometimes blocked during codegen, we keep track
// -Ztime-passes output manually.
if tcx.sess.time_passes() {
let end_rss = get_resident_set_size();
print_time_passes_entry(
"codegen_to_LLVM_IR",
total_codegen_time,
start_rss.unwrap(),
end_rss,
);
}
ongoing_codegen.check_for_errors(tcx.sess);
ongoing_codegen.into_inner()
}
/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
///
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
///
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
///
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault...
struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);
impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
fn into_inner(mut self) -> OngoingCodegen<B> {
self.0.take().unwrap()
}
}
impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
type Target = OngoingCodegen<B>;
fn deref(&self) -> &OngoingCodegen<B> {
self.0.as_ref().unwrap()
}
}
impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
self.0.as_mut().unwrap()
}
}
impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
fn drop(&mut self) {
if let Some(codegen) = self.0.take() {
codegen.codegen_aborted();
}
}
}
impl CrateInfo {
pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
let exported_symbols = tcx
.sess
.crate_types()
.iter()
.map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
.collect();
let local_crate_name = tcx.crate_name(LOCAL_CRATE);
let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
let windows_subsystem = subsystem.map(|subsystem| {
if subsystem != sym::windows && subsystem != sym::console {
tcx.sess.fatal(&format!(
"invalid windows subsystem `{}`, only \
`windows` and `console` are allowed",
subsystem
));
}
subsystem.to_string()
});
// This list is used when generating the command line to pass through to
// system linker. The linker expects undefined symbols on the left of the
// command line to be defined in libraries on the right, not the other way
// around. For more info, see some comments in the add_used_library function
// below.
//
// In order to get this left-to-right dependency ordering, we use the reverse
// postorder of all crates putting the leaves at the right-most positions.
let used_crates = tcx
.postorder_cnums(())
.iter()
.rev()
.copied()
.filter(|&cnum| !tcx.dep_kind(cnum).macros_only())
.collect();
let mut info = CrateInfo {
target_cpu,
exported_symbols,
local_crate_name,
compiler_builtins: None,
profiler_runtime: None,
is_no_builtins: Default::default(),
native_libraries: Default::default(),
used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
crate_name: Default::default(),
used_crates,
used_crate_source: Default::default(),
lang_item_to_crate: Default::default(),
missing_lang_items: Default::default(),
dependency_formats: tcx.dependency_formats(()).clone(),
windows_subsystem,
};
let lang_items = tcx.lang_items();
let crates = tcx.crates(());
let n_crates = crates.len();
info.native_libraries.reserve(n_crates);
info.crate_name.reserve(n_crates);
info.used_crate_source.reserve(n_crates);
info.missing_lang_items.reserve(n_crates);
for &cnum in crates.iter() {
info.native_libraries
.insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
info.crate_name.insert(cnum, tcx.crate_name(cnum));
info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum).clone());
if tcx.is_compiler_builtins(cnum) {
info.compiler_builtins = Some(cnum);
}
if tcx.is_profiler_runtime(cnum) {
info.profiler_runtime = Some(cnum);
}
if tcx.is_no_builtins(cnum) {
info.is_no_builtins.insert(cnum);
}
let missing = tcx.missing_lang_items(cnum);
for &item in missing.iter() {
if let Ok(id) = lang_items.require(item) {
info.lang_item_to_crate.insert(item, id.krate);
}
}
// No need to look for lang items that don't actually need to exist.
let missing =
missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
info.missing_lang_items.insert(cnum, missing);
}
info
}
}
pub fn provide(providers: &mut Providers) {
providers.backend_optimization_level = |tcx, cratenum| {
let for_speed = match tcx.sess.opts.optimize {
// If globally no optimisation is done, #[optimize] has no effect.
//
// This is done because if we ended up "upgrading" to `-O2` here, we'd populate the
// pass manager and it is likely that some module-wide passes (such as inliner or
// cross-function constant propagation) would ignore the `optnone` annotation we put
// on the functions, thus necessarily involving these functions into optimisations.
config::OptLevel::No => return config::OptLevel::No,
// If globally optimise-speed is already specified, just use that level.
config::OptLevel::Less => return config::OptLevel::Less,
config::OptLevel::Default => return config::OptLevel::Default,
config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
// If globally optimize-for-size has been requested, use -O2 instead (if optimize(size)
// are present).
config::OptLevel::Size => config::OptLevel::Default,
config::OptLevel::SizeMin => config::OptLevel::Default,
};
let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
for id in &*defids {
let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
match optimize {
attr::OptimizeAttr::None => continue,
attr::OptimizeAttr::Size => continue,
attr::OptimizeAttr::Speed => {
return for_speed;
}
}
}
tcx.sess.opts.optimize
};
}
fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
if !tcx.dep_graph.is_fully_enabled() {
return CguReuse::No;
}
let work_product_id = &cgu.work_product_id();
if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
// We don't have anything cached for this CGU. This can happen
// if the CGU did not exist in the previous session.
return CguReuse::No;
}
// Try to mark the CGU as green. If we can do so, it means that nothing
// affecting the LLVM module has changed and we can re-use a cached version.
// If we compile with any kind of LTO, this means we can re-use the bitcode
// of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
// know that later). If we are not doing LTO, there is only one optimized
// version of each module, so we re-use that.
let dep_node = cgu.codegen_dep_node(tcx);
assert!(
!tcx.dep_graph.dep_node_exists(&dep_node),
"CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
cgu.name()
);
if tcx.try_mark_green(&dep_node) {
// We can re-use either the pre- or the post-thinlto state. If no LTO is
// being performed then we can use post-LTO artifacts, otherwise we must
// reuse pre-LTO artifacts
match compute_per_cgu_lto_type(
&tcx.sess.lto(),
&tcx.sess.opts,
&tcx.sess.crate_types(),
ModuleKind::Regular,
) {
ComputedLtoType::No => CguReuse::PostLto,
_ => CguReuse::PreLto,
}
} else {
CguReuse::No
}
}
|
test_vizplugin.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
from contextlib import redirect_stdout
import io
from .cmdline_tmpl import CmdlineTmpl
from viztracer import VizTracer
from viztracer.vizplugin import VizPluginBase, VizPluginError
class MyPlugin(VizPluginBase):
def __init__(self, terminate_well=True):
self.event_counter = 0
self.handler_triggered = False
self.terminate_well = terminate_well
def support_version(self):
return "0.10.5"
def message(self, m_type, payload):
def f(data):
self.handler_triggered = True
self.event_counter += 1
if m_type == "event" and payload["when"] == "pre-save":
return {
"action": "handle_data",
"handler": f
}
if m_type == "command":
if payload["cmd_type"] == "terminate":
return {"success": self.terminate_well}
return {}
class MyPluginIncomplete(VizPluginBase):
pass
class MyPluginFuture(VizPluginBase):
def support_version(self):
return "9999.999.99"
class TestVizPlugin(CmdlineTmpl):
def test_basic(self):
pl = MyPlugin()
tracer = VizTracer(plugins=[pl])
tracer.start()
tracer.stop()
tracer.save()
self.assertEqual(pl.event_counter, 4)
self.assertEqual(pl.handler_triggered, True)
def test_invalid(self):
invalid_pl = []
with self.assertRaises(TypeError):
_ = VizTracer(plugins=[invalid_pl])
with self.assertRaises(NotImplementedError):
_ = VizTracer(plugins=[MyPluginIncomplete()])
def test_terminate(self):
pl = MyPlugin()
with VizTracer(plugins=[pl]):
_ = []
pl = MyPlugin(terminate_well=False)
with self.assertRaises(VizPluginError):
with VizTracer(plugins=[pl]):
_ = []
def test_version(self):
pl = MyPluginFuture()
s = io.StringIO()
with redirect_stdout(s):
with VizTracer(plugins=[pl]):
_ = []
self.assertIn("support version is higher", s.getvalue())
def test_cmdline(self):
self.template(["viztracer", "--plugin", "tests.modules.dummy_vizplugin", "--", "cmdline_test.py"])
self.template(["viztracer", "--plugin", "tests.modules.dummy_vizplugin_wrong", "--", "cmdline_test.py"], success=False)
self.template(["viztracer", "--plugin", "tests.modules", "--", "cmdline_test.py"], success=False)
self.template(["viztracer", "--plugin", "invalid", "--", "cmdline_test.py"], success=False)
|
pe011.rs | //! This is a solution to [Project Euler Problem 11](https://projecteuler.net/problem=11).
use std::cmp::max;
use std::fmt::Display;
use std::str;
const DATA: &[u8] = include_bytes!("pe011-data.txt");
enum Direction {
South,
East,
NorthEast,
SouthEast,
}
struct Grid {
data: Vec<Vec<usize>>,
}
impl Grid {
/// Parse the data file
fn from(data: &str) -> Grid {
let data: Vec<Vec<usize>> = data
.lines()
.map(|line| {
line.split_whitespace()
.map(|s| {
str::parse::<usize>(s)
.unwrap_or_else(|_| panic!("Failed to parse as usize: {}", s))
}).collect()
}).collect();
Grid { data }
}
/// Given a starting point and a direction, produce elements in a given direction until the edge of the grid is reached.
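/// For example (illustrative), starting at (0, 0) and going `SouthEast` yields
/// the main diagonal of the grid.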
fn line<'a>(
&'a self,
start_row: usize,
start_col: usize,
direction: &Direction,
) -> Vec<&usize> {
let mut r = start_row;
let mut c = start_col;
let mut v: Vec<&'a usize> = Vec::new();
while let Some(elem) = self.data.get(r).and_then(|row| row.get(c)) {
v.push(elem);
match direction {
Direction::South => {
r += 1;
}
Direction::East => {
c += 1;
}
Direction::NorthEast => {
if r == 0 {
break;
}
r -= 1;
c += 1;
}
Direction::SouthEast => {
r += 1;
c += 1;
}
};
}
v
}
}
pub fn solve() -> impl Display {
problem(20)
}
fn problem(n: usize) -> usize {
let data: &str = str::from_utf8(DATA).unwrap();
let grid = Grid::from(data);
// East going lines starting from column 0
let east = (0..n).map(|row_idx| grid.line(row_idx, 0, &Direction::East));
// South going lines starting from row 0
let south = (0..n).map(|col_idx| grid.line(0, col_idx, &Direction::South));
// There is a bit of overlap in the following lines, but that does not matter:
// we take a maximum over all windows, so seeing a window twice is harmless.
// South east going lines starting at column 0
let south_east_1 = (0..n).map(|row_idx| grid.line(row_idx, 0, &Direction::SouthEast));
// South east going lines starting at row 0
let south_east_2 = (0..n).map(|col_idx| grid.line(0, col_idx, &Direction::SouthEast));
// North east going lines starting at column 0
let north_east_1 = (0..n).map(|row_idx| grid.line(row_idx, 0, &Direction::NorthEast));
// North east going lines starting at the last row
let north_east_2 = (0..n).map(|col_idx| grid.line(n - 1, col_idx, &Direction::NorthEast));
// This becomes an iterator of lines going in various directions, (where the lines are of type Vec<&usize>).
let lines = east
.chain(south)
.chain(south_east_1)
.chain(south_east_2)
.chain(north_east_1)
.chain(north_east_2);
let mut result: usize = 0;
for line in lines {
// Cut every line into windows of four adjacent elements
let windows = line.windows(4);
for window in windows {
// Compute the product
let p: usize = window.iter().map(|x| *x).product();
// Store the largest seen product
result = max(result, p);
}
}
result
}
|
controllers.js | (function() {
'use strict';
var mesosApp = angular.module('mesos');
function hasSelectedText() {
if (window.getSelection) { // All browsers except IE before version 9.
var range = window.getSelection();
return range.toString().length > 0;
}
return false;
}
// Invokes the pailer for the specified host and path using the
// specified window_title.
function pailer(host, path, window_title) {
var url = host + 'files/read.json?path=' + path;
var pailer =
window.open('static/pailer.html', url, 'width=580px, height=700px');
// Need to use window.onload instead of document.ready to make
// sure the title doesn't get overwritten.
pailer.onload = function() {
pailer.document.title = window_title + ' (' + host + ')';
};
}
function updateInterval(num_slaves) {
// TODO(bmahler): Increasing the update interval for large clusters
// is done purely to mitigate webui performance issues. Ideally we can
// keep a consistently fast rate for updating statistical information.
// For the full system state updates, it may make sense to break
// it up using pagination and/or splitting the endpoint.
if (num_slaves < 500) {
return 10000;
} else if (num_slaves < 1000) {
return 20000;
} else if (num_slaves < 5000) {
return 60000;
} else if (num_slaves < 10000) {
return 120000;
} else if (num_slaves < 15000) {
return 240000;
} else if (num_slaves < 20000) {
return 480000;
} else {
return 960000;
}
}
// Update the outermost scope with the new state.
function update($scope, $timeout, data) {
// Don't do anything if the data hasn't changed.
if ($scope.data == data) {
return true; // Continue polling.
}
$scope.state = JSON.parse(data);
// A cluster is named if the state returns a non-empty string name.
// Track whether this cluster is named in a Boolean for display purposes.
$scope.clusterNamed = !!$scope.state.cluster;
// Check for selected text, and allow up to 20 seconds to pass before
// potentially wiping the user highlighted text.
// TODO(bmahler): This is to avoid the annoying loss of highlighting when
// the tables update. Once we can have tighter granularity control on the
// angular.js dynamic table updates, we should remove this hack.
$scope.time_since_update += $scope.delay;
if (hasSelectedText() && $scope.time_since_update < 20000) {
return true;
}
$scope.data = data;
// Pass this pollTime to all relativeDate calls to make them all relative to
// the same moment in time.
//
// If relativeDate is called without a reference time, it instantiates a new
// Date to be the reference. Since there can be hundreds of dates on a given
// page, they would all be relative to slightly different moments in time.
$scope.pollTime = new Date();
// Update the maps.
$scope.slaves = {};
$scope.frameworks = {};
$scope.offers = {};
$scope.completed_frameworks = {};
$scope.active_tasks = [];
$scope.completed_tasks = [];
// Update the stats.
$scope.cluster = $scope.state.cluster;
$scope.total_cpus = 0;
$scope.total_mem = 0;
$scope.used_cpus = 0;
$scope.used_mem = 0;
$scope.offered_cpus = 0;
$scope.offered_mem = 0;
$scope.staged_tasks = $scope.state.staged_tasks;
$scope.started_tasks = $scope.state.started_tasks;
$scope.finished_tasks = $scope.state.finished_tasks;
$scope.killed_tasks = $scope.state.killed_tasks;
$scope.failed_tasks = $scope.state.failed_tasks;
$scope.lost_tasks = $scope.state.lost_tasks;
$scope.activated_slaves = $scope.state.activated_slaves;
$scope.deactivated_slaves = $scope.state.deactivated_slaves;
_.each($scope.state.slaves, function(slave) {
$scope.slaves[slave.id] = slave;
$scope.total_cpus += slave.resources.cpus;
$scope.total_mem += slave.resources.mem;
});
var setTaskMetadata = function(task) {
if (!task.executor_id) {
task.executor_id = task.id;
}
if (task.statuses.length > 0) {
task.start_time = task.statuses[0].timestamp * 1000;
task.finish_time =
task.statuses[task.statuses.length - 1].timestamp * 1000;
}
};
_.each($scope.state.frameworks, function(framework) {
$scope.frameworks[framework.id] = framework;
_.each(framework.offers, function(offer) {
$scope.offers[offer.id] = offer;
$scope.offered_cpus += offer.resources.cpus;
$scope.offered_mem += offer.resources.mem;
offer.framework_name = $scope.frameworks[offer.framework_id].name;
offer.hostname = $scope.slaves[offer.slave_id].hostname;
});
$scope.used_cpus += framework.resources.cpus;
$scope.used_mem += framework.resources.mem;
framework.cpus_share = 0;
if ($scope.total_cpus > 0) {
framework.cpus_share = framework.resources.cpus / $scope.total_cpus;
}
framework.mem_share = 0;
if ($scope.total_mem > 0) {
framework.mem_share = framework.resources.mem / $scope.total_mem;
}
framework.max_share = Math.max(framework.cpus_share, framework.mem_share);
// If the executor ID is empty, this is a command executor with an
// internal executor ID generated from the task ID.
// TODO(brenden): Remove this once
// https://issues.apache.org/jira/browse/MESOS-527 is fixed.
_.each(framework.tasks, setTaskMetadata);
_.each(framework.completed_tasks, setTaskMetadata);
$scope.active_tasks = $scope.active_tasks.concat(framework.tasks);
$scope.completed_tasks =
$scope.completed_tasks.concat(framework.completed_tasks);
});
_.each($scope.state.completed_frameworks, function(framework) {
$scope.completed_frameworks[framework.id] = framework;
_.each(framework.completed_tasks, setTaskMetadata);
});
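// Framework resource totals include outstanding offers, so subtract the
// offered amounts to get what is actually in use.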
$scope.used_cpus -= $scope.offered_cpus;
$scope.used_mem -= $scope.offered_mem;
$scope.idle_cpus = $scope.total_cpus - ($scope.offered_cpus + $scope.used_cpus);
$scope.idle_mem = $scope.total_mem - ($scope.offered_mem + $scope.used_mem);
$scope.time_since_update = 0;
$scope.$broadcast('state_updated');
return true; // Continue polling.
}
// Main controller that can be used to handle "global" events. E.g.,:
// $scope.$on('$afterRouteChange', function() { ...; });
//
// In addition, the MainCntl encapsulates the "view", allowing the
// active controller/view to easily access anything in scope (e.g.,
// the state).
mesosApp.controller('MainCntl', [
'$scope', '$http', '$location', '$timeout', '$modal',
function($scope, $http, $location, $timeout, $modal) {
$scope.doneLoading = true;
// Adding bindings into scope so that they can be used from within
// AngularJS expressions.
$scope._ = _;
$scope.stringify = JSON.stringify;
$scope.encodeURIComponent = encodeURIComponent;
$scope.basename = function(path) {
// This is only a basic version of basename that handles the cases we care
// about, rather than duplicating unix basename functionality perfectly.
if (path === '/') {
return path; // Handle '/'.
}
// Strip a trailing '/' if present.
if (path.length > 0 && path.lastIndexOf('/') === (path.length - 1)) {
path = path.substr(0, path.length - 1);
}
return path.substr(path.lastIndexOf('/') + 1);
};
$scope.$location = $location;
$scope.delay = 2000;
$scope.retry = 0;
$scope.time_since_update = 0;
// Ordered Array of path => activeTab mappings. On successful route changes,
// the `pathRegexp` values are matched against the current route. The first
// match will be used to set the active navbar tab.
var NAVBAR_PATHS = [
{
pathRegexp: /^\/slaves/,
tab: 'slaves'
},
{
pathRegexp: /^\/frameworks/,
tab: 'frameworks'
},
{
pathRegexp: /^\/offers/,
tab: 'offers'
}
];
// Set the active tab on route changes according to NAVBAR_PATHS.
$scope.$on('$routeChangeSuccess', function(event, current) {
var path = current.$$route.originalPath;
// Use _.some so the loop can exit on the first `pathRegexp` match.
var matched = _.some(NAVBAR_PATHS, function(nav) {
if (path.match(nav.pathRegexp)) {
$scope.navbarActiveTab = nav.tab;
return true;
}
});
if (!matched) $scope.navbarActiveTab = null;
});
var poll = function() {
$http.get('master/state.json',
{transformResponse: function(data) { return data; }})
.success(function(data) {
if (update($scope, $timeout, data)) {
$scope.delay = updateInterval(_.size($scope.slaves));
$timeout(poll, $scope.delay);
}
})
.error(function() {
if ($scope.delay >= 128000) {
$scope.delay = 2000;
} else {
$scope.delay = $scope.delay * 2;
}
var errorModal = $modal.open({
controller: function($scope, $modalInstance, scope) {
// Give the modal reference to the root scope so it can access the
// `retry` variable. It needs to be passed by reference, not by
// value, since its value is changed outside the scope of the
// modal.
$scope.rootScope = scope;
},
resolve: {
scope: function() { return $scope; }
},
templateUrl: "template/dialog/masterGone.html"
});
// Make it such that every time we hide the error-modal, we stop the
// countdown and restart the polling.
errorModal.result.then(function() {
if ($scope.countdown != null) {
if ($timeout.cancel($scope.countdown)) {
// Restart since they cancelled the countdown.
$scope.delay = 2000;
}
}
// Start polling again, but do it asynchronously (and wait at
// least a second because otherwise the error-modal won't get
// properly shown).
$timeout(poll, 1000);
});
$scope.retry = $scope.delay;
var countdown = function() {
if ($scope.retry === 0) {
errorModal.close();
} else {
$scope.retry = $scope.retry - 1000;
$scope.countdown = $timeout(countdown, 1000);
}
};
countdown();
});
};
poll();
}]);
mesosApp.controller('HomeCtrl', function($dialog, $scope) {
$scope.log = function($event) {
if (!$scope.state.external_log_file && !$scope.state.log_dir) {
$dialog.messageBox(
'Logging to a file is not enabled',
"Set the 'external_log_file' or 'log_dir' option if you wish to access the logs.",
[{label: 'Continue'}]
).open();
} else {
pailer(
'/mesos/',
'/master/log',
'Mesos Master');
}
};
});
mesosApp.controller('FrameworksCtrl', function() {});
mesosApp.controller('OffersCtrl', function() {});
mesosApp.controller('FrameworkCtrl', function($scope, $routeParams) {
var update = function() {
if ($routeParams.id in $scope.completed_frameworks) {
$scope.framework = $scope.completed_frameworks[$routeParams.id];
$scope.alert_message = 'This framework has terminated!';
$('#alert').show();
$('#framework').show();
} else if ($routeParams.id in $scope.frameworks) {
$scope.framework = $scope.frameworks[$routeParams.id];
$('#framework').show();
} else {
$scope.alert_message = 'No framework found with ID: ' + $routeParams.id;
$('#alert').show();
}
};
if ($scope.state) {
update();
}
var removeListener = $scope.$on('state_updated', update);
$scope.$on('$routeChangeStart', removeListener);
});
mesosApp.controller('SlavesCtrl', function() {});
mesosApp.controller('SlaveCtrl', [
'$dialog', '$scope', '$routeParams', '$http', '$q', '$timeout', 'top',
function($dialog, $scope, $routeParams, $http, $q, $timeout, $top) {
$scope.slave_id = $routeParams.slave_id;
var update = function() {
if (!($routeParams.slave_id in $scope.slaves)) {
$scope.alert_message = 'No slave found with ID: ' + $routeParams.slave_id;
$('#alert').show();
return;
}
var pid = $scope.slaves[$routeParams.slave_id].pid;
var id = pid.substring(0, pid.indexOf('@'));
var host = '/mesos/slave/' + $routeParams.slave_id + '/';
$scope.log = function($event) {
if (!$scope.state.external_log_file && !$scope.state.log_dir) {
$dialog.messageBox(
'Logging to a file is not enabled',
"Set the 'external_log_file' or 'log_dir' option if you wish to access the logs.",
[{label: 'Continue'}]
).open();
} else {
pailer(host, '/slave/log', 'Mesos Slave');
}
};
// Set up polling for the monitor if this is the first update.
if (!$top.started()) {
$top.start(host, $scope);
}
$http.jsonp('/mesos/slave/' + $routeParams.slave_id + '/' + id + '/state.json?jsonp=JSON_CALLBACK')
.success(function (response) {
$scope.state = response;
$scope.slave = {};
$scope.slave.frameworks = {};
$scope.slave.completed_frameworks = {};
$scope.slave.staging_tasks = 0;
$scope.slave.starting_tasks = 0;
$scope.slave.running_tasks = 0;
// Computes framework stats by setting new attributes on the 'framework'
// object.
function computeFrameworkStats(framework) {
framework.num_tasks = 0;
framework.cpus = 0;
framework.mem = 0;
_.each(framework.executors, function(executor) {
framework.num_tasks += _.size(executor.tasks);
framework.cpus += executor.resources.cpus;
framework.mem += executor.resources.mem;
});
}
// Compute framework stats and update slave's mappings of those
// frameworks.
_.each($scope.state.frameworks, function(framework) {
$scope.slave.frameworks[framework.id] = framework;
computeFrameworkStats(framework);
});
_.each($scope.state.completed_frameworks, function(framework) {
$scope.slave.completed_frameworks[framework.id] = framework;
computeFrameworkStats(framework);
});
$('#slave').show();
})
.error(function(reason) {
$scope.alert_message = 'Failed to get slave usage / state: ' + reason;
$('#alert').show();
});
};
if ($scope.state) {
update();
}
var removeListener = $scope.$on('state_updated', update);
$scope.$on('$routeChangeStart', removeListener);
}]);
mesosApp.controller('SlaveFrameworkCtrl', [
'$scope', '$routeParams', '$http', '$q', '$timeout', 'top',
function($scope, $routeParams, $http, $q, $timeout, $top) {
$scope.slave_id = $routeParams.slave_id;
$scope.framework_id = $routeParams.framework_id;
var update = function() {
if (!($routeParams.slave_id in $scope.slaves)) {
$scope.alert_message = 'No slave found with ID: ' + $routeParams.slave_id;
$('#alert').show();
return;
}
var pid = $scope.slaves[$routeParams.slave_id].pid;
var id = pid.substring(0, pid.indexOf('@'));
var host = '/mesos/slave/' + $routeParams.slave_id + '/';
// Set up polling for the monitor if this is the first update.
if (!$top.started()) {
$top.start(host, $scope);
}
$http.jsonp(host + id + '/state.json?jsonp=JSON_CALLBACK')
.success(function (response) {
$scope.state = response;
$scope.slave = {};
function matchFramework(framework) {
return $scope.framework_id === framework.id;
}
// Find the framework; it's either active or completed.
$scope.framework =
_.find($scope.state.frameworks, matchFramework) ||
_.find($scope.state.completed_frameworks, matchFramework);
if (!$scope.framework) {
$scope.alert_message = 'No framework found with ID: ' + $routeParams.framework_id;
$('#alert').show();
return;
}
// Compute the framework stats.
$scope.framework.num_tasks = 0;
$scope.framework.cpus = 0;
$scope.framework.mem = 0;
_.each($scope.framework.executors, function(executor) {
$scope.framework.num_tasks += _.size(executor.tasks);
$scope.framework.cpus += executor.resources.cpus;
$scope.framework.mem += executor.resources.mem;
});
$('#slave').show();
})
.error(function (reason) {
$scope.alert_message = 'Failed to get slave usage / state: ' + reason;
$('#alert').show();
});
};
if ($scope.state) {
update();
}
var removeListener = $scope.$on('state_updated', update);
$scope.$on('$routeChangeStart', removeListener);
}]);
mesosApp.controller('SlaveExecutorCtrl', [
'$scope', '$routeParams', '$http', '$q', '$timeout', 'top',
function($scope, $routeParams, $http, $q, $timeout, $top) {
$scope.slave_id = $routeParams.slave_id;
$scope.framework_id = $routeParams.framework_id;
$scope.executor_id = $routeParams.executor_id;
var update = function() {
if (!($routeParams.slave_id in $scope.slaves)) {
$scope.alert_message = 'No slave found with ID: ' + $routeParams.slave_id;
$('#alert').show();
return;
}
var pid = $scope.slaves[$routeParams.slave_id].pid;
var id = pid.substring(0, pid.indexOf('@'));
var host = '/mesos/slave/' + $routeParams.slave_id + '/';
// Set up polling for the monitor if this is the first update.
if (!$top.started()) {
$top.start(host, $scope);
}
$http.jsonp(host + id + '/state.json?jsonp=JSON_CALLBACK')
.success(function (response) {
$scope.state = response;
$scope.slave = {};
function matchFramework(framework) {
return $scope.framework_id === framework.id;
}
// Find the framework; it's either active or completed.
$scope.framework =
_.find($scope.state.frameworks, matchFramework) ||
_.find($scope.state.completed_frameworks, matchFramework);
if (!$scope.framework) {
$scope.alert_message = 'No framework found with ID: ' + $routeParams.framework_id;
$('#alert').show();
return;
}
function matchExecutor(executor) {
return $scope.executor_id === executor.id;
}
// Look for the executor; it's either active or completed.
$scope.executor =
_.find($scope.framework.executors, matchExecutor) ||
_.find($scope.framework.completed_executors, matchExecutor);
if (!$scope.executor) {
$scope.alert_message = 'No executor found with ID: ' + $routeParams.executor_id;
$('#alert').show();
return;
}
$('#slave').show();
})
.error(function (reason) {
$scope.alert_message = 'Failed to get slave usage / state: ' + reason;
$('#alert').show();
});
};
if ($scope.state) {
update();
}
var removeListener = $scope.$on('state_updated', update);
$scope.$on('$routeChangeStart', removeListener);
}]);
// Reroutes a request like
// '//mesos/slave/:slave_id/frameworks/:framework_id/executors/:executor_id/browse'
// to the executor's sandbox. This requires a second request because the
// directory to browse is known by the slave but not by the master. Request
// the directory from the slave, and then redirect to it.
//
// TODO(ssorallen): Add `executor.directory` to the state.json output so this
// rerouting controller is no longer necessary.
mesosApp.controller('SlaveExecutorRerouterCtrl',
function($alert, $http, $location, $routeParams, $scope, $window) {
function goBack(flashMessageOrOptions) {
if (flashMessageOrOptions) {
$alert.danger(flashMessageOrOptions);
}
if ($window.history.length > 1) {
// If the browser has something in its history, just go back.
$window.history.back();
} else {
// Otherwise navigate to the framework page, which is likely the
// previous page anyway.
$location.path('/frameworks/' + $routeParams.framework_id).replace();
}
}
// When navigating directly to this page, e.g. pasting the URL into the
// browser, the previous page is not a page in Mesos. In that case, navigate
// home.
if (!$scope.slaves) {
$alert.danger({
message: "Navigate to the slave's sandbox via the Mesos UI.",
title: "Failed to find slaves."
});
return $location.path('/').replace();
}
var slave = $scope.slaves[$routeParams.slave_id];
// If the slave doesn't exist, send the user back.
if (!slave) {
return goBack("Slave with ID '" + $routeParams.slave_id + "' does not exist.");
}
var pid = slave.pid;
var id = pid.substring(0, pid.indexOf('@'));
var host = '/mesos/slave/' + $routeParams.slave_id + '/';
// Request slave details to get access to the routed executor's "directory"
// so we can navigate directly to the executor's sandbox.
$http.jsonp(host + id + '/state.json?jsonp=JSON_CALLBACK')
.success(function(response) {
function matchFramework(framework) {
return $routeParams.framework_id === framework.id;
}
var framework =
_.find(response.frameworks, matchFramework) ||
_.find(response.completed_frameworks, matchFramework);
if (!framework) {
return goBack(
"Framework with ID '" + $routeParams.framework_id +
"' does not exist on slave with ID '" + $routeParams.slave_id +
"'."
);
}
function matchExecutor(executor) {
return $routeParams.executor_id === executor.id;
}
var executor =
_.find(framework.executors, matchExecutor) ||
_.find(framework.completed_executors, matchExecutor);
if (!executor) {
return goBack(
"Executor with ID '" + $routeParams.executor_id +
"' does not exist on slave with ID '" + $routeParams.slave_id +
"'."
);
}
// Navigate to a path like '/mesos/slave/:id/browse?path=%2Ftmp%2F', the
// recognized "browse" endpoint for a slave.
$location.path('slaves/' + $routeParams.slave_id + '/browse')
.search({path: executor.directory})
.replace();
})
.error(function(response) {
$alert.danger({
bullets: [
"The slave is not accessible",
"The slave timed out or went offline"
],
message: "Potential reasons:",
title: "Failed to connect to slave '" + $routeParams.slave_id +
"' on '" + host + "'."
});
// Is the slave dead? Navigate home since returning to the slave might
// end up in an endless loop.
$location.path('/').replace();
});
});
mesosApp.controller('BrowseCtrl', function($scope, $routeParams, $http) {
var update = function() {
if ($routeParams.slave_id in $scope.slaves && $routeParams.path) {
$scope.slave_id = $routeParams.slave_id;
$scope.path = $routeParams.path;
var url = '/mesos/slave/' + $scope.slave_id + '/files/browse.json?jsonp=JSON_CALLBACK';
$scope.pail = function($event, path) {
pailer('/mesos/slave/' + $scope.slave_id + '/', path, decodeURIComponent(path));
};
$scope.slave_host = '/mesos/slave/' + $scope.slave_id + '/';
// TODO(bmahler): Try to get the error code / body in the error callback.
// This wasn't working with the current version of angular.
$http.jsonp(url, {params: {path: $routeParams.path}})
.success(function(data) {
$scope.listing = data;
$('#listing').show();
})
.error(function() {
$scope.alert_message = 'Error browsing path: ' + $routeParams.path;
$('#alert').show();
});
} else {
if (!($routeParams.slave_id in $scope.slaves)) {
$scope.alert_message = 'No slave found with ID: ' + $routeParams.slave_id;
} else {
$scope.alert_message = 'Missing "path" request parameter.';
}
$('#alert').show();
}
};
if ($scope.state) {
update();
}
var removeListener = $scope.$on('state_updated', update);
$scope.$on('$routeChangeStart', removeListener);
});
})(); | };
// Set up polling for the monitor if this is the first update.
if (!$top.started()) { |
imaplib2.py | #!/usr/bin/env python
"""Threaded IMAP4 client.
Based on RFC 3501 and original imaplib module.
Public classes: IMAP4
IMAP4_SSL
IMAP4_stream
Public functions: Internaldate2Time
ParseFlags
Time2Internaldate
"""
__all__ = ("IMAP4", "IMAP4_SSL", "IMAP4_stream",
"Internaldate2Time", "ParseFlags", "Time2Internaldate")
__version__ = "2.33"
__release__ = "2"
__revision__ = "33"
__credits__ = """
Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
String method conversion by ESR, February 2001.
GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
IDLE via threads suggested by Philippe Normand <phil@respyre.org> January 2005.
GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
COMPRESS/DEFLATE contributed by Bron Gondwana <brong@brong.net> May 2009.
STARTTLS from Jython's imaplib by Alan Kennedy.
ID contributed by Dave Baggett <dave@baggett.org> November 2009.
Improved untagged responses handling suggested by Dave Baggett <dave@baggett.org> November 2009.
Improved thread naming, and 0 read detection contributed by Grant Edwards <grant.b.edwards@gmail.com> June 2010.
Improved timeout handling contributed by Ivan Vovnenko <ivovnenko@gmail.com> October 2010.
Timeout handling further improved by Ethan Glasser-Camp <glasse@cs.rpi.edu> December 2010.
Time2Internaldate() patch to match RFC2060 specification of English month names from bugs.python.org/issue11024 March 2011.
starttls() bug fixed with the help of Sebastian Spaeth <sebastian@sspaeth.de> April 2011.
Threads now set the "daemon" flag (suggested by offlineimap-project) April 2011.
Single quoting introduced with the help of Vladimir Marek <vladimir.marek@oracle.com> August 2011."""
__author__ = "Piers Lauder <piers@janeelix.com>"
__URL__ = "http://imaplib2.sourceforge.net"
__license__ = "Python License"
import binascii, errno, os, Queue, random, re, select, socket, sys, time, threading, zlib
select_module = select
# Globals
CRLF = '\r\n'
Debug = None # Backward compatibility
IMAP4_PORT = 143
IMAP4_SSL_PORT = 993
IDLE_TIMEOUT_RESPONSE = '* IDLE TIMEOUT\r\n'
IDLE_TIMEOUT = 60*29 # Don't stay in IDLE state longer
READ_POLL_TIMEOUT = 30 # Without this timeout, interrupted network connections can hang the reader
READ_SIZE = 32768 # Consume all available in socket
DFLT_DEBUG_BUF_LVL = 3 # Level above which the logging output goes directly to stderr
AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first
# Commands
CMD_VAL_STATES = 0
CMD_VAL_ASYNC = 1
NONAUTH, AUTH, SELECTED, LOGOUT = 'NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'
Commands = {
# name valid states asynchronous
'APPEND': ((AUTH, SELECTED), False),
'AUTHENTICATE': ((NONAUTH,), False),
'CAPABILITY': ((NONAUTH, AUTH, SELECTED), True),
'CHECK': ((SELECTED,), True),
'CLOSE': ((SELECTED,), False),
'COMPRESS': ((AUTH,), False),
'COPY': ((SELECTED,), True),
'CREATE': ((AUTH, SELECTED), True),
'DELETE': ((AUTH, SELECTED), True),
'DELETEACL': ((AUTH, SELECTED), True),
'EXAMINE': ((AUTH, SELECTED), False),
'EXPUNGE': ((SELECTED,), True),
'FETCH': ((SELECTED,), True),
'GETACL': ((AUTH, SELECTED), True),
'GETANNOTATION':((AUTH, SELECTED), True),
'GETQUOTA': ((AUTH, SELECTED), True),
'GETQUOTAROOT': ((AUTH, SELECTED), True),
'ID': ((NONAUTH, AUTH, LOGOUT, SELECTED), True),
'IDLE': ((SELECTED,), False),
'LIST': ((AUTH, SELECTED), True),
'LOGIN': ((NONAUTH,), False),
'LOGOUT': ((NONAUTH, AUTH, LOGOUT, SELECTED), False),
'LSUB': ((AUTH, SELECTED), True),
'MYRIGHTS': ((AUTH, SELECTED), True),
'NAMESPACE': ((AUTH, SELECTED), True),
'NOOP': ((NONAUTH, AUTH, SELECTED), True),
'PARTIAL': ((SELECTED,), True),
'PROXYAUTH': ((AUTH,), False),
'RENAME': ((AUTH, SELECTED), True),
'SEARCH': ((SELECTED,), True),
'SELECT': ((AUTH, SELECTED), False),
'SETACL': ((AUTH, SELECTED), False),
'SETANNOTATION':((AUTH, SELECTED), True),
'SETQUOTA': ((AUTH, SELECTED), False),
'SORT': ((SELECTED,), True),
'STARTTLS': ((NONAUTH,), False),
'STATUS': ((AUTH, SELECTED), True),
'STORE': ((SELECTED,), True),
'SUBSCRIBE': ((AUTH, SELECTED), False),
'THREAD': ((SELECTED,), True),
'UID': ((SELECTED,), True),
'UNSUBSCRIBE': ((AUTH, SELECTED), False),
}
UID_direct = ('SEARCH', 'SORT', 'THREAD')
def Int2AP(num):
"""string = Int2AP(num)
Return 'num' converted to a string using characters from the set 'A'..'P'
"""
val, a2p = [], 'ABCDEFGHIJKLMNOP'
num = int(abs(num))
while num:
num, mod = divmod(num, 16)
val.insert(0, a2p[mod])
return ''.join(val)
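# Illustrative examples of Int2AP (not part of the original module):
#   Int2AP(10) == 'K' (10 maps to the 11th letter of 'A'..'P')
#   Int2AP(4096) == 'BAAA' (4096 == 1*16**3, i.e. base-16 digits 1,0,0,0)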
class Request(object):
"""Private class to represent a request awaiting response."""
def __init__(self, parent, name=None, callback=None, cb_arg=None, cb_self=False):
self.parent = parent
self.name = name
self.callback = callback # Function called to process result
if not cb_self:
self.callback_arg = cb_arg # Optional arg passed to "callback"
else:
self.callback_arg = (self, cb_arg) # Self reference required in callback arg
self.tag = '%s%s' % (parent.tagpre, parent.tagnum)
parent.tagnum += 1
self.ready = threading.Event()
self.response = None
self.aborted = None
self.data = None
def abort(self, typ, val):
self.aborted = (typ, val)
self.deliver(None)
def get_response(self, exc_fmt=None):
self.callback = None
if __debug__: self.parent._log(3, '%s:%s.ready.wait' % (self.name, self.tag))
self.ready.wait()
if self.aborted is not None:
typ, val = self.aborted
if exc_fmt is None:
exc_fmt = '%s - %%s' % typ
raise typ(exc_fmt % str(val))
return self.response
def deliver(self, response):
if self.callback is not None:
self.callback((response, self.callback_arg, self.aborted))
return
self.response = response
self.ready.set()
if __debug__: self.parent._log(3, '%s:%s.ready.set' % (self.name, self.tag))
class IMAP4(object):
"""Threaded IMAP4 client class.
Instantiate with:
IMAP4(host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port);
debug - debug level (default: 0 - no debug);
debug_file - debug stream (default: sys.stderr);
identifier - thread identifier prefix (default: host);
timeout - timeout in seconds when expecting a command response (default: no timeout);
debug_buf_lvl - debug level at which buffering is turned off.
All IMAP4rev1 commands are supported by methods of the same name.
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data' is
either a string, or a tuple. If a tuple, then the first part is the
header of the response, and the second part contains the data (ie:
'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"), which is
a sub-class of 'error'. Mailbox status changes from READ-WRITE to
READ-ONLY raise the exception class <instance>.readonly("<reason>"),
which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
All commands take two optional named arguments:
'callback' and 'cb_arg'
If 'callback' is provided then the command is asynchronous, so after
the command is queued for transmission, the call returns immediately
with the tuple (None, None).
The result will be posted by invoking "callback" with one arg, a tuple:
callback((result, cb_arg, None))
or, if there was a problem:
callback((None, cb_arg, (exception class, reason)))
Otherwise the command is synchronous (waits for result). But note
that state-changing commands will both block until previous commands
have completed, and block subsequent commands until they have finished.
All (non-callback) arguments to commands are converted to strings,
except for AUTHENTICATE, and the last argument to APPEND which is
passed as an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double or single quotes) each string is
quoted. However, the 'password' argument to the LOGIN command is
always quoted. If you want to avoid having an argument string
quoted (eg: the 'flags' argument to STORE) then enclose the string
in parentheses (eg: "(\Deleted)"). If you are using "sequence sets"
containing the wildcard character '*', then enclose the argument
in single quotes: the quotes will be removed and the resulting
string passed unquoted. Note also that you can pass in an argument
with a type that doesn't evaluate to 'basestring' (eg: 'bytearray')
and it will be converted to a string without quoting.
There is one instance variable, 'state', that is useful for tracking
whether the client needs to login to the server. If it has the
value "AUTH" after instantiating the class, then the connection
is pre-authenticated (otherwise it will be "NONAUTH"). Selecting a
mailbox changes the state to be "SELECTED", closing a mailbox changes
back to "AUTH", and once the client has logged out, the state changes
to "LOGOUT" and no further commands may be issued.
Note: to use this module, you must read the RFCs pertaining to the
IMAP4 protocol, as the semantics of the arguments to each IMAP4
command are left to the invoker, not to mention the results. Also,
most IMAP servers implement a sub-set of the commands available here.
Note also that you must call logout() to shut down threads before
discarding an instance.
"""
class error(Exception): pass # Logical errors - debug required
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
continuation_cre = re.compile(r'\+( (?P<data>.*))?')
literal_cre = re.compile(r'.*{(?P<size>\d+)}$')
mapCRLF_cre = re.compile(r'\r\n|\r|\n')
# Need to quote "atom-specials" :-
# "(" / ")" / "{" / SP / 0x00 - 0x1f / 0x7f / "%" / "*" / DQUOTE / "\" / "]"
# so match not the inverse set
mustquote_cre = re.compile(r"[^!#$&'+,./0-9:;<=>?@A-Z\[^_`a-z|}~-]")
response_code_cre = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
# sequence_set_cre = re.compile(r"^[0-9]+(:([0-9]+|\*))?(,[0-9]+(:([0-9]+|\*))?)*$")
untagged_response_cre = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
untagged_status_cre = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
def __init__(self, host=None, port=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
self.state = NONAUTH # IMAP4 protocol state
self.literal = None # A literal argument to a command
self.tagged_commands = {} # Tagged commands awaiting response
self.untagged_responses = [] # [[typ: [data, ...]], ...]
self.mailbox = None # Current mailbox selected
self.mailboxes = {} # Untagged responses state per mailbox
self.is_readonly = False # READ-ONLY desired state
self.idle_rqb = None # Server IDLE Request - see _IdleCont
self.idle_timeout = None # Must prod server occasionally
self._expecting_data = 0 # Expecting message data
self._accumulated_data = [] # Message data accumulated so far
self._literal_expected = None # Message data descriptor
self.compressor = None # COMPRESS/DEFLATE if not None
self.decompressor = None
# Create unique tag for this session,
# and compile tagged response matcher.
self.tagnum = 0
self.tagpre = Int2AP(random.randint(4096, 65535))
self.tagre = re.compile(r'(?P<tag>'
+ self.tagpre
+ r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')
if __debug__: self._init_debug(debug, debug_file, debug_buf_lvl)
self.resp_timeout = timeout # Timeout waiting for command response
if timeout is not None and timeout < READ_POLL_TIMEOUT:
self.read_poll_timeout = timeout
else:
self.read_poll_timeout = READ_POLL_TIMEOUT
self.read_size = READ_SIZE
# Open socket to server.
self.open(host, port)
if __debug__:
if debug:
self._mesg('connected to %s on port %s' % (self.host, self.port))
# Threading
if identifier is not None:
self.identifier = identifier
else:
self.identifier = self.host
if self.identifier:
self.identifier += ' '
self.Terminate = self.TerminateReader = False
self.state_change_free = threading.Event()
self.state_change_pending = threading.Lock()
self.commands_lock = threading.Lock()
self.idle_lock = threading.Lock()
self.ouq = Queue.Queue(10)
self.inq = Queue.Queue()
self.wrth = threading.Thread(target=self._writer)
self.wrth.setDaemon(True)
self.wrth.start()
self.rdth = threading.Thread(target=self._reader)
self.rdth.setDaemon(True)
self.rdth.start()
self.inth = threading.Thread(target=self._handler)
self.inth.setDaemon(True)
self.inth.start()
# Get server welcome message,
# request and store CAPABILITY response.
try:
self.welcome = self._request_push(tag='continuation').get_response('IMAP4 protocol error: %s')[1]
if self._get_untagged_response('PREAUTH'):
self.state = AUTH
if __debug__: self._log(1, 'state => AUTH')
elif self._get_untagged_response('OK'):
if __debug__: self._log(1, 'state => NONAUTH')
else:
raise self.error('unrecognised server welcome message: %s' % `self.welcome`)
typ, dat = self.capability()
if dat == [None]:
raise self.error('no CAPABILITY response from server')
self.capabilities = tuple(dat[-1].upper().split())
if __debug__: self._log(1, 'CAPABILITY: %r' % (self.capabilities,))
for version in AllowedVersions:
if version not in self.capabilities:
continue
self.PROTOCOL_VERSION = version
break
else:
raise self.error('server not IMAP4 compliant')
except:
self._close_threads()
raise
def __getattr__(self, attr):
# Allow UPPERCASE variants of IMAP4 command methods.
if attr in Commands:
return getattr(self, attr.lower())
raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
# Overridable methods
def open(self, host=None, port=None):
"""open(host=None, port=None)
Setup connection to remote server on "host:port"
(default: localhost:standard IMAP4 port).
This connection will be used by the routines:
read, send, shutdown, socket."""
self.host = self._choose_nonull_or_dflt('', host)
self.port = self._choose_nonull_or_dflt(IMAP4_PORT, port)
self.sock = self.open_socket()
self.read_fd = self.sock.fileno()
def open_socket(self):
"""open_socket()
Open socket choosing first address family available."""
msg = (-1, 'could not open socket')
for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error, msg:
continue
try:
for i in (0, 1):
try:
s.connect(sa)
break
except socket.error, msg:
if len(msg.args) < 2 or msg.args[0] != errno.EINTR:
raise
else:
raise socket.error(msg)
except socket.error, msg:
s.close()
continue
break
else:
raise socket.error(msg)
return s
def ssl_wrap_socket(self):
# Allow sending of keep-alive messages - seems to prevent some servers
# from closing SSL, leading to deadlocks.
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
try:
import ssl
if self.ca_certs is not None:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile, ca_certs=self.ca_certs, cert_reqs=cert_reqs)
ssl_exc = ssl.SSLError
self.read_fd = self.sock.fileno()
except ImportError:
# No ssl module, and socket.ssl has no fileno(), and does not allow certificate verification
raise socket.sslerror("imaplib2 SSL mode does not work without ssl module")
if self.cert_verify_cb is not None:
cert_err = self.cert_verify_cb(self.sock.getpeercert(), self.host)
if cert_err:
raise ssl_exc(cert_err)
def start_compressing(self):
"""start_compressing()
Enable deflate compression on the socket (RFC 4978)."""
# RFC 1951 - pure DEFLATE, so use -15 for both windows
self.decompressor = zlib.decompressobj(-15)
self.compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
def read(self, size):
"""data = read(size)
Read at most 'size' bytes from remote."""
if self.decompressor is None:
return self.sock.recv(size)
if self.decompressor.unconsumed_tail:
data = self.decompressor.unconsumed_tail
else:
data = self.sock.recv(READ_SIZE)
return self.decompressor.decompress(data, size)
def send(self, data):
"""send(data)
Send 'data' to remote."""
if self.compressor is not None:
data = self.compressor.compress(data)
data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
self.sock.sendall(data)
def shutdown(self):
"""shutdown()
Close I/O established in "open"."""
self.sock.close()
def socket(self):
"""socket = socket()
Return socket instance used to connect to IMAP4 server."""
return self.sock
# Utility methods
def enable_compression(self):
"""enable_compression()
Ask the server to start compressing the connection.
Should be called by the user of this class after instantiation, as in:
if 'COMPRESS=DEFLATE' in imapobj.capabilities:
imapobj.enable_compression()"""
try:
typ, dat = self._simple_command('COMPRESS', 'DEFLATE')
if typ == 'OK':
self.start_compressing()
if __debug__: self._log(1, 'Enabled COMPRESS=DEFLATE')
finally:
self._release_state_change()
def pop_untagged_responses(self):
""" for typ,data in pop_untagged_responses(): pass
Generator for any remaining untagged responses.
Returns and removes untagged responses in order of reception.
Use at your own risk!"""
while self.untagged_responses:
self.commands_lock.acquire()
try:
yield self.untagged_responses.pop(0)
finally:
self.commands_lock.release()
def recent(self, **kw):
"""(typ, [data]) = recent()
Return 'RECENT' responses if any exist,
else prompt server for an update using the 'NOOP' command.
'data' is None if no new messages,
else list of RECENT responses, most recent last."""
name = 'RECENT'
typ, dat = self._untagged_response(None, [None], name)
if dat != [None]:
return self._deliver_dat(typ, dat, kw)
kw['untagged_response'] = name
return self.noop(**kw) # Prod server for response
def response(self, code, **kw):
"""(code, [data]) = response(code)
Return data for response 'code' if received, or None.
Old value for response 'code' is cleared."""
typ, dat = self._untagged_response(code, [None], code.upper())
return self._deliver_dat(typ, dat, kw)
# IMAP4 commands
def append(self, mailbox, flags, date_time, message, **kw):
"""(typ, [data]) = append(mailbox, flags, date_time, message)
Append message to named mailbox.
All args except `message' can be None."""
name = 'APPEND'
if not mailbox:
mailbox = 'INBOX'
if flags:
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags
else:
flags = None
if date_time:
date_time = Time2Internaldate(date_time)
else:
date_time = None
self.literal = self.mapCRLF_cre.sub(CRLF, message)
try:
return self._simple_command(name, mailbox, flags, date_time, **kw)
finally:
self._release_state_change()
def authenticate(self, mechanism, authobject, **kw):
"""(typ, [data]) = authenticate(mechanism, authobject)
Authenticate command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - it must appear in <instance>.capabilities in the
form AUTH=<mechanism>.
'authobject' must be a callable object:
data = authobject(response)
It will be called to process server continuation responses.
It should return data that will be encoded and sent to server.
It should return None if the client abort response '*' should
be sent instead."""
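# Illustrative authobject sketch (assumes the server advertises
# AUTH=PLAIN; not part of the original module):
#
#   def plain_auth(response):
#       # RFC 4616 PLAIN: authzid NUL authcid NUL password;
#       # the returned data is encoded before being sent.
#       return '\0fred\0secret'
#
#   imapobj.authenticate('PLAIN', plain_auth)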
self.literal = _Authenticator(authobject).process
try:
typ, dat = self._simple_command('AUTHENTICATE', mechanism.upper())
if typ != 'OK':
self._deliver_exc(self.error, dat[-1], kw)
self.state = AUTH
if __debug__: self._log(1, 'state => AUTH')
finally:
self._release_state_change()
return self._deliver_dat(typ, dat, kw)
def capability(self, **kw):
"""(typ, [data]) = capability()
Fetch capabilities list from server."""
name = 'CAPABILITY'
kw['untagged_response'] = name
return self._simple_command(name, **kw)
def check(self, **kw):
"""(typ, [data]) = check()
Checkpoint mailbox on server."""
return self._simple_command('CHECK', **kw)
def close(self, **kw):
"""(typ, [data]) = close()
Close currently selected mailbox.
Deleted messages are removed from writable mailbox.
This is the recommended command before 'LOGOUT'."""
if self.state != 'SELECTED':
raise self.error('No mailbox selected.')
try:
typ, dat = self._simple_command('CLOSE')
finally:
self.state = AUTH
if __debug__: self._log(1, 'state => AUTH')
self._release_state_change()
return self._deliver_dat(typ, dat, kw)
def copy(self, message_set, new_mailbox, **kw):
"""(typ, [data]) = copy(message_set, new_mailbox)
Copy 'message_set' messages onto end of 'new_mailbox'."""
return self._simple_command('COPY', message_set, new_mailbox, **kw)
def create(self, mailbox, **kw):
"""(typ, [data]) = create(mailbox)
Create new mailbox."""
return self._simple_command('CREATE', mailbox, **kw)
def delete(self, mailbox, **kw):
"""(typ, [data]) = delete(mailbox)
Delete old mailbox."""
return self._simple_command('DELETE', mailbox, **kw)
def deleteacl(self, mailbox, who, **kw):
"""(typ, [data]) = deleteacl(mailbox, who)
Delete the ACLs (remove any rights) set for who on mailbox."""
return self._simple_command('DELETEACL', mailbox, who, **kw)
def examine(self, mailbox='INBOX', **kw):
"""(typ, [data]) = examine(mailbox='INBOX')
Select a mailbox for READ-ONLY access. (Flushes all untagged responses.)
'data' is count of messages in mailbox ('EXISTS' response).
Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
other responses should be obtained via "response('FLAGS')" etc."""
return self.select(mailbox=mailbox, readonly=True, **kw)
def expunge(self, **kw):
"""(typ, [data]) = expunge()
Permanently remove deleted items from selected mailbox.
Generates 'EXPUNGE' response for each deleted message.
'data' is list of 'EXPUNGE'd message numbers in order received."""
name = 'EXPUNGE'
kw['untagged_response'] = name
return self._simple_command(name, **kw)
def fetch(self, message_set, message_parts, **kw):
"""(typ, [data, ...]) = fetch(message_set, message_parts)
Fetch (parts of) messages.
'message_parts' should be a string of selected parts
enclosed in parentheses, eg: "(UID BODY[TEXT])".
'data' are tuples of message part envelope and data,
followed by a string containing the trailer."""
name = 'FETCH'
kw['untagged_response'] = name
return self._simple_command(name, message_set, message_parts, **kw)
def getacl(self, mailbox, **kw):
"""(typ, [data]) = getacl(mailbox)
Get the ACLs for a mailbox."""
kw['untagged_response'] = 'ACL'
return self._simple_command('GETACL', mailbox, **kw)
def getannotation(self, mailbox, entry, attribute, **kw):
"""(typ, [data]) = getannotation(mailbox, entry, attribute)
Retrieve ANNOTATIONs."""
kw['untagged_response'] = 'ANNOTATION'
return self._simple_command('GETANNOTATION', mailbox, entry, attribute, **kw)
def getquota(self, root, **kw):
"""(typ, [data]) = getquota(root)
Get the quota root's resource usage and limits.
(Part of the IMAP4 QUOTA extension defined in RFC 2087.)"""
kw['untagged_response'] = 'QUOTA'
return self._simple_command('GETQUOTA', root, **kw)
def getquotaroot(self, mailbox, **kw):
# Hmmm, this is non-std! Left for backwards-compatibility, sigh.
# NB: usage should have been defined as:
# (typ, [QUOTAROOT responses...]) = getquotaroot(mailbox)
# (typ, [QUOTA responses...]) = response('QUOTA')
"""(typ, [[QUOTAROOT responses...], [QUOTA responses...]]) = getquotaroot(mailbox)
Get the list of quota roots for the named mailbox."""
typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
typ, quota = self._untagged_response(typ, dat, 'QUOTA')
typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
return self._deliver_dat(typ, [quotaroot, quota], kw)
def id(self, *kv_pairs, **kw):
"""(typ, [data]) = <instance>.id(kv_pairs)
'kv_pairs' is a possibly empty list of keys and values.
'data' is a list of ID key value pairs or NIL.
NB: a single argument is assumed to be correctly formatted and is passed through unchanged
(for backward compatibility with an earlier version).
Exchange information for problem analysis and determination.
The ID extension is defined in RFC 2971. """
name = 'ID'
kw['untagged_response'] = name
if not kv_pairs:
data = 'NIL'
elif len(kv_pairs) == 1:
data = kv_pairs[0] # Assume invoker passing correctly formatted string (back-compat)
else:
data = '(%s)' % ' '.join([(arg and self._quote(arg) or 'NIL') for arg in kv_pairs])
return self._simple_command(name, (data,), **kw)
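# Illustrative call (a sketch, not from the original source):
#
#   typ, dat = imapobj.id('name', 'myclient', 'version', '0.1')
#   # dat holds the server's ID key/value list, or NIL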
def idle(self, timeout=None, **kw):
""""(typ, [data]) = idle(timeout=None)
Put server into IDLE mode until server notifies some change,
or 'timeout' (secs) occurs (default: 29 minutes),
or another IMAP4 command is scheduled."""
name = 'IDLE'
self.literal = _IdleCont(self, timeout).process
try:
return self._simple_command(name, **kw)
finally:
self._release_state_change()
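# Illustrative use of idle() (a sketch, not from the original source):
#
#   typ, dat = imapobj.idle(timeout=60)    # block until change or 60s
#   typ, dat = imapobj.response('EXISTS')  # then inspect what changed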
def list(self, directory='""', pattern='*', **kw):
"""(typ, [data]) = list(directory='""', pattern='*')
List mailbox names in directory matching pattern.
'data' is list of LIST responses.
NB: for 'pattern':
% matches all except separator (so LIST "" "%" returns names at the root)
* matches all (so LIST "" "*" returns the whole directory tree from the root)"""
name = 'LIST'
kw['untagged_response'] = name
return self._simple_command(name, directory, pattern, **kw)
def login(self, user, password, **kw):
"""(typ, [data]) = login(user, password)
Identify client using plaintext password.
NB: 'password' will be quoted."""
try:
typ, dat = self._simple_command('LOGIN', user, self._quote(password))
if typ != 'OK':
self._deliver_exc(self.error, dat[-1], kw)
self.state = AUTH
if __debug__: self._log(1, 'state => AUTH')
finally:
self._release_state_change()
return self._deliver_dat(typ, dat, kw)
def login_cram_md5(self, user, password, **kw):
"""(typ, [data]) = login_cram_md5(user, password)
Force use of CRAM-MD5 authentication."""
self.user, self.password = user, password
return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH, **kw)
def _CRAM_MD5_AUTH(self, challenge):
"""Authobject to use with CRAM-MD5 authentication."""
import hmac
return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest()
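# CRAM-MD5 sketch (illustrative, not part of the module): the reply sent to
# the server is 'user <hexdigest>', where the digest is the HMAC-MD5 of the
# server's challenge keyed with the password, e.g.:
#
#   import hmac
#   hmac.HMAC('secret', '<1896.697170952@example.net>').hexdigest()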
def logout(self, **kw):
"""(typ, [data]) = logout()
Shutdown connection to server.
Returns server 'BYE' response.
NB: You must call this to shut down threads before discarding an instance."""
self.state = LOGOUT
if __debug__: self._log(1, 'state => LOGOUT')
try:
try:
typ, dat = self._simple_command('LOGOUT')
except:
typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
if __debug__: self._log(1, dat)
self._close_threads()
finally:
self._release_state_change()
if __debug__: self._log(1, 'connection closed')
bye = self._get_untagged_response('BYE', leave=True)
if bye:
typ, dat = 'BYE', bye
return self._deliver_dat(typ, dat, kw)
def lsub(self, directory='""', pattern='*', **kw):
"""(typ, [data, ...]) = lsub(directory='""', pattern='*')
List 'subscribed' mailbox names in directory matching pattern.
'data' are tuples of message part envelope and data."""
name = 'LSUB'
kw['untagged_response'] = name
return self._simple_command(name, directory, pattern, **kw)
def myrights(self, mailbox, **kw):
"""(typ, [data]) = myrights(mailbox)
Show my ACLs for a mailbox (i.e. the rights that I have on mailbox)."""
name = 'MYRIGHTS'
kw['untagged_response'] = name
return self._simple_command(name, mailbox, **kw)
def namespace(self, **kw):
"""(typ, [data, ...]) = namespace()
Returns IMAP namespaces as per RFC 2342."""
name = 'NAMESPACE'
kw['untagged_response'] = name
return self._simple_command(name, **kw)
def | (self, **kw):
"""(typ, [data]) = noop()
Send NOOP command."""
if __debug__: self._dump_ur(3)
return self._simple_command('NOOP', **kw)
def partial(self, message_num, message_part, start, length, **kw):
"""(typ, [data, ...]) = partial(message_num, message_part, start, length)
Fetch truncated part of a message.
'data' is tuple of message part envelope and data.
NB: obsolete."""
name = 'PARTIAL'
kw['untagged_response'] = 'FETCH'
return self._simple_command(name, message_num, message_part, start, length, **kw)
def proxyauth(self, user, **kw):
"""(typ, [data]) = proxyauth(user)
Assume authentication as 'user'.
(Allows an authorised administrator to proxy into any user's mailbox.)"""
try:
return self._simple_command('PROXYAUTH', user, **kw)
finally:
self._release_state_change()
def rename(self, oldmailbox, newmailbox, **kw):
"""(typ, [data]) = rename(oldmailbox, newmailbox)
Rename old mailbox name to new."""
return self._simple_command('RENAME', oldmailbox, newmailbox, **kw)
def search(self, charset, *criteria, **kw):
"""(typ, [data]) = search(charset, criterion, ...)
Search mailbox for matching messages.
'data' is space separated list of matching message numbers."""
name = 'SEARCH'
kw['untagged_response'] = name
if charset:
return self._simple_command(name, 'CHARSET', charset, *criteria, **kw)
return self._simple_command(name, *criteria, **kw)
def select(self, mailbox='INBOX', readonly=False, **kw):
"""(typ, [data]) = select(mailbox='INBOX', readonly=False)
Select a mailbox. (Restores any previous untagged responses.)
'data' is count of messages in mailbox ('EXISTS' response).
Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
other responses should be obtained via "response('FLAGS')" etc."""
self.commands_lock.acquire()
# Save state of old mailbox, restore state for new...
self.mailboxes[self.mailbox] = self.untagged_responses
self.untagged_responses = self.mailboxes.setdefault(mailbox, [])
self.commands_lock.release()
self.mailbox = mailbox
self.is_readonly = readonly and True or False
if readonly:
name = 'EXAMINE'
else:
name = 'SELECT'
try:
rqb = self._command(name, mailbox)
typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
if typ != 'OK':
if self.state == SELECTED:
self.state = AUTH
if __debug__: self._log(1, 'state => AUTH')
if typ == 'BAD':
self._deliver_exc(self.error, '%s command error: %s %s. Data: %.100s' % (name, typ, dat, mailbox), kw)
return self._deliver_dat(typ, dat, kw)
self.state = SELECTED
if __debug__: self._log(1, 'state => SELECTED')
finally:
self._release_state_change()
if self._get_untagged_response('READ-ONLY', leave=True) and not readonly:
if __debug__: self._dump_ur(1)
self._deliver_exc(self.readonly, '%s is not writable' % mailbox, kw)
typ, dat = self._untagged_response(typ, [None], 'EXISTS')
return self._deliver_dat(typ, dat, kw)
def setacl(self, mailbox, who, what, **kw):
"""(typ, [data]) = setacl(mailbox, who, what)
Set a mailbox acl."""
try:
return self._simple_command('SETACL', mailbox, who, what, **kw)
finally:
self._release_state_change()
def setannotation(self, *args, **kw):
"""(typ, [data]) = setannotation(mailbox[, entry, attribute]+)
Set ANNOTATIONs."""
kw['untagged_response'] = 'ANNOTATION'
return self._simple_command('SETANNOTATION', *args, **kw)
def setquota(self, root, limits, **kw):
"""(typ, [data]) = setquota(root, limits)
Set the quota root's resource limits."""
kw['untagged_response'] = 'QUOTA'
try:
return self._simple_command('SETQUOTA', root, limits, **kw)
finally:
self._release_state_change()
def sort(self, sort_criteria, charset, *search_criteria, **kw):
"""(typ, [data]) = sort(sort_criteria, charset, search_criteria, ...)
IMAP4rev1 extension SORT command."""
name = 'SORT'
if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
sort_criteria = '(%s)' % sort_criteria
kw['untagged_response'] = name
return self._simple_command(name, sort_criteria, charset, *search_criteria, **kw)
def starttls(self, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, **kw):
"""(typ, [data]) = starttls(keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None)
Start TLS negotiation as per RFC 2595."""
name = 'STARTTLS'
if name not in self.capabilities:
raise self.abort('TLS not supported by server')
if hasattr(self, '_tls_established') and self._tls_established:
raise self.abort('TLS session already established')
# Must now shut down the reader thread after the next response, and restart it after changing read_fd
self.read_size = 1 # Don't consume TLS handshake
self.TerminateReader = True
try:
typ, dat = self._simple_command(name)
finally:
self._release_state_change()
self.rdth.join()
self.TerminateReader = False
self.read_size = READ_SIZE
if typ != 'OK':
# Restart reader thread and error
self.rdth = threading.Thread(target=self._reader)
self.rdth.setDaemon(True)
self.rdth.start()
raise self.error("Couldn't establish TLS session: %s" % dat)
self.keyfile = keyfile
self.certfile = certfile
self.ca_certs = ca_certs
self.cert_verify_cb = cert_verify_cb
try:
self.ssl_wrap_socket()
finally:
# Restart reader thread
self.rdth = threading.Thread(target=self._reader)
self.rdth.setDaemon(True)
self.rdth.start()
typ, dat = self.capability()
if dat == [None]:
raise self.error('no CAPABILITY response from server')
self.capabilities = tuple(dat[-1].upper().split())
self._tls_established = True
typ, dat = self._untagged_response(typ, dat, name)
return self._deliver_dat(typ, dat, kw)
def status(self, mailbox, names, **kw):
"""(typ, [data]) = status(mailbox, names)
Request named status conditions for mailbox."""
name = 'STATUS'
kw['untagged_response'] = name
return self._simple_command(name, mailbox, names, **kw)
def store(self, message_set, command, flags, **kw):
"""(typ, [data]) = store(message_set, command, flags)
Alters flag dispositions for messages in mailbox."""
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags # Avoid quoting the flags
kw['untagged_response'] = 'FETCH'
return self._simple_command('STORE', message_set, command, flags, **kw)
def subscribe(self, mailbox, **kw):
"""(typ, [data]) = subscribe(mailbox)
Subscribe to new mailbox."""
try:
return self._simple_command('SUBSCRIBE', mailbox, **kw)
finally:
self._release_state_change()
def thread(self, threading_algorithm, charset, *search_criteria, **kw):
"""(type, [data]) = thread(threading_alogrithm, charset, search_criteria, ...)
IMAPrev1 extension THREAD command."""
name = 'THREAD'
kw['untagged_response'] = name
return self._simple_command(name, threading_algorithm, charset, *search_criteria, **kw)
def uid(self, command, *args, **kw):
"""(typ, [data]) = uid(command, arg, ...)
Execute "command arg ..." with messages identified by UID,
rather than message number.
Assumes 'command' is legal in current state.
Returns response appropriate to 'command'."""
command = command.upper()
if command in UID_direct:
resp = command
else:
resp = 'FETCH'
kw['untagged_response'] = resp
return self._simple_command('UID', command, *args, **kw)
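# Illustrative examples (not from the original source):
#
#   typ, dat = imapobj.uid('SEARCH', 'UNSEEN')            # returns UIDs
#   typ, dat = imapobj.uid('FETCH', '4827313', '(FLAGS)')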
def unsubscribe(self, mailbox, **kw):
"""(typ, [data]) = unsubscribe(mailbox)
Unsubscribe from old mailbox."""
try:
return self._simple_command('UNSUBSCRIBE', mailbox, **kw)
finally:
self._release_state_change()
def xatom(self, name, *args, **kw):
"""(typ, [data]) = xatom(name, arg, ...)
Allow simple extension commands notified by server in CAPABILITY response.
Assumes extension command 'name' is legal in current state.
Returns response appropriate to extension command 'name'."""
name = name.upper()
if name not in Commands:
Commands[name] = ((self.state,), False)
try:
return self._simple_command(name, *args, **kw)
finally:
self._release_state_change()
# Internal methods
def _append_untagged(self, typ, dat):
# Append new 'dat' to end of last untagged response if same 'typ',
# else append new response.
if dat is None: dat = ''
self.commands_lock.acquire()
if self.untagged_responses:
urn, urd = self.untagged_responses[-1]
if urn != typ:
urd = None
else:
urd = None
if urd is None:
urd = []
self.untagged_responses.append([typ, urd])
urd.append(dat)
self.commands_lock.release()
if __debug__: self._log(5, 'untagged_responses[%s] %s += ["%s"]' % (typ, len(urd)-1, dat))
def _check_bye(self):
bye = self._get_untagged_response('BYE', leave=True)
if bye:
raise self.abort(bye[-1])
def _checkquote(self, arg):
# Must quote command args if "atom-specials" present,
# and not already quoted. NB: single quotes are removed.
if not isinstance(arg, basestring):
return arg
if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')):
return arg
if len(arg) >= 2 and (arg[0],arg[-1]) in (("'","'"),):
return arg[1:-1]
if arg and self.mustquote_cre.search(arg) is None:
return arg
return self._quote(arg)
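# Illustrative quoting behaviour (a sketch, not from the original source):
#   _checkquote('INBOX')       -> 'INBOX'       (no atom-specials)
#   _checkquote('My Mail')     -> '"My Mail"'   (space forces quoting)
#   _checkquote('(\\Seen)')    -> '(\\Seen)'    (already parenthesised)
#   _checkquote("'1:*'")       -> '1:*'         (single quotes stripped)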
def _choose_nonull_or_dflt(self, dflt, *args):
if isinstance(dflt, basestring):
dflttyp = basestring # Allow any string type
else:
dflttyp = type(dflt)
for arg in args:
if arg is not None:
if isinstance(arg, dflttyp):
return arg
if __debug__: self._log(0, 'bad arg is %s, expecting %s' % (type(arg), dflttyp))
return dflt
def _command(self, name, *args, **kw):
if Commands[name][CMD_VAL_ASYNC]:
cmdtyp = 'async'
else:
cmdtyp = 'sync'
if __debug__: self._log(1, '[%s] %s %s' % (cmdtyp, name, args))
if __debug__: self._log(3, 'state_change_pending.acquire')
self.state_change_pending.acquire()
self._end_idle()
if cmdtyp == 'async':
self.state_change_pending.release()
if __debug__: self._log(3, 'state_change_pending.release')
else:
# Need to wait for all async commands to complete
self._check_bye()
self.commands_lock.acquire()
if self.tagged_commands:
self.state_change_free.clear()
need_event = True
else:
need_event = False
self.commands_lock.release()
if need_event:
if __debug__: self._log(3, 'sync command %s waiting for empty commands Q' % name)
self.state_change_free.wait()
if __debug__: self._log(3, 'sync command %s proceeding' % name)
if self.state not in Commands[name][CMD_VAL_STATES]:
self.literal = None
raise self.error('command %s illegal in state %s'
% (name, self.state))
self._check_bye()
for typ in ('OK', 'NO', 'BAD'):
self._get_untagged_response(typ)
if self._get_untagged_response('READ-ONLY', leave=True) and not self.is_readonly:
self.literal = None
raise self.readonly('mailbox status changed to READ-ONLY')
if self.Terminate:
raise self.abort('connection closed')
rqb = self._request_push(name=name, **kw)
data = '%s %s' % (rqb.tag, name)
for arg in args:
if arg is None: continue
data = '%s %s' % (data, self._checkquote(arg))
literal = self.literal
if literal is not None:
self.literal = None
if isinstance(literal, basestring):
literator = None
data = '%s {%s}' % (data, len(literal))
else:
literator = literal
if __debug__: self._log(4, 'data=%s' % data)
rqb.data = '%s%s' % (data, CRLF)
if literal is None:
self.ouq.put(rqb)
return rqb
# Must set up continuation expectancy *before* ouq.put
crqb = self._request_push(tag='continuation')
self.ouq.put(rqb)
while True:
# Wait for continuation response
ok, data = crqb.get_response('command: %s => %%s' % name)
if __debug__: self._log(4, 'continuation => %s, %s' % (ok, data))
# NO/BAD response?
if not ok:
break
# Send literal
if literator is not None:
literal = literator(data, rqb)
if literal is None:
break
if literator is not None:
# Need new request for next continuation response
crqb = self._request_push(tag='continuation')
if __debug__: self._log(4, 'write literal size %s' % len(literal))
crqb.data = '%s%s' % (literal, CRLF)
self.ouq.put(crqb)
if literator is None:
break
return rqb
def _command_complete(self, rqb, kw):
# Called for non-callback commands
typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
self._check_bye()
if typ == 'BAD':
if __debug__: self._print_log()
raise self.error('%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data))
if 'untagged_response' in kw:
return self._untagged_response(typ, dat, kw['untagged_response'])
return typ, dat
def _command_completer(self, (response, cb_arg, error)):
# Called for callback commands
rqb, kw = cb_arg
rqb.callback = kw['callback']
rqb.callback_arg = kw.get('cb_arg')
if error is not None:
if __debug__: self._print_log()
typ, val = error
rqb.abort(typ, val)
return
bye = self._get_untagged_response('BYE', leave=True)
if bye:
rqb.abort(self.abort, bye[-1])
return
typ, dat = response
if typ == 'BAD':
if __debug__: self._print_log()
rqb.abort(self.error, '%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data))
return
if 'untagged_response' in kw:
response = self._untagged_response(typ, dat, kw['untagged_response'])
rqb.deliver(response)
def _deliver_dat(self, typ, dat, kw):
if 'callback' in kw:
kw['callback'](((typ, dat), kw.get('cb_arg'), None))
return typ, dat
def _deliver_exc(self, exc, dat, kw):
if 'callback' in kw:
kw['callback']((None, kw.get('cb_arg'), (exc, dat)))
raise exc(dat)
def _end_idle(self):
self.idle_lock.acquire()
irqb = self.idle_rqb
if irqb is None:
self.idle_lock.release()
return
self.idle_rqb = None
self.idle_timeout = None
self.idle_lock.release()
irqb.data = 'DONE%s' % CRLF
self.ouq.put(irqb)
if __debug__: self._log(2, 'server IDLE finished')
def _get_untagged_response(self, name, leave=False):
self.commands_lock.acquire()
for i, (typ, dat) in enumerate(self.untagged_responses):
if typ == name:
if not leave:
del self.untagged_responses[i]
self.commands_lock.release()
if __debug__: self._log(5, '_get_untagged_response(%s) => %s' % (name, dat))
return dat
self.commands_lock.release()
return None
def _match(self, cre, s):
# Run compiled regular expression 'cre' match method on 's'.
# Save result, return success.
self.mo = cre.match(s)
return self.mo is not None
def _put_response(self, resp):
if self._expecting_data > 0:
rlen = len(resp)
dlen = min(self._expecting_data, rlen)
self._expecting_data -= dlen
if rlen <= dlen:
self._accumulated_data.append(resp)
return
self._accumulated_data.append(resp[:dlen])
resp = resp[dlen:]
if self._accumulated_data:
typ, dat = self._literal_expected
self._append_untagged(typ, (dat, ''.join(self._accumulated_data)))
self._accumulated_data = []
# Protocol mandates all lines terminated by CRLF
resp = resp[:-2]
if 'continuation' in self.tagged_commands:
continuation_expected = True
else:
continuation_expected = False
if self._literal_expected is not None:
dat = resp
if self._match(self.literal_cre, dat):
self._literal_expected[1] = dat
self._expecting_data = int(self.mo.group('size'))
if __debug__: self._log(4, 'expecting literal size %s' % self._expecting_data)
return
typ = self._literal_expected[0]
self._literal_expected = None
self._append_untagged(typ, dat) # Tail
if __debug__: self._log(4, 'literal completed')
else:
# Command completion response?
if self._match(self.tagre, resp):
tag = self.mo.group('tag')
typ = self.mo.group('type')
dat = self.mo.group('data')
if tag not in self.tagged_commands:
if __debug__: self._log(1, 'unexpected tagged response: %s' % resp)
else:
self._request_pop(tag, (typ, [dat]))
else:
dat2 = None
# '*' (untagged) responses?
if not self._match(self.untagged_response_cre, resp):
if self._match(self.untagged_status_cre, resp):
dat2 = self.mo.group('data2')
if self.mo is None:
# Only other possibility is '+' (continuation) response...
if self._match(self.continuation_cre, resp):
if not continuation_expected:
if __debug__: self._log(1, "unexpected continuation response: '%s'" % resp)
return
self._request_pop('continuation', (True, self.mo.group('data')))
return
if __debug__: self._log(1, "unexpected response: '%s'" % resp)
return
typ = self.mo.group('type')
dat = self.mo.group('data')
if dat is None: dat = '' # Null untagged response
if dat2: dat = dat + ' ' + dat2
# Is there a literal to come?
if self._match(self.literal_cre, dat):
self._expecting_data = int(self.mo.group('size'))
if __debug__: self._log(4, 'read literal size %s' % self._expecting_data)
self._literal_expected = [typ, dat]
return
self._append_untagged(typ, dat)
if typ != 'OK': # NO, BYE, IDLE
self._end_idle()
# Bracketed response information?
if typ in ('OK', 'NO', 'BAD') and self._match(self.response_code_cre, dat):
self._append_untagged(self.mo.group('type'), self.mo.group('data'))
# Command waiting for aborted continuation response?
if continuation_expected:
self._request_pop('continuation', (False, resp))
# Bad news?
if typ in ('NO', 'BAD', 'BYE'):
if typ == 'BYE':
self.Terminate = True
if __debug__: self._log(1, '%s response: %s' % (typ, dat))
def _quote(self, arg):
return '"%s"' % arg.replace('\\', '\\\\').replace('"', '\\"')
def _release_state_change(self):
if self.state_change_pending.locked():
self.state_change_pending.release()
if __debug__: self._log(3, 'state_change_pending.release')
def _request_pop(self, name, data):
self.commands_lock.acquire()
rqb = self.tagged_commands.pop(name)
if not self.tagged_commands:
if __debug__: self._log(3, 'state_change_free.set')
self.state_change_free.set()
self.commands_lock.release()
if __debug__: self._log(4, '_request_pop(%s, %s) = %s' % (name, data, rqb.tag))
rqb.deliver(data)
def _request_push(self, tag=None, name=None, **kw):
self.commands_lock.acquire()
rqb = Request(self, name=name, **kw)
if tag is None:
tag = rqb.tag
self.tagged_commands[tag] = rqb
self.commands_lock.release()
if __debug__: self._log(4, '_request_push(%s, %s, %s) = %s' % (tag, name, `kw`, rqb.tag))
return rqb
def _simple_command(self, name, *args, **kw):
if 'callback' in kw:
# Note: old calling sequence for back-compat with python <2.6
self._command(name, callback=self._command_completer, cb_arg=kw, cb_self=True, *args)
return (None, None)
return self._command_complete(self._command(name, *args), kw)
def _untagged_response(self, typ, dat, name):
if typ == 'NO':
return typ, dat
data = self._get_untagged_response(name)
if not data:
return typ, [None]
while True:
dat = self._get_untagged_response(name)
if not dat:
break
data += dat
if __debug__: self._log(4, '_untagged_response(%s, ?, %s) => %s' % (typ, name, data))
return typ, data
# Threads
def _close_threads(self):
if __debug__: self._log(1, '_close_threads')
self.ouq.put(None)
self.wrth.join()
if __debug__: self._log(1, 'call shutdown')
self.shutdown()
self.rdth.join()
self.inth.join()
def _handler(self):
resp_timeout = self.resp_timeout
threading.currentThread().setName(self.identifier + 'handler')
time.sleep(0.1) # Don't start handling before main thread ready
if __debug__: self._log(1, 'starting')
typ, val = self.abort, 'connection terminated'
while not self.Terminate:
try:
if self.idle_timeout is not None:
timeout = self.idle_timeout - time.time()
if timeout <= 0:
timeout = 1
if __debug__:
if self.idle_rqb is not None:
self._log(5, 'server IDLING, timeout=%.2f' % timeout)
else:
timeout = resp_timeout
line = self.inq.get(True, timeout)
except Queue.Empty:
if self.idle_rqb is None:
if resp_timeout is not None and self.tagged_commands:
if __debug__: self._log(1, 'response timeout')
typ, val = self.abort, 'no response after %s secs' % resp_timeout
break
continue
if self.idle_timeout > time.time():
continue
if __debug__: self._log(2, 'server IDLE timed out')
line = IDLE_TIMEOUT_RESPONSE
if line is None:
if __debug__: self._log(1, 'inq None - terminating')
break
if not isinstance(line, basestring):
typ, val = line
break
try:
self._put_response(line)
except:
typ, val = self.error, 'program error: %s - %s' % sys.exc_info()[:2]
break
self.Terminate = True
if __debug__: self._log(1, 'terminating: %s' % `val`)
while not self.ouq.empty():
try:
self.ouq.get_nowait().abort(typ, val)
except Queue.Empty:
break
self.ouq.put(None)
self.commands_lock.acquire()
for name in self.tagged_commands.keys():
rqb = self.tagged_commands.pop(name)
rqb.abort(typ, val)
self.state_change_free.set()
self.commands_lock.release()
if __debug__: self._log(3, 'state_change_free.set')
if __debug__: self._log(1, 'finished')
if hasattr(select_module, "poll"):
def _reader(self):
threading.currentThread().setName(self.identifier + 'reader')
if __debug__: self._log(1, 'starting using poll')
def poll_error(state):
PollErrors = {
select.POLLERR: 'Error',
select.POLLHUP: 'Hang up',
select.POLLNVAL: 'Invalid request: descriptor not open',
}
return ' '.join([PollErrors[s] for s in PollErrors.keys() if (s & state)])
line_part = ''
poll = select.poll()
poll.register(self.read_fd, select.POLLIN)
rxzero = 0
terminate = False
read_poll_timeout = self.read_poll_timeout * 1000 # poll() timeout is in millisecs
while not (terminate or self.Terminate):
if self.state == LOGOUT:
timeout = 1
else:
timeout = read_poll_timeout
try:
r = poll.poll(timeout)
if __debug__: self._log(5, 'poll => %s' % `r`)
if not r:
continue # Timeout
fd,state = r[0]
if state & select.POLLIN:
data = self.read(self.read_size) # Drain ssl buffer if present
start = 0
dlen = len(data)
if __debug__: self._log(5, 'rcvd %s' % dlen)
if dlen == 0:
rxzero += 1
if rxzero > 5:
raise IOError("Too many read 0")
time.sleep(0.1)
continue # Try again
rxzero = 0
while True:
stop = data.find('\n', start)
if stop < 0:
line_part += data[start:]
break
stop += 1
line_part, start, line = \
'', stop, line_part + data[start:stop]
if __debug__: self._log(4, '< %s' % line)
self.inq.put(line)
if self.TerminateReader:
terminate = True
if state & ~(select.POLLIN):
raise IOError(poll_error(state))
except:
reason = 'socket error: %s - %s' % sys.exc_info()[:2]
if __debug__:
if not self.Terminate:
self._print_log()
if self.debug: self.debug += 4 # Output all
self._log(1, reason)
self.inq.put((self.abort, reason))
break
poll.unregister(self.read_fd)
if __debug__: self._log(1, 'finished')
else:
# No "poll" - use select()
def _reader(self):
threading.currentThread().setName(self.identifier + 'reader')
if __debug__: self._log(1, 'starting using select')
line_part = ''
rxzero = 0
terminate = False
while not (terminate or self.Terminate):
if self.state == LOGOUT:
timeout = 1
else:
timeout = self.read_poll_timeout
try:
r,w,e = select.select([self.read_fd], [], [], timeout)
if __debug__: self._log(5, 'select => %s, %s, %s' % (r,w,e))
if not r: # Timeout
continue
data = self.read(self.read_size) # Drain ssl buffer if present
start = 0
dlen = len(data)
if __debug__: self._log(5, 'rcvd %s' % dlen)
if dlen == 0:
rxzero += 1
if rxzero > 5:
raise IOError("Too many read 0")
time.sleep(0.1)
continue # Try again
rxzero = 0
while True:
stop = data.find('\n', start)
if stop < 0:
line_part += data[start:]
break
stop += 1
line_part, start, line = \
'', stop, line_part + data[start:stop]
if __debug__: self._log(4, '< %s' % line)
self.inq.put(line)
if self.TerminateReader:
terminate = True
except:
reason = 'socket error: %s - %s' % sys.exc_info()[:2]
if __debug__:
if not self.Terminate:
self._print_log()
if self.debug: self.debug += 4 # Output all
self._log(1, reason)
self.inq.put((self.abort, reason))
break
if __debug__: self._log(1, 'finished')
def _writer(self):
threading.currentThread().setName(self.identifier + 'writer')
if __debug__: self._log(1, 'starting')
reason = 'Terminated'
while not self.Terminate:
rqb = self.ouq.get()
if rqb is None:
break # Outq flushed
try:
self.send(rqb.data)
if __debug__: self._log(4, '> %s' % rqb.data)
except:
reason = 'socket error: %s - %s' % sys.exc_info()[:2]
if __debug__:
if not self.Terminate:
self._print_log()
if self.debug: self.debug += 4 # Output all
self._log(1, reason)
rqb.abort(self.abort, reason)
break
self.inq.put((self.abort, reason))
if __debug__: self._log(1, 'finished')
# Debugging
if __debug__:
def _init_debug(self, debug=None, debug_file=None, debug_buf_lvl=None):
self.debug = self._choose_nonull_or_dflt(0, debug, Debug)
self.debug_file = self._choose_nonull_or_dflt(sys.stderr, debug_file)
self.debug_buf_lvl = self._choose_nonull_or_dflt(DFLT_DEBUG_BUF_LVL, debug_buf_lvl)
self.debug_lock = threading.Lock()
self._cmd_log_len = 20
self._cmd_log_idx = 0
self._cmd_log = {} # Last `_cmd_log_len' interactions
if self.debug:
self._mesg('imaplib2 version %s' % __version__)
self._mesg('imaplib2 debug level %s, buffer level %s' % (self.debug, self.debug_buf_lvl))
def _dump_ur(self, lvl):
if lvl > self.debug:
return
l = self.untagged_responses
if not l:
return
t = '\n\t\t'
l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
self.debug_lock.acquire()
self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
self.debug_lock.release()
def _log(self, lvl, line):
if lvl > self.debug:
return
if line[-2:] == CRLF:
line = line[:-2] + '\\r\\n'
tn = threading.currentThread().getName()
if lvl <= 1 or self.debug > self.debug_buf_lvl:
self.debug_lock.acquire()
self._mesg(line, tn)
self.debug_lock.release()
if lvl != 1:
return
# Keep log of last `_cmd_log_len' interactions for debugging.
self.debug_lock.acquire()
self._cmd_log[self._cmd_log_idx] = (line, tn, time.time())
self._cmd_log_idx += 1
if self._cmd_log_idx >= self._cmd_log_len:
self._cmd_log_idx = 0
self.debug_lock.release()
def _mesg(self, s, tn=None, secs=None):
if secs is None:
secs = time.time()
if tn is None:
tn = threading.currentThread().getName()
tm = time.strftime('%M:%S', time.localtime(secs))
try:
self.debug_file.write(' %s.%02d %s %s\n' % (tm, (secs*100)%100, tn, s))
self.debug_file.flush()
finally:
pass
def _print_log(self):
self.debug_lock.acquire()
i, n = self._cmd_log_idx, self._cmd_log_len
if n: self._mesg('last %d log messages:' % n)
while n:
try:
self._mesg(*self._cmd_log[i])
except:
pass
i += 1
if i >= self._cmd_log_len:
i = 0
n -= 1
self.debug_lock.release()
class IMAP4_SSL(IMAP4):
"""IMAP4 client class over SSL connection
Instantiate with:
IMAP4_SSL(host=None, port=None, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)
host - host's name (default: localhost);
port - port number (default: standard IMAP4 SSL port);
keyfile - PEM formatted file that contains your private key (default: None);
certfile - PEM formatted certificate chain file (default: None);
ca_certs - PEM formatted certificate chain file used to validate server certificates (default: None);
cert_verify_cb - function to verify authenticity of server certificates (default: None);
debug - debug level (default: 0 - no debug);
debug_file - debug stream (default: sys.stderr);
identifier - thread identifier prefix (default: host);
timeout - timeout in seconds when expecting a command response.
debug_buf_lvl - debug level at which buffering is turned off.
For more documentation see the docstring of the parent class IMAP4.
"""
def __init__(self, host=None, port=None, keyfile=None, certfile=None, ca_certs=None, cert_verify_cb=None, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
self.keyfile = keyfile
self.certfile = certfile
self.ca_certs = ca_certs
self.cert_verify_cb = cert_verify_cb
IMAP4.__init__(self, host, port, debug, debug_file, identifier, timeout, debug_buf_lvl)
def open(self, host=None, port=None):
"""open(host=None, port=None)
Setup secure connection to remote server on "host:port"
(default: localhost:standard IMAP4 SSL port).
This connection will be used by the routines:
read, send, shutdown, socket, ssl."""
self.host = self._choose_nonull_or_dflt('', host)
self.port = self._choose_nonull_or_dflt(IMAP4_SSL_PORT, port)
self.sock = self.open_socket()
self.ssl_wrap_socket()
def read(self, size):
"""data = read(size)
Read at most 'size' bytes from remote."""
if self.decompressor is None:
return self.sock.read(size)
if self.decompressor.unconsumed_tail:
data = self.decompressor.unconsumed_tail
else:
data = self.sock.read(READ_SIZE)
return self.decompressor.decompress(data, size)
def send(self, data):
"""send(data)
Send 'data' to remote."""
if self.compressor is not None:
data = self.compressor.compress(data)
data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
if hasattr(self.sock, "sendall"):
self.sock.sendall(data)
else:
bytes = len(data)
while bytes > 0:
sent = self.sock.write(data)
if sent == bytes:
break # avoid copy
data = data[sent:]
bytes = bytes - sent
def ssl(self):
"""ssl = ssl()
Return ssl instance used to communicate with the IMAP4 server."""
return self.sock
class IMAP4_stream(IMAP4):
"""IMAP4 client class over a stream
Instantiate with:
IMAP4_stream(command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None)
command - string that can be passed to subprocess.Popen();
debug - debug level (default: 0 - no debug);
debug_file - debug stream (default: sys.stderr);
identifier - thread identifier prefix (default: host);
timeout - timeout in seconds when expecting a command response.
debug_buf_lvl - debug level at which buffering is turned off.
For more documentation see the docstring of the parent class IMAP4.
"""
def __init__(self, command, debug=None, debug_file=None, identifier=None, timeout=None, debug_buf_lvl=None):
self.command = command
self.host = command
self.port = None
self.sock = None
self.writefile, self.readfile = None, None
self.read_fd = None
IMAP4.__init__(self, None, None, debug, debug_file, identifier, timeout, debug_buf_lvl)
def open(self, host=None, port=None):
"""open(host=None, port=None)
Setup a stream connection via 'self.command'.
This connection will be used by the routines:
read, send, shutdown, socket."""
from subprocess import Popen, PIPE
self._P = Popen(self.command, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
self.writefile, self.readfile = self._P.stdin, self._P.stdout
self.read_fd = self.readfile.fileno()
def read(self, size):
"""Read 'size' bytes from remote."""
if self.decompressor is None:
return os.read(self.read_fd, size)
if self.decompressor.unconsumed_tail:
data = self.decompressor.unconsumed_tail
else:
data = os.read(self.read_fd, READ_SIZE)
return self.decompressor.decompress(data, size)
def send(self, data):
"""Send data to remote."""
if self.compressor is not None:
data = self.compressor.compress(data)
data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
self.writefile.write(data)
self.writefile.flush()
def shutdown(self):
"""Close I/O established in "open"."""
self.readfile.close()
self.writefile.close()
class _Authenticator(object):
"""Private class to provide en/de-coding
for base64 authentication conversation."""
def __init__(self, mechinst):
self.mech = mechinst # Callable object to provide/process data
def process(self, data, rqb):
ret = self.mech(self.decode(data))
if ret is None:
return '*' # Abort conversation
return self.encode(ret)
def encode(self, inp):
#
# Invoke binascii.b2a_base64 iteratively with
# short even length buffers, strip the trailing
# line feed from the result and append. "Even"
# means a number that factors to both 6 and 8,
# so when it gets to the end of the 8-bit input
# there's no partial 6-bit output.
#
oup = ''
while inp:
if len(inp) > 48:
t = inp[:48]
inp = inp[48:]
else:
t = inp
inp = ''
e = binascii.b2a_base64(t)
if e:
oup = oup + e[:-1]
return oup
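# Hedged sanity sketch (not part of the original module): 48 input bytes
# encode to exactly 64 base64 characters and never need '=' padding, which
# is why the chunking above never leaves a partial 6-bit group mid-stream:
#
#   e = binascii.b2a_base64('x' * 48)
#   assert len(e[:-1]) == 64 and '=' not in e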
def decode(self, inp):
if not inp:
return ''
return binascii.a2b_base64(inp)
class _IdleCont(object):
"""When process is called, server is in IDLE state
and will send asynchronous changes."""
def __init__(self, parent, timeout):
self.parent = parent
self.timeout = parent._choose_nonull_or_dflt(IDLE_TIMEOUT, timeout)
self.parent.idle_timeout = self.timeout + time.time()
def process(self, data, rqb):
self.parent.idle_lock.acquire()
self.parent.idle_rqb = rqb
self.parent.idle_timeout = self.timeout + time.time()
self.parent.idle_lock.release()
if __debug__: self.parent._log(2, 'server IDLE started, timeout in %.2f secs' % self.timeout)
return None
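# Hedged usage sketch: assumes the idle() entry point defined earlier in
# this module, plus a hypothetical host and credentials:
#
#   M = IMAP4_SSL('imap.example.com')
#   M.login('user', 'password')
#   M.select('INBOX')
#   M.idle(timeout=60)   # blocks until a server event or the timeout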
MonthNames = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
Mon2num = dict(zip((x.encode() for x in MonthNames[1:]), range(1, 13)))
InternalDate = re.compile(r'.*INTERNALDATE "'
r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
r'"')
def Internaldate2Time(resp):
"""time_tuple = Internaldate2Time(resp)
Convert IMAP4 INTERNALDATE to UT."""
mo = InternalDate.match(resp)
if not mo:
return None
mon = Mon2num[mo.group('mon')]
zonen = mo.group('zonen')
day = int(mo.group('day'))
year = int(mo.group('year'))
hour = int(mo.group('hour'))
min = int(mo.group('min'))
sec = int(mo.group('sec'))
zoneh = int(mo.group('zoneh'))
zonem = int(mo.group('zonem'))
# INTERNALDATE timezone must be subtracted to get UT
zone = (zoneh*60 + zonem)*60
if zonen == '-':
zone = -zone
tt = (year, mon, day, hour, min, sec, -1, -1, -1)
utc = time.mktime(tt)
# Following is necessary because the time module has no 'mkgmtime'.
# 'mktime' assumes arg in local timezone, so adds timezone/altzone.
lt = time.localtime(utc)
if time.daylight and lt[-1]:
zone = zone + time.altzone
else:
zone = zone + time.timezone
return time.localtime(utc - zone)
Internaldate2tuple = Internaldate2Time # (Backward compatible)
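# Equivalent conversion sketch using calendar.timegm (an assumed
# alternative, not used above), which avoids the mktime timezone/altzone
# compensation entirely:
#
#   import calendar
#   # tt and zone built exactly as in Internaldate2Time above
#   return time.localtime(calendar.timegm(tt) - zone)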
def Time2Internaldate(date_time):
"""'"DD-Mmm-YYYY HH:MM:SS +HHMM"' = Time2Internaldate(date_time)
Convert 'date_time' to IMAP4 INTERNALDATE representation."""
if isinstance(date_time, (int, float)):
tt = time.localtime(date_time)
elif isinstance(date_time, (tuple, time.struct_time)):
tt = date_time
elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
return date_time # Assume in correct format
else:
raise ValueError("date_time not of a known type")
if time.daylight and tt[-1]:
zone = -time.altzone
else:
zone = -time.timezone
return ('"%2d-%s-%04d %02d:%02d:%02d %+03d%02d"' %
((tt[2], MonthNames[tt[1]], tt[0]) + tt[3:6] +
divmod(zone//60, 60)))
FLAGS_cre = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')
def ParseFlags(resp):
"""('flag', ...) = ParseFlags(line)
Convert IMAP4 flags response to python tuple."""
mo = FLAGS_cre.match(resp)
if not mo:
return ()
return tuple(mo.group('flags').split())
| noop |
ht16k33.py | class HT16K33:
def __init__(self, i2, a = 0x70):
self.i2 = i2
self.a = a
self.command(0x21) # Clock on
self.command(0x81) # Display on
self.bright(15)
self.load([0] * 16)
def bright(self, n):
assert 0 <= n < 16
self.command(0xe0 + n)
def command(self, b):
assert(self.i2.start(self.a, 0))
assert(self.i2.write([b]))
self.i2.stop()
def load(self, b128):
| self.i2.start(self.a, 0)
self.i2.write([0] + b128)
self.i2.stop() |
|
validate_secure_metadata.py | #!/usr/bin/env python3
#
# Copyright (c) 2020, Somia Reality Oy
# All rights reserved.
# Installing dependencies:
#
# - Ubuntu/Debian: apt install python3-cryptography python3-jwcrypto
# - Using pip: pip3 install cryptography jwcrypto
from argparse import ArgumentParser
from base64 import b64decode, urlsafe_b64decode, urlsafe_b64encode
from calendar import timegm
from datetime import datetime
from hashlib import sha512
from hmac import compare_digest
from json import dumps, loads
from time import time
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from jwcrypto.jwe import JWE
from jwcrypto.jwk import JWK
MAX_EXPIRE_WINDOW = 10 * 24 * 60 * 60 # 10 days
def assert_secure_metadata(master_key_type, master_key_id, master_key_secret, secure_metadata_str, time_now=None):
if secure_metadata_str.count(".") < 2:
assert master_key_type == "ninchat"
return assert_ninchat_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now)
else:
assert master_key_type == "jwt"
return assert_jwt_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now)
def assert_ninchat_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now=None):
if "." in secure_metadata_str:
key_id, msg_b64 = secure_metadata_str.split(".", 1)
msg_iv = unpadded_urlsafe_b64decode(msg_b64)
else:
key_id, msg_b64 = secure_metadata_str.split("-", 1)
msg_iv = b64decode(msg_b64)
assert key_id == master_key_id
key = b64decode(master_key_secret)
msg_hashed = decrypt_aes_cbc(key, msg_iv)
sha = sha512()
digest = msg_hashed[:sha.digest_size]
msg_padded = msg_hashed[sha.digest_size:]
msg_json = msg_padded.rstrip(b"\0")
sha.update(msg_json)
assert compare_digest(sha.digest(), digest)
msg_json_str = msg_json.decode()
msg = loads(msg_json_str)
assert_not_expired(msg["expire"], time_now)
assert "user_id" not in msg
return msg["metadata"]
def assert_jwt_secure_metadata(master_key_id, master_key_secret, secure_metadata_str, time_now=None):
jwe = JWE()
jwe.allowed_algs = ["dir", "A256GCM"]
jwe.deserialize(secure_metadata_str)
assert jwe.jose_header["alg"] == "dir"
assert jwe.jose_header["enc"] == "A256GCM"
assert jwe.jose_header["kid"] == master_key_id
key = b64decode(master_key_secret)
jwk = JWK(kty="oct", k=urlsafe_b64encode(key).rstrip(b"=").decode())
jwe.decrypt(jwk)
msg_json_str = jwe.payload.decode()
msg = loads(msg_json_str)
assert_not_expired(msg["exp"], time_now)
assert "user_id" not in msg
return msg["ninchat.com/metadata"]
def assert_not_expired(expire_time, time_now=None):
if not time_now:
time_now = time()
assert isinstance(expire_time, (int, float))
assert expire_time > time_now
assert expire_time < time_now + MAX_EXPIRE_WINDOW
def decrypt_aes_cbc(key_bytes, iv_ciphertext):
|
def unpadded_urlsafe_b64decode(unpadded_str):
unpadded_bytes = unpadded_str.encode()
padded_bytes = unpadded_bytes + (b"", None, b"==", b"=")[len(unpadded_bytes) & 3]
return urlsafe_b64decode(padded_bytes)
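# Library-style usage sketch (hypothetical key id and secret; any
# validation failure raises AssertionError):
#
#   metadata = assert_secure_metadata(
#       "jwt",                    # or "ninchat" for the legacy format
#       "my-key-id",
#       "bAse64EncodedSecret==",
#       secure_metadata_str)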
def main():
time_format = "%Y-%m-%dT%H:%M:%SZ"
time_example = datetime.utcnow().strftime(time_format)
parser = ArgumentParser()
parser.add_argument("--now", metavar="TIME", help="fake timestamp (UTC) for checking expiration (example: {})".format(time_example))
parser.add_argument("master-key-type", help='"ninchat" or "jwt"')
parser.add_argument("master-key-id", help="encryption key id")
parser.add_argument("master-key-secret", help="base64-encoded encryption key (as received from Ninchat)")
parser.add_argument("secure-metadata", help="the string to validate")
args = parser.parse_args()
metadata = assert_secure_metadata(
getattr(args, "master-key-type"),
getattr(args, "master-key-id"),
getattr(args, "master-key-secret"),
getattr(args, "secure-metadata"),
timegm(datetime.strptime(args.now, time_format).utctimetuple()) if args.now else None,
)
print(dumps(metadata, indent=2))
if __name__ == "__main__":
main()
| block_len = AES.block_size // 8
assert len(iv_ciphertext) >= 2 * block_len
assert (len(iv_ciphertext) % block_len) == 0
iv = iv_ciphertext[:block_len]
ciphertext = iv_ciphertext[block_len:]
algo = AES(key_bytes)
mode = CBC(iv)
c = Cipher(algo, mode, default_backend())
d = c.decryptor()
plaintext = d.update(ciphertext)
plaintext += d.finalize()
return plaintext |
launcher.py | #!/usr/bin/env python
# pylint: disable=invalid-name
"""The container launcher script that launches DMLC with the right env variable."""
import glob
import sys
import os
import subprocess
def unzip_archives(ar_list, env):
for fname in ar_list:
if not os.path.exists(fname):
continue
if fname.endswith('.zip'):
subprocess.call(args=['unzip', fname], env=env)
elif fname.find('.tar') != -1:
subprocess.call(args=['tar', '-xf', fname], env=env)
def main():
"""Main moduke of the launcher."""
if len(sys.argv) < 2:
print('Usage: launcher.py your command')
sys.exit(0)
hadoop_home = os.getenv('HADOOP_HOME')
hdfs_home = os.getenv('HADOOP_HDFS_HOME')
java_home = os.getenv('JAVA_HOME')
hadoop_home = os.getenv('HADOOP_PREFIX') if hadoop_home is None else hadoop_home
cluster = os.getenv('DMLC_JOB_CLUSTER')
assert cluster is not None, 'need to have DMLC_JOB_CLUSTER'
env = os.environ.copy()
library_path = ['./']
class_path = []
if cluster == 'yarn':
assert hadoop_home is not None, 'need to set HADOOP_HOME'
assert hdfs_home is not None, 'need to set HADOOP_HDFS_HOME'
assert java_home is not None, 'need to set JAVA_HOME'
if cluster == 'sge':
num_worker = int(env['DMLC_NUM_WORKER'])
task_id = int(env['DMLC_TASK_ID'])
if task_id < num_worker:
env['DMLC_ROLE'] = 'worker'
else:
env['DMLC_ROLE'] = 'server'
if hadoop_home:
library_path.append('%s/lib/native' % hdfs_home)
library_path.append('%s/lib' % hdfs_home)
(classpath, _) = subprocess.Popen('%s/bin/hadoop classpath' % hadoop_home,
stdout=subprocess.PIPE, shell=True,
env=os.environ).communicate()
for f in classpath.split(':'):
class_path += glob.glob(f)
if java_home:
library_path.append('%s/jre/lib/amd64/server' % java_home)
env['CLASSPATH'] = '${CLASSPATH}:' + (':'.join(class_path))
| env['LIBHDFS_OPTS'] = env['DMLC_HDFS_OPTS']
elif 'LIBHDFS_OPTS' not in env:
env['LIBHDFS_OPTS'] = '-Xmx128m'
LD_LIBRARY_PATH = env['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in env else ''
env['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH + ':' + ':'.join(library_path)
# unzip the archives.
if 'DMLC_JOB_ARCHIVES' in env:
unzip_archives(env['DMLC_JOB_ARCHIVES'].split(':'), env)
ret = subprocess.call(args=sys.argv[1:], env=env)
sys.exit(ret)
if __name__ == '__main__':
main() | # setup hdfs options
if 'DMLC_HDFS_OPTS' in env: |
main.py | # this file was created by Chris Cozort
# Sources: goo.gl/2KMivS
# now available in github
'''
Curious, Creative, Tenacious (requires hopefulness)
Game ideas:
Walls closing in on player
'''
import pygame as pg
import random
from settings import *
from sprites import *
from os import path
class Game:
def __init__(self):
#init game window
# init pygame and create window
pg.init()
# init sound mixer
pg.mixer.init()
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption("jumpy")
self.clock = pg.time.Clock()
self.running = True
self.font_name = pg.font.match_font(FONT_NAME)
self.load_data()
def load_data(self):
print("load data is called...")
# sets up directory name
self.dir = path.dirname(__file__)
img_dir = path.join(self.dir, 'img')
# opens the high score file for reading
''' 'with' is a context manager that handles both opening and closing of
files, avoiding issues with forgetting to close them
'''
try:
# changed to r to avoid overwriting error
with open(path.join(self.dir, "highscore.txt"), 'r') as f:
self.highscore = int(f.read())
print(self.highscore)
except:
with open(path.join(self.dir, HS_FILE), 'w') as f:
self.highscore = 0
print("exception")
# load spritesheet image
self.spritesheet = Spritesheet(path.join(img_dir, SPRITESHEET))
#load cloud images
self.cloud_images = []
for i in range(1,4):
self.cloud_images.append(pg.image.load(path.join(img_dir, 'cloud{}.png'.format(i))).convert())
# load sounds
# great place for creating sounds: https://www.bfxr.net/
self.snd_dir = path.join(self.dir, 'snd')
self.jump_sound = [pg.mixer.Sound(path.join(self.snd_dir, 'Jump18.wav')),
pg.mixer.Sound(path.join(self.snd_dir, 'Jump24.wav'))]
self.boost_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Jump29.wav'))
self.head_jump_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Jump39.wav'))
def new(self):
self.score = 0
# add all sprites to the pg group
# below no longer needed - using LayeredUpdate group
# self.all_sprites = pg.sprite.Group()
self.all_sprites = pg.sprite.LayeredUpdates()
# create platforms group
self.platforms = pg.sprite.Group()
# create clouds group
self.clouds = pg.sprite.Group()
# add powerups
self.powerups = pg.sprite.Group()
self.mob_timer = 0
# add a player 1 to the group
self.player = Player(self)
# add mobs
self.mobs = pg.sprite.Group()
# no longer needed after passing self.groups in Sprites library file
# self.all_sprites.add(self.player)
# instantiate new platform
for plat in PLATFORM_LIST:
# no longer need to assign to variable because we're passing self.groups in Sprite library
# p = Platform(self, *plat)
Platform(self, *plat)
# no longer needed because we pass in Sprite lib file
# self.all_sprites.add(p)
# self.platforms.add(p)
for i in range(8):
c = Cloud(self)
c.rect.y += 500
# load music
pg.mixer.music.load(path.join(self.snd_dir, 'happy.ogg'))
# call the run method
self.run()
def run(self):
# game loop
# play music
pg.mixer.music.play(loops=-1)
# set boolean playing to true
self.playing = True
while self.playing:
self.clock.tick(FPS)
self.events()
self.update()
self.draw()
pg.mixer.music.fadeout(1000)
# other things that happen when not playing anymore
def update(self):
self.all_sprites.update()
# shall we spawn a mob?
now = pg.time.get_ticks()
if now - self.mob_timer > 5000 + random.choice([-1000, -500, 0, 500, 1000]):
self.mob_timer = now
Mob(self)
##### check for mob collisions ######
# now using collision mask to determine collisions
# can use rectangle collisions here first if we encounter performance issues
mob_hits = pg.sprite.spritecollide(self.player, self.mobs, False, pg.sprite.collide_mask)
if mob_hits:
# can use mask collide here if mob count gets too high and creates performance issues
if self.player.pos.y - 35 < mob_hits[0].rect_top:
print("hit top")
print("player is " + str(self.player.pos.y))
print("mob is " + str(mob_hits[0].rect_top))
self.head_jump_sound.play()
self.player.vel.y = -BOOST_POWER
else:
print("player is " + str(self.player.pos.y))
print("mob is " + str(mob_hits[0].rect_top))
self.playing = False
# check to see if player can jump - if falling
if self.player.vel.y > 0:
hits = pg.sprite.spritecollide(self.player, self.platforms, False)
if hits:
# set var to be current hit in list to find which to 'pop' to when two or more collide with player
find_lowest = hits[0]
for hit in hits:
if hit.rect.bottom > find_lowest.rect.bottom:
print("hit rect bottom " + str(hit.rect.bottom))
find_lowest = hit
# fall if center is off platform
if self.player.pos.x < find_lowest.rect.right + 10 and self.player.pos.x > find_lowest.rect.left - 10:
if self.player.pos.y < find_lowest.rect.centery:
self.player.pos.y = find_lowest.rect.top
self.player.vel.y = 0
self.player.jumping = False
# if player reaches top 1/4 of screen...
if self.player.rect.top <= HEIGHT / 4:
# spawn a cloud
if random.randrange(100) < 13:
Cloud(self)
# set player location based on velocity
self.player.pos.y += max(abs(self.player.vel.y), 2)
for cloud in self.clouds:
cloud.rect.y += max(abs(self.player.vel.y / random.randrange(2, 10)), 2)
# creates slight scroll at the top based on player y velocity
# scroll plats with player
for mob in self.mobs:
# creates slight scroll based on player y velocity
mob.rect.y += max(abs(self.player.vel.y), 2)
for plat in self.platforms:
# creates slight scroll based on player y velocity
plat.rect.y += max(abs(self.player.vel.y), 2)
if plat.rect.top >= HEIGHT + 40:
plat.kill()
self.score += 10
# if player hits a power up
pow_hits = pg.sprite.spritecollide(self.player, self.powerups, True)
for pow in pow_hits:
if pow.type == 'boost':
self.boost_sound.play()
self.player.vel.y = -BOOST_POWER
self.player.jumping = False
# Die!
if self.player.rect.bottom > HEIGHT:
'''make all sprites fall up when player falls'''
for sprite in self.all_sprites:
sprite.rect.y -= max(self.player.vel.y, 10)
'''get rid of sprites as they fall up'''
if sprite.rect.bottom < -25:
sprite.kill()
if len(self.platforms) == 0:
self.playing = False
# generate new random platforms
while len(self.platforms) < 6:
width = random.randrange(50, 100)
''' removed widths and height params to allow for sprites '''
""" changed due to passing into groups through sprites lib file """
# p = Platform(self, random.randrange(0,WIDTH-width),
# random.randrange(-75, -30))
Platform(self, random.randrange(0,WIDTH-width),
random.randrange(-75, -30))
# self.platforms.add(p)
# self.all_sprites.add(p)
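# Hedged perf sketch (not wired in): if mob counts ever hurt the frame rate,
# the mask test in update() could be gated by a cheap rectangle pass first:
#   rough = pg.sprite.spritecollide(self.player, self.mobs, False)
#   mob_hits = [m for m in rough if pg.sprite.collide_mask(self.player, m)]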
def events(self):
for event in pg.event.get():
if event.type == pg.QUIT:
if self.playing:
self.playing = False
self.running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_UP:
self.player.jump()
if event.type == pg.KEYUP:
if event.key == pg.K_UP:
""" # cuts the jump short if the space bar is released """
self.player.jump_cut()
if event.key == pg.K_DOWN:
""" # ends the duck when the down arrow is released """
self.player.duck_cut()
if event.type == pg.KEYDOWN:
if event.key == pg.K_DOWN:
self.player.duck()
def draw(self):
self.screen.fill(SKY_BLUE)
self.all_sprites.draw(self.screen)
""" # not needed now that we're using LayeredUpdates """
# self.screen.blit(self.player.image, self.player.rect)
self.draw_text(str(self.score), 22, WHITE, WIDTH / 2, 15) | while waiting:
self.clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
waiting = False
self.running = False
if event.type ==pg.KEYUP:
waiting = False
def show_start_screen(self):
""" # game splash screen """
self.screen.fill(BLACK)
self.draw_text(TITLE, 48, WHITE, WIDTH/2, HEIGHT/4)
self.draw_text("Arrows to move, Up Arrow to Jump", 22, WHITE, WIDTH/2, HEIGHT/2)
self.draw_text("Press any key to play...", 22, WHITE, WIDTH / 2, HEIGHT * 3/4)
self.draw_text("High score " + str(self.highscore), 22, WHITE, WIDTH / 2, 15)
pg.display.flip()
self.wait_for_key()
def show_go_screen(self):
""" # game splash screen """
if not self.running:
print("not running...")
return
self.screen.fill(BLACK)
self.draw_text(TITLE, 48, WHITE, WIDTH/2, HEIGHT/4)
self.draw_text("Arrows to move, Up Arrow to jump", 22, WHITE, WIDTH/2, HEIGHT/2)
self.draw_text("Press any key to play...", 22, WHITE, WIDTH / 2, HEIGHT * 3/4)
self.draw_text("High score " + str(self.highscore), 22, WHITE, WIDTH / 2, HEIGHT/2 + 40)
if self.score > self.highscore:
self.highscore = self.score
self.draw_text("new high score!", 22, WHITE, WIDTH / 2, HEIGHT/2 + 60)
with open(path.join(self.dir, HS_FILE), 'w') as f:
f.write(str(self.score))
else:
self.draw_text("High score " + str(self.highscore), 22, WHITE, WIDTH / 2, HEIGHT/2 + 40)
pg.display.flip()
self.wait_for_key()
def draw_text(self, text, size, color, x, y):
font = pg.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
g = Game()
g.show_start_screen()
while g.running:
g.new()
try:
g.show_go_screen()
except:
print("can't load go screen...")
g.new()
try:
g.show_go_screen()
except:
print("can't load go screen...") | # double buffering - renders a frame "behind" the displayed frame
pg.display.flip()
def wait_for_key(self):
waiting = True |
ogr_shape.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Shapefile driver testing.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import os
import shutil
import struct
import sys
sys.path.append('../pymod')
import gdaltest
import ogrtest
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
###############################################################################
# Open Shapefile
def ogr_shape_1():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
shape_drv.DeleteDataSource('tmp')
gdaltest.shape_ds = shape_drv.CreateDataSource('tmp')
if gdaltest.shape_ds is not None:
return 'success'
return 'fail'
###############################################################################
# Create table from data/poly.shp
def ogr_shape_2():
if gdaltest.shape_ds is None:
return 'skip'
#######################################################
# Create memory Layer
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer('tpoly')
#######################################################
# Setup Schema
ogrtest.quick_create_layer_def(gdaltest.shape_lyr,
[('AREA', ogr.OFTReal),
('EAS_ID', ogr.OFTInteger),
('PRFEDEA', ogr.OFTString)])
#######################################################
# Copy in poly.shp
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
shp_ds = ogr.Open('data/poly.shp')
gdaltest.shp_ds = shp_ds
shp_lyr = shp_ds.GetLayer(0)
feat = shp_lyr.GetNextFeature()
gdaltest.poly_feat = []
while feat is not None:
gdaltest.poly_feat.append(feat)
dst_feat.SetFrom(feat)
gdaltest.shape_lyr.CreateFeature(dst_feat)
feat = shp_lyr.GetNextFeature()
# Destroy required.
shp_ds.Destroy()
return 'success'
###############################################################################
# Verify that stuff we just wrote is still OK.
def ogr_shape_3():
if gdaltest.shape_ds is None:
return 'skip'
expect = [168, 169, 166, 158, 165]
gdaltest.shape_lyr.SetAttributeFilter('eas_id < 170')
tr = ogrtest.check_features_against_list(gdaltest.shape_lyr,
'eas_id', expect)
gdaltest.shape_lyr.SetAttributeFilter(None)
for i in range(len(gdaltest.poly_feat)):
orig_feat = gdaltest.poly_feat[i]
read_feat = gdaltest.shape_lyr.GetNextFeature()
if ogrtest.check_feature_geometry(read_feat, orig_feat.GetGeometryRef(),
max_error=0.000000001) != 0:
return 'fail'
for fld in range(3):
if orig_feat.GetField(fld) != read_feat.GetField(fld):
gdaltest.post_reason('Attribute %d does not match' % fld)
return 'fail'
gdaltest.poly_feat = None
return 'success' if tr else 'fail'
###############################################################################
# Write a feature without a geometry, and verify that it works OK.
def ogr_shape_4():
if gdaltest.shape_ds is None:
return 'skip'
######################################################################
# Create feature without geometry.
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetField('PRFEDEA', 'nulled')
gdaltest.shape_lyr.CreateFeature(dst_feat)
######################################################################
# Read back the feature and get the geometry.
gdaltest.shape_lyr.SetAttributeFilter("PRFEDEA = 'nulled'")
feat_read = gdaltest.shape_lyr.GetNextFeature()
if feat_read is None:
gdaltest.post_reason('Didnt get feature with null geometry back.')
return 'fail'
if feat_read.GetGeometryRef() is not None:
print(feat_read.GetGeometryRef())
print(feat_read.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Didnt get null geometry as expected.')
return 'fail'
return 'success'
###############################################################################
# Test ExecuteSQL() results layers without geometry.
def ogr_shape_5():
if gdaltest.shape_ds is None:
return 'skip'
expect = [179, 173, 172, 171, 170, 169, 168, 166, 165, 158, None]
sql_lyr = gdaltest.shape_ds.ExecuteSQL('select distinct eas_id from tpoly order by eas_id desc')
tr = ogrtest.check_features_against_list(sql_lyr, 'eas_id', expect)
gdaltest.shape_ds.ReleaseResultSet(sql_lyr)
return 'success' if tr else 'fail'
###############################################################################
# Test ExecuteSQL() results layers with geometry.
def ogr_shape_6():
if gdaltest.shape_ds is None:
return 'skip'
sql_lyr = gdaltest.shape_ds.ExecuteSQL(
"select * from tpoly where prfedea = '35043413'")
tr = ogrtest.check_features_against_list(sql_lyr, 'prfedea', ['35043413'])
if tr:
sql_lyr.ResetReading()
feat_read = sql_lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat_read, 'POLYGON ((479750.688 4764702.000,479658.594 4764670.000,479640.094 4764721.000,479735.906 4764752.000,479750.688 4764702.000))', max_error=0.001) != 0:
tr = 0
gdaltest.shape_ds.ReleaseResultSet(sql_lyr)
return 'success' if tr else 'fail'
###############################################################################
# Test spatial filtering.
def ogr_shape_7():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_lyr.SetAttributeFilter(None)
geom = ogr.CreateGeometryFromWkt(
'LINESTRING(479505 4763195,480526 4762819)')
gdaltest.shape_lyr.SetSpatialFilter(geom)
geom.Destroy()
tr = ogrtest.check_features_against_list(gdaltest.shape_lyr, 'eas_id',
[158])
gdaltest.shape_lyr.SetSpatialFilter(None)
return 'success' if tr else 'fail'
###############################################################################
# Create spatial index, and verify we get the same results.
def ogr_shape_8():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_lyr.SetAttributeFilter(None)
gdaltest.shape_ds.ExecuteSQL('CREATE SPATIAL INDEX ON tpoly')
if not os.access('tmp/tpoly.qix', os.F_OK):
gdaltest.post_reason('tpoly.qix not created')
return 'fail'
geom = ogr.CreateGeometryFromWkt(
'LINESTRING(479505 4763195,480526 4762819)')
gdaltest.shape_lyr.SetSpatialFilter(geom)
geom.Destroy()
tr = ogrtest.check_features_against_list(gdaltest.shape_lyr, 'eas_id',
[158])
gdaltest.shape_lyr.SetSpatialFilter(None)
if not tr:
return 'fail'
# Test recreating while already existing
gdaltest.shape_ds.ExecuteSQL('CREATE SPATIAL INDEX ON tpoly')
gdaltest.shape_ds.ExecuteSQL('DROP SPATIAL INDEX ON tpoly')
if os.access('tmp/tpoly.qix', os.F_OK):
gdaltest.post_reason('tpoly.qix not deleted')
return 'fail'
return 'success'
###############################################################################
# Test that we don't return a polygon if we are "inside" but non-overlapping.
def ogr_shape_9():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_ds = None
gdaltest.shape_ds = ogr.Open('data/testpoly.shp')
gdaltest.shape_lyr = gdaltest.shape_ds.GetLayer(0)
gdaltest.shape_lyr.SetSpatialFilterRect(-10, -130, 10, -110)
if ogrtest.have_geos() and gdaltest.shape_lyr.GetFeatureCount() == 0:
return 'success'
if not ogrtest.have_geos() and gdaltest.shape_lyr.GetFeatureCount() == 1:
return 'success'
return 'fail'
###############################################################################
# Do a fair size query that should pull in a few shapes.
def ogr_shape_10():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_lyr.SetSpatialFilterRect(-400, 22, -120, 400)
tr = ogrtest.check_features_against_list(gdaltest.shape_lyr, 'FID',
[0, 4, 8])
return 'success' if tr else 'fail'
###############################################################################
# Do a mixed indexed attribute and spatial query.
def ogr_shape_11():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_lyr.SetAttributeFilter('FID = 5')
gdaltest.shape_lyr.SetSpatialFilterRect(-400, 22, -120, 400)
tr = ogrtest.check_features_against_list(gdaltest.shape_lyr, 'FID',
[])
if not tr:
return 'fail'
gdaltest.shape_lyr.SetAttributeFilter('FID = 4')
gdaltest.shape_lyr.SetSpatialFilterRect(-400, 22, -120, 400)
tr = ogrtest.check_features_against_list(gdaltest.shape_lyr, 'FID',
[4])
gdaltest.shape_lyr.SetAttributeFilter(None)
gdaltest.shape_lyr.SetSpatialFilter(None)
return 'success' if tr else 'fail'
###############################################################################
# Check that multipolygon of asm.shp is properly returned.
def ogr_shape_12():
if gdaltest.shape_ds is None:
return 'skip'
asm_ds = ogr.Open('data/asm.shp')
asm_lyr = asm_ds.GetLayer(0)
feat = asm_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.GetCoordinateDimension() != 2:
gdaltest.post_reason('dimension wrong.')
return 'fail'
if geom.GetGeometryName() != 'MULTIPOLYGON':
gdaltest.post_reason('Geometry of wrong type.')
return 'fail'
if geom.GetGeometryCount() != 5:
gdaltest.post_reason('Did not get the expected number of polygons.')
return 'fail'
counts = [15, 11, 17, 20, 9]
for i in range(5):
poly = geom.GetGeometryRef(i)
if poly.GetGeometryName() != 'POLYGON':
gdaltest.post_reason('Did not get right type for polygons')
return 'fail'
if poly.GetGeometryCount() != 1:
gdaltest.post_reason('polygon with more than one ring.')
return 'fail'
pnt_count = poly.GetGeometryRef(0).GetPointCount()
if pnt_count != counts[i]:
gdaltest.post_reason(('Polygon %d has %d points instead of %d.' %
(i, pnt_count, counts[i])))
return 'fail'
return 'success'
###############################################################################
# Perform a SetFeature() on a couple features, resetting the size.
def ogr_shape_13():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_ds = None
gdaltest.shape_ds = ogr.Open('tmp/tpoly.shp', update=1)
gdaltest.shape_lyr = gdaltest.shape_ds.GetLayer(0)
######################################################################
# Update FID 9 (EAS_ID=170), making the polygon larger.
feat = gdaltest.shape_lyr.GetFeature(9)
feat.SetField('AREA', '6000.00')
geom = ogr.CreateGeometryFromWkt(
'POLYGON ((0 0, 0 60, 100 60, 100 0, 200 30, 0 0))')
feat.SetGeometry(geom)
if gdaltest.shape_lyr.SetFeature(feat) != 0:
gdaltest.post_reason('SetFeature() failed.')
return 'fail'
######################################################################
# Update FID 8 (EAS_ID=165), making the polygon smaller.
feat = gdaltest.shape_lyr.GetFeature(8)
feat.SetField('AREA', '7000.00')
geom = ogr.CreateGeometryFromWkt(
'POLYGON ((0 0, 0 60, 100 60, 100 0, 0 0))')
feat.SetGeometry(geom)
if gdaltest.shape_lyr.SetFeature(feat) != 0:
gdaltest.post_reason('SetFeature() failed.')
return 'fail'
return 'success'
###############################################################################
# Verify last changes.
def ogr_shape_14():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_ds = None
gdaltest.shape_ds = ogr.Open('tmp/tpoly.shp', update=1)
gdaltest.shape_lyr = gdaltest.shape_ds.GetLayer(0)
######################################################################
# Check FID 9.
feat = gdaltest.shape_lyr.GetFeature(9)
if feat.GetField('AREA') != 6000.0:
gdaltest.post_reason('AREA update failed, FID 9.')
return 'fail'
if ogrtest.check_feature_geometry(feat, 'POLYGON ((0 0, 0 60, 100 60, 100 0, 200 30, 0 0))') != 0:
gdaltest.post_reason('Geometry update failed, FID 9.')
return 'fail'
######################################################################
# Update FID 8 (EAS_ID=165), making the polygon smaller.
feat = gdaltest.shape_lyr.GetFeature(8)
if feat.GetField('AREA') != 7000.0:
gdaltest.post_reason('AREA update failed, FID 8.')
return 'fail'
if ogrtest.check_feature_geometry(feat, 'POLYGON ((0 0, 0 60, 100 60, 100 0, 0 0))') != 0:
gdaltest.post_reason('Geometry update failed, FID 8.')
return 'fail'
return 'success'
###############################################################################
# Delete a feature, and verify reduced count.
def ogr_shape_15():
if gdaltest.shape_ds is None:
return 'skip'
######################################################################
# Delete FID 9.
if gdaltest.shape_lyr.DeleteFeature(9) != 0:
gdaltest.post_reason('DeleteFeature failed.')
return 'fail'
######################################################################
# Count features, verifying that none are FID 9.
count = 0
feat = gdaltest.shape_lyr.GetNextFeature()
while feat is not None:
if feat.GetFID() == 9:
gdaltest.post_reason('Still an FID 9 in dataset.')
return 'fail'
count = count + 1
feat = gdaltest.shape_lyr.GetNextFeature()
if count != 10:
gdaltest.post_reason('Did not get expected FID count.')
return 'fail'
return 'success'
###############################################################################
# Repack and verify a few things.
def ogr_shape_16():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_ds.ExecuteSQL('REPACK tpoly')
######################################################################
# Count features.
got_9 = 0
count = 0
gdaltest.shape_lyr.ResetReading()
feat = gdaltest.shape_lyr.GetNextFeature()
while feat is not None:
if feat.GetFID() == 9:
got_9 = 1
count = count + 1
feat = gdaltest.shape_lyr.GetNextFeature()
if count != 10:
gdaltest.post_reason('Did not get expected FID count.')
return 'fail'
if got_9 == 0:
gdaltest.post_reason('Did not get FID 9 as expected.')
return 'fail'
feat = gdaltest.shape_lyr.GetFeature(9)
return 'success'
###############################################################################
# Test adding a field to the schema of a populated layer.
def ogr_shape_16_1():
if gdaltest.shape_ds is None:
return 'skip'
######################################################################
# Add a new field.
field_defn = ogr.FieldDefn('NEWFLD', ogr.OFTString)
field_defn.SetWidth(12)
result = gdaltest.shape_lyr.CreateField(field_defn)
if result != 0:
gdaltest.post_reason('failed to create new field.')
return 'fail'
######################################################################
# Check at least one feature.
feat = gdaltest.shape_lyr.GetFeature(8)
if feat.EAS_ID != 165:
gdaltest.post_reason('Got wrong EAS_ID')
return 'fail'
if not feat.IsFieldNull('NEWFLD'):
gdaltest.post_reason('Expected NULL NEWFLD value!')
return 'fail'
return 'success'
###############################################################################
# Simple test with point shapefile with no associated .dbf
def ogr_shape_17():
if gdaltest.shape_ds is None:
return 'skip'
shutil.copy('data/can_caps.shp', 'tmp/can_caps.shp')
shutil.copy('data/can_caps.shx', 'tmp/can_caps.shx')
shp_ds = ogr.Open('tmp/can_caps.shp', update=1)
shp_lyr = shp_ds.GetLayer(0)
if shp_lyr.GetLayerDefn().GetFieldCount() != 0:
gdaltest.post_reason('Unexpectedly got attribute fields.')
return 'fail'
count = 0
while 1:
feat = shp_lyr.GetNextFeature()
if feat is None:
break
# Re-write feature to test that we can use SetFeature() without
# a DBF
shp_lyr.SetFeature(feat)
count += 1
if count != 13:
gdaltest.post_reason('Got wrong number of features.')
return 'fail'
# Create new feature without a DBF
feat = ogr.Feature(shp_lyr.GetLayerDefn())
shp_lyr.CreateFeature(feat)
if feat.GetFID() != 13:
print(feat.GetFID())
gdaltest.post_reason('Got wrong FID.')
return 'fail'
shp_lyr = None
shp_ds = None
os.remove('tmp/can_caps.shp')
os.remove('tmp/can_caps.shx')
return 'success'
###############################################################################
# Test reading data/poly.PRJ file with mixed-case file name
def ogr_shape_18():
shp_ds = ogr.Open('data/poly.shp')
shp_lyr = shp_ds.GetLayer(0)
srs_lyr = shp_lyr.GetSpatialRef()
if srs_lyr is None:
gdaltest.post_reason('Missing projection definition.')
return 'fail'
# data/poly.shp has arbitrarily assigned EPSG:27700
srs = osr.SpatialReference()
srs.ImportFromEPSG(27700)
# srs.StripCTParms()
if not srs_lyr.IsSame(srs):
print('')
print('expected = %s' % srs.ExportToPrettyWkt())
print('existing = %s' % srs_lyr.ExportToPrettyWkt())
gdaltest.post_reason('Projections differ')
return 'fail'
return 'success'
###############################################################################
# Test polygon formation logic - recognising what rings are inner/outer
# and deciding on polygon vs. multipolygon (#1217)
def ogr_shape_19():
ds = ogr.Open('data/Stacks.shp')
lyr = ds.GetLayer(0)
lyr.ResetReading()
feat = lyr.GetNextFeature()
wkt = 'MULTIPOLYGON (((3115478.809630727861077 13939288.008583962917328,3134266.47213465673849 13971973.394036004319787,3176989.101938112173229 13957303.575368551537395,3198607.7820796193555 13921787.172278933227062,3169010.779504936654121 13891675.439224690198898,3120368.749186545144767 13897852.204979406669736,3115478.809630727861077 13939288.008583962917328),(3130405.993537959177047 13935427.529987264424562,3135038.567853996530175 13902742.144535223022103,3167209.22282647760585 13902227.414055664092302,3184452.693891727831215 13922559.267998272553086,3172871.258101634215564 13947781.061496697366238,3144561.081725850701332 13957818.305848112329841,3130405.993537959177047 13935427.529987264424562)),((3143016.890287171583623 13932596.512349685654044,3152282.038919246289879 13947266.331017138436437,3166179.761867358349264 13940060.104303302243352,3172099.162382294889539 13928221.303273428231478,3169268.144744716584682 13916897.23272311501205,3158201.439434182830155 13911235.197447959333658,3144818.446965630631894 13911749.927927518263459,3139928.507409813348204 13916382.502243556082249,3143016.890287171583623 13932596.512349685654044),(3149193.65604188805446 13926677.11183474957943,3150737.84748056717217 13918698.789401574060321,3158458.804673962760717 13919728.250360693782568,3164892.935668459162116 13923331.36371761187911,3163863.474709339439869 13928736.033752989023924,3157171.978475063573569 13935427.529987264424562,3149193.65604188805446 13926677.11183474957943)))'
if ogrtest.check_feature_geometry(feat, wkt,
max_error=0.00000001) != 0:
return 'fail'
return 'success'
###############################################################################
# Test empty multipoint, multiline, multipolygon.
# From GDAL 1.6.0, the expected behaviour is to return a feature with a NULL geometry
def ogr_shape_20():
if gdaltest.shape_ds is None:
return 'skip'
ds = ogr.Open('data/emptymultipoint.shp')
lyr = ds.GetLayer(0)
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat is None:
return 'fail'
if feat.GetGeometryRef() is not None:
return 'fail'
ds = ogr.Open('data/emptymultiline.shp')
lyr = ds.GetLayer(0)
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat is None:
return 'fail'
if feat.GetGeometryRef() is not None:
return 'fail'
ds = ogr.Open('data/emptymultipoly.shp')
lyr = ds.GetLayer(0)
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat is None:
return 'fail'
if feat.GetGeometryRef() is not None:
return 'fail'
return 'success'
###############################################################################
# Test robustness towards broken/unfriendly shapefiles
def ogr_shape_21():
if gdaltest.shape_ds is None:
return 'skip'
files = ['data/buggypoint.shp',
'data/buggymultipoint.shp',
'data/buggymultiline.shp',
'data/buggymultipoly.shp',
'data/buggymultipoly2.shp']
for f in files:
ds = ogr.Open(f)
lyr = ds.GetLayer(0)
lyr.ResetReading()
gdal.PushErrorHandler('CPLQuietErrorHandler')
feat = lyr.GetNextFeature()
gdal.PopErrorHandler()
if feat.GetGeometryRef() is not None:
return 'fail'
# Test fix for #3665
lyr.ResetReading()
(minx, maxx, miny, maxy) = lyr.GetExtent()
lyr.SetSpatialFilterRect(minx + 1e-9, miny + 1e-9, maxx - 1e-9, maxy - 1e-9)
gdal.PushErrorHandler('CPLQuietErrorHandler')
feat = lyr.GetNextFeature()
gdal.PopErrorHandler()
if feat is not None and feat.GetGeometryRef() is not None:
return 'fail'
return 'success'
###############################################################################
# Test writing and reading all handled data types
def ogr_shape_22():
if gdaltest.shape_ds is None:
return 'skip'
#######################################################
# Create memory Layer
gdaltest.shape_ds = None
gdaltest.shape_ds = ogr.GetDriverByName('ESRI Shapefile').Open('tmp', update=1)
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer('datatypes')
#######################################################
# Setup Schema
ogrtest.quick_create_layer_def(gdaltest.shape_lyr,
[('REAL', ogr.OFTReal),
('INTEGER', ogr.OFTInteger),
('STRING', ogr.OFTString),
('DATE', ogr.OFTDate)])
#######################################################
# Create a feature
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetField('REAL', 1.2)
dst_feat.SetField('INTEGER', 3)
dst_feat.SetField('STRING', 'aString')
dst_feat.SetField('DATE', '2005/10/12')
gdaltest.shape_lyr.CreateFeature(dst_feat)
gdaltest.shape_ds = None
#######################################################
# Read back the feature
gdaltest.shape_ds = ogr.GetDriverByName('ESRI Shapefile').Open('tmp', update=1)
gdaltest.shape_lyr = gdaltest.shape_ds.GetLayerByName('datatypes')
feat_read = gdaltest.shape_lyr.GetNextFeature()
if feat_read.GetField('REAL') != 1.2 or \
feat_read.GetField('INTEGER') != 3 or \
feat_read.GetField('STRING') != 'aString' or \
feat_read.GetFieldAsString('DATE') != '2005/10/12':
return 'fail'
return 'success'
###############################################################################
# Function used internally by ogr_shape_23.
def ogr_shape_23_write_valid_and_invalid(layer_name, wkt, invalid_wkt, wkbType, isEmpty):
#######################################################
# Create a layer
if wkbType == ogr.wkbUnknown:
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer(layer_name)
else:
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer(layer_name, geom_type=wkbType)
#######################################################
# Write a geometry
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(wkt))
gdaltest.shape_lyr.CreateFeature(dst_feat)
#######################################################
# Write an invalid geometry for this layer type
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(invalid_wkt))
gdal.PushErrorHandler('CPLQuietErrorHandler')
gdaltest.shape_lyr.CreateFeature(dst_feat)
gdal.PopErrorHandler()
#######################################################
# Check feature
gdaltest.shape_ds = None
gdaltest.shape_ds = ogr.GetDriverByName('ESRI Shapefile').Open('tmp', update=1)
read_lyr = gdaltest.shape_ds.GetLayerByName(layer_name)
if read_lyr.GetFeatureCount() != 1:
return 'fail'
feat_read = read_lyr.GetNextFeature()
if isEmpty and feat_read.GetGeometryRef() is None:
return 'success'
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt(wkt),
max_error=0.000000001) != 0:
print(feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
def ogr_shape_23_write_geom(layer_name, geom, expected_geom, wkbType):
#######################################################
# Create a layer
if wkbType == ogr.wkbUnknown:
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer(layer_name)
else:
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer(layer_name, geom_type=wkbType)
#######################################################
# Write a geometry
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetGeometry(geom)
gdaltest.shape_lyr.CreateFeature(dst_feat)
#######################################################
# Check feature
gdaltest.shape_lyr = None
gdaltest.shape_ds = None
gdaltest.shape_ds = ogr.GetDriverByName('ESRI Shapefile').Open('tmp', update=1)
read_lyr = gdaltest.shape_ds.GetLayerByName(layer_name)
if read_lyr.GetFeatureCount() != 1:
return 'fail'
feat_read = read_lyr.GetNextFeature()
if expected_geom is None:
if feat_read.GetGeometryRef() is not None:
print(feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
if ogrtest.check_feature_geometry(feat_read, expected_geom,
max_error=0.000000001) != 0:
print(feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
###############################################################################
# Test writing and reading all handled geometry types
def ogr_shape_23():
if gdaltest.shape_ds is None:
return 'skip'
test_geom_array = [
('points', 'POINT(0 1)', 'LINESTRING(0 1)', ogr.wkbPoint),
('points25D', 'POINT(0 1 2)', 'LINESTRING(0 1)', ogr.wkbPoint25D),
('multipoints', 'MULTIPOINT(0 1,2 3)', 'POINT (0 1)', ogr.wkbMultiPoint),
('multipoints25D', 'MULTIPOINT(0 1 2,3 4 5)', 'POINT (0 1)', ogr.wkbMultiPoint25D),
('linestrings', 'LINESTRING(0 1,2 3,4 5,0 1)', 'POINT (0 1)', ogr.wkbLineString),
('linestrings25D', 'LINESTRING(0 1 2,3 4 5,6 7 8,0 1 2)', 'POINT (0 1)', ogr.wkbLineString25D),
('multilinestrings', 'MULTILINESTRING((0 1,2 3,4 5,0 1), (0 1,2 3,4 5,0 1))', 'POINT (0 1)', ogr.wkbMultiLineString),
('multilinestrings25D', 'MULTILINESTRING((0 1 2,3 4 5,6 7 8,0 1 2),(0 1 2,3 4 5,6 7 8,0 1 2))', 'POINT (0 1)', ogr.wkbMultiLineString25D),
('polygons', 'POLYGON((0 0,0 10,10 10,0 0),(0.25 0.5,1 1,0.5 1,0.25 0.5))', 'POINT (0 1)', ogr.wkbPolygon),
('polygons25D', 'POLYGON((0 0 2,0 10 5,10 10 8,0 1 2))', 'POINT (0 1)', ogr.wkbPolygon25D),
('multipolygons', 'MULTIPOLYGON(((0 0,0 10,10 10,0 0),(0.25 0.5,1 1,0.5 1,0.25 0.5)),((100 0,100 10,110 10,100 0),(100.25 0.5,100.5 1,100 1,100.25 0.5)))', 'POINT (0 1)', ogr.wkbMultiPolygon),
('multipolygons25D', 'MULTIPOLYGON(((0 0 0,0 10,10 10,0 0),(0.25 0.5,1 1,0.5 1,0.25 0.5)),((100 0,100 10,110 10,100 0),(100.25 0.5,100.5 1,100 1,100.25 0.5)))', 'POINT (0 1)', ogr.wkbMultiPolygon25D),
]
test_empty_geom_array = [
('emptypoints', 'POINT EMPTY', 'LINESTRING(0 1)', ogr.wkbPoint),
('emptymultipoints', 'MULTIPOINT EMPTY', 'POINT(0 1)', ogr.wkbMultiPoint),
('emptylinestrings', 'LINESTRING EMPTY', 'POINT(0 1)', ogr.wkbLineString),
('emptymultilinestrings', 'MULTILINESTRING EMPTY', 'POINT(0 1)', ogr.wkbMultiLineString),
('emptypolygons', 'POLYGON EMPTY', 'POINT(0 1)', ogr.wkbPolygon),
('emptymultipolygons', 'MULTIPOLYGON EMPTY', 'POINT(0 1)', ogr.wkbMultiPolygon),
]
#######################################################
# Write a feature in a new layer (geometry type unset at layer creation)
for item in test_geom_array:
if ogr_shape_23_write_valid_and_invalid(item[0], item[1], item[2], ogr.wkbUnknown, 0) != 'success':
gdaltest.post_reason('Test for layer %s failed' % item[0])
return 'fail'
for item in test_empty_geom_array:
if ogr_shape_23_write_valid_and_invalid(item[0], item[1], item[2], ogr.wkbUnknown, 1) != 'success':
gdaltest.post_reason('Test for layer %s failed' % item[0])
return 'fail'
#######################################################
# Same test but use the wkb type when creating the layer
gdaltest.shape_ds = None
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
shape_drv.DeleteDataSource('tmp')
gdaltest.shape_ds = shape_drv.CreateDataSource('tmp')
for item in test_geom_array:
if ogr_shape_23_write_valid_and_invalid(item[0], item[1], item[2], item[3], 0) != 'success':
gdaltest.post_reason('(2) Test for layer %s failed' % item[0])
return 'fail'
for item in test_empty_geom_array:
if ogr_shape_23_write_valid_and_invalid(item[0], item[1], item[2], item[3], 1) != 'success':
gdaltest.post_reason('(2) Test for layer %s failed' % item[0])
return 'fail'
#######################################################
# Test writing of a geometrycollection
layer_name = 'geometrycollections'
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer(layer_name, geom_type=ogr.wkbMultiPolygon)
# This geometry collection is not compatible with a multipolygon layer
geom = ogr.CreateGeometryFromWkt('GEOMETRYCOLLECTION(POINT (0 0))')
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetGeometry(geom)
gdal.PushErrorHandler('CPLQuietErrorHandler')
gdaltest.shape_lyr.CreateFeature(dst_feat)
gdal.PopErrorHandler()
    # This geometry will be treated as a multipolygon
wkt = 'GEOMETRYCOLLECTION(POLYGON((0 0,0 10,10 10,0 0),(0.25 0.5,1 1,0.5 1,0.25 0.5)),POLYGON((100 0,100 10,110 10,100 0),(100.25 0.5,100.5 1,100 1,100.25 0.5)))'
geom = ogr.CreateGeometryFromWkt(wkt)
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetGeometry(geom)
gdaltest.shape_lyr.CreateFeature(dst_feat)
gdaltest.shape_ds = None
gdaltest.shape_ds = ogr.GetDriverByName('ESRI Shapefile').Open('tmp', update=1)
read_lyr = gdaltest.shape_ds.GetLayerByName(layer_name)
feat_read = read_lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('MULTIPOLYGON(((0 0 0,0 10,10 10,0 0),(0.25 0.5,1 1,0.5 1,0.25 0.5)),((100 0,100 10,110 10,100 0),(100.25 0.5,100.5 1,100 1,100.25 0.5)))'),
max_error=0.000000001) != 0:
print(feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
#######################################################
# Test writing of a multipoint with an empty point inside
layer_name = 'strangemultipoints'
wkt = 'MULTIPOINT(0 1)'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPoint))
if ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown) != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
#######################################################
# Test writing of a multilinestring with an empty linestring inside
layer_name = 'strangemultilinestrings'
wkt = 'MULTILINESTRING((0 1,2 3,4 5,0 1), (0 1,2 3,4 5,0 1))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLineString))
if ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown) != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
#######################################################
# Test writing of a polygon with an empty external ring
layer_name = 'polygonwithemptyexternalring'
geom = ogr.CreateGeometryFromWkt('POLYGON EMPTY')
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
ring = ogr.Geometry(type=ogr.wkbLinearRing)
ring.AddPoint_2D(0, 0)
ring.AddPoint_2D(10, 0)
ring.AddPoint_2D(10, 10)
ring.AddPoint_2D(0, 10)
ring.AddPoint_2D(0, 0)
geom.AddGeometry(ring)
if ogr_shape_23_write_geom(layer_name, geom, None, ogr.wkbUnknown) != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
#######################################################
    # Test writing of a polygon with an empty internal ring
layer_name = 'polygonwithemptyinternalring'
wkt = 'POLYGON((100 0,100 10,110 10,100 0))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
if ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown) != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
#######################################################
    # Test writing of a multipolygon with an empty polygon and a polygon with an empty interior ring
layer_name = 'strangemultipolygons'
wkt = 'MULTIPOLYGON(((0 0,0 10,10 10,0 0)), ((100 0,100 10,110 10,100 0)))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
poly = ogr.CreateGeometryFromWkt('POLYGON((100 0,100 10,110 10,100 0))')
poly.AddGeometry(ogr.Geometry(type=ogr.wkbLinearRing))
geom.AddGeometry(poly)
if ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown) != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
return 'success'
###############################################################################
# Test reading a polygon whose outer ring and inner ring touch at one point (#2589)
def ogr_shape_24():
if gdaltest.shape_ds is None:
return 'skip'
layer_name = 'touchingrings'
wkt = 'MULTIPOLYGON(((0 0,0 10,10 10,0 0), (0 0,1 1,0 1,0 0)), ((100 100,100 200,200 200,200 100,100 100)))'
geom = ogr.CreateGeometryFromWkt(wkt)
if ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown) != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
return 'success'
###############################################################################
# Test reading a multipolygon with one part inside the bounding box of the other
# part, but not inside it, and sharing the same first point... (#2589)
def ogr_shape_25():
layer_name = 'touchingrings2'
wkt = 'MULTIPOLYGON(((10 5, 5 5,5 0,0 0,0 10,10 10,10 5)),((10 5,10 0,5 0,5 4.9,10 5)), ((100 100,100 200,200 200,200 100,100 100)))'
geom = ogr.CreateGeometryFromWkt(wkt)
if ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown) != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
# Same test, but use OGR_ORGANIZE_POLYGONS=DEFAULT to avoid relying only on the winding order
layer_name = 'touchingrings3'
wkt = 'MULTIPOLYGON(((10 5, 5 5,5 0,0 0,0 10,10 10,10 5)),((10 5,10 0,5 0,5 4.9,10 5)), ((100 100,100 200,200 200,200 100,100 100)))'
geom = ogr.CreateGeometryFromWkt(wkt)
gdal.SetConfigOption('OGR_ORGANIZE_POLYGONS', 'DEFAULT')
ret = ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown)
gdal.SetConfigOption('OGR_ORGANIZE_POLYGONS', '')
if ret != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
return 'success'
###############################################################################
# Test a polygon made of one outer ring and two inner rings (special case
# in organizePolygons())
def ogr_shape_26():
layer_name = 'oneouterring'
wkt = 'POLYGON ((100 100,100 200,200 200,200 100,100 100),(110 110,120 110,120 120,110 120,110 110),(130 110,140 110,140 120,130 120,130 110))'
geom = ogr.CreateGeometryFromWkt(wkt)
ret = ogr_shape_23_write_geom(layer_name, geom, ogr.CreateGeometryFromWkt(geom.ExportToWkt()), ogr.wkbUnknown)
if ret != 'success':
gdaltest.post_reason('Test for layer %s failed' % layer_name)
return 'fail'
return 'success'
###############################################################################
# Test alternate date formatting (#2746)
def ogr_shape_27():
result = 'success'
ds = ogr.Open('data/water_main_dist.dbf')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.installe_1 != '1989/04/25':
print(feat.installe_1)
gdaltest.post_reason('got wrong date result!')
result = 'fail'
feat = None
lyr = None
ds = None
return result
###############################################################################
# Test reading a 3 GB .DBF (#3011)
def ogr_shape_28():
    # Determine if the filesystem supports sparse files (we don't want to
    # create a real 3 GB file!)
if not gdaltest.filesystem_supports_sparse_files('tmp'):
return 'skip'
for filename in ('tmp/hugedbf.dbf', 'tmp/hugedbf.shp', 'tmp/hugedbf.shx'):
try:
os.remove(filename)
except OSError:
pass
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/hugedbf.shp')
lyr = ds.CreateLayer('test')
field_defn = ogr.FieldDefn()
field_defn.SetName('test')
field_defn.SetWidth(99)
lyr.CreateField(field_defn)
ds = None
os.remove('tmp/hugedbf.shp')
os.remove('tmp/hugedbf.shx')
f = open("tmp/hugedbf.dbf", "rb+")
# Set record count to 24,000,000
f.seek(4, 0)
f.write("\x00".encode('latin1'))
f.write("\x36".encode('latin1'))
f.write("\x6e".encode('latin1'))
f.write("\x01".encode('latin1'))
# Set value for record 23,900,000 at
# offset 2,390,000,066 = (23,900,000 * (99 + 1) + 65) + 1
f.seek(2390000066, 0)
f.write("value_over_2GB".encode('latin1'))
# Extend to 3 GB file
f.seek(3000000000, 0)
f.write("0".encode('latin1'))
f.close()
ds = ogr.Open('tmp/hugedbf.dbf', update=1)
if ds is None:
gdaltest.post_reason('Cannot open tmp/hugedbf.dbf')
return 'fail'
# Check that the hand-written value can be read back
lyr = ds.GetLayer(0)
feat = lyr.GetFeature(23900000)
if feat.GetFieldAsString(0) != 'value_over_2GB':
print(feat.GetFieldAsString(0))
return 'fail'
# Update with a new value
feat.SetField(0, 'updated_value')
lyr.SetFeature(feat)
feat = None
# Test creating a feature over 2 GB file limit -> should work
gdal.ErrorReset()
feat = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateFeature(feat)
gdal.PopErrorHandler()
if ret != 0:
gdaltest.post_reason('failure')
return 'fail'
feat = None
if gdal.GetLastErrorMsg().find('2GB file size limit reached') < 0:
gdaltest.post_reason('did not find expected warning')
return 'fail'
ds = None
# Re-open and check the new value
gdal.SetConfigOption('SHAPE_2GB_LIMIT', 'TRUE')
ds = ogr.Open('tmp/hugedbf.dbf', 1)
gdal.SetConfigOption('SHAPE_2GB_LIMIT', None)
lyr = ds.GetLayer(0)
feat = lyr.GetFeature(23900000)
if feat.GetFieldAsString(0) != 'updated_value':
print(feat.GetFieldAsString(0))
return 'fail'
feat = None
# Test creating a feature over 2 GB file limit -> should fail
gdal.ErrorReset()
feat = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateFeature(feat)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('failure')
return 'fail'
feat = None
if gdal.GetLastErrorMsg().find('2GB file size limit reached') < 0:
gdaltest.post_reason('did not find expected warning')
return 'fail'
ds = None
return 'success'
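# A minimal sketch of the offset arithmetic used above (hypothetical helper;
# assumes the 65-byte header of this single-field DBF): each record is one
# deletion-flag byte followed by the field bytes, so the first field of
# record i starts at header_length + i * (field_width + 1) + 1.
def ogr_shape_28_field_offset(record_index, field_width=99, header_length=65):
    # The trailing +1 skips the record's own deletion-flag byte.
    return header_length + record_index * (field_width + 1) + 1
# e.g. ogr_shape_28_field_offset(23900000) == 2390000066, the seek used above.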
###############################################################################
# Test that REPACK doesn't change extension case (#3293)
def ogr_shape_29():
os.mkdir('tmp/UPPERCASE')
shutil.copy('data/poly.shp', 'tmp/UPPERCASE/UPPERCASE.SHP')
shutil.copy('data/poly.shx', 'tmp/UPPERCASE/UPPERCASE.SHX')
shutil.copy('data/poly.dbf', 'tmp/UPPERCASE/UPPERCASE.DBF')
f = open('tmp/UPPERCASE/UPPERCASE.CPG', 'wb')
f.write('UTF-8'.encode('ascii'))
f.close()
ds = ogr.Open('tmp/UPPERCASE', update=1)
lyr = ds.GetLayer(0)
lyr.DeleteFeature(0)
ds.ExecuteSQL('REPACK UPPERCASE')
ds = None
lst = gdal.ReadDir('tmp/UPPERCASE')
if len(lst) != 6:
print(lst)
return 'fail'
for filename in lst:
if filename not in ['.', '..', 'UPPERCASE.SHP', 'UPPERCASE.SHX', 'UPPERCASE.DBF', 'UPPERCASE.CPG']:
gdaltest.post_reason('fail')
print(lst)
print(filename)
return 'fail'
if filename.find('packed') >= 0:
gdaltest.post_reason('fail')
print(lst)
print(filename)
return 'fail'
return 'success'
###############################################################################
# Test that REPACK doesn't change extension case (#3293)
def ogr_shape_30():
os.mkdir('tmp/lowercase')
shutil.copy('data/poly.shp', 'tmp/lowercase/lowercase.shp')
shutil.copy('data/poly.shx', 'tmp/lowercase/lowercase.shx')
shutil.copy('data/poly.dbf', 'tmp/lowercase/lowercase.dbf')
ds = ogr.Open('tmp/lowercase', update=1)
lyr = ds.GetLayer(0)
lyr.DeleteFeature(0)
ds.ExecuteSQL('REPACK lowercase')
ds = None
lst = gdal.ReadDir('tmp/lowercase')
if len(lst) != 5:
print(lst)
return 'fail'
for filename in lst:
if filename not in ['.', '..', 'lowercase.shp', 'lowercase.shx', 'lowercase.dbf']:
print(lst)
return 'fail'
return 'success'
###############################################################################
# Test truncation of long and duplicate field names.
# FIXME: Empty field names are allowed now!
def ogr_shape_31():
if gdaltest.shape_ds is None:
return 'skip'
fields = [('a', ogr.OFTReal),
('A', ogr.OFTInteger),
('A_1', ogr.OFTInteger),
('A_1', ogr.OFTInteger),
('a_1_2', ogr.OFTInteger),
('aaaaaAAAAAb', ogr.OFTInteger),
('aAaaaAAAAAc', ogr.OFTInteger),
('aaaaaAAAABa', ogr.OFTInteger),
('aaaaaAAAABb', ogr.OFTInteger),
('aaaaaAAA_1', ogr.OFTInteger),
('aaaaaAAAABc', ogr.OFTInteger),
('aaaaaAAAABd', ogr.OFTInteger),
('aaaaaAAAABe', ogr.OFTInteger),
('aaaaaAAAABf', ogr.OFTInteger),
('aaaaaAAAABg', ogr.OFTInteger),
('aaaaaAAAABh', ogr.OFTInteger),
('aaaaaAAAABi', ogr.OFTInteger),
('aaaaaAAA10', ogr.OFTString),
('', ogr.OFTInteger),
('', ogr.OFTInteger)]
expected_fields = ['a',
'A_1',
'A_1_1',
'A_1_2',
'a_1_2_1',
'aaaaaAAAAA',
'aAaaaAAA_1',
'aaaaaAAAAB',
'aaaaaAAA_2',
'aaaaaAAA_3',
'aaaaaAAA_4',
'aaaaaAAA_5',
'aaaaaAAA_6',
'aaaaaAAA_7',
'aaaaaAAA_8',
'aaaaaAAA_9',
'aaaaaAAA10',
'aaaaaAAA11',
'',
'_1']
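    # DBF field names are limited to 10 characters; the driver truncates
    # longer names and disambiguates duplicates (case-insensitively) by
    # replacing the tail of the name with a counter, as the list above shows.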
#######################################################
# Create Layer
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer('Fields')
#######################################################
# Setup Schema with weird field names
gdal.PushErrorHandler('CPLQuietErrorHandler')
ogrtest.quick_create_layer_def(gdaltest.shape_lyr, fields)
gdal.PopErrorHandler()
layer_defn = gdaltest.shape_lyr.GetLayerDefn()
error_occurred = False
for i in range(layer_defn.GetFieldCount()):
if layer_defn.GetFieldDefn(i).GetNameRef() != expected_fields[i]:
            print('Expected', expected_fields[i], 'but got', layer_defn.GetFieldDefn(i).GetNameRef())
error_occurred = True
if error_occurred:
return 'fail'
return 'success'
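# A hypothetical sketch (illustration only, not the driver's actual C++ code)
# of the truncate-and-dedup rule that the expected_fields list above encodes:
def ogr_shape_31_laundered_name(name, taken):
    candidate, n = name[:10], 0
    # Names are compared case-insensitively; on collision, the tail of the
    # 10-character budget is replaced by a counter ('_1'..'_9', then '10'...).
    while candidate.lower() in taken:
        n += 1
        suffix = '_%d' % n if n < 10 else '%d' % n
        candidate = name[:10 - len(suffix)] + suffix
    taken.add(candidate.lower())
    return candidate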
###############################################################################
# Test creating a nearly 4GB (2^32 Bytes) .shp (#3236)
# Check for proper error report.
# Assuming 2^32 is the max value for unsigned int.
def ogr_shape_32():
# This test takes a few minutes and disk space. Hence, skipped by default.
# To run this test, make sure that the directory BigFilePath points to has
# 4.5 GB space available or give a new directory that does and delete the
# directory afterwards.
return 'skip' # pylint: disable=unreachable
# pylint: disable=unreachable
from decimal import Decimal
BigFilePath = '/tmp'
#######################################################
# Create a layer
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
gdaltest.shape_ds_big = shape_drv.CreateDataSource(BigFilePath)
gdaltest.shape_lyr = gdaltest.shape_ds_big.CreateLayer("bigLayer", geom_type=ogr.wkbPolygon)
#######################################################
# Write a geometry repeatedly.
# File size is pre-calculated according to the geometry's size.
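    # (Each polygon below should serialize to a 188-byte .shp record: an
    # 8-byte record header, 4 bytes of shape type, a 32-byte bounding box,
    # 4 bytes each for NumParts and NumPoints, 2 * 4 bytes of part indices
    # and 8 * 16 bytes of point coordinates; 22,845,571 such records plus
    # the 100-byte file header just exceed 2^32 bytes.)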
wkt = 'POLYGON((0 0,0 10,10 10,0 0),(0.25 0.5,1 1.1,0.5 1,0.25 0.5))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AddGeometry(ogr.Geometry(type=ogr.wkbPolygon))
ret = 0
n = 0
print('')
for n in range(0, 22845571):
dst_feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
dst_feat.SetGeometry(geom)
ret = gdaltest.shape_lyr.CreateFeature(dst_feat)
if ret != 0 and n < 22845570:
print('File limit reached before 4GB!')
return 'fail'
if (n % 22846) == 0:
sys.stdout.write('\r%.1f%% ' % (n / Decimal('228460.0')))
sys.stdout.flush()
#######################################################
# Check some features
gdaltest.shape_ds_big = None
gdaltest.shape_ds_big = ogr.GetDriverByName('ESRI Shapefile').Open(BigFilePath, update=0)
read_lyr = gdaltest.shape_ds_big.GetLayerByName('bigLayer')
for i in [0, 1, read_lyr.GetFeatureCount() - 1]:
feat_read = read_lyr.GetFeature(i)
if feat_read is None:
print('Could not retrieve geometry at FID', i)
return 'fail'
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('POLYGON((0 0,0 10,10 10,0 0),(0.25 0.5,1 1.1,0.5 1,0.25 0.5))'),
max_error=0.000000001) != 0:
print('Wrong geometry encountered at FID', i, ':', (feat_read.GetGeometryRef().ExportToWkt()))
return 'fail'
return 'success'
###############################################################################
# Check that we can detect correct winding order even with polygons with big
# coordinate offset (#3356)
def ogr_shape_33():
ds = ogr.Open('data/bigoffset.shp')
lyr = ds.GetLayer(0)
feat_read = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('MULTIPOLYGON( ((0 0,0 1,1 1,1 0,0 0)),((100000000000 100000000000,100000000000 100000000001,100000000001 100000000001,100000000001 100000000000,100000000000 100000000000)) )'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
###############################################################################
# Check that we can write correct winding order even with polygons with big
# coordinate offset (#33XX)
def ogr_shape_34():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/bigoffset.shp')
lyr = ds.CreateLayer('bigoffset')
feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
geom_wkt = 'MULTIPOLYGON( ((0 0,0 1,1 1,1 0,0 0)),((100000000000 100000000000,100000000000 100000000001,100000000001 100000000001,100000000001 100000000000,100000000000 100000000000)) )'
geom = ogr.CreateGeometryFromWkt(geom_wkt)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
ds = None
ds = ogr.Open('tmp/bigoffset.shp')
lyr = ds.GetLayer(0)
feat_read = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('MULTIPOLYGON( ((0 0,0 1,1 1,1 0,0 0)),((100000000000 100000000000,100000000000 100000000001,100000000001 100000000001,100000000001 100000000000,100000000000 100000000000)) )'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
###############################################################################
# Check that we can read & write a VSI*L dataset
def ogr_shape_35():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/test35.shp')
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
lyr = ds.CreateLayer('test35', srs=srs)
feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
geom_wkt = 'POINT(0 1)'
geom = ogr.CreateGeometryFromWkt(geom_wkt)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
ds = None
ds = ogr.Open('/vsimem/test35.shp')
lyr = ds.GetLayer(0)
srs_read = lyr.GetSpatialRef()
if srs_read.ExportToWkt() != srs.ExportToWkt():
gdaltest.post_reason('did not get expected SRS')
print(srs_read)
return 'fail'
feat_read = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('POINT(0 1)'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
###############################################################################
# Check that we can read from the root of a .ZIP file
def ogr_shape_36():
ds = ogr.Open('/vsizip/data/poly.zip')
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
srs = lyr.GetSpatialRef()
wkt = srs.ExportToWkt()
if wkt.find('OSGB') == -1:
gdaltest.post_reason('did not get expected SRS')
return 'fail'
feat_read = lyr.GetFeature(9)
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('POLYGON ((479750.6875 4764702.0,479658.59375 4764670.0,479640.09375 4764721.0,479735.90625 4764752.0,479750.6875 4764702.0))'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
###############################################################################
# Check that we can read from the root of a .tar.gz file
def ogr_shape_37():
ds = ogr.Open('/vsitar/data/poly.tar.gz')
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
srs = lyr.GetSpatialRef()
wkt = srs.ExportToWkt()
if wkt.find('OSGB') == -1:
gdaltest.post_reason('did not get expected SRS')
return 'fail'
for i in range(10):
feat_read = lyr.GetNextFeature()
if i == 9:
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('POLYGON ((479750.6875 4764702.0,479658.59375 4764670.0,479640.09375 4764721.0,479735.90625 4764752.0,479750.6875 4764702.0))'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
lyr.ResetReading()
feat_read = lyr.GetFeature(9)
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('POLYGON ((479750.6875 4764702.0,479658.59375 4764670.0,479640.09375 4764721.0,479735.90625 4764752.0,479750.6875 4764702.0))'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
ds = None
gdal.Unlink('data/poly.tar.gz.properties')
return 'success'
###############################################################################
# Check that we can read from a .tar file
def ogr_shape_37_bis():
ds = ogr.Open('/vsitar/data/poly.tar')
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
srs = lyr.GetSpatialRef()
wkt = srs.ExportToWkt()
if wkt.find('OSGB') == -1:
gdaltest.post_reason('did not get expected SRS')
return 'fail'
for i in range(10):
feat_read = lyr.GetNextFeature()
if i == 9:
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('POLYGON ((479750.6875 4764702.0,479658.59375 4764670.0,479640.09375 4764721.0,479735.90625 4764752.0,479750.6875 4764702.0))'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
lyr.ResetReading()
feat_read = lyr.GetFeature(9)
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('POLYGON ((479750.6875 4764702.0,479658.59375 4764670.0,479640.09375 4764721.0,479735.90625 4764752.0,479750.6875 4764702.0))'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
###############################################################################
# Check that we cannot create duplicated layers
def ogr_shape_38():
ds = ogr.Open('/vsimem/', update=1)
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr = ds.CreateLayer('test35')
gdal.PopErrorHandler()
ds = None
if lyr is not None:
gdaltest.post_reason('should not have created a new layer')
return 'fail'
return 'success'
###############################################################################
# Test reading a multipatch and its conversion to a geometry collection
def ogr_shape_39():
ds = ogr.Open('data/multipatch.shp')
lyr = ds.GetLayer(0)
feat_read = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat_read, ogr.CreateGeometryFromWkt('GEOMETRYCOLLECTION (TIN (((5 4 10,0 0 5,10 0 5,5 4 10)),((5 4 10,10 0 5,10 8 5,5 4 10)),((5 4 10,10 8 5,0 8 5,5 4 10)),((5 4 10,0 8 5,0 0 5,5 4 10))),TIN (((10 0 5,10 0 0,10 8 5,10 0 5)),((10 0 0,10 8 5,10 8 0,10 0 0)),((10 8 5,10 8 0,0 8 5,10 8 5)),((10 8 0,0 8 5,0 8 0,10 8 0)),((0 8 5,0 8 0,0 0 5,0 8 5)),((0 8 0,0 0 5,0 0 0,0 8 0))),MULTIPOLYGON (((0 0 0,0 0 5,10 0 5,10 0 0,6 0 0,6 0 3,4 0 3,4 0 0,0 0 0),(1 0 2,3 0 2,3 0 4,1 0 4,1 0 2),(7 0 2,9 0 2,9 0 4,7 0 4,7 0 2))))'),
max_error=0.000000001) != 0:
print('Wrong geometry : %s' % feat_read.GetGeometryRef().ExportToWkt())
return 'fail'
return 'success'
###############################################################################
# Make some changes to a shapefile and check the index files (.qix, .sbn & .sbx)
def ogr_shape_40():
if gdaltest.shape_ds is None:
return 'skip'
datafiles = ('gjpoint.dbf', 'gjpoint.shp', 'gjpoint.shx')
indexfiles = ('gjpoint.sbn', 'gjpoint.sbx', 'gjpoint.qix')
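    # .qix is GDAL's spatial index; .sbn/.sbx are ESRI's. Any write can
    # invalidate them, so the driver is expected to delete all three whenever
    # a feature is updated, created or deleted.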
for f in datafiles:
shutil.copy(os.path.join('data', f), os.path.join('tmp', f))
for i in range(2):
shutil.copy(os.path.join('data', indexfiles[i]), os.path.join('tmp', indexfiles[i]))
gdaltest.shape_ds = ogr.Open('tmp/gjpoint.shp', update=1)
gdaltest.shape_lyr = gdaltest.shape_ds.GetLayer(0)
gdaltest.shape_lyr.SetAttributeFilter(None)
gdaltest.shape_ds.ExecuteSQL('CREATE SPATIAL INDEX ON gjpoint')
# Check if updating a feature removes the indices
feat = gdaltest.shape_lyr.GetFeature(0)
geom = ogr.CreateGeometryFromWkt('POINT (99 1)')
feat.SetGeometry(geom)
for f in indexfiles:
if not os.path.exists(os.path.join('tmp', f)):
print('SetFeature(): ' + f)
return 'fail'
gdaltest.shape_lyr.SetFeature(feat)
for f in indexfiles:
if os.path.exists(os.path.join('tmp', f)):
print('SetFeature(): ' + f)
return 'fail'
# Check if adding a feature removes the indices
for i in range(2):
shutil.copy(os.path.join('data', indexfiles[i]), os.path.join('tmp', indexfiles[i]))
gdaltest.shape_ds = ogr.Open('tmp/gjpoint.shp', update=1)
gdaltest.shape_lyr = gdaltest.shape_ds.GetLayer(0)
gdaltest.shape_lyr.SetAttributeFilter(None)
gdaltest.shape_ds.ExecuteSQL('CREATE SPATIAL INDEX ON gjpoint')
feat = ogr.Feature(gdaltest.shape_lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT (98 2)')
feat.SetGeometry(geom)
feat.SetField('NAME', 'Point 2')
feat.SetField('FID', '2')
feat.SetFID(1)
for f in indexfiles:
if not os.path.exists(os.path.join('tmp', f)):
print('CreateFeature(): ' + f)
return 'fail'
gdaltest.shape_lyr.CreateFeature(feat)
for f in indexfiles:
if os.path.exists(os.path.join('tmp', f)):
print('CreateFeature(): ' + f)
return 'fail'
# Check if deleting a feature removes the indices
for i in range(2):
shutil.copy(os.path.join('data', indexfiles[i]), os.path.join('tmp', indexfiles[i]))
gdaltest.shape_ds = ogr.Open('tmp/gjpoint.shp', update=1)
gdaltest.shape_lyr = gdaltest.shape_ds.GetLayer(0)
gdaltest.shape_lyr.SetAttributeFilter(None)
gdaltest.shape_ds.ExecuteSQL('CREATE SPATIAL INDEX ON gjpoint')
for f in indexfiles:
if not os.path.exists(os.path.join('tmp', f)):
print('DeleteFeature(): ' + f)
return 'fail'
if gdaltest.shape_lyr.DeleteFeature(0) != 0:
gdaltest.post_reason('DeleteFeature failed.')
return 'fail'
for f in indexfiles:
if os.path.exists(os.path.join('tmp', f)):
print('DeleteFeature(): ' + f)
return 'fail'
return 'success'
###############################################################################
# Run test_ogrsf
def ogr_shape_41():
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
return 'skip'
shutil.copy('data/poly.shp', 'tmp/poly.shp')
shutil.copy('data/poly.shx', 'tmp/poly.shx')
shutil.copy('data/poly.dbf', 'tmp/poly.dbf')
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -fsf tmp/poly.shp')
os.remove('tmp/poly.shp')
os.remove('tmp/poly.shx')
os.remove('tmp/poly.dbf')
if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Run test_ogrsf with -sql
def ogr_shape_42():
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
return 'skip'
shutil.copy('data/poly.shp', 'tmp/poly.shp')
shutil.copy('data/poly.shx', 'tmp/poly.shx')
shutil.copy('data/poly.dbf', 'tmp/poly.dbf')
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' tmp/poly.shp -sql "SELECT * FROM poly"')
os.remove('tmp/poly.shp')
os.remove('tmp/poly.shx')
os.remove('tmp/poly.dbf')
if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test /vsizip//vsicurl/
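# (VSI prefixes chain right to left: /vsicurl/ streams the remote file over
# HTTP, then /vsizip/ exposes the archive's contents.)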
def ogr_shape_43():
drv = gdal.GetDriverByName('HTTP')
if drv is None:
return 'skip'
conn = gdaltest.gdalurlopen('https://raw.githubusercontent.com/OSGeo/gdal/master/autotest/ogr/data/poly.zip')
if conn is None:
print('cannot open URL')
return 'skip'
conn.close()
ds = ogr.Open('/vsizip//vsicurl/https://raw.githubusercontent.com/OSGeo/gdal/master/autotest/ogr/data/poly.zip')
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
srs = lyr.GetSpatialRef()
wkt = srs.ExportToWkt()
if wkt.find('OSGB') == -1:
gdaltest.post_reason('did not get expected SRS')
return 'fail'
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('did not get expected feature')
return 'fail'
return 'success'
###############################################################################
# Test /vsicurl/ on a directory
def ogr_shape_44_DISABLED():
drv = gdal.GetDriverByName('HTTP')
if drv is None:
return 'skip'
conn = gdaltest.gdalurlopen('https://raw.githubusercontent.com/OSGeo/gdal/master/autotest/ogr/data/poly.zip')
if conn is None:
print('cannot open URL')
return 'skip'
conn.close()
ds = ogr.Open('/vsicurl/https://raw.githubusercontent.com/OSGeo/gdal/master/autotest/ogr/data/testshp')
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
srs = lyr.GetSpatialRef()
wkt = srs.ExportToWkt()
if wkt.find('OSGB') == -1:
gdaltest.post_reason('did not get expected SRS')
return 'fail'
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('did not get expected feature')
return 'fail'
return 'success'
###############################################################################
# Test that ignored fields work OK on a shapefile.
def ogr_shape_45():
shp_ds = ogr.Open('data/poly.shp')
shp_layer = shp_ds.GetLayer(0)
shp_layer.SetIgnoredFields(['AREA'])
feat = shp_layer.GetNextFeature()
if feat.IsFieldSet('AREA'):
gdaltest.post_reason('got area despite request to ignore it.')
return 'fail'
if feat.GetFieldAsInteger('EAS_ID') != 168:
gdaltest.post_reason('missing or wrong eas_id')
return 'fail'
wkt = 'POLYGON ((479819.84375 4765180.5,479690.1875 4765259.5,479647.0 4765369.5,479730.375 4765400.5,480039.03125 4765539.5,480035.34375 4765558.5,480159.78125 4765610.5,480202.28125 4765482.0,480365.0 4765015.5,480389.6875 4764950.0,480133.96875 4764856.5,480080.28125 4764979.5,480082.96875 4765049.5,480088.8125 4765139.5,480059.90625 4765239.5,480019.71875 4765319.5,479980.21875 4765409.5,479909.875 4765370.0,479859.875 4765270.0,479819.84375 4765180.5))'
if ogrtest.check_feature_geometry(feat, wkt,
max_error=0.00000001) != 0:
return 'fail'
fd = shp_layer.GetLayerDefn()
fld = fd.GetFieldDefn(0) # area
if not fld.IsIgnored():
gdaltest.post_reason('AREA unexpectedly not marked as ignored.')
return 'fail'
fld = fd.GetFieldDefn(1) # eas_id
if fld.IsIgnored():
        gdaltest.post_reason('EAS_ID unexpectedly marked as ignored.')
return 'fail'
if fd.IsGeometryIgnored():
gdaltest.post_reason('geometry unexpectedly ignored.')
return 'fail'
if fd.IsStyleIgnored():
gdaltest.post_reason('style unexpectedly ignored.')
return 'fail'
fd.SetGeometryIgnored(1)
if not fd.IsGeometryIgnored():
gdaltest.post_reason('geometry unexpectedly not ignored.')
return 'fail'
feat = shp_layer.GetNextFeature()
if feat.GetGeometryRef() is not None:
gdaltest.post_reason('Unexpectedly got a geometry on feature 2.')
return 'fail'
if feat.IsFieldSet('AREA'):
gdaltest.post_reason('got area despite request to ignore it.')
return 'fail'
if feat.GetFieldAsInteger('EAS_ID') != 179:
gdaltest.post_reason('missing or wrong eas_id')
return 'fail'
feat = None
shp_layer = None
shp_ds = None
return 'success'
###############################################################################
# This is a very weird use case: the user creates/opens a datasource
# made of a single shapefile 'foo.shp' and wants to add a new layer
# to it, 'bar'. So we create a new shapefile 'bar.shp' in the same
# directory as 'foo.shp'
def ogr_shape_46():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_46.shp')
ds.CreateLayer('you_can_put_here_what_you_want_i_dont_care')
ds.CreateLayer('this_one_i_care_46')
ds = None
ds = ogr.Open('/vsimem/ogr_shape_46.shp')
if ds.GetLayerCount() != 1:
return 'fail'
ds = None
ds = ogr.Open('/vsimem/this_one_i_care_46.shp')
if ds.GetLayerCount() != 1:
return 'fail'
ds = None
return 'success'
###############################################################################
# Test that we can open a symlink whose target isn't a real file, but a
# virtual filename that OGR recognizes
def ogr_shape_47():
if not gdaltest.support_symlink():
return 'skip'
gdal.Unlink('tmp/poly.zip')
os.symlink('/vsizip/data/poly.zip', 'tmp/poly.zip')
ds = ogr.Open('tmp/poly.zip')
if ds is None:
        gdaltest.post_reason('tmp/poly.zip symlink does not open.')
return 'fail'
ds = None
os.remove('tmp/poly.zip')
return 'success'
###############################################################################
# Test RECOMPUTE EXTENT ON (#4027)
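# (The .shp header caches the layer extent, and it only grows on writes:
# replacing the (1 2) point with (3 4) leaves a stale minimum behind until
# RECOMPUTE EXTENT ON rescans the features and rewrites the header.)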
def ogr_shape_48():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_48.shp')
lyr = ds.CreateLayer('ogr_shape_48')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 2)'))
lyr.CreateFeature(feat)
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(3 4)'))
lyr.SetFeature(feat)
extent = lyr.GetExtent()
if extent != (1, 3, 2, 4):
gdaltest.post_reason('did not get expected extent (1)')
print(lyr.GetExtent())
return 'fail'
ds.ExecuteSQL('RECOMPUTE EXTENT ON ogr_shape_48')
extent = lyr.GetExtent()
if extent != (3, 3, 4, 4):
gdaltest.post_reason('did not get expected extent (2)')
print(lyr.GetExtent())
return 'fail'
ds = None
ds = ogr.Open('/vsimem/ogr_shape_48.shp')
lyr = ds.GetLayer(0)
extent = lyr.GetExtent()
if extent != (3, 3, 4, 4):
gdaltest.post_reason('did not get expected extent (3)')
print(lyr.GetExtent())
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_48.shp')
# Test with Polygon
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_48.shp')
lyr = ds.CreateLayer('ogr_shape_48')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 -1,-1 -1,-1 0,0 0))'))
lyr.CreateFeature(feat)
feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 1,1 0,0 0))'))
lyr.SetFeature(feat)
ds.ExecuteSQL('RECOMPUTE EXTENT ON ogr_shape_48')
extent = lyr.GetExtent()
if extent != (0, 1, 0, 1):
gdaltest.post_reason('did not get expected extent (4)')
print(lyr.GetExtent())
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_48.shp')
# Test with PolygonZ
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_48.shp')
lyr = ds.CreateLayer('ogr_shape_48')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((0 0 -2,0 -1 -2,-1 -1 -2,-1 0 -2,0 0 -2))'))
lyr.CreateFeature(feat)
feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((0 0 2,0 1 2,1 1 2,1 0 2,0 0 2))'))
lyr.SetFeature(feat)
ds.ExecuteSQL('RECOMPUTE EXTENT ON ogr_shape_48')
# FIXME: when we have a GetExtent3D
extent = lyr.GetExtent()
if extent != (0, 1, 0, 1):
gdaltest.post_reason('did not get expected extent (4)')
print(lyr.GetExtent())
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_48.shp')
return 'success'
###############################################################################
# Test that we can read an LDID/87 file and recode to UTF-8.
def ogr_shape_49():
ds = ogr.Open('data/facility_surface_dd.dbf')
lyr = ds.GetLayer(0)
feat = lyr.GetFeature(91)
name = feat.GetField('NAME')
# Setup the utf-8 string.
if sys.version_info >= (3, 0, 0):
gdaltest.exp_name = 'OSEBERG S\u00D8R'
else:
exec("gdaltest.exp_name = u'OSEBERG S\u00D8R'")
gdaltest.exp_name = gdaltest.exp_name.encode('utf-8')
if name != gdaltest.exp_name:
gdaltest.post_reason('Did not get expected name, encoding problems?')
return 'fail'
return 'success'
###############################################################################
# Test that we can read encoded field names
def ogr_shape_50():
ds = ogr.Open('data/chinese.dbf')
if ds is None:
return 'skip'
lyr = ds.GetLayer(0)
reconv_possible = lyr.TestCapability(ogr.OLCStringsAsUTF8) == 1
if gdal.GetLastErrorMsg().find('Recode from CP936 to UTF-8 not supported, treated as ISO-8859-1 to UTF-8.') != -1:
if reconv_possible:
gdaltest.post_reason('Recode failed, but TestCapability(OLCStringsAsUTF8) returns TRUE')
return 'fail'
gdaltest.post_reason('skipping test: iconv support needed')
return 'skip'
# Setup the utf-8 string.
if sys.version_info >= (3, 0, 0):
gdaltest.fieldname = '\u4e2d\u56fd'
else:
exec("gdaltest.fieldname = u'\u4e2d\u56fd'")
gdaltest.fieldname = gdaltest.fieldname.encode('utf-8')
if lyr.GetLayerDefn().GetFieldIndex(gdaltest.fieldname) != 0:
print(lyr.GetLayerDefn().GetFieldDefn(0).GetNameRef())
return 'fail'
if not reconv_possible:
gdaltest.post_reason('TestCapability(OLCStringsAsUTF8) should return TRUE')
return 'fail'
return 'success'
###############################################################################
# Test that we can add a field when there's no dbf file initially
def ogr_shape_51():
if int(gdal.VersionInfo('VERSION_NUM')) < 1900:
gdaltest.post_reason('would crash')
return 'skip'
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_51.shp')
lyr = ds.CreateLayer('ogr_shape_51')
feat = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(feat)
ds = None
gdal.Unlink('/vsimem/ogr_shape_51.dbf')
ds = ogr.Open('/vsimem/ogr_shape_51.shp', update=1)
lyr = ds.GetLayer(0)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
feat = lyr.GetNextFeature()
feat.SetField(0, 'bar')
lyr.SetFeature(feat)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_51.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
value = feat.GetFieldAsString(0)
field_count = lyr.GetLayerDefn().GetFieldCount()
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_51.shp')
if field_count != 1:
gdaltest.post_reason('did not get expected field count')
print(field_count)
return 'fail'
if value != 'bar':
gdaltest.post_reason('did not get expected value')
print(value)
return 'fail'
return 'success'
###############################################################################
# Test fix for #3356
def ogr_shape_52():
    expected_geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON (((175.524709766699999 -40.17203475,175.524757883299998 -40.172050566700001,175.52480505 -40.1720663,175.524858766699992 -40.172091433299997,175.524913916700001 -40.172112966699999,175.524966049999989 -40.172136933300003,175.525030633299991 -40.17216185,175.5250873 -40.17218215,175.52515168330001 -40.1722011,175.525217666700001 -40.172221216700002,175.525269416700013 -40.172234466699997,175.5253165 -40.1722478,175.52535415 -40.1722577667,175.52538385 -40.17226365,175.525436816699994 -40.1722814333,175.525507016700004 -40.17229905,175.525594783299994 -40.172322033299999,175.525669933300009 -40.172339533299997,175.52574 -40.17235335,175.525807566699996 -40.1723672,175.52585005 -40.17237395,175.52588115 -40.172378683300003,175.525969816700012 -40.172388633300002,175.526057266700008 -40.1724020833,175.52723455 -40.17253515,175.527275583299996 -40.1725388,175.527324533300003 -40.17254675,175.527394866700007 -40.172552766700001,175.527473066699997 -40.172561616700001,175.527576666700014 -40.172572916699998,175.527678333300003 -40.172584266699999,175.527787883299993 -40.17259845,175.52789345 -40.172609716700002,175.527953933300012 -40.17261295,175.528028083300001 -40.1726174,175.52809835 -40.1726219333,175.528151650000012 -40.172625833300003,175.528190349999988 -40.17262725,175.528230900000011 -40.172631183299998,175.5282776 -40.1726338,175.528322800000012 -40.172637633299999,175.5283648 -40.17263915,175.5284115 -40.172641766700004,175.528452133299993 -40.17264435,175.528492133300006 -40.172646033299998,175.52856465 -40.17264805,175.528621733300014 -40.1726492,175.52868035 -40.172650333299998,175.528751333299994 -40.172652383299997,175.528814566699992 -40.1726534,175.528883933299994 -40.172653116699998,175.528939383300013 -40.17265195,175.529002566700001 -40.1726518,175.529070350000012 -40.172650366699997,175.529136633299998 -40.17265015,175.529193616700013 -40.17264895,175.529250616700011 -40.172647733300003,175.529313800000011 -40.172647583299998,175.529376783299995 -40.172647016699997,175.52895773329999 -40.172694633299997,175.528450866700013 -40.172752216699998,175.52835635 -40.172753466700001,175.52741181670001 -40.1727757333,175.52685245 -40.172532333299998,175.52627245 -40.172501266700003,175.5262405167 -40.172502816700003,175.5258356 -40.172522816700003,175.5256125 -40.172533833300001,175.525424433300003 -40.172543116699998,175.524834133300004 -40.1725533,175.524739033299994 -40.172414983300001,175.5247128 -40.17207405,175.524709766699999 -40.17203475)),((175.531267916699989 -40.17286525,175.5312654 -40.172863283300003,175.531252849999987 -40.172853516700002,175.531054566699993 -40.172822366699997,175.530193283300008 -40.172687333299997,175.529890266699994 -40.1726398,175.529916116700008 -40.172639383300002,175.529972483300014 -40.172639216699999,175.53002885 -40.1726398,175.530085183300002 -40.17264115,175.530141500000013 -40.17264325,175.530197733300014 -40.172646133299999,175.530253916699991 -40.172649766699998,175.530309983299986 -40.172654166699999,175.53036595 -40.172659333299997,175.5304218 -40.17266525,175.53047748329999 -40.172671916699997,175.530533016699991 -40.17267935,175.5305883833 -40.1726875333,175.530643533300008 -40.172696466700003,175.530722333299991 -40.172710633299999,175.530800633300004 -40.1727263167,175.5308541 -40.17273795,175.5309073 -40.1727503,175.530960216700009 -40.172763366700003,175.531012816700013 -40.172777133300002,175.5310651 -40.1727916,175.53111705 -40.172806766699999,175.531168650000012 '
                                              '-40.172822633300001,175.531219883299997 -40.172839183299999,175.531270733300005 -40.1728564,175.531267916699989 -40.17286525)))')
ds = ogr.Open('data/test3356.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, expected_geom,
max_error=0.000000001) != 0:
gdaltest.post_reason('failed reading geom')
return 'fail'
ds = None
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_52.shp')
lyr = ds.CreateLayer('ogr_shape_52')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(expected_geom)
lyr.CreateFeature(feat)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_52.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(feat, expected_geom,
max_error=0.000000001) != 0:
gdaltest.post_reason('failed writing and reading back geom')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test various expected error cases
def ogr_shape_53():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_53.shp')
lyr = ds.CreateLayer('ogr_shape_53')
# Test ReorderFields() when there are no fields
ret = lyr.ReorderFields([])
if ret != 0:
gdaltest.post_reason('failed')
return 'fail'
# Test REPACK when there are no features
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = ds.ExecuteSQL("REPACK ogr_shape_53")
gdal.PopErrorHandler()
# Should work without any error
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('failed')
return 'fail'
# Create a field
fd = ogr.FieldDefn("foo", ogr.OFTString)
lyr.CreateField(fd)
    # GetFeature() on an invalid FID
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
feat = lyr.GetFeature(-1)
gdal.PopErrorHandler()
if feat is not None or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
    # SetFeature() on an invalid FID
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
feat = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.SetFeature(feat)
feat = None
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('failed')
return 'fail'
    # SetFeature() on an invalid FID
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetFID(1000)
ret = lyr.SetFeature(feat)
feat = None
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('failed')
return 'fail'
    # DeleteFeature() on an invalid FID
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.DeleteFeature(-1)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('failed')
return 'fail'
feat = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(feat)
feat = None
ret = lyr.DeleteFeature(0)
if ret != 0:
gdaltest.post_reason('failed')
return 'fail'
# Try deleting an already deleted feature
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.DeleteFeature(0)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('failed')
return 'fail'
    # Test DeleteField() on an invalid index
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.DeleteField(-1)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test ReorderFields() with invalid permutation
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.ReorderFields([1])
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
    # Test AlterFieldDefn() on an invalid index
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
fd = ogr.FieldDefn("foo2", ogr.OFTString)
ret = lyr.AlterFieldDefn(-1, fd, 0)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test AlterFieldDefn() when attempting to convert from OFTString to something else
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
fd = ogr.FieldDefn("foo", ogr.OFTInteger)
ret = lyr.AlterFieldDefn(0, fd, ogr.ALTER_TYPE_FLAG)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test DROP SPATIAL INDEX ON layer without index
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = ds.ExecuteSQL("DROP SPATIAL INDEX ON ogr_shape_53")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Re-create a feature
feat = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(feat)
feat = None
lyr = None
ds = None
# Test that some operations are not possible in read-only mode
ds = ogr.Open('/vsimem/ogr_shape_53.shp')
lyr = ds.GetLayer(0)
if lyr.TestCapability(ogr.OLCSequentialWrite) != 0:
gdaltest.post_reason('failed')
return 'fail'
if lyr.TestCapability(ogr.OLCDeleteFeature) != 0:
gdaltest.post_reason('failed')
return 'fail'
if lyr.TestCapability(ogr.OLCCreateField) != 0:
gdaltest.post_reason('failed')
return 'fail'
if lyr.TestCapability(ogr.OLCDeleteField) != 0:
gdaltest.post_reason('failed')
return 'fail'
if lyr.TestCapability(ogr.OLCReorderFields) != 0:
gdaltest.post_reason('failed')
return 'fail'
if lyr.TestCapability(ogr.OLCAlterFieldDefn) != 0:
gdaltest.post_reason('failed')
return 'fail'
# Test CreateField()
fd = ogr.FieldDefn("bar", ogr.OFTString)
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateField(fd)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test ReorderFields()
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.ReorderFields([0])
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test DeleteField()
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.DeleteField(0)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test AlterFieldDefn()
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
fd = ogr.FieldDefn("foo2", ogr.OFTString)
ret = lyr.AlterFieldDefn(0, fd, 0)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test CreateFeature()
feat = ogr.Feature(lyr.GetLayerDefn())
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateFeature(feat)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test DeleteFeature()
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.DeleteFeature(0)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test SetFeature()
feat = lyr.GetNextFeature()
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.SetFeature(feat)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test REPACK
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = ds.ExecuteSQL("REPACK ogr_shape_53")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test RECOMPUTE EXTENT ON
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = ds.ExecuteSQL("RECOMPUTE EXTENT ON ogr_shape_53")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
feat = None
lyr = None
ds = None
# Attempt to delete shape in shapefile with no .dbf file
gdal.Unlink('/vsimem/ogr_shape_53.dbf')
ds = ogr.Open('/vsimem/ogr_shape_53.shp', update=1)
lyr = ds.GetLayer(0)
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.DeleteFeature(0)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
# Test REPACK
ds.ExecuteSQL("REPACK ogr_shape_53")
lyr = None
ds = None
# Tests on a DBF only
ds = ogr.Open('data/idlink.dbf')
lyr = ds.GetLayer(0)
# Test GetExtent()
# FIXME : GetExtent() should fail. Currently we'll get garbage here
lyr.GetExtent()
# Test RECOMPUTE EXTENT ON
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = ds.ExecuteSQL("RECOMPUTE EXTENT ON ogr_shape_53")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
lyr = None
ds = None
return 'success'
###############################################################################
# Test accessing a shape datasource with hundreds of layers (#4306)
def ogr_shape_54_create_layer(ds, layer_index):
lyr = ds.CreateLayer('layer%03d' % layer_index)
lyr.CreateField(ogr.FieldDefn('strfield', ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, 'val%d' % layer_index)
if (layer_index % 2) == 0:
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (%d %d)' % (layer_index, layer_index + 1)))
lyr.CreateFeature(feat)
feat = None
def ogr_shape_54_test_layer(ds, layer_index):
lyr = ds.GetLayerByName('layer%03d' % layer_index)
if lyr is None:
gdaltest.post_reason('failed for layer %d' % layer_index)
return 'fail'
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('failed for layer %d' % layer_index)
return 'fail'
if feat.GetField(0) != 'val%d' % layer_index:
gdaltest.post_reason('failed for layer %d' % layer_index)
return 'fail'
if (layer_index % 2) == 0:
if feat.GetGeometryRef() is None or \
feat.GetGeometryRef().ExportToWkt() != 'POINT (%d %d)' % (layer_index, layer_index + 1):
gdaltest.post_reason('failed for layer %d' % layer_index)
return 'fail'
return 'success'
def ogr_shape_54():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds_name = '/vsimem/ogr_shape_54'
# ds_name = 'tmp/ogr_shape_54'
N = 500
LRUListSize = 100
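    # The driver keeps only a bounded pool of open layers (an LRU list of
    # about LRUListSize file handle sets), transparently closing and
    # reopening the least recently used ones; N > LRUListSize exercises this.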
# Test creating N layers
ds = shape_drv.CreateDataSource(ds_name)
for i in range(N):
ogr_shape_54_create_layer(ds, i)
ds = None
# Test access to the N layers in sequence
ds = ogr.Open(ds_name)
for i in range(N):
ret = ogr_shape_54_test_layer(ds, i)
if ret != 'success':
return ret
# Now some 'random' access
ret = ogr_shape_54_test_layer(ds, N - 1 - LRUListSize)
if ret != 'success':
return ret
    ret = ogr_shape_54_test_layer(ds, N - LRUListSize // 2)
if ret != 'success':
return ret
    ret = ogr_shape_54_test_layer(ds, N - LRUListSize // 4)
if ret != 'success':
return ret
ret = ogr_shape_54_test_layer(ds, 0)
if ret != 'success':
return ret
ret = ogr_shape_54_test_layer(ds, 0)
if ret != 'success':
return ret
ret = ogr_shape_54_test_layer(ds, 2)
if ret != 'success':
return ret
ret = ogr_shape_54_test_layer(ds, 1)
if ret != 'success':
return ret
ds = None
# Test adding a new layer
ds = ogr.Open(ds_name, update=1)
ogr_shape_54_create_layer(ds, N)
ds = None
# Test accessing the new layer
ds = ogr.Open(ds_name)
ret = ogr_shape_54_test_layer(ds, N)
if ret != 'success':
return ret
ds = None
# Test deleting layers
ds = ogr.Open(ds_name, update=1)
for i in range(N):
ret = ogr_shape_54_test_layer(ds, i)
if ret != 'success':
return ret
for i in range(N - LRUListSize + 1, N):
ds.ExecuteSQL('DROP TABLE layer%03d' % i)
ret = ogr_shape_54_test_layer(ds, N - LRUListSize)
if ret != 'success':
return ret
ogr_shape_54_create_layer(ds, N + 2)
for i in range(0, N - LRUListSize + 1):
ds.ExecuteSQL('DROP TABLE layer%03d' % i)
ret = ogr_shape_54_test_layer(ds, N)
if ret != 'success':
return ret
ret = ogr_shape_54_test_layer(ds, N + 2)
if ret != 'success':
return ret
ds = None
# Destroy and recreate datasource
shape_drv.DeleteDataSource(ds_name)
ds = shape_drv.CreateDataSource(ds_name)
for i in range(N):
ogr_shape_54_create_layer(ds, i)
ds = None
    # Reopen in read-only so as to be able to delete files
# if testing on a real filesystem.
ds = ogr.Open(ds_name)
# Test corner case where we cannot reopen a closed layer
ideletedlayer = 0
gdal.Unlink(ds_name + '/' + 'layer%03d.shp' % ideletedlayer)
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr = ds.GetLayerByName('layer%03d' % ideletedlayer)
gdal.PopErrorHandler()
if lyr is not None:
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr.ResetReading()
lyr.GetNextFeature()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('failed')
return 'fail'
gdal.ErrorReset()
ideletedlayer = 1
gdal.Unlink(ds_name + '/' + 'layer%03d.dbf' % ideletedlayer)
lyr = ds.GetLayerByName('layer%03d' % ideletedlayer)
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr.ResetReading()
lyr.GetNextFeature()
gdal.PopErrorHandler()
# if gdal.GetLastErrorMsg() == '':
# gdaltest.post_reason('failed')
# return 'fail'
gdal.ErrorReset()
ds = None
return 'success'
###############################################################################
# Test that we cannot add more fields than the maximum allowed
def ogr_shape_55():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds_name = '/vsimem/ogr_shape_55'
ds = shape_drv.CreateDataSource(ds_name)
lyr = ds.CreateLayer('ogr_shape_55')
max_field_count = int((65535 - 33) / 32) # 2046
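    # A DBF header is 32 bytes, plus a 32-byte descriptor per field, plus a
    # terminator byte; since the header length is a 16-bit value, at most
    # (65535 - 33) / 32 = 2046 fields fit.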
for i in range(max_field_count):
if i == 255:
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateField(ogr.FieldDefn('foo%d' % i, ogr.OFTInteger))
if i == 255:
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('expecting a warning for 256th field added')
return 'fail'
if ret != 0:
gdaltest.post_reason('failed creating field foo%d' % i)
return 'fail'
i = max_field_count
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateField(ogr.FieldDefn('foo%d' % i, ogr.OFTInteger))
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('should have failed creating field foo%d' % i)
return 'fail'
feat = ogr.Feature(lyr.GetLayerDefn())
for i in range(max_field_count):
feat.SetField(i, i)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
for i in range(max_field_count):
feat.SetField(i, i)
lyr.CreateFeature(feat)
ds = None
return 'success'
###############################################################################
# Test that we cannot add more fields than the maximum record length allows
def ogr_shape_56():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds_name = '/vsimem/ogr_shape_56'
ds = shape_drv.CreateDataSource(ds_name)
lyr = ds.CreateLayer('ogr_shape_56')
max_field_count = int(65535 / 80) # 819
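    # The DBF record length is a 16-bit value too; with the default 80-byte
    # width of string fields, at most 65535 / 80 = 819 of them fit per record.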
for i in range(max_field_count):
if i == 255:
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateField(ogr.FieldDefn('foo%d' % i, ogr.OFTString))
if i == 255:
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('expecting a warning for 256th field added')
return 'fail'
if ret != 0:
gdaltest.post_reason('failed creating field foo%d' % i)
return 'fail'
i = max_field_count
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateField(ogr.FieldDefn('foo%d' % i, ogr.OFTString))
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('should have failed creating field foo%d' % i)
return 'fail'
feat = ogr.Feature(lyr.GetLayerDefn())
for i in range(max_field_count):
feat.SetField(i, 'foo%d' % i)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
for i in range(max_field_count):
feat.SetField(i, 'foo%d' % i)
lyr.CreateFeature(feat)
ds = None
return 'success'
###############################################################################
# Test that we emit a warning if truncation of a field value occurs
def ogr_shape_57():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds_name = '/vsimem/ogr_shape_57'
ds = shape_drv.CreateDataSource(ds_name)
lyr = ds.CreateLayer('ogr_shape_57')
field_defn = ogr.FieldDefn('foo', ogr.OFTString)
field_defn.SetWidth(1024)
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr.CreateField(field_defn)
gdal.PopErrorHandler()
# print(gdal.GetLastErrorMsg())
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('expecting a warning')
return 'fail'
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, '0123456789' * 27)
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr.CreateFeature(feat)
gdal.PopErrorHandler()
# print(gdal.GetLastErrorMsg())
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('expecting a warning')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test creating and reading back all geometry types
def ogr_shape_58():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds_name = '/vsimem/ogr_shape_58'
ds = shape_drv.CreateDataSource(ds_name)
wkt_list = ['POINT (0 1)',
'POINT (0 1 2)',
'MULTIPOINT (0 1,2 3)',
'MULTIPOINT (0 1 2,3 4 5)',
'LINESTRING (0 1,2 3)',
'LINESTRING (0 1 2,3 4 5)',
'MULTILINESTRING ((0 1,2 3),(0 1,2 3))',
'MULTILINESTRING ((0 1 2,3 4 5),(0 1 2,3 4 5))',
'POLYGON ((0 0,0 1,1 1,1 0,0 0))',
'POLYGON ((0 0 2,0 1 2,1 1 2,1 0 2,0 0 2))',
'MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)),((0 0,0 1,1 1,1 0,0 0)))',
'MULTIPOLYGON (((0 0 2,0 1 2,1 1 2,1 0 2,0 0 2)),((0 0 2,0 1 2,1 1 2,1 0 2,0 0 2)))']
for wkt in wkt_list:
geom = ogr.CreateGeometryFromWkt(wkt)
layer_name = geom.GetGeometryName()
if geom.GetGeometryType() & ogr.wkb25Bit:
layer_name = layer_name + "3D"
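# ogr.wkb25Bit flags 2.5D (Z) geometry types; suffix the layer name so the
# 2D and 3D variants of each geometry type get distinct layers.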
lyr = ds.CreateLayer(layer_name)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_58')
for wkt in wkt_list:
geom = ogr.CreateGeometryFromWkt(wkt)
layer_name = geom.GetGeometryName()
if geom.GetGeometryType() & ogr.wkb25Bit:
layer_name = layer_name + "3D"
lyr = ds.GetLayerByName(layer_name)
lyr.ResetReading()
feat = lyr.GetNextFeature()
geom_read = feat.GetGeometryRef()
if geom_read.ExportToWkt() != wkt:
gdaltest.post_reason(
'did not get expected geom for field %s' % layer_name)
print(geom_read.ExportToWkt())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading a shape with XYM geometries
def ogr_shape_59():
if gdaltest.shape_ds is None:
return 'skip'
shp_ds = ogr.Open('data/testpointm.shp')
if shp_ds is None:
return 'skip'
shp_lyr = shp_ds.GetLayer(0)
feat = shp_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.GetGeometryName() != 'POINT':
print(geom.GetGeometryName())
gdaltest.post_reason('Geometry of wrong type.')
return 'fail'
if geom.GetCoordinateDimension() != 2:
gdaltest.post_reason('dimension wrong.')
return 'fail'
if geom.GetPointZM(0) != (1.0, 2.0, 0.0, 3.0):
print(geom.GetPoint(0))
gdaltest.post_reason('Did not get right point result.')
return 'fail'
shp_ds = ogr.Open('data/arcm_with_m.shp')
shp_lyr = shp_ds.GetLayer(0)
feat = shp_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.ExportToIsoWkt() != 'LINESTRING M (0 0 10,1 1 20)':
print(geom.ExportToIsoWkt())
gdaltest.post_reason('fail')
return 'fail'
feat = shp_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.ExportToIsoWkt() != 'MULTILINESTRING M ((0 0 10,1 1 20),(2 2 30,3 3 40))':
gdaltest.post_reason('fail')
return 'fail'
geom = None
feat = None
shp_ds = ogr.Open('data/polygonm_with_m.shp')
shp_lyr = shp_ds.GetLayer(0)
feat = shp_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.ExportToIsoWkt() != 'POLYGON M ((0 0 10,0 1 20,1 1 30,0 0 40))':
print(geom.ExportToIsoWkt())
gdaltest.post_reason('fail')
return 'fail'
feat = shp_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.ExportToIsoWkt() != 'POLYGON M ((0 0 10,0 1 20,1 1 30,0 0 40),(0.25 0.25 50,0.75 0.75 60,0.25 0.75 70,0.25 0.25 80))':
print(geom.ExportToIsoWkt())
gdaltest.post_reason('fail')
return 'fail'
geom = None
feat = None
return 'success'
###############################################################################
# Test reading a shape with XYZM geometries
def ogr_shape_60():
if gdaltest.shape_ds is None:
return 'skip'
shp_ds = ogr.Open('data/testpointzm.shp')
if shp_ds is None:
return 'skip'
shp_lyr = shp_ds.GetLayer(0)
feat = shp_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.GetGeometryName() != 'POINT':
gdaltest.post_reason('Geometry of wrong type.')
return 'fail'
if geom.GetCoordinateDimension() != 3:
gdaltest.post_reason('dimension wrong.')
return 'fail'
if geom.GetPoint(0) != (1.0, 2.0, 3.0):
print(geom.GetPoint(0))
gdaltest.post_reason('Did not get right point result.')
return 'fail'
geom = None
feat = None
return 'success'
###############################################################################
# Test field auto-growing
def ogr_shape_61():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds_name = '/vsimem/ogr_shape_61'
ds = shape_drv.CreateDataSource(ds_name)
lyr = ds.CreateLayer('ogr_shape_61')
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
field_defn.SetWidth(1)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, '0123456789' * 8)
feat.SetField(1, 2)
lyr.CreateFeature(feat)
feat = None
field_defn = lyr.GetLayerDefn().GetFieldDefn(0)
if field_defn.GetWidth() != 80:
gdaltest.post_reason('did not get initial field size')
print(field_defn.GetWidth())
return 'fail'
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, '0123456789' * 9)
feat.SetField(1, 34)
lyr.CreateFeature(feat)
feat = None
field_defn = lyr.GetLayerDefn().GetFieldDefn(0)
if field_defn.GetWidth() != 90:
gdaltest.post_reason('did not extend field')
print(field_defn.GetWidth())
return 'fail'
field_defn = lyr.GetLayerDefn().GetFieldDefn(1)
if field_defn.GetWidth() != 2:
gdaltest.post_reason('did not extend field')
print(field_defn.GetWidth())
return 'fail'
ds = None
ds = ogr.Open(ds_name)
lyr = ds.GetLayer(0)
field_defn = lyr.GetLayerDefn().GetFieldDefn(0)
if field_defn.GetWidth() != 90:
gdaltest.post_reason('did not get expected field size')
print(field_defn.GetWidth())
return 'fail'
feat = lyr.GetFeature(1)
val = feat.GetFieldAsString(0)
if val != '0123456789' * 9:
gdaltest.post_reason('did not get expected field value')
print(val)
return 'fail'
val = feat.GetFieldAsInteger(1)
if val != 34:
gdaltest.post_reason('did not get expected field value')
print(val)
return 'fail'
return 'success'
###############################################################################
# Test field resizing
def ogr_shape_62():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds_name = '/vsimem/ogr_shape_62'
ds = shape_drv.CreateDataSource(ds_name)
lyr = ds.CreateLayer('ogr_shape_62', options=['RESIZE=YES'])
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('bar', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('baz', ogr.OFTInteger))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, 'hugehugehugehuge')
lyr.CreateFeature(feat)
feat = None
lyr.DeleteFeature(0)
values = ['ab', 'deef', 'ghi']
for value in values:
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, value)
feat.SetField(2, 12)
lyr.CreateFeature(feat)
feat = None
ds = None
# Reopen file
ds = ogr.Open(ds_name)
lyr = ds.GetLayer(0)
# Check
field_defn = lyr.GetLayerDefn().GetFieldDefn(0)
if field_defn.GetWidth() != 4:
gdaltest.post_reason('did not get expected field size')
print(field_defn.GetWidth())
return 'fail'
# Reopen file
ds = ogr.Open(ds_name, update=1)
lyr = ds.GetLayer(0)
# Should do nothing
ds.ExecuteSQL('RESIZE ogr_shape_62')
# Check
lyr.ResetReading()
for expected_value in values:
feat = lyr.GetNextFeature()
got_val = feat.GetFieldAsString(0)
if got_val != expected_value:
gdaltest.post_reason('did not get expected value')
print(got_val)
return 'fail'
got_val = feat.GetFieldAsInteger(2)
if got_val != 12:
gdaltest.post_reason('did not get expected value')
print(got_val)
return 'fail'
ds = None
return 'success'
###############################################################################
# More testing of recoding
def ogr_shape_63():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_63.dbf')
lyr = ds.CreateLayer('ogr_shape_63', geom_type=ogr.wkbNone)
gdaltest.fieldname = '\xc3\xa9'
if lyr.CreateField(ogr.FieldDefn(gdaltest.fieldname, ogr.OFTString)) != 0:
gdaltest.post_reason('failed')
return 'fail'
gdaltest.fieldname = '\xc3\xa9\xc3\xa9'
if lyr.AlterFieldDefn(0, ogr.FieldDefn(gdaltest.fieldname, ogr.OFTString), ogr.ALTER_NAME_FLAG) != 0:
gdaltest.post_reason('failed')
return 'fail'
chinese_str = struct.pack('B' * 6, 229, 144, 141, 231, 167, 176)
if sys.version_info >= (3, 0, 0):
chinese_str = chinese_str.decode('UTF-8')
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.AlterFieldDefn(0, ogr.FieldDefn(chinese_str, ogr.OFTString), ogr.ALTER_NAME_FLAG)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('failed')
return 'fail'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateField(ogr.FieldDefn(chinese_str, ogr.OFTString))
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('failed')
return 'fail'
ds = None
ds = ogr.Open('/vsimem/ogr_shape_63.dbf')
lyr = ds.GetLayer(0)
if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:
gdaltest.post_reason('failed')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(0).GetName() != gdaltest.fieldname:
gdaltest.post_reason('failed')
print(gdaltest.fieldname)
return 'fail'
ds = None
# Set an invalid encoding
gdal.FileFromMemBuffer('/vsimem/ogr_shape_63.cpg', 'FOO')
ds = ogr.Open('/vsimem/ogr_shape_63.dbf')
lyr = ds.GetLayer(0)
# TestCapability(OLCStringsAsUTF8) should return FALSE
if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 0:
gdaltest.post_reason('failed')
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_shape_63.dbf')
gdal.Unlink('/vsimem/ogr_shape_63.cpg')
return 'success'
###############################################################################
# Test creating layers whose names include a dot character
def ogr_shape_64():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_64')
lyr = ds.CreateLayer('a.b')
if lyr.GetName() != 'a.b':
gdaltest.post_reason('failed')
return 'fail'
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('foo', 'bar')
lyr.CreateFeature(feat)
feat = None
lyr = ds.CreateLayer('a.c')
if lyr.GetName() != 'a.c':
gdaltest.post_reason('failed')
return 'fail'
# Test that we cannot create a duplicate layer
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr = ds.CreateLayer('a.b')
gdal.PopErrorHandler()
if lyr is not None:
gdaltest.post_reason('failed')
return 'fail'
ds = None
ds = ogr.Open('/vsimem/ogr_shape_64/a.b.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetFieldAsString('foo') != 'bar':
gdaltest.post_reason('failed')
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_shape_64/a.b.shp')
gdal.Unlink('/vsimem/ogr_shape_64/a.b.shx')
gdal.Unlink('/vsimem/ogr_shape_64/a.b.dbf')
gdal.Unlink('/vsimem/ogr_shape_64/a.c.shp')
gdal.Unlink('/vsimem/ogr_shape_64/a.c.shx')
gdal.Unlink('/vsimem/ogr_shape_64/a.c.dbf')
gdal.Unlink('/vsimem/ogr_shape_64')
return 'success'
###############################################################################
# Test reading a DBF with a 'nan' as a numeric value (#4799)
def ogr_shape_65():
ds = ogr.Open('data/nan.dbf')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
val = feat.GetFieldAsDouble(0)
feat = None
ds = None
if not gdaltest.isnan(val):
print(val)
return 'fail'
return 'success'
###############################################################################
# Test failures when creating files and datasources
def ogr_shape_66():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/i_dont_exist/bar.dbf')
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr = ds.CreateLayer('bar', geom_type=ogr.wkbNone)
gdal.PopErrorHandler()
if lyr is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = None
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/i_dont_exist/bar.shp')
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr = ds.CreateLayer('bar', geom_type=ogr.wkbPoint)
gdal.PopErrorHandler()
if lyr is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = None
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/i_dont_exist/bar')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
f = open('tmp/foo', 'wb')
f.close()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/foo')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
os.unlink('tmp/foo')
return 'success'
###############################################################################
# Test opening an empty .sbn spatial index
def ogr_shape_67():
shutil.copy('data/emptyshapefilewithsbn.shp', 'tmp/emptyshapefilewithsbn.shp')
shutil.copy('data/emptyshapefilewithsbn.shx', 'tmp/emptyshapefilewithsbn.shx')
shutil.copy('data/emptyshapefilewithsbn.sbn', 'tmp/emptyshapefilewithsbn.sbn')
shutil.copy('data/emptyshapefilewithsbn.sbx', 'tmp/emptyshapefilewithsbn.sbx')
ds = ogr.Open('tmp/emptyshapefilewithsbn.shp', update=1)
ds.ExecuteSQL('DROP SPATIAL INDEX ON emptyshapefilewithsbn')
ds = None
try:
os.stat('tmp/emptyshapefilewithsbn.sbn')
return 'fail'
except OSError:
pass
os.unlink('tmp/emptyshapefilewithsbn.shp')
os.unlink('tmp/emptyshapefilewithsbn.shx')
return 'success'
###############################################################################
# Test opening a shape datasource with files with mixed case and then REPACK
def ogr_shape_68():
if sys.platform == 'darwin':
print("Fails on MacOSX. Not sure why.")
return 'skip'
for i in range(2):
if i == 1 and sys.platform != 'win32':
break
try:
shutil.rmtree('tmp/mixedcase')
except OSError:
pass
os.mkdir('tmp/mixedcase')
shutil.copy('data/poly.shp', 'tmp/mixedcase/mixedcase.shp')
shutil.copy('data/poly.shx', 'tmp/mixedcase/mixedcase.shx')
shutil.copy('data/poly.dbf', 'tmp/mixedcase/MIXEDCASE.DBF')  # funny!
ds = ogr.Open('tmp/mixedcase', update=1)
if sys.platform == 'win32':
expected_layer_count = 1
else:
expected_layer_count = 2
if ds.GetLayerCount() != expected_layer_count:
gdaltest.post_reason('expected %d layers, got %d' % (expected_layer_count, ds.GetLayerCount()))
return 'fail'
if i == 1:
lyr = ds.GetLayerByName('mixedcase')
else:
lyr = ds.GetLayerByName('MIXEDCASE')
lyr.DeleteFeature(0)
if i == 1:
ds.ExecuteSQL('REPACK mixedcase')
else:
ds.ExecuteSQL('REPACK MIXEDCASE')
if sys.platform == 'win32':
if lyr.GetGeomType() != ogr.wkbPolygon:
gdaltest.post_reason('fail')
return 'fail'
else:
if lyr.GetGeomType() != ogr.wkbNone:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayerByName('mixedcase')
if lyr.GetGeomType() != ogr.wkbPolygon:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.DeleteFeature(0)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('expected failure on DeleteFeature()')
return 'fail'
# gdal.ErrorReset()
# gdal.PushErrorHandler('CPLQuietErrorHandler')
ds.ExecuteSQL('REPACK mixedcase')
# gdal.PopErrorHandler()
# if gdal.GetLastErrorMsg() == '':
# gdaltest.post_reason('expected failure on REPACK mixedcase')
# return 'fail'
ds = None
ori_shp_size = os.stat('data/poly.shp').st_size
ori_shx_size = os.stat('data/poly.shx').st_size
ori_dbf_size = os.stat('data/poly.dbf').st_size
new_shp_size = os.stat('tmp/mixedcase/mixedcase.shp').st_size
new_shx_size = os.stat('tmp/mixedcase/mixedcase.shx').st_size
new_dbf_size = os.stat('tmp/mixedcase/MIXEDCASE.DBF').st_size
if new_dbf_size == ori_dbf_size:
gdaltest.post_reason('fail')
return 'fail'
if sys.platform == 'win32':
if new_shp_size == ori_shp_size:
gdaltest.post_reason('fail')
return 'fail'
if new_shx_size == ori_shx_size:
gdaltest.post_reason('fail')
return 'fail'
else:
if new_shp_size != ori_shp_size:
gdaltest.post_reason('fail')
return 'fail'
if new_shx_size != ori_shx_size:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test fix for #5135 (creating a field of type Integer with a big width)
def ogr_shape_69():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_69.shp')
lyr = ds.CreateLayer('ogr_shape_69')
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
field_defn.SetWidth(64)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, 123456)
lyr.CreateFeature(feat)
feat = None
ds = None
ds = ogr.Open('/vsimem/ogr_shape_69.shp')
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTReal:
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField(0) != 123456:
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_69.shp')
return 'success'
###############################################################################
# Test fix for https://github.com/OSGeo/gdal/pull/17
# (shapefile opened twice on Windows)
def ogr_shape_70():
if sys.platform != 'win32':
return 'skip'
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_shape_70.shp')
lyr = ds.CreateLayer('ogr_shape_70')
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(feat)
fid = feat.GetFID()
feat = None
lyr.DeleteFeature(fid)
# Locks the file. No way to do this on Unix easily
f = open('tmp/ogr_shape_70.dbf', 'r+')
gdal.ErrorReset()
gdal.PushErrorHandler()
old_val = gdal.GetConfigOption('OGR_SHAPE_PACK_IN_PLACE')
gdal.SetConfigOption('OGR_SHAPE_PACK_IN_PLACE', 'NO')
ds.ExecuteSQL('REPACK ogr_shape_70')
gdal.SetConfigOption('OGR_SHAPE_PACK_IN_PLACE', old_val)
gdal.PopErrorHandler()
errmsg = gdal.GetLastErrorMsg()
ds = None
f.close()
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/ogr_shape_70.shp')
if errmsg == '':
return 'fail'
return 'success'
###############################################################################
# Test heterogeneous file permissions on .shp and .dbf.
def ogr_shape_71():
if sys.platform.find('linux') != 0:
return 'skip'
if os.getuid() == 0:
print('running as root... skipping')
return 'skip'
import stat
shutil.copy('data/poly.shp', 'tmp/ogr_shape_71.shp')
shutil.copy('data/poly.shx', 'tmp/ogr_shape_71.shx')
shutil.copy('data/poly.dbf', 'tmp/ogr_shape_71.dbf')
old_mode = os.stat('tmp/ogr_shape_71.dbf').st_mode
os.chmod('tmp/ogr_shape_71.dbf', stat.S_IREAD)
with gdaltest.error_handler():
ds = ogr.Open('tmp/ogr_shape_71.shp', update=1)
ok = ds is None
ds = None
os.chmod('tmp/ogr_shape_71.dbf', old_mode)
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/ogr_shape_71.shp')
if not ok:
return 'fail'
return 'success'
###############################################################################
# Test shapefile size limit
def ogr_shape_72():
# Determine if the filesystem supports sparse files (we don't want to
# create a real 3 GB file!)
if gdaltest.filesystem_supports_sparse_files('tmp') is False:
return 'skip'
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_shape_72.shp')
lyr = ds.CreateLayer('2gb', geom_type=ogr.wkbPoint)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
lyr.CreateFeature(feat)
ds = None
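# Patch the .shp header: offset 24 holds the file length as a big-endian
# 32-bit count of 16-bit words, so 0x7ffffffe makes the file appear to be
# ~4 GB.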
f = open('tmp/ogr_shape_72.shp', 'rb+')
f.seek(24)
f.write(struct.pack('B' * 4, 0x7f, 0xff, 0xff, 0xfe))
f.close()
# Test creating a feature over 4 GB file limit -> should fail
ds = ogr.Open('tmp/ogr_shape_72.shp', update=1)
lyr = ds.GetLayer(0)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (3 4)'))
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateFeature(feat)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
ds = None
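# Same patch with 0x3ffffffe, i.e. an apparent size of ~2 GB, to exercise
# the SHAPE_2GB_LIMIT code path.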
f = open('tmp/ogr_shape_72.shp', 'rb+')
f.seek(24)
f.write(struct.pack('B' * 4, 0x3f, 0xff, 0xff, 0xfe))
f.close()
# Test creating a feature over 2 GB file limit -> should fail
gdal.SetConfigOption('SHAPE_2GB_LIMIT', 'TRUE')
ds = ogr.Open('tmp/ogr_shape_72.shp', update=1)
gdal.SetConfigOption('SHAPE_2GB_LIMIT', None)
lyr = ds.GetLayer(0)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (5 6)'))
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateFeature(feat)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
ds = None
# Test creating a feature over 2 GB file limit -> should succeed with warning
ds = ogr.Open('tmp/ogr_shape_72.shp', update=1)
lyr = ds.GetLayer(0)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (7 8)'))
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = lyr.CreateFeature(feat)
gdal.PopErrorHandler()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('2GB file size limit reached') < 0:
gdaltest.post_reason('did not find expected warning')
return 'fail'
ds = None
ds = ogr.Open('tmp/ogr_shape_72.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetFeature(1)
if feat.GetGeometryRef().ExportToWkt() != 'POINT (7 8)':
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test that isClockwise() works correctly on a degenerate ring that passes
# twice through the same point (#5342)
def ogr_shape_73():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_73.shp')
lyr = ds.CreateLayer('ogr_shape_73', geom_type=ogr.wkbPolygon)
feat = ogr.Feature(lyr.GetLayerDefn())
# (5 1) is the first (and last) point, and the pivot point selected by the
# algorithm (lowest rightmost vertex), but it is also reused later in the
# coordinate list.
# The second ring, however, is counter-clockwise.
geom = ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 10,10 10,10 0,0 0),(5 1,4 3,4 2,5 1,6 2,6 3,5 1))')
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = None
ds = None
ds = ogr.Open('/vsimem/ogr_shape_73.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
got_geom = feat.GetGeometryRef()
if geom.ExportToWkt() != got_geom.ExportToWkt():
feat.DumpReadable()
return 'fail'
ds = None
return 'success'
###############################################################################
# Test organizePolygons() in OGR_ORGANIZE_POLYGONS=DEFAULT mode when
# two outer rings touch at the first vertex of one of them.
def ogr_shape_74():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_74.shp')
lyr = ds.CreateLayer('ogr_shape_74', geom_type=ogr.wkbPolygon)
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 10,10 10,10 0,0 0,0 1,9 1,9 9,0 9,0 10)),((9 5,5 4,0 5,5 6, 9 5)))')
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = None
ds = None
ds = ogr.Open('/vsimem/ogr_shape_74.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
got_geom = feat.GetGeometryRef()
if geom.ExportToWkt() != got_geom.ExportToWkt():
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
lyr.ResetReading()
gdal.SetConfigOption('OGR_ORGANIZE_POLYGONS', 'DEFAULT')
feat = lyr.GetNextFeature()
gdal.SetConfigOption('OGR_ORGANIZE_POLYGONS', None)
got_geom = feat.GetGeometryRef()
if geom.ExportToWkt() != got_geom.ExportToWkt():
gdaltest.post_reason('fail')
feat.DumpReadable()
return 'fail'
ds = None
return 'success'
###############################################################################
# Test GetFileList()
def ogr_shape_75():
ds = gdal.OpenEx('data/poly.shp')
if ds.GetFileList() != ['data/poly.shp', 'data/poly.shx', 'data/poly.dbf', 'data/poly.PRJ'] and \
ds.GetFileList() != ['data/poly.shp', 'data/poly.shx', 'data/poly.dbf', 'data/poly.prj']:
gdaltest.post_reason('fail')
print(ds.GetFileList())
return 'fail'
ds = None
ds = gdal.OpenEx('data/idlink.dbf')
if ds.GetFileList() != ['data/idlink.dbf']:
gdaltest.post_reason('fail')
print(ds.GetFileList())
return 'fail'
ds = None
ds = gdal.OpenEx('data/testpoly.shp')
if ds.GetFileList() != ['data/testpoly.shp', 'data/testpoly.shx', 'data/testpoly.dbf', 'data/testpoly.qix']:
gdaltest.post_reason('fail')
print(ds.GetFileList())
return 'fail'
ds = None
ds = gdal.OpenEx('data/emptyshapefilewithsbn.shx')
if ds.GetFileList() != ['data/emptyshapefilewithsbn.shp', 'data/emptyshapefilewithsbn.shx', 'data/emptyshapefilewithsbn.sbn', 'data/emptyshapefilewithsbn.sbx']:
gdaltest.post_reason('fail')
print(ds.GetFileList())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test opening shapefile whose .prj has a UTF-8 BOM marker
def ogr_shape_76():
ds = ogr.Open('data/prjwithutf8bom.shp')
lyr = ds.GetLayer(0)
sr = lyr.GetSpatialRef()
if sr.ExportToWkt().find('GEOGCS["NAD83"') != 0:
return 'fail'
return 'success'
###############################################################################
# Test opening shapefile whose .shx doesn't follow the official shapefile spec (#5608)
def ogr_shape_77():
ds = ogr.Open('data/nonconformant_shx_ticket5608.shp')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.ExportToWkt() != 'LINESTRING (0 1,2 3)':
return 'fail'
return 'success'
###############################################################################
# Test writing integer values through double fields, and cases of truncation or
# loss of precision (#5625)
def ogr_shape_78():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_78.dbf')
lyr = ds.CreateLayer('ogr_shape_78')
fd = ogr.FieldDefn('dblfield', ogr.OFTReal)
fd.SetWidth(20)
lyr.CreateField(fd)
fd = ogr.FieldDefn('dblfield2', ogr.OFTReal)
fd.SetWidth(20)
fd.SetPrecision(1)
lyr.CreateField(fd)
# Integer values up to 2^53 can be exactly converted into a double.
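# (IEEE 754 doubles have a 53-bit significand, so every integer up to
# 2**53 == 9007199254740992 is exact, while float(2**53 + 1) == float(2**53).)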
gdal.ErrorReset()
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('dblfield', (2**53) * 1.0)
lyr.CreateFeature(f)
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('got unexpected error/warning')
return 'fail'
# Field width too small
gdal.ErrorReset()
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('dblfield2', 1e21)
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr.CreateFeature(f)
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('did not get expected error/warning')
return 'fail'
# Likely precision loss
gdal.ErrorReset()
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('dblfield', (2**53) * 1.0 + 2)  # beyond 2^53, doubles cannot represent every integer (2^53 + 1 == 2^53)
gdal.PushErrorHandler('CPLQuietErrorHandler')
lyr.CreateFeature(f)
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('did not get expected error/warning')
return 'fail'
gdal.ErrorReset()
ds = None
ds = ogr.Open('/vsimem/ogr_shape_78.dbf')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetField('dblfield') != 9007199254740992.:
gdaltest.post_reason('did not get expected value')
f.DumpReadable()
return 'fail'
ds = None
return 'success'
###############################################################################
# Test adding a field after creating features with 0 fields
def ogr_shape_79():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_79.dbf')
lyr = ds.CreateLayer('ogr_shape_79')
# This will create a (for now) invisible 'FID' field
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
# This will delete the implicit field
fd = ogr.FieldDefn('field1', ogr.OFTReal)
lyr.CreateField(fd)
fd = ogr.FieldDefn('field2', ogr.OFTReal)
lyr.CreateField(fd)
# If the implicit field isn't deleted, this will cause a crash
lyr.ReorderField(0, 1)
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
ds = None
ds = ogr.Open('/vsimem/ogr_shape_79.dbf')
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading a shape with invalid extent (nan values) (#5702)
def ogr_shape_80():
ds = ogr.Open('data/extentnan.shp')
lyr = ds.GetLayer(0)
extent = lyr.GetExtent()
if extent is not None and extent[0] != extent[0]:
gdaltest.post_reason('fail')
print(extent)
return 'fail'
ds = None
return 'success'
###############################################################################
# Test REPACK after SetFeature() and geometry change (#XXXX)
def ogr_shape_81():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_81.shp')
lyr = ds.CreateLayer('ogr_shape_81')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 1)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,-1 -1)'))
lyr.CreateFeature(f)
f = None
ds = None
ds = ogr.Open('/vsimem/ogr_shape_81.shp', update=1)
lyr = ds.GetLayer(0)
# Add junk behind our back
f = gdal.VSIFOpenL('/vsimem/ogr_shape_81.shp', 'ab')
gdal.VSIFWriteL('foo', 1, 3, f)
gdal.VSIFCloseL(f)
size_before = gdal.VSIStatL('/vsimem/ogr_shape_81.shp').size
# Should be a no-op
ds.ExecuteSQL('REPACK ogr_shape_81')
size_after = gdal.VSIStatL('/vsimem/ogr_shape_81.shp').size
if size_after != size_before:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(2 2,3 3)'))
lyr.SetFeature(f)
# Should be a no-op
ds.ExecuteSQL('REPACK ogr_shape_81')
size_after = gdal.VSIStatL('/vsimem/ogr_shape_81.shp').size
if size_after != size_before:
gdaltest.post_reason('fail')
return 'fail'
# Writes a longer geometry, so the .shp will be extended
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(2 2,3 3,4 4)'))
lyr.SetFeature(f)
size_after = gdal.VSIStatL('/vsimem/ogr_shape_81.shp').size
if size_after == size_before:
gdaltest.post_reason('fail')
return 'fail'
# Should do something
size_before = size_after
ds.ExecuteSQL('REPACK ogr_shape_81')
size_after = gdal.VSIStatL('/vsimem/ogr_shape_81.shp').size
if size_after == size_before:
gdaltest.post_reason('fail')
return 'fail'
# Writes a shorter geometry, so .shp should not change size.
size_before = size_after
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(3 3,4 4)'))
lyr.SetFeature(f)
size_after = gdal.VSIStatL('/vsimem/ogr_shape_81.shp').size
if size_after != size_before:
gdaltest.post_reason('fail')
return 'fail'
size_before = size_after
# Should do something
ds.ExecuteSQL('REPACK ogr_shape_81')
size_after = gdal.VSIStatL('/vsimem/ogr_shape_81.shp').size
if size_after == size_before:
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test that strings longer than 254 bytes in UTF-8 encoding are cut to 254 bytes
def ogr_shape_82():
if gdaltest.shape_ds is None:
return 'skip'
# create an OGR layer to test truncation of long strings with UTF-8 encoding
gdaltest.shape_lyr = gdaltest.shape_ds.CreateLayer('test_utf_cut', geom_type=ogr.wkbPoint, options=['ENCODING=UTF-8'])
# create a field whose strings will be automatically truncated (254 is the longest field length)
field_defn = ogr.FieldDefn('cut_field', ogr.OFTString)
field_defn.SetWidth(254)
result = gdaltest.shape_lyr.CreateField(field_defn)
if result != 0:
gdaltest.post_reason('failed to create new field.')
return 'fail'
# Insert a feature with a long string in Russian. Shoe repair ad.
feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
init_rus = (
'ัะฐะฑะพัะฐะตั ะดะฒะฐ ะผะฐััะตัะฐ, ัััะฐะฝะพะฒะบะฐ ะฝะฐะฑะพะตะบ, ะทะฐะผะตะฝะฐ ะฟะพะดะพัะฒั, ะทะฐะผะตะฝะฐ '
'ะบะฐะฑะปัะบะพะฒ, ัะฐัััะถะบะฐ ะพะฑัะฒะธ, ัะฐัััะถะบะฐ ะณะพะปะตะฝะธั ัะฐะฟะพะณ, ัะฒะตะนะฝัะต ัะฐะฑะพัั, '
'ััะธะฒ ะณะพะปะตะฝะธัะฐ ัะฐะฟะพะณะฐ, ัะธััะบะฐ ะพะฑัะฒะธ, ัะธััะบะฐ ะทะฐะผัะตะฒะพะน ะพะฑัะฒะธ, ะทะฐะผะตะฝะฐ '
'ััะตะปะตะบ'
)
result_rus = (
'ัะฐะฑะพัะฐะตั ะดะฒะฐ ะผะฐััะตัะฐ, ัััะฐะฝะพะฒะบะฐ ะฝะฐะฑะพะตะบ, ะทะฐะผะตะฝะฐ ะฟะพะดะพัะฒั, ะทะฐะผะตะฝะฐ '
'ะบะฐะฑะปัะบะพะฒ, ัะฐัััะถะบะฐ ะพะฑัะฒะธ, ัะฐัััะถะบะฐ ะณะพะปะตะฝะธั ัะฐะฟะพะณ, ัะฒะตะนะฝัะต ัะฐะฑะพัั, '
'ััะธะฒ ะณะพะปะตะฝ'
)
feat.SetField('cut_field', init_rus)
with gdaltest.error_handler():
gdaltest.shape_lyr.CreateFeature(feat)
# Insert a feature with a long transliterated (Latin-script) string. Shoe repair ad.
init_en = (
'Remont kablukov i ih zamena; zamena naboek; profilaktika i remont '
'podoshvy; remont i zamena supinatorov; zamena stelek; zamena obuvnoj '
'furnitury; remont golenishha; rastjazhka obuvi; chistka i pokraska '
'obuvi. Smolenskaja oblast, p. Monastyrshhina, ulica Sovetskaja, '
'd. 38. Rabotaet ponedelnik โ chetverg s 9.00 do 18.00, pjatnica s '
'10.00 do 17.00, vyhodnoj: subbota'
)
result_en = (
'Remont kablukov i ih zamena; zamena naboek; profilaktika i remont '
'podoshvy; remont i zamena supinatorov; zamena stelek; zamena '
'obuvnoj furnitury; remont golenishha; rastjazhka obuvi; chistka '
'i pokraska obuvi. Smolenskaja oblast, p. Monastyrshhina, ulica'
)
feat = ogr.Feature(feature_def=gdaltest.shape_lyr.GetLayerDefn())
feat.SetField('cut_field', init_en)
gdaltest.shape_lyr.CreateFeature(feat)
# TODO: check your language
# save layer?
# Read strings and compare with correct values.
feat = gdaltest.shape_lyr.GetFeature(0) # rus
if feat.cut_field != result_rus:
gdaltest.post_reason('Wrong rus string cut')
return 'fail'
feat = gdaltest.shape_lyr.GetFeature(1) # en
if feat.cut_field != result_en:
gdaltest.post_reason('Wrong en string cut')
print(feat.cut_field)
print(result_en)
return 'fail'
return 'success'
###############################################################################
# Test behaviour with curve geometries
def ogr_shape_83():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_83.shp')
lyr = ds.CreateLayer('ogr_shape_83', geom_type=ogr.wkbCurvePolygon)
if lyr.GetGeomType() != ogr.wkbPolygon:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('CURVEPOLYGON((0 0,0 1,1 1,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
f = lyr.GetFeature(0)
if f.GetGeometryRef().GetGeometryType() != ogr.wkbPolygon:
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test SPATIAL_INDEX creation option
def ogr_shape_84():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_84.shp')
lyr = ds.CreateLayer('ogr_shape_84', options=['SPATIAL_INDEX=YES'])
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 1,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
ds = None
if gdal.VSIStatL('/vsimem/ogr_shape_84.qix') is None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test Integer64
def ogr_shape_85():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_85.shp')
lyr = ds.CreateLayer('ogr_shape_85')
lyr.CreateField(ogr.FieldDefn('int', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('int64', ogr.OFTInteger64))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField(0, 123456789)
f.SetField(1, 123456789012345678)
lyr.CreateFeature(f)
f = None
ds = None
ds = ogr.Open('/vsimem/ogr_shape_85.shp', update=1)
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTInteger:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(1).GetType() != ogr.OFTInteger64:
gdaltest.post_reason('failure')
return 'fail'
f = lyr.GetNextFeature()
if f.GetField(0) != 123456789 or f.GetField(1) != 123456789012345678:
gdaltest.post_reason('failure')
return 'fail'
# Passing from 9 to 10 figures causes "promotion" to Integer64
f.SetField(0, 2000000000)
# Passing from 18 to 19 figures causes "promotion" to Real
f.SetField(1, 9000000000000000000)
lyr.SetFeature(f)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_85.shp')
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTInteger64:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(1).GetType() != ogr.OFTReal:
gdaltest.post_reason('failure')
return 'fail'
f = lyr.GetNextFeature()
if f.GetField(0) != 2000000000 or f.GetField(1) != 9000000000000000000:
gdaltest.post_reason('failure')
return 'fail'
ds = None
# Test open option ADJUST_TYPE
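# (Per the driver documentation, ADJUST_TYPE=YES scans the whole DBF and
# narrows Real/Integer64 columns to the smallest integer type that can
# hold all their values.)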
ds = gdal.OpenEx('/vsimem/ogr_shape_85.shp', gdal.OF_VECTOR, open_options=['ADJUST_TYPE=YES'])
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTInteger:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetLayerDefn().GetFieldDefn(1).GetType() != ogr.OFTInteger64:
gdaltest.post_reason('failure')
return 'fail'
f = lyr.GetNextFeature()
if f.GetField(0) != 2000000000 or f.GetField(1) != 9000000000000000000:
gdaltest.post_reason('failure')
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_85.shp')
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_85.shp')
lyr = ds.CreateLayer('ogr_shape_85')
lyr.CreateField(ogr.FieldDefn('int', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField(0, 123456789)
lyr.CreateFeature(f)
fd = ogr.FieldDefn("foo", ogr.OFTInteger64)
ret = lyr.AlterFieldDefn(0, fd, ogr.ALTER_TYPE_FLAG)
if ret != 0:
gdaltest.post_reason('failure')
return 'fail'
f.SetField(0, 123456789012345678)
lyr.SetFeature(f)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_85.shp', update=1)
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(0).GetType() != ogr.OFTInteger64:
gdaltest.post_reason('failure')
return 'fail'
f = lyr.GetNextFeature()
if f.GetField(0) != 123456789012345678:
gdaltest.post_reason('failure')
return 'fail'
ds = None
return 'success'
###############################################################################
# Robustness: test reading a non-conformant shapefile that mixes different shape type
# OGR cannot produce such a file (unless patched)
def ogr_shape_86():
ds = ogr.Open('data/mixed_shape_type_non_conformant.shp')
sql_lyr = ds.ExecuteSQL("select count(distinct ogr_geometry) from mixed_shape_type_non_conformant")
f = sql_lyr.GetNextFeature()
val = f.GetField(0)
ds.ReleaseResultSet(sql_lyr)
if val != 6:
return 'fail'
return 'success'
###############################################################################
# Check we accept opening standalone .dbf files with weird header lengths (#6035)
def ogr_shape_87():
ds = ogr.Open('data/weird_header_length.dbf')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetField(0) != 1:
return 'fail'
return 'success'
###############################################################################
# Test REPACK after SetFeature() and geometry change, without DBF
def ogr_shape_88():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_88.shp')
lyr = ds.CreateLayer('ogr_shape_88')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 1)'))
lyr.CreateFeature(f)
f = None
ds = None
gdal.Unlink('/vsimem/ogr_shape_88.dbf')
ds = ogr.Open('/vsimem/ogr_shape_88.shp', update=1)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 1,2 2)'))
lyr.SetFeature(f)
ds.ExecuteSQL('REPACK ogr_shape_88')
ds = None
return 'success'
###############################################################################
# Test reading geometry bigger than 10 MB
def ogr_shape_89():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_89.shp')
lyr = ds.CreateLayer('ogr_shape_89')
f = ogr.Feature(lyr.GetLayerDefn())
g = ogr.Geometry(ogr.wkbLineString)
g.AddPoint_2D(0, 0)
g.AddPoint_2D(1, 1)
f.SetGeometryDirectly(g)
lyr.CreateFeature(f)
f = None
ds = None
gdal.Unlink('/vsimem/ogr_shape_89.dbf')
# The declared record size in the .shx doesn't match the real .shp size
f = gdal.VSIFOpenL('/vsimem/ogr_shape_89.shx', 'rb+')
gdal.VSIFSeekL(f, 100 + 4, 0)
gdal.VSIFWriteL(struct.pack('>i', int((10 * 1024 * 1024) / 2)), 1, 4, f)
gdal.VSIFCloseL(f)
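# Each .shx record is 8 bytes (big-endian record offset then content
# length, both in 16-bit words); the write above declares a ~10 MB first
# record.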
ds = ogr.Open('/vsimem/ogr_shape_89.shp')
lyr = ds.GetLayer(0)
with gdaltest.error_handler():
f = lyr.GetNextFeature()
if f is not None and f.GetGeometryRef() is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_shape_89.shp', 'rb+')
gdal.VSIFSeekL(f, 100 + 8 + 10 * 1024 * 1024 - 1, 0)
gdal.VSIFWriteL(struct.pack('B', 0), 1, 1, f)
gdal.VSIFCloseL(f)
ds = ogr.Open('/vsimem/ogr_shape_89.shp')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
g = f.GetGeometryRef()
if g.GetPointCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading a lot of geometries
def ogr_shape_90():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_90.shp')
lyr = ds.CreateLayer('ogr_shape_90')
g = ogr.CreateGeometryFromWkt('POINT(0 0)')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(g)
lyr.CreateFeature(f)
ds = None
gdal.Unlink('/vsimem/ogr_shape_90.dbf')
# The declared file size in the .shx header doesn't match the real one
f = gdal.VSIFOpenL('/vsimem/ogr_shape_90.shx', 'rb+')
filesize = int((100 + 8 * 1024 * 1024) / 2)
gdal.VSIFSeekL(f, 24, 0)
gdal.VSIFWriteL(struct.pack('>i', filesize), 1, 4, f)
gdal.VSIFCloseL(f)
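# The patched header now advertises 8 MB of .shx records; at 8 bytes per
# record that would be 1024 * 1024 features.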
ds = ogr.Open('/vsimem/ogr_shape_90.shp')
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 1:
return 'fail'
# Now it is consistent
f = gdal.VSIFOpenL('/vsimem/ogr_shape_90.shx', 'rb+')
gdal.VSIFSeekL(f, 100 + 8 * 1024 * 1024 - 1, 0)
gdal.VSIFWriteL(struct.pack('B', 0), 1, 1, f)
gdal.VSIFCloseL(f)
ds = ogr.Open('/vsimem/ogr_shape_90.shp')
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 1024 * 1024:
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading XYM geometries but with missing M array (#6317)
def ogr_shape_91():
ds = ogr.Open('data/arcm_without_m.shp')
lyr = ds.GetLayer(0)
for _ in lyr:
pass
ds = ogr.Open('data/polygonm_without_m.shp')
lyr = ds.GetLayer(0)
for _ in lyr:
pass
return 'success'
###############################################################################
# Test reading multipoint Z geometries without M
def ogr_shape_92():
ds = ogr.Open('data/multipointz_without_m.shp')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
wkt = f.GetGeometryRef().ExportToIsoWkt()
if wkt != 'MULTIPOINT Z ((0 1 2),(3 4 5))':
gdaltest.post_reason('fail')
print(wkt)
return 'fail'
return 'success'
###############################################################################
# Test reading point Z geometries without M
def ogr_shape_93():
ds = ogr.Open('data/pointz_without_m.shp')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
wkt = f.GetGeometryRef().ExportToIsoWkt()
if wkt != 'POINT Z (1 2 3)':
gdaltest.post_reason('fail')
print(wkt)
return 'fail'
return 'success'
###############################################################################
# Test SHPT creation option / CreateLayer(geom_type = xxx)
def ogr_shape_94():
tests = [["POINT", ogr.wkbPoint, "POINT (1 2)"],
["POINTM", ogr.wkbPointM, "POINT M (1 2 3)"],
["POINTZ", ogr.wkbPoint25D, "POINT Z (1 2 3)"],
["POINTZM", ogr.wkbPointZM, "POINT ZM (1 2 3 4)"],
["MULTIPOINT", ogr.wkbMultiPoint, "MULTIPOINT ((1 2))"],
["MULTIPOINTM", ogr.wkbMultiPointM, "MULTIPOINT M ((1 2 3))"],
["MULTIPOINTZ", ogr.wkbMultiPoint25D, "MULTIPOINT Z ((1 2 3))"],
["MULTIPOINTZM", ogr.wkbMultiPointZM, "MULTIPOINT ZM ((1 2 3 4))"],
["ARC", ogr.wkbLineString, "LINESTRING (1 2,3 4)"],
["ARCM", ogr.wkbLineStringM, "LINESTRING M (1 2 3,5 6 7)"],
["ARCZ", ogr.wkbLineString25D, "LINESTRING Z (1 2 3,5 6 7)"],
["ARCZM", ogr.wkbLineStringZM, "LINESTRING ZM (1 2 3 4,5 6 7 8)"],
["ARC", ogr.wkbMultiLineString, "MULTILINESTRING ((1 2,3 4),(1 2,3 4))"],
["ARCM", ogr.wkbMultiLineStringM, "MULTILINESTRING M ((1 2 3,5 6 7),(1 2 3,5 6 7))"],
["ARCZ", ogr.wkbMultiLineString25D, "MULTILINESTRING Z ((1 2 3,5 6 7),(1 2 3,5 6 7))"],
["ARCZM", ogr.wkbMultiLineStringZM, "MULTILINESTRING ZM ((1 2 3 4,5 6 7 8),(1 2 3 4,5 6 7 8))"],
["POLYGON", ogr.wkbPolygon, "POLYGON ((0 0,0 1,1 1,1 0))"],
["POLYGONM", ogr.wkbPolygonM, "POLYGON M ((0 0 2,0 1 2,1 1 2,1 0 2))"],
["POLYGONZ", ogr.wkbPolygon25D, "POLYGON Z ((0 0 2,0 1 2,1 1 2,1 0 2))"],
["POLYGONZM", ogr.wkbPolygonZM, "POLYGON ZM ((0 0 2 3,0 1 2 3,1 1 2 3,1 0 2 3))"],
["POLYGON", ogr.wkbMultiPolygon, "MULTIPOLYGON (((0 0,0 1,1 1,1 0)),((0 0,0 1,1 1,1 0)))"],
["POLYGONM", ogr.wkbMultiPolygonM, "MULTIPOLYGON M (((0 0 2,0 1 2,1 1 2,1 0 2)),((0 0 2,0 1 2,1 1 2,1 0 2)))"],
["POLYGONZ", ogr.wkbMultiPolygon25D, "MULTIPOLYGON Z (((0 0 2,0 1 2,1 1 2,1 0 2)),((0 0 2,0 1 2,1 1 2,1 0 2)))"],
["POLYGONZM", ogr.wkbMultiPolygonZM, "MULTIPOLYGON ZM (((0 0 2 3,0 1 2 3,1 1 2 3,1 0 2 3)),((0 0 2 3,0 1 2 3,1 1 2 3,1 0 2 3)))"],
]
for test in tests:
try:
(shpt, geom_type, wkt, expected_fail) = test
except ValueError:
(shpt, geom_type, wkt) = test
expected_fail = False
for i in range(2):
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_94.shp')
if i == 0:
lyr = ds.CreateLayer('ogr_shape_94', options=['SHPT=' + shpt])
else:
lyr = ds.CreateLayer('ogr_shape_94', geom_type=geom_type)
test_lyr_geom_type = ogr.GT_Flatten(geom_type) != ogr.wkbMultiLineString and ogr.GT_Flatten(geom_type) != ogr.wkbMultiPolygon
if test_lyr_geom_type and lyr.GetGeomType() != geom_type:
gdaltest.post_reason('fail')
print(i, shpt, geom_type, wkt, lyr.GetGeomType())
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
lyr.CreateFeature(f)
f = None
ds = None
ds = ogr.Open('/vsimem/ogr_shape_94.shp')
lyr = ds.GetLayer(0)
if test_lyr_geom_type and lyr.GetGeomType() != geom_type:
gdaltest.post_reason('fail')
print(shpt, geom_type, wkt, lyr.GetGeomType())
return 'fail'
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != wkt:
if expected_fail:
print('FIXME!:', i, shpt, geom_type, wkt, f.GetGeometryRef().ExportToIsoWkt())
else:
gdaltest.post_reason('fail')
print(i, shpt, geom_type, wkt, f.GetGeometryRef().ExportToIsoWkt())
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_94.shp')
return 'success'
###############################################################################
# Test demoting of ZM to Z when the M values are nodata
def ogr_shape_95():
ds = gdal.OpenEx('data/pointzm_with_all_nodata_m.shp')
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbPoint25D:
gdaltest.post_reason('fail')
print(lyr.GetGeomType())
return 'fail'
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT Z (1 2 3)':
gdaltest.post_reason('fail')
print(lyr.GetGeomType())
return 'fail'
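# Per the shapefile spec, M values smaller than about -1e38 mean "no
# data"; OGR surfaces them as minus DBL_MAX when demotion is disabled, as
# checked below.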
ds = gdal.OpenEx('data/pointzm_with_all_nodata_m.shp', open_options=['ADJUST_GEOM_TYPE=NO'])
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbPointZM:
gdaltest.post_reason('fail')
print(lyr.GetGeomType())
return 'fail'
f = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(f, ogr.CreateGeometryFromWkt('POINT ZM (1 2 3 -1.79769313486232e+308)')) != 0:
gdaltest.post_reason('fail')
print(f.GetGeometryRef().ExportToIsoWkt())
return 'fail'
# The shape with a non-nodata M is the second one
ds = gdal.OpenEx('data/pointzm_with_one_valid_m.shp', open_options=['ADJUST_GEOM_TYPE=FIRST_SHAPE'])
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbPoint25D:
gdaltest.post_reason('fail')
print(lyr.GetGeomType())
return 'fail'
ds = gdal.OpenEx('data/pointzm_with_one_valid_m.shp', open_options=['ADJUST_GEOM_TYPE=ALL_SHAPES'])
lyr = ds.GetLayer(0)
if lyr.GetGeomType() != ogr.wkbPointZM:
gdaltest.post_reason('fail')
print(lyr.GetGeomType())
return 'fail'
return 'success'
###############################################################################
# Test updating an XYM shapefile (#6331)
def ogr_shape_96():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_96.shp')
lyr = ds.CreateLayer('ogr_shape_96')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT M (1 2 3)'))
lyr.CreateFeature(f)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_96.shp', update=1)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT M (1 2 3)':
gdaltest.post_reason('fail')
print(f.GetGeometryRef().ExportToIsoWkt())
return 'fail'
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT M (1 2 4)'))
lyr.SetFeature(f)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_96.shp')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT M (1 2 4)':
gdaltest.post_reason('fail')
print(f.GetGeometryRef().ExportToIsoWkt())
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_96.shp')
return 'success'
###############################################################################
# Test updating an XYZM shapefile
def ogr_shape_97():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_97.shp')
lyr = ds.CreateLayer('ogr_shape_97')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT ZM (1 2 3 4)'))
lyr.CreateFeature(f)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_97.shp', update=1)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT ZM (1 2 3 4)':
gdaltest.post_reason('fail')
print(f.GetGeometryRef().ExportToIsoWkt())
return 'fail'
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT ZM (1 2 5 6)'))
lyr.SetFeature(f)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_97.shp')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT ZM (1 2 5 6)':
gdaltest.post_reason('fail')
print(f.GetGeometryRef().ExportToIsoWkt())
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_97.shp')
return 'success'
###############################################################################
# Test the SHAPE_RESTORE_SHX restore function when the .shx file is missing
def ogr_shape_98():
if gdaltest.shape_ds is None:
return 'skip'
gdal.SetConfigOption('SHAPE_RESTORE_SHX', 'TRUE')
shutil.copy('data/can_caps.shp', 'tmp/can_caps.shp')
shp_ds = ogr.Open('tmp/can_caps.shp', update=1)
shp_lyr = shp_ds.GetLayer(0)
if shp_lyr.GetFeatureCount() != 13:
gdaltest.post_reason('Got wrong number of features.')
return 'fail'
shp_lyr = None
shp_ds = None
gdal.SetConfigOption('SHAPE_RESTORE_SHX', None)
ref_shx = open('data/can_caps.shx', 'rb').read()
got_shx = open('tmp/can_caps.shx', 'rb').read()
os.remove('tmp/can_caps.shp')
os.remove('tmp/can_caps.shx')
if ref_shx != got_shx:
gdaltest.post_reason('Rebuilt shx is different from original shx.')
return 'fail'
return 'success'
###############################################################################
# Import TOWGS84 from EPSG when possible (#6485)
def ogr_shape_99():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_99.shp')
lyr = ds.CreateLayer('ogr_shape_99')
ds = None
gdal.FileFromMemBuffer('/vsimem/ogr_shape_99.prj', """PROJCS["CH1903_LV03",GEOGCS["GCS_CH1903",DATUM["D_CH1903",SPHEROID["Bessel_1841",6377397.155,299.1528128]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],PARAMETER["False_Easting",600000.0],PARAMETER["False_Northing",200000.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Azimuth",90.0],PARAMETER["Longitude_Of_Center",7.439583333333333],PARAMETER["Latitude_Of_Center",46.95240555555556],UNIT["Meter",1.0],AUTHORITY["EPSG",21781]]""")
ds = ogr.Open('/vsimem/ogr_shape_99.shp')
lyr = ds.GetLayer(0)
got_wkt = lyr.GetSpatialRef().ExportToPrettyWkt()
expected_wkt = """PROJCS["CH1903 / LV03",
GEOGCS["CH1903",
DATUM["CH1903",
SPHEROID["Bessel 1841",6377397.155,299.1528128,
AUTHORITY["EPSG","7004"]],
TOWGS84[674.374,15.056,405.346,0,0,0,0],
AUTHORITY["EPSG","6149"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4149"]],
PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],
PARAMETER["latitude_of_center",46.95240555555556],
PARAMETER["longitude_of_center",7.439583333333333],
PARAMETER["azimuth",90],
PARAMETER["rectified_grid_angle",90],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",600000],
PARAMETER["false_northing",200000],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Y",EAST],
AXIS["X",NORTH],
AUTHORITY["EPSG","21781"]]"""
ds = None
if got_wkt != expected_wkt:
gdaltest.post_reason('Projections differ')
print(got_wkt)
return 'fail'
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_99.shp')
return 'success'
###############################################################################
# Test REPACK with both implementations
def ogr_shape_100():
old_val = gdal.GetConfigOption('OGR_SHAPE_PACK_IN_PLACE')
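# REPACK has two implementations: with OGR_SHAPE_PACK_IN_PLACE=YES the
# files are rewritten in place (needed on Windows, where files that are
# still open cannot be renamed), while NO writes *_packed.* temporaries
# and renames them over the originals.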
for variant in ['YES', 'NO']:
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_shape_100.shp')
lyr = ds.CreateLayer('ogr_shape_100')
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 1)'))
f.SetField('foo', '1')
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(1 1,2 2,3 3)'))
f.SetField('foo', '2')
lyr.CreateFeature(f)
f = None
lyr.DeleteFeature(0)
gdal.SetConfigOption('OGR_SHAPE_PACK_IN_PLACE', variant)
f_dbf = None
f_shp = None
f_shx = None
if sys.platform == 'win32' and variant == 'YES':
# Locks the files. No way to do this on Unix easily
f_dbf = open('tmp/ogr_shape_100.dbf', 'rb')
f_shp = open('tmp/ogr_shape_100.shp', 'rb')
f_shx = open('tmp/ogr_shape_100.shx', 'rb')
ds.ExecuteSQL('REPACK ogr_shape_100')
del f_dbf
del f_shp
del f_shx
gdal.SetConfigOption('OGR_SHAPE_PACK_IN_PLACE', old_val)
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
print(variant)
return 'fail'
for ext in ['dbf', 'shp', 'shx', 'cpg']:
if gdal.VSIStatL('tmp/ogr_shape_100_packed.' + ext) is not None:
gdaltest.post_reason('fail')
print(variant)
print(ext)
return 'fail'
f = lyr.GetFeature(0)
if f['foo'] != '2' or f.GetGeometryRef().ExportToWkt() != 'LINESTRING (1 1,2 2,3 3)':
gdaltest.post_reason('fail')
print(variant)
f.DumpReadable()
return 'fail'
with gdaltest.error_handler():
f = lyr.GetFeature(1)
if f is not None:
gdaltest.post_reason('fail')
print(variant)
return 'fail'
lyr.ResetReading()
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
print(variant)
return 'fail'
f = lyr.GetNextFeature()
if f['foo'] != '2' or f.GetGeometryRef().ExportToWkt() != 'LINESTRING (1 1,2 2,3 3)':
gdaltest.post_reason('fail')
print(variant)
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
print(variant)
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (3 3,4 4,5 5,6 6)'))
f.SetField('foo', '3')
lyr.CreateFeature(f)
f = None
ds = None
ds = ogr.Open('tmp/ogr_shape_100.shp')
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 2:
gdaltest.post_reason('fail')
print(variant)
return 'fail'
f = lyr.GetNextFeature()
if f['foo'] != '2' or f.GetGeometryRef().ExportToWkt() != 'LINESTRING (1 1,2 2,3 3)':
gdaltest.post_reason('fail')
print(variant)
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f['foo'] != '3' or f.GetGeometryRef().ExportToWkt() != 'LINESTRING (3 3,4 4,5 5,6 6)':
gdaltest.post_reason('fail')
print(variant)
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
print(variant)
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/ogr_shape_100.shp')
return 'success'
###############################################################################
# Test auto repack
def ogr_shape_101():
for i in range(2):
# Auto-repack on create
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_101.shp')
lyr = ds.CreateLayer('ogr_shape_101')
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 1)'))
f.SetField('foo', '1')
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(1 1,2 2,3 3)'))
f.SetField('foo', '2')
lyr.CreateFeature(f)
f = None
lyr.DeleteFeature(0)
if i == 0:
ds = None
else:
ds.SyncToDisk()
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
print(i)
return 'fail'
# No-op
ds.ExecuteSQL('REPACK ogr_shape_101')
ds_read = ogr.Open('/vsimem/ogr_shape_101.shp')
lyr = ds_read.GetLayer(0)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
print(i)
return 'fail'
f = lyr.GetNextFeature()
if f.GetFID() != 0 or f['foo'] != '2' or f.GetGeometryRef().ExportToWkt() != 'LINESTRING (1 1,2 2,3 3)':
gdaltest.post_reason('fail')
print(i)
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
print(i)
return 'fail'
ds = None
ds_read = None
if i == 0:
# Auto-repack on update
ds = ogr.Open('/vsimem/ogr_shape_101.shp', update=1)
lyr = ds.GetLayer(0)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (3 3,4 4,5 5,6 6)'))
f.SetField('foo', '3')
lyr.CreateFeature(f)
lyr.DeleteFeature(0)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_101.shp')
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
print(i)
return 'fail'
f = lyr.GetNextFeature()
if f.GetFID() != 0 or f['foo'] != '3' or f.GetGeometryRef().ExportToWkt() != 'LINESTRING (3 3,4 4,5 5,6 6)':
gdaltest.post_reason('fail')
print(i)
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
print(i)
return 'fail'
ds = None
# Test disabling auto-repack on update
ds = gdal.OpenEx('/vsimem/ogr_shape_101.shp', gdal.OF_UPDATE, open_options=['AUTO_REPACK=NO'])
lyr = ds.GetLayer(0)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('foo', '4')
lyr.CreateFeature(f)
lyr.DeleteFeature(0)
ds = None
ds = ogr.Open('/vsimem/ogr_shape_101.shp')
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 2:
gdaltest.post_reason('fail')
print(i)
return 'fail'
f = lyr.GetNextFeature()
if f.GetFID() != 1 or f['foo'] != '4':
gdaltest.post_reason('fail')
print(i)
f.DumpReadable()
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_101.shp')
return 'success'
###############################################################################
# Test reading invalid .prj
def ogr_shape_102():
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('/vsimem/ogr_shape_102.shp')
lyr = ds.CreateLayer('ogr_shape_102', geom_type=ogr.wkbPoint)
ds = None
gdal.FileFromMemBuffer('/vsimem/ogr_shape_102.prj', 'invalid')
ds = ogr.Open('/vsimem/ogr_shape_102.shp')
lyr = ds.GetLayer(0)
lyr.GetSpatialRef()
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('/vsimem/ogr_shape_102.shp')
return 'success'
###############################################################################
# Test handling of EOF character
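# dBASE files conventionally end with a single 0x1A (SUB) byte. This helper
# verifies that the marker, when expected, is present as the very last byte
# of the file, and absent otherwise.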
def check_EOF(filename, expected=True):
f = gdal.VSIFOpenL(filename, 'rb')
if f is None:
print('%s does not exist' % filename)
return False
size = gdal.VSIStatL(filename).size
content = gdal.VSIFReadL(1, size, f)
gdal.VSIFCloseL(f)
pos = content.find('\x1A'.encode('LATIN1'))
if expected:
if pos < 0:
print('Did not find EOF char')
return False
if pos != size - 1:
print('Found EOF char but not at end of file!')
return False
elif pos >= 0:
print('Found EOF char but we did not expect that !')
return False
return True
def ogr_shape_103():
filename = '/vsimem/ogr_shape_103.dbf'
for (options, expected) in [(['DBF_EOF_CHAR=YES'], True),
([], True),
(['DBF_EOF_CHAR=NO'], False)]:
options += ['DBF_DATE_LAST_UPDATE=1970-01-01']
# Create empty file
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(filename)
lyr = ds.CreateLayer('ogr_shape_103', geom_type=ogr.wkbNone, options=options)
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
print(options)
return 'fail'
# Add field
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Add record
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Add another field
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
lyr.CreateField(ogr.FieldDefn('foo2', ogr.OFTString))
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Grow a field
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
fd = lyr.GetLayerDefn().GetFieldDefn(0)
new_fd = ogr.FieldDefn(fd.GetName(), fd.GetType())
new_fd.SetWidth(fd.GetWidth() + 1)
lyr.AlterFieldDefn(0, new_fd, ogr.ALTER_ALL_FLAG)
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Reorder fields
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
lyr.ReorderFields([1, 0])
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Shrink a field
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
fd = lyr.GetLayerDefn().GetFieldDefn(0)
new_fd = ogr.FieldDefn(fd.GetName(), fd.GetType())
new_fd.SetWidth(fd.GetWidth() - 1)
lyr.AlterFieldDefn(0, new_fd, ogr.ALTER_ALL_FLAG)
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Remove a field
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
lyr.DeleteField(0)
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource(filename)
# Create file with one field but no record
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(filename)
lyr = ds.CreateLayer('ogr_shape_103', geom_type=ogr.wkbNone, options=options)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource(filename)
# Create file with two records
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(filename)
lyr = ds.CreateLayer('ogr_shape_103', geom_type=ogr.wkbNone, options=options)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Test editing a record that is not the last one
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
lyr.SetFeature(lyr.GetNextFeature())
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
# Test editing the last record
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=options)
lyr = ds.GetLayer(0)
lyr.GetNextFeature()
lyr.SetFeature(lyr.GetNextFeature())
ds = None
if not check_EOF(filename, expected=expected):
gdaltest.post_reason('fail')
return 'fail'
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource(filename)
# Test appending to a file without an EOF marker
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(filename)
lyr = ds.CreateLayer('ogr_shape_103', geom_type=ogr.wkbNone, options=['DBF_EOF_CHAR=NO'] + ['DBF_DATE_LAST_UPDATE=1970-01-01'])
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
ds = None
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=['DBF_DATE_LAST_UPDATE=1970-01-01'])
lyr = ds.GetLayer(0)
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
ds.FlushCache()
if not check_EOF(filename):
gdaltest.post_reason('fail')
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource(filename)
# Test editing a record (that is not the last one) in a file without an EOF marker
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(filename)
lyr = ds.CreateLayer('ogr_shape_103', geom_type=ogr.wkbNone, options=['DBF_EOF_CHAR=NO'] + ['DBF_DATE_LAST_UPDATE=1970-01-01'])
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
ds = None
ds = gdal.OpenEx(filename, gdal.OF_UPDATE, open_options=['DBF_DATE_LAST_UPDATE=1970-01-01'])
lyr = ds.GetLayer(0)
lyr.SetFeature(lyr.GetNextFeature())
ds = None
# This documents the current behaviour; it could reasonably be changed.
if not check_EOF(filename, expected=False):
gdaltest.post_reason('fail')
return 'fail'
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource(filename)
return 'success'
###############################################################################
# Test writing MULTIPATCH
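# MULTIPATCH is the shapefile type for 3D surfaces; OGR maps TINs,
# polyhedral surfaces and triangles onto its parts (triangle strips,
# triangle fans or rings), which the expected WKT below reflects.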
def ogr_shape_104():
for (wkt, lyr_type, options, expected_wkt) in \
[['TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)))', ogr.wkbUnknown, [], None],
['TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)),((0 0 0,1 1 3,2 2 4,0 0 0)))', ogr.wkbUnknown, [], None], # triangle fan
['TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)),((0 1 2,1 1 3,4 4 5,0 1 2)))', ogr.wkbUnknown, [], None], # triangle strip
['TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)),((1 1 3,0 1 2,4 4 5,1 1 3)))', ogr.wkbUnknown, [], None], # no fan no strip
['TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)),((0 0 0,0 1 2,1 1 3,0 0 0)),((1 1 3,0 1 2,4 4 5,1 1 3)))', ogr.wkbUnknown, [],
'TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)),((1 1 3,0 1 2,4 4 5,1 1 3)))'],
# no fan no strip with duplicated triangle (as found in #5888)
['POLYHEDRALSURFACE Z (((0 0 0,0 1 2,1 1 3,0 0 0)))', ogr.wkbUnknown, [], 'TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)))'],
['GEOMETRYCOLLECTION Z (TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0))))', ogr.wkbUnknown, [], 'TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)))'],
['TRIANGLE Z ((0 0 0,0 1 2,1 1 3,0 0 0))', ogr.wkbUnknown, ['SHPT=MULTIPATCH'], 'TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)))'],
['TRIANGLE Z ((0 0 0,0 1 2,1 1 3,0 0 0))', ogr.wkbTINZ, [], 'TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)))'],
['POLYGON Z ((0 0 0,0 1 2,1 1 3,0 0 0))', ogr.wkbTINZ, [], 'TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)))'],
['MULTIPOLYGON Z (((0 0 0,0 1 2,1 1 3,0 0 0)))', ogr.wkbTINZ, [], 'TIN Z (((0 0 0,0 1 2,1 1 3,0 0 0)))'],
]:
if expected_wkt is None:
expected_wkt = wkt
filename = '/vsimem/ogr_shape_104.shp'
ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(filename)
lyr = ds.CreateLayer('ogr_shape_104', geom_type=lyr_type, options=options)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
lyr.CreateFeature(f)
ds = None
ds = ogr.Open(filename)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != expected_wkt:
gdaltest.post_reason('fail')
print(wkt, lyr_type, options)
f.DumpReadable()
return 'fail'
ds = None
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource(filename)
return 'success'
###############################################################################
# Test reading .dbf with substantial padding after last field definition.
def ogr_shape_105():
ds = ogr.Open('data/padding_after_field_defns.dbf')
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f['id'] != '1' or f['foo'] != '2':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test that rewriting the last shape reuses the space it took. (#6787)
def ogr_shape_106():
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource('/vsimem/ogr_shape_106.shp')
lyr = ds.CreateLayer('ogr_shape_106')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 1)'))
lyr.CreateFeature(f)
f = None
ds = None
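# Expected 188 bytes: 100-byte .shp header + 8-byte record header + 80-byte
# 2-point LINESTRING record (shape type, bbox, part/point counts, one part
# index, two 16-byte points).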
size = gdal.VSIStatL('/vsimem/ogr_shape_106.shp').size
if size != 188:
gdaltest.post_reason('fail')
print(size)
return 'fail'
ds = ogr.Open('/vsimem/ogr_shape_106.shp', update=1)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
# Write larger shape
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(2 2,3 3,4 4)'))
lyr.SetFeature(f)
ds = None
size = gdal.VSIStatL('/vsimem/ogr_shape_106.shp').size
if size != 188 + 2 * 8:
gdaltest.post_reason('fail')
print(size)
return 'fail'
shape_drv.DeleteDataSource('/vsimem/ogr_shape_106.shp')
return 'success'
###############################################################################
# Compare two VSI*L files
def is_same(filename1, filename2, verbose=True):
f1 = gdal.VSIFOpenL(filename1, "rb")
if f1 is None:
if verbose:
print('%s does not exist' % filename1)
return False
f2 = gdal.VSIFOpenL(filename2, "rb")
if f2 is None:
if verbose:
print('%s does not exist' % filename2)
gdal.VSIFCloseL(f1)
return False
ret = True
size1 = gdal.VSIStatL(filename1).size
size2 = gdal.VSIStatL(filename2).size
if size1 != size2:
if verbose:
print('%s size is %d, whereas %s size is %d' % (filename1, size1, filename2, size2))
ret = False
if ret:
data1 = gdal.VSIFReadL(1, size1, f1)
data2 = gdal.VSIFReadL(1, size2, f2)
if data1 != data2:
if verbose:
print('File content of %s and %s are different' % (filename1, filename2))
print(struct.unpack('B' * len(data1), data1))
print(struct.unpack('B' * len(data2), data2))
ret = False
gdal.VSIFCloseL(f1)
gdal.VSIFCloseL(f2)
return ret
###############################################################################
# Test that editing the last shape multiple times works properly (#7031)
def ogr_shape_107():
layer_name = 'ogr_shape_107'
filename = '/vsimem/' + layer_name + '.shp'
copy_filename = '/vsimem/' + layer_name + '_copy.shp'
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource(filename)
lyr = ds.CreateLayer(layer_name)
# Create a shape
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(2.5 3.5)'))
lyr.CreateFeature(f)
# Modify it to be larger
f = lyr.GetFeature(0)
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (1 2,3 4)'))
lyr.SetFeature(f)
# Insert new feature
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (5 6)'))
lyr.CreateFeature(f)
ds = None
ds = ogr.Open(filename)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToWkt() != 'LINESTRING (1 2,3 4)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToWkt() != 'LINESTRING (5 6)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds = None
gdal.VectorTranslate(copy_filename, filename)
if not is_same(copy_filename, filename):
gdaltest.post_reason('fail')
return 'fail'
shape_drv.DeleteDataSource(copy_filename)
shape_drv.DeleteDataSource(filename)
ds = shape_drv.CreateDataSource(filename)
lyr = ds.CreateLayer(layer_name)
# Create a shape
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (1 2,1.5 2.5,3 4)'))
lyr.CreateFeature(f)
# Modify it to be smaller
f = lyr.GetFeature(0)
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(1 2,3 4)'))
lyr.SetFeature(f)
ds = None
ds = ogr.Open(filename)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToWkt() != 'LINESTRING (1 2,3 4)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds = None
gdal.VectorTranslate(copy_filename, filename)
if not is_same(copy_filename, filename):
gdaltest.post_reason('fail')
return 'fail'
shape_drv.DeleteDataSource(copy_filename)
shape_drv.DeleteDataSource(filename)
return 'success'
###############################################################################
# Test spatial + attribute filter
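# Adding a trivial attribute filter on top of a spatial filter must not
# change the feature count, since the two filters are ANDed together.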
def ogr_shape_108():
ds = ogr.Open('data/poly.shp')
lyr = ds.GetLayer(0)
lyr.SetSpatialFilterRect(479750.6875, 4764702.0, 479750.6875, 4764702.0)
expected_fc = lyr.GetFeatureCount()
lyr.SetAttributeFilter("1=1")
if lyr.GetFeatureCount() != expected_fc:
gdaltest.post_reason('fail')
print(lyr.GetFeatureCount(), expected_fc)
return 'fail'
return 'success'
###############################################################################
# Test writing invalid polygon
def ogr_shape_109():
layer_name = 'ogr_shape_109'
filename = '/vsimem/' + layer_name + '.shp'
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource(filename)
lyr = ds.CreateLayer(layer_name)
# Create a shape
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((1 2))'))
lyr.CreateFeature(f)
ds = None
shape_drv.DeleteDataSource(filename)
return 'success'
###############################################################################
def ogr_shape_110_write_invalid_multipatch():
layer_name = 'ogr_shape_110'
filename = '/vsimem/' + layer_name + '.shp'
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource(filename)
lyr = ds.CreateLayer(layer_name, options=['SHPT=MULTIPATCH'])
# Create a shape
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('GEOMETRYCOLLECTION(POINT(0 0))'))
lyr.CreateFeature(f)
ds = None
shape_drv.DeleteDataSource(filename)
return 'success'
###############################################################################
def ogr_shape_111_delete_field_no_record():
layer_name = 'ogr_shape_111_delete_field_no_record'
filename = '/vsimem/' + layer_name + '.shp'
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
ds = shape_drv.CreateDataSource(filename)
lyr = ds.CreateLayer(layer_name)
lyr.CreateField(ogr.FieldDefn('field_1'))
lyr.CreateField(ogr.FieldDefn('field_2'))
ds = None
ds = ogr.Open(filename, update=1)
lyr = ds.GetLayer(0)
lyr.DeleteField(1)
ds = None
ds = ogr.Open(filename)
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldDefn(0).GetName() != 'field_1':
return 'fail'
ds = None
shape_drv.DeleteDataSource(filename)
return 'success'
###############################################################################
def ogr_shape_cleanup():
if gdaltest.shape_ds is None:
return 'skip'
gdaltest.shape_ds = None
shape_drv = ogr.GetDriverByName('ESRI Shapefile')
shape_drv.DeleteDataSource('tmp')
shape_drv.DeleteDataSource('tmp/UPPERCASE')
shape_drv.DeleteDataSource('tmp/lowercase')
shape_drv.DeleteDataSource('tmp/mixedcase')
shape_drv.DeleteDataSource('/vsimem/test35.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_46.shp')
shape_drv.DeleteDataSource('/vsimem/this_one_i_care_46.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_52.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_53.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_54')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_55')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_56')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_57')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_58')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_61')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_62')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_73.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_74.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_78.dbf')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_79.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_81.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_83.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_84.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_85.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_88.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_89.shp')
shape_drv.DeleteDataSource('/vsimem/ogr_shape_90.shp')
if os.path.exists('tmp/ogr_shape_100.shp'):
shape_drv.DeleteDataSource('tmp/ogr_shape_100.shp')
return 'success'
gdaltest_list = [
ogr_shape_1,
ogr_shape_2,
ogr_shape_3,
ogr_shape_4,
ogr_shape_5,
ogr_shape_6,
ogr_shape_7,
ogr_shape_8,
ogr_shape_9,
ogr_shape_10,
ogr_shape_11,
ogr_shape_12,
ogr_shape_13,
ogr_shape_14,
ogr_shape_15,
ogr_shape_16,
ogr_shape_16_1,
ogr_shape_17,
ogr_shape_18,
ogr_shape_19,
ogr_shape_20,
ogr_shape_21,
ogr_shape_22,
ogr_shape_23,
ogr_shape_24,
ogr_shape_25,
ogr_shape_26,
ogr_shape_27,
ogr_shape_28,
ogr_shape_29,
ogr_shape_30,
ogr_shape_31,
ogr_shape_32,
ogr_shape_33,
ogr_shape_34,
ogr_shape_35,
ogr_shape_36,
ogr_shape_37,
ogr_shape_37_bis,
ogr_shape_38,
ogr_shape_39,
ogr_shape_40,
ogr_shape_41,
ogr_shape_42,
ogr_shape_43,
# ogr_shape_44,
ogr_shape_45,
ogr_shape_46,
ogr_shape_47,
ogr_shape_48,
ogr_shape_49,
ogr_shape_50,
ogr_shape_51,
ogr_shape_52,
ogr_shape_53,
ogr_shape_54,
ogr_shape_55,
ogr_shape_56,
ogr_shape_57,
ogr_shape_58,
ogr_shape_59,
ogr_shape_60,
ogr_shape_61,
ogr_shape_62,
ogr_shape_63,
ogr_shape_64,
ogr_shape_65,
ogr_shape_66,
ogr_shape_67,
ogr_shape_68,
ogr_shape_69,
ogr_shape_70,
ogr_shape_71,
ogr_shape_72,
ogr_shape_73,
ogr_shape_74,
ogr_shape_75,
ogr_shape_76,
ogr_shape_77,
ogr_shape_78,
ogr_shape_79,
ogr_shape_80,
ogr_shape_81,
ogr_shape_82,
ogr_shape_83,
ogr_shape_84,
ogr_shape_85,
ogr_shape_86,
ogr_shape_87,
ogr_shape_88,
ogr_shape_89,
ogr_shape_90,
ogr_shape_91,
ogr_shape_92,
ogr_shape_93,
ogr_shape_94,
ogr_shape_95,
ogr_shape_96,
ogr_shape_97,
ogr_shape_98,
ogr_shape_99,
ogr_shape_100,
ogr_shape_101,
ogr_shape_102,
ogr_shape_103,
ogr_shape_104,
ogr_shape_105,
ogr_shape_106,
ogr_shape_107,
ogr_shape_108,
ogr_shape_109,
ogr_shape_110_write_invalid_multipatch,
ogr_shape_111_delete_field_no_record,
ogr_shape_cleanup]
# gdaltest_list = [ ogr_shape_107 ]
if __name__ == '__main__':
gdaltest.setup_run('ogr_shape')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize() | f = lyr.GetNextFeature()
if f.GetGeometryRef().ExportToIsoWkt() != 'POINT M (1 2 4)': |
main.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START recommendationengine_v1beta1_generated_UserEventService_WriteUserEvent_sync]
package main
import (
"context"
recommendationengine "cloud.google.com/go/recommendationengine/apiv1beta1"
recommendationenginepb "google.golang.org/genproto/googleapis/cloud/recommendationengine/v1beta1"
)
func main() |
// [END recommendationengine_v1beta1_generated_UserEventService_WriteUserEvent_sync]
| {
// import recommendationenginepb "google.golang.org/genproto/googleapis/cloud/recommendationengine/v1beta1"
ctx := context.Background()
c, err := recommendationengine.NewUserEventClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &recommendationenginepb.WriteUserEventRequest{
// TODO: Fill request struct fields.
}
resp, err := c.WriteUserEvent(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
} |
[id].tsx | import { NextApiRequest, NextApiResponse } from "next";
// eslint-disable-next-line import/no-anonymous-default-export
export default (request: NextApiRequest, response: NextApiResponse) => {
console.log(request.query);
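// For this [id].tsx route, request.query.id holds the dynamic route segment;
// a real handler would likely use it to select a single user rather than
// returning the whole list.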
const users = [
{ id: 1, name: 'João' }, | { id: 2, name: 'Diego' },
{ id: 3, name: 'Lucas' },
{ id: 4, name: 'Matheus' },
]
return response.json(users);
} | |
txn_speed.rs | use diesel::PgConnection;
use std::{collections::HashMap, thread::sleep, time::Duration};
use graph::prelude::anyhow;
use graph_store_postgres::connection_pool::ConnectionPool;
use crate::manager::catalog;
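/// Sample transaction counters from `pg_stat_database` twice, `delay` seconds
/// apart, and print per-database transactions per minute: "all" is
/// `xact_commit + xact_rollback`, "write" is derived from `txid_current()`.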
pub fn run(pool: ConnectionPool, delay: u64) -> Result<(), anyhow::Error> | {
fn query(conn: &PgConnection) -> Result<Vec<(String, i64, i64)>, anyhow::Error> {
use catalog::pg_catalog::pg_stat_database as d;
use diesel::dsl::*;
use diesel::sql_types::BigInt;
use diesel::{ExpressionMethods, QueryDsl, RunQueryDsl};
let rows = d::table
.filter(d::datname.eq(any(vec!["explorer", "graph"])))
.select((
d::datname,
sql::<BigInt>("(xact_commit + xact_rollback)::bigint"),
sql::<BigInt>("txid_current()::bigint"),
))
//.select((d::datname))
.load::<(Option<String>, i64, i64)>(conn)?;
Ok(rows
.into_iter()
.map(|(datname, all_txn, write_txn)| {
(datname.unwrap_or("none".to_string()), all_txn, write_txn)
})
.collect())
}
let mut speeds = HashMap::new();
let conn = pool.get()?;
for (datname, all_txn, write_txn) in query(&conn)? {
speeds.insert(datname, (all_txn, write_txn));
}
println!(
"Looking for number of transactions performed in {}s ...",
delay
);
sleep(Duration::from_secs(delay));
println!("Number of transactions/minute");
println!("{:10} {:>7} {}", "database", "all", "write");
for (datname, all_txn, write_txn) in query(&conn)? {
let (all_speed, write_speed) = speeds
.get(&datname)
.map(|(all_txn_old, write_txn_old)| {
(all_txn - *all_txn_old, write_txn - *write_txn_old)
})
.unwrap_or((0, 0));
let all_speed = all_speed as f64 * 60.0 / delay as f64;
let write_speed = write_speed as f64 * 60.0 / delay as f64;
println!("{:10} {:>7} {}", datname, all_speed, write_speed);
}
Ok(())
} |
|
nist_xray_tran_en_db_converter.py | import json
import sys
import os
from tqdm import tqdm
from mdf_refinery.validator import Validator
from mdf_refinery.parsers.tab_parser import parse_tab
# VERSION 0.3.0
# This is the converter for the NIST X-Ray Transition Energies Database
# Arguments:
# input_path (string): The file or directory where the data resides.
# NOTE: Do not hard-code the path to the data in the converter. The converter should be portable.
# metadata (string or dict): The path to the JSON dataset metadata file, a dict or json.dumps string containing the dataset metadata, or None to specify the metadata here. Default None.
# verbose (bool): Should the script print status messages to standard output? Default False.
# NOTE: The converter should have NO output if verbose is False, unless there is an error.
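# Hypothetical invocation (the path is illustrative, not part of this module):
# convert("/path/to/nist_xray_tran_en_db", verbose=True)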
def convert(input_path, metadata=None, verbose=False):
if verbose:
print("Begin converting")
# Collect the metadata
if not metadata:
dataset_metadata = {
"mdf": {
"title": "NIST X-Ray Transition Energies Database",
"acl": ["public"],
"source_name": "nist_xray_tran_en_db",
"citation": ["http://physics.nist.gov/PhysRefData/XrayTrans/Html/refs.html"],
"data_contact": {
"given_name": "Lawrence",
"family_name": "Hudson", | "email": "lawrence.hudson@nist.gov",
"institution": "National Institute of Standards and Technology"
},
# "author": ,
# "license": ,
"collection": "NIST X-Ray Transition Energies",
"tags": ["Radiation", "Spectroscopy", "Reference data"],
"description": "This x-ray transition table provides the energies for K transitions connecting the K shell (n = 1) to the shells with principal quantum numbers n = 2 to 4 and L transitions connecting the L1, L2, and L3 shells (n = 2) to the shells with principal quantum numbers n = 3 and 4. The elements covered include Z = 10, neon to Z = 100, fermium. There are two unique features of this database: (1) all experimental values are on a scale consistent with the International System of measurement (the SI) and the numerical values are determined using constants from the Recommended Values of the Fundamental Physical Constants: 1998 [115] and (2) accurate theoretical estimates are included for all transitions. The user will find that for many of the transitions, the experimental and theoretical values are very consistent. It is our hope that the theoretical values will provide a useful estimate for missing or poorly measured experimental values.",
"year": 2003,
"links": {
"landing_page": "https://www.nist.gov/pml/x-ray-transition-energies-database",
# "publication": ,
# "dataset_doi": ,
# "related_id": ,
# data links: {
#"globus_endpoint": ,
#"http_host": ,
#"path": ,
#}
},
# "mrr": ,
"data_contributor": {
"given_name": "Jonathon",
"family_name": "Gaff",
"email": "jgaff@uchicago.edu",
"institution": "The University of Chicago",
"github": "jgaff"
}
}
}
elif type(metadata) is str:
try:
dataset_metadata = json.loads(metadata)
except Exception:
try:
with open(metadata, 'r') as metadata_file:
dataset_metadata = json.load(metadata_file)
except Exception as e:
sys.exit("Error: Unable to read metadata: " + repr(e))
elif type(metadata) is dict:
dataset_metadata = metadata
else:
sys.exit("Error: Invalid metadata parameter")
dataset_validator = Validator(dataset_metadata)
# Get the data
headers = ['element', 'A', 'transition', 'theory_(eV)', 'theory_uncertainty_(eV)', 'direct_(eV)', 'direct_uncertainty_(eV)', 'combined_(eV)', 'combined_uncertainty_(eV)', 'vapor_(eV)', 'vapor_uncertainty_(eV)', 'blend', 'reference']
with open(os.path.join(input_path, "xray_tran_en_db.txt")) as in_file:
raw_data = in_file.read()
for record in tqdm(parse_tab(raw_data, sep="\t", headers=headers), desc="Processing data", disable=not verbose):
record_metadata = {
"mdf": {
"title": "X-Ray Transition - " + record["element"],
"acl": ["public"],
# "tags": ,
# "description": ,
"composition": record["element"],
"raw": json.dumps(record),
"links": {
"landing_page": "http://physics.nist.gov/PhysRefData/XrayTrans/Html/search.html",
# "publication": ,
# "dataset_doi": ,
# "related_id": ,
# data links: {
#"globus_endpoint": ,
#"http_host": ,
#"path": ,
#},
},
# "citation": ,
# "data_contact": {
# "given_name": ,
# "family_name": ,
# "email": ,
# "institution":,
# IDs
# },
# "author": ,
# "license": ,
# "collection": ,
# "data_format": ,
# "data_type": ,
# "year": ,
# "mrr":
# "processing": ,
# "structure":,
}
}
# Pass each individual record to the Validator
result = dataset_validator.write_record(record_metadata)
# Check if the Validator accepted the record, and print a message if it didn't
# If the Validator returns "success" == True, the record was written successfully
if result["success"] is not True:
print("Error:", result["message"])
if verbose:
print("Finished converting") | |
admission_test.go | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"fmt"
"testing"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
)
// TestAdmission verifies namespace lifecycle admission: creates are rejected
// in terminating or missing namespaces, updates and deletes proceed, and the
// default namespace can never be deleted.
func | (t *testing.T) {
namespaceObj := &api.Namespace{
ObjectMeta: api.ObjectMeta{
Name: "test",
Namespace: "",
},
Status: api.NamespaceStatus{
Phase: api.NamespaceActive,
},
}
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
store.Add(namespaceObj)
fakeWatch := watch.NewFake()
mockClient := &testclient.Fake{}
mockClient.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
if getAction, ok := action.(testclient.GetAction); ok && getAction.GetName() == namespaceObj.Name {
return true, namespaceObj, nil
}
return true, nil, fmt.Errorf("No result for action %v", action)
})
mockClient.AddReactor("list", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
return true, &api.NamespaceList{Items: []api.Namespace{*namespaceObj}}, nil
})
lfhandler := NewLifecycle(mockClient).(*lifecycle)
lfhandler.store = store
handler := admission.NewChainHandler(lfhandler)
pod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespaceObj.Name},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image"}},
},
}
badPod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "456", Namespace: "doesnotexist"},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image"}},
},
}
err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
if err != nil {
t.Errorf("Unexpected error returned from admission handler: %v", err)
}
// change namespace state to terminating
namespaceObj.Status.Phase = api.NamespaceTerminating
store.Add(namespaceObj)
// verify create operations in the namespace cause an error
err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
if err == nil {
t.Errorf("Expected error rejecting creates in a namespace when it is terminating")
}
// verify update operations in the namespace can proceed
err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Update, nil))
if err != nil {
t.Errorf("Unexpected error returned from admission handler: %v", err)
}
// verify delete operations in the namespace can proceed
err = handler.Admit(admission.NewAttributesRecord(nil, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Delete, nil))
if err != nil {
t.Errorf("Unexpected error returned from admission handler: %v", err)
}
// verify delete of namespace default can never proceed
err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", api.NamespaceDefault, "namespaces", "", admission.Delete, nil))
if err == nil {
t.Errorf("Expected an error that this namespace can never be deleted")
}
// verify delete of namespace other than default can proceed
err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", "other", "namespaces", "", admission.Delete, nil))
if err != nil {
t.Errorf("Did not expect an error %v", err)
}
// verify create/update/delete of an object in a non-existent namespace returns an error
err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an aerror that objects cannot be created in non-existant namespaces", err)
}
err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Update, nil))
if err == nil {
t.Errorf("Expected an aerror that objects cannot be updated in non-existant namespaces", err)
}
err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Delete, nil))
if err == nil {
t.Errorf("Expected an aerror that objects cannot be deleted in non-existant namespaces", err)
}
}
| TestAdmission |
cmp.rs | //! Functionality for ordering and comparison.
//!
//! This module contains various tools for ordering and comparing values. In
//! summary:
//!
//! * [`Eq`] and [`PartialEq`] are traits that allow you to define total and
//! partial equality between values, respectively. Implementing them overloads
//! the `==` and `!=` operators.
//! * [`Ord`] and [`PartialOrd`] are traits that allow you to define total and
//! partial orderings between values, respectively. Implementing them overloads
//! the `<`, `<=`, `>`, and `>=` operators.
//! * [`Ordering`] is an enum returned by the main functions of [`Ord`] and
//! [`PartialOrd`], and describes an ordering.
//! * [`Reverse`] is a struct that allows you to easily reverse an ordering.
//! * [`max`] and [`min`] are functions that build off of [`Ord`] and allow you
//! to find the maximum or minimum of two values.
//!
//! For more details, see the respective documentation of each item in the list.
//!
//! [`max`]: Ord::max
//! [`min`]: Ord::min
#![stable(feature = "rust1", since = "1.0.0")]
use self::Ordering::*;
/// Trait for equality comparisons which are [partial equivalence
/// relations](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
///
/// This trait allows for partial equality, for types that do not have a full
/// equivalence relation. For example, in floating point numbers `NaN != NaN`,
/// so floating point types implement `PartialEq` but not [`Eq`].
///
/// Formally, the equality must be (for all `a`, `b` and `c`):
///
/// - symmetric: `a == b` implies `b == a`; and
/// - transitive: `a == b` and `b == c` implies `a == c`.
///
/// Note that these requirements mean that the trait itself must be implemented
/// symmetrically and transitively: if `T: PartialEq<U>` and `U: PartialEq<V>`
/// then `U: PartialEq<T>` and `T: PartialEq<V>`.
///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d on structs, two
/// instances are equal if all fields are equal, and not equal if any fields
/// are not equal. When `derive`d on enums, each variant is equal to itself
/// and not equal to the other variants.
///
/// ## How can I implement `PartialEq`?
///
/// `PartialEq` only requires the [`eq`] method to be implemented; [`ne`] is defined
/// in terms of it by default. Any manual implementation of [`ne`] *must* respect
/// the rule that [`eq`] is a strict inverse of [`ne`]; that is, `!(a == b)` if and
/// only if `a != b`.
///
/// Implementations of `PartialEq`, [`PartialOrd`], and [`Ord`] *must* agree with
/// each other. It's easy to accidentally make them disagree by deriving some
/// of the traits and manually implementing others.
///
/// An example implementation for a domain in which two books are considered
/// the same book if their ISBN matches, even if the formats differ:
///
/// ```
/// enum BookFormat {
/// Paperback,
/// Hardback,
/// Ebook,
/// }
///
/// struct Book {
/// isbn: i32,
/// format: BookFormat,
/// }
///
/// impl PartialEq for Book {
/// fn eq(&self, other: &Self) -> bool {
/// self.isbn == other.isbn
/// }
/// }
///
/// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
/// let b2 = Book { isbn: 3, format: BookFormat::Ebook };
/// let b3 = Book { isbn: 10, format: BookFormat::Paperback };
///
/// assert!(b1 == b2);
/// assert!(b1 != b3);
/// ```
///
/// ## How can I compare two different types?
///
/// The type you can compare with is controlled by `PartialEq`'s type parameter.
/// For example, let's tweak our previous code a bit:
///
/// ```
/// // The derive implements <BookFormat> == <BookFormat> comparisons
/// #[derive(PartialEq)]
/// enum BookFormat {
/// Paperback,
/// Hardback,
/// Ebook,
/// }
///
/// struct Book {
/// isbn: i32,
/// format: BookFormat,
/// }
///
/// // Implement <Book> == <BookFormat> comparisons
/// impl PartialEq<BookFormat> for Book {
/// fn eq(&self, other: &BookFormat) -> bool {
/// self.format == *other
/// }
/// }
///
/// // Implement <BookFormat> == <Book> comparisons
/// impl PartialEq<Book> for BookFormat {
/// fn eq(&self, other: &Book) -> bool {
/// *self == other.format
/// }
/// }
///
/// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
///
/// assert!(b1 == BookFormat::Paperback);
/// assert!(BookFormat::Ebook != b1);
/// ```
///
/// By changing `impl PartialEq for Book` to `impl PartialEq<BookFormat> for Book`,
/// we allow `BookFormat`s to be compared with `Book`s.
///
/// A comparison like the one above, which ignores some fields of the struct,
/// can be dangerous. It can easily lead to an unintended violation of the
/// requirements for a partial equivalence relation. For example, if we kept
/// the above implementation of `PartialEq<Book>` for `BookFormat` and added an
/// implementation of `PartialEq<Book>` for `Book` (either via a `#[derive]` or
/// via the manual implementation from the first example) then the result would
/// violate transitivity:
///
/// ```should_panic
/// #[derive(PartialEq)]
/// enum BookFormat {
/// Paperback,
/// Hardback,
/// Ebook,
/// }
///
/// #[derive(PartialEq)]
/// struct Book {
/// isbn: i32,
/// format: BookFormat,
/// }
///
/// impl PartialEq<BookFormat> for Book {
/// fn eq(&self, other: &BookFormat) -> bool {
/// self.format == *other
/// }
/// }
///
/// impl PartialEq<Book> for BookFormat {
/// fn eq(&self, other: &Book) -> bool {
/// *self == other.format
/// }
/// }
///
/// fn main() {
/// let b1 = Book { isbn: 1, format: BookFormat::Paperback };
/// let b2 = Book { isbn: 2, format: BookFormat::Paperback };
///
/// assert!(b1 == BookFormat::Paperback);
/// assert!(BookFormat::Paperback == b2);
///
/// // The following should hold by transitivity but doesn't.
/// assert!(b1 == b2); // <-- PANICS
/// }
/// ```
///
/// # Examples
///
/// ```
/// let x: u32 = 0;
/// let y: u32 = 1;
///
/// assert_eq!(x == y, false);
/// assert_eq!(x.eq(&y), false);
/// ```
///
/// [`eq`]: PartialEq::eq
/// [`ne`]: PartialEq::ne
#[lang = "eq"]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = "==")]
#[doc(alias = "!=")]
#[rustc_on_unimplemented(
message = "can't compare `{Self}` with `{Rhs}`",
label = "no implementation for `{Self} == {Rhs}`"
)]
pub trait PartialEq<Rhs: ?Sized = Self> {
/// This method tests for `self` and `other` values to be equal, and is used
/// by `==`.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn eq(&self, other: &Rhs) -> bool;
/// This method tests for `!=`.
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn ne(&self, other: &Rhs) -> bool {
!self.eq(other)
}
}
/// Derive macro generating an impl of the trait `PartialEq`.
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics, structural_match)]
pub macro PartialEq($item:item) {
/* compiler built-in */
}
/// Trait for equality comparisons which are [equivalence relations](
/// https://en.wikipedia.org/wiki/Equivalence_relation).
///
/// This means, that in addition to `a == b` and `a != b` being strict inverses, the equality must
/// be (for all `a`, `b` and `c`):
///
/// - reflexive: `a == a`;
/// - symmetric: `a == b` implies `b == a`; and
/// - transitive: `a == b` and `b == c` implies `a == c`.
///
/// This property cannot be checked by the compiler, and therefore `Eq` implies
/// [`PartialEq`], and has no extra methods.
///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has
/// no extra methods, it is only informing the compiler that this is an
/// equivalence relation rather than a partial equivalence relation. Note that
/// the `derive` strategy requires all fields are `Eq`, which isn't
/// always desired.
///
/// ## How can I implement `Eq`?
///
/// If you cannot use the `derive` strategy, specify that your type implements
/// `Eq`, which has no methods:
///
/// ```
/// enum BookFormat { Paperback, Hardback, Ebook }
/// struct Book {
/// isbn: i32,
/// format: BookFormat,
/// }
/// impl PartialEq for Book {
/// fn eq(&self, other: &Self) -> bool {
/// self.isbn == other.isbn
/// }
/// }
/// impl Eq for Book {}
/// ```
#[doc(alias = "==")]
#[doc(alias = "!=")]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Eq: PartialEq<Self> {
// this method is used solely by #[deriving] to assert
// that every component of a type implements #[deriving]
// itself, the current deriving infrastructure means doing this
// assertion without using a method on this trait is nearly
// impossible.
//
// This should never be implemented by hand.
#[doc(hidden)]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn assert_receiver_is_total_eq(&self) {}
}
/// Derive macro generating an impl of the trait `Eq`.
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match)]
pub macro Eq($item:item) {
/* compiler built-in */
}
// FIXME: this struct is used solely by #[derive] to
// assert that every component of a type implements Eq.
//
// This struct should never appear in user code.
#[doc(hidden)]
#[allow(missing_debug_implementations)]
#[unstable(feature = "derive_eq", reason = "deriving hack, should not be public", issue = "none")]
pub struct AssertParamIsEq<T: Eq + ?Sized> {
_field: crate::marker::PhantomData<T>,
}
/// An `Ordering` is the result of a comparison between two values.
///
/// # Examples
///
/// ```
/// use std::cmp::Ordering;
///
/// let result = 1.cmp(&2);
/// assert_eq!(Ordering::Less, result);
///
/// let result = 1.cmp(&1);
/// assert_eq!(Ordering::Equal, result);
///
/// let result = 2.cmp(&1);
/// assert_eq!(Ordering::Greater, result);
/// ```
#[derive(Clone, Copy, PartialEq, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Ordering {
/// An ordering where a compared value is less than another.
#[stable(feature = "rust1", since = "1.0.0")]
Less = -1,
/// An ordering where a compared value is equal to another.
#[stable(feature = "rust1", since = "1.0.0")]
Equal = 0,
/// An ordering where a compared value is greater than another.
#[stable(feature = "rust1", since = "1.0.0")]
Greater = 1,
}
impl Ordering {
/// Returns `true` if the ordering is the `Equal` variant.
///
/// # Examples
///
/// ```
/// #![feature(ordering_helpers)]
/// use std::cmp::Ordering;
///
/// assert_eq!(Ordering::Less.is_eq(), false);
/// assert_eq!(Ordering::Equal.is_eq(), true);
/// assert_eq!(Ordering::Greater.is_eq(), false);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "ordering_helpers", issue = "79885")]
pub const fn is_eq(self) -> bool {
matches!(self, Equal)
}
/// Returns `true` if the ordering is not the `Equal` variant.
///
/// # Examples
///
/// ```
/// #![feature(ordering_helpers)]
/// use std::cmp::Ordering;
///
/// assert_eq!(Ordering::Less.is_ne(), true);
/// assert_eq!(Ordering::Equal.is_ne(), false);
/// assert_eq!(Ordering::Greater.is_ne(), true);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "ordering_helpers", issue = "79885")]
pub const fn is_ne(self) -> bool {
!matches!(self, Equal)
}
/// Returns `true` if the ordering is the `Less` variant.
///
/// # Examples
///
/// ```
/// #![feature(ordering_helpers)]
/// use std::cmp::Ordering;
///
/// assert_eq!(Ordering::Less.is_lt(), true);
/// assert_eq!(Ordering::Equal.is_lt(), false);
/// assert_eq!(Ordering::Greater.is_lt(), false);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "ordering_helpers", issue = "79885")]
pub const fn is_lt(self) -> bool {
matches!(self, Less)
}
/// Returns `true` if the ordering is the `Greater` variant.
///
/// # Examples
///
/// ```
/// #![feature(ordering_helpers)]
/// use std::cmp::Ordering;
///
/// assert_eq!(Ordering::Less.is_gt(), false);
/// assert_eq!(Ordering::Equal.is_gt(), false);
/// assert_eq!(Ordering::Greater.is_gt(), true);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "ordering_helpers", issue = "79885")]
pub const fn is_gt(self) -> bool {
matches!(self, Greater)
}
/// Returns `true` if the ordering is either the `Less` or `Equal` variant.
///
/// # Examples
///
/// ```
/// #![feature(ordering_helpers)]
/// use std::cmp::Ordering;
///
/// assert_eq!(Ordering::Less.is_le(), true);
/// assert_eq!(Ordering::Equal.is_le(), true);
/// assert_eq!(Ordering::Greater.is_le(), false);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "ordering_helpers", issue = "79885")]
pub const fn is_le(self) -> bool {
!matches!(self, Greater)
}
/// Returns `true` if the ordering is either the `Greater` or `Equal` variant.
///
/// # Examples
///
/// ```
/// #![feature(ordering_helpers)]
/// use std::cmp::Ordering;
///
/// assert_eq!(Ordering::Less.is_ge(), false);
/// assert_eq!(Ordering::Equal.is_ge(), true);
/// assert_eq!(Ordering::Greater.is_ge(), true);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "ordering_helpers", issue = "79885")]
pub const fn is_ge(self) -> bool {
!matches!(self, Less)
}
/// Reverses the `Ordering`.
///
/// * `Less` becomes `Greater`.
/// * `Greater` becomes `Less`.
/// * `Equal` becomes `Equal`.
///
/// # Examples
///
/// Basic behavior:
///
/// ```
/// use std::cmp::Ordering;
///
/// assert_eq!(Ordering::Less.reverse(), Ordering::Greater);
/// assert_eq!(Ordering::Equal.reverse(), Ordering::Equal);
/// assert_eq!(Ordering::Greater.reverse(), Ordering::Less);
/// ```
///
/// This method can be used to reverse a comparison:
///
/// ```
/// let data: &mut [_] = &mut [2, 10, 5, 8];
///
/// // sort the array from largest to smallest.
/// data.sort_by(|a, b| a.cmp(b).reverse());
///
/// let b: &mut [_] = &mut [10, 8, 5, 2];
/// assert!(data == b);
/// ```
#[inline]
#[must_use]
#[rustc_const_stable(feature = "const_ordering", since = "1.48.0")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn reverse(self) -> Ordering {
match self {
Less => Greater,
Equal => Equal,
Greater => Less,
}
}
/// Chains two orderings.
///
/// Returns `self` when it's not `Equal`. Otherwise returns `other`.
///
/// # Examples
///
/// ```
/// use std::cmp::Ordering;
///
/// let result = Ordering::Equal.then(Ordering::Less);
/// assert_eq!(result, Ordering::Less);
///
/// let result = Ordering::Less.then(Ordering::Equal);
/// assert_eq!(result, Ordering::Less);
///
/// let result = Ordering::Less.then(Ordering::Greater);
/// assert_eq!(result, Ordering::Less);
///
/// let result = Ordering::Equal.then(Ordering::Equal);
/// assert_eq!(result, Ordering::Equal);
///
/// let x: (i64, i64, i64) = (1, 2, 7);
/// let y: (i64, i64, i64) = (1, 5, 3);
/// let result = x.0.cmp(&y.0).then(x.1.cmp(&y.1)).then(x.2.cmp(&y.2));
///
/// assert_eq!(result, Ordering::Less);
/// ```
#[inline]
#[must_use]
#[rustc_const_stable(feature = "const_ordering", since = "1.48.0")]
#[stable(feature = "ordering_chaining", since = "1.17.0")]
pub const fn then(self, other: Ordering) -> Ordering {
match self {
Equal => other,
_ => self,
}
}
/// Chains the ordering with the given function.
///
/// Returns `self` when it's not `Equal`. Otherwise calls `f` and returns
/// the result.
///
/// # Examples
///
/// ```
/// use std::cmp::Ordering;
///
/// let result = Ordering::Equal.then_with(|| Ordering::Less);
/// assert_eq!(result, Ordering::Less);
///
/// let result = Ordering::Less.then_with(|| Ordering::Equal);
/// assert_eq!(result, Ordering::Less);
///
/// let result = Ordering::Less.then_with(|| Ordering::Greater);
/// assert_eq!(result, Ordering::Less);
///
/// let result = Ordering::Equal.then_with(|| Ordering::Equal);
/// assert_eq!(result, Ordering::Equal);
///
/// let x: (i64, i64, i64) = (1, 2, 7);
/// let y: (i64, i64, i64) = (1, 5, 3);
/// let result = x.0.cmp(&y.0).then_with(|| x.1.cmp(&y.1)).then_with(|| x.2.cmp(&y.2));
///
/// assert_eq!(result, Ordering::Less);
/// ```
#[inline]
#[must_use]
#[stable(feature = "ordering_chaining", since = "1.17.0")]
pub fn then_with<F: FnOnce() -> Ordering>(self, f: F) -> Ordering {
match self {
Equal => f(),
_ => self,
}
}
}
/// A helper struct for reverse ordering.
///
/// This struct is a helper to be used with functions like [`Vec::sort_by_key`] and
/// can be used to reverse order a part of a key.
///
/// [`Vec::sort_by_key`]: ../../std/vec/struct.Vec.html#method.sort_by_key
///
/// # Examples
///
/// ```
/// use std::cmp::Reverse;
///
/// let mut v = vec![1, 2, 3, 4, 5, 6];
/// v.sort_by_key(|&num| (num > 3, Reverse(num)));
/// assert_eq!(v, vec![3, 2, 1, 6, 5, 4]);
/// ```
#[derive(PartialEq, Eq, Debug, Copy, Clone, Default, Hash)]
#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
pub struct Reverse<T>(#[stable(feature = "reverse_cmp_key", since = "1.19.0")] pub T);
#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
impl<T: PartialOrd> PartialOrd for Reverse<T> {
#[inline]
fn partial_cmp(&self, other: &Reverse<T>) -> Option<Ordering> {
other.0.partial_cmp(&self.0)
}
#[inline]
fn lt(&self, other: &Self) -> bool {
other.0 < self.0
}
#[inline]
fn le(&self, other: &Self) -> bool {
other.0 <= self.0
}
#[inline]
fn gt(&self, other: &Self) -> bool {
other.0 > self.0
}
#[inline]
fn ge(&self, other: &Self) -> bool {
other.0 >= self.0
}
}
#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
impl<T: Ord> Ord for Reverse<T> {
#[inline]
fn cmp(&self, other: &Reverse<T>) -> Ordering {
other.0.cmp(&self.0)
}
}
/// Trait for types that form a [total order](https://en.wikipedia.org/wiki/Total_order).
///
/// An order is a total order if it is (for all `a`, `b` and `c`):
///
/// - total and asymmetric: exactly one of `a < b`, `a == b` or `a > b` is true; and
/// - transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d on structs, it will produce a
/// [lexicographic](https://en.wikipedia.org/wiki/Lexicographic_order) ordering based on the top-to-bottom declaration order of the struct's members.
/// When `derive`d on enums, variants are ordered by their top-to-bottom discriminant order.
///
/// ## Lexicographical comparison
///
/// Lexicographical comparison is an operation with the following properties:
/// - Two sequences are compared element by element.
/// - The first mismatching element defines which sequence is lexicographically less or greater than the other.
/// - If one sequence is a prefix of another, the shorter sequence is lexicographically less than the other.
/// - If two sequences have equivalent elements and are of the same length, then the sequences are lexicographically equal.
/// - An empty sequence is lexicographically less than any non-empty sequence.
/// - Two empty sequences are lexicographically equal.
///
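/// For example, slices compare lexicographically (a minimal illustration):
///
/// ```
/// let a: &[i32] = &[1, 2, 3];
/// let b: &[i32] = &[1, 4];
/// assert!(a < b); // first mismatch decides: 2 < 4
///
/// let prefix: &[i32] = &[1, 2];
/// assert!(prefix < a); // a prefix is less than the longer sequence
/// ```
///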
/// ## How can I implement `Ord`?
///
/// `Ord` requires that the type also be [`PartialOrd`] and [`Eq`] (which requires [`PartialEq`]).
///
/// Then you must define an implementation for [`cmp`]. You may find it useful to use
/// [`cmp`] on your type's fields.
///
/// Implementations of [`PartialEq`], [`PartialOrd`], and `Ord` *must*
/// agree with each other. That is, `a.cmp(b) == Ordering::Equal` if
/// and only if `a == b` and `Some(a.cmp(b)) == a.partial_cmp(b)` for
/// all `a` and `b`. It's easy to accidentally make them disagree by
/// deriving some of the traits and manually implementing others.
///
/// Here's an example where you want to sort people by height only, disregarding `id`
/// and `name`:
///
/// ```
/// use std::cmp::Ordering;
///
/// #[derive(Eq)]
/// struct Person {
/// id: u32,
/// name: String,
/// height: u32,
/// }
///
/// impl Ord for Person {
/// fn cmp(&self, other: &Self) -> Ordering {
/// self.height.cmp(&other.height)
/// }
/// }
///
/// impl PartialOrd for Person {
/// fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
/// Some(self.cmp(other))
/// }
/// }
///
/// impl PartialEq for Person {
/// fn eq(&self, other: &Self) -> bool {
/// self.height == other.height
/// }
/// }
/// ```
///
/// [`cmp`]: Ord::cmp
#[doc(alias = "<")]
#[doc(alias = ">")]
#[doc(alias = "<=")]
#[doc(alias = ">=")]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Ord: Eq + PartialOrd<Self> {
/// This method returns an [`Ordering`] between `self` and `other`.
///
/// By convention, `self.cmp(&other)` returns the ordering matching the expression
/// `self <operator> other` if true.
///
/// # Examples
///
/// ```
/// use std::cmp::Ordering;
///
/// assert_eq!(5.cmp(&10), Ordering::Less);
/// assert_eq!(10.cmp(&5), Ordering::Greater);
/// assert_eq!(5.cmp(&5), Ordering::Equal);
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn cmp(&self, other: &Self) -> Ordering;
/// Compares and returns the maximum of two values.
///
/// Returns the second argument if the comparison determines them to be equal.
///
/// # Examples
///
/// ```
/// assert_eq!(2, 1.max(2));
/// assert_eq!(2, 2.max(2));
/// ```
#[stable(feature = "ord_max_min", since = "1.21.0")]
#[inline]
#[must_use]
fn max(self, other: Self) -> Self
where
Self: Sized,
{
max_by(self, other, Ord::cmp)
}
/// Compares and returns the minimum of two values.
///
/// Returns the first argument if the comparison determines them to be equal.
///
/// # Examples
///
/// ```
/// assert_eq!(1, 1.min(2));
/// assert_eq!(2, 2.min(2));
/// ```
#[stable(feature = "ord_max_min", since = "1.21.0")]
#[inline]
#[must_use]
fn min(self, other: Self) -> Self
where
Self: Sized,
{
min_by(self, other, Ord::cmp)
}
/// Restrict a value to a certain interval.
///
/// Returns `max` if `self` is greater than `max`, and `min` if `self` is
/// less than `min`. Otherwise this returns `self`.
///
/// # Panics
///
/// Panics if `min > max`.
///
/// # Examples
///
/// ```
/// assert!((-3).clamp(-2, 1) == -2);
/// assert!(0.clamp(-2, 1) == 0);
/// assert!(2.clamp(-2, 1) == 1);
/// ```
#[must_use]
#[stable(feature = "clamp", since = "1.50.0")]
fn clamp(self, min: Self, max: Self) -> Self
where
Self: Sized,
{
assert!(min <= max);
if self < min {
min
} else if self > max {
max
} else {
self
}
}
}
/// Derive macro generating an impl of the trait `Ord`.
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics)]
pub macro Ord($item:item) {
/* compiler built-in */
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Eq for Ordering {}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for Ordering {
#[inline]
fn cmp(&self, other: &Ordering) -> Ordering {
(*self as i32).cmp(&(*other as i32))
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for Ordering {
#[inline]
fn partial_cmp(&self, other: &Ordering) -> Option<Ordering> {
(*self as i32).partial_cmp(&(*other as i32))
}
}
/// Trait for values that can be compared for a sort-order.
///
/// The comparison must satisfy, for all `a`, `b` and `c`:
///
/// - asymmetry: if `a < b` then `!(a > b)`, as well as `a > b` implying `!(a < b)`; and
/// - transitivity: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// Note that these requirements mean that the trait itself must be implemented symmetrically and
/// transitively: if `T: PartialOrd<U>` and `U: PartialOrd<V>` then `U: PartialOrd<T>` and `T:
/// PartialOrd<V>`.
///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d on structs, it will produce a
/// lexicographic ordering based on the top-to-bottom declaration order of the struct's members.
/// When `derive`d on enums, variants are ordered by their top-to-bottom discriminant order.
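///
/// As an illustrative sketch (not part of the original docs), a derived
/// `PartialOrd` compares struct fields in declaration order:
///
/// ```
/// #[derive(PartialEq, PartialOrd)]
/// struct Version {
/// major: u32, // compared first
/// minor: u32, // compared only when `major` ties
/// }
///
/// assert!(Version { major: 1, minor: 9 } < Version { major: 2, minor: 0 });
/// assert!(Version { major: 1, minor: 0 } < Version { major: 1, minor: 9 });
/// ```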
///
/// ## How can I implement `PartialOrd`?
///
/// `PartialOrd` only requires implementation of the [`partial_cmp`] method, with the others
/// generated from default implementations.
///
/// However it remains possible to implement the others separately for types which do not have a
/// total order. For example, for floating point numbers, `NaN < 0 == false` and `NaN >= 0 ==
/// false` (cf. IEEE 754-2008 section 5.11).
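///
/// A quick illustration (added here for clarity, not from the original text):
///
/// ```
/// assert_eq!(f64::NAN < 0.0, false);
/// assert_eq!(f64::NAN >= 0.0, false);
/// assert_eq!(f64::NAN.partial_cmp(&0.0), None);
/// ```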
///
/// `PartialOrd` requires your type to be [`PartialEq`].
///
/// Implementations of [`PartialEq`], `PartialOrd`, and [`Ord`] *must* agree with each other. It's
/// easy to accidentally make them disagree by deriving some of the traits and manually
/// implementing others.
///
/// If your type is [`Ord`], you can implement [`partial_cmp`] by using [`cmp`]:
///
/// ```
/// use std::cmp::Ordering;
///
/// #[derive(Eq)]
/// struct Person {
/// id: u32,
/// name: String,
/// height: u32,
/// }
///
/// impl PartialOrd for Person {
/// fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
/// Some(self.cmp(other))
/// }
/// }
///
/// impl Ord for Person {
/// fn cmp(&self, other: &Self) -> Ordering {
/// self.height.cmp(&other.height)
/// }
/// }
///
/// impl PartialEq for Person {
/// fn eq(&self, other: &Self) -> bool {
/// self.height == other.height
/// }
/// }
/// ```
///
/// You may also find it useful to use [`partial_cmp`] on your type's fields. Here
/// is an example of `Person` types who have a floating-point `height` field that
/// is the only field to be used for sorting:
///
/// ```
/// use std::cmp::Ordering;
///
/// struct Person {
/// id: u32,
/// name: String,
/// height: f64,
/// }
///
/// impl PartialOrd for Person {
/// fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
/// self.height.partial_cmp(&other.height)
/// }
/// }
///
/// impl PartialEq for Person {
/// fn eq(&self, other: &Self) -> bool {
/// self.height == other.height
/// }
/// }
/// ```
///
/// # Examples
///
/// ```
/// let x: u32 = 0;
/// let y: u32 = 1;
///
/// assert_eq!(x < y, true);
/// assert_eq!(x.lt(&y), true);
/// ```
///
/// [`partial_cmp`]: PartialOrd::partial_cmp
/// [`cmp`]: Ord::cmp
#[lang = "partial_ord"]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = ">")]
#[doc(alias = "<")]
#[doc(alias = "<=")]
#[doc(alias = ">=")]
#[rustc_on_unimplemented(
message = "can't compare `{Self}` with `{Rhs}`",
label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`"
)]
pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> {
/// This method returns an ordering between `self` and `other` values if one exists.
///
/// # Examples
///
/// ```
/// use std::cmp::Ordering;
///
/// let result = 1.0.partial_cmp(&2.0);
/// assert_eq!(result, Some(Ordering::Less));
///
/// let result = 1.0.partial_cmp(&1.0);
/// assert_eq!(result, Some(Ordering::Equal));
///
/// let result = 2.0.partial_cmp(&1.0);
/// assert_eq!(result, Some(Ordering::Greater));
/// ```
///
/// When comparison is impossible:
///
/// ```
/// let result = f64::NAN.partial_cmp(&1.0);
/// assert_eq!(result, None);
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn partial_cmp(&self, other: &Rhs) -> Option<Ordering>;
/// This method tests less than (for `self` and `other`) and is used by the `<` operator.
///
/// # Examples
///
/// ```
/// let result = 1.0 < 2.0;
/// assert_eq!(result, true);
///
/// let result = 2.0 < 1.0;
/// assert_eq!(result, false);
/// ```
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn lt(&self, other: &Rhs) -> bool {
matches!(self.partial_cmp(other), Some(Less))
}
/// This method tests less than or equal to (for `self` and `other`) and is used by the `<=`
/// operator.
///
/// # Examples
///
/// ```
/// let result = 1.0 <= 2.0;
/// assert_eq!(result, true);
///
/// let result = 2.0 <= 2.0;
/// assert_eq!(result, true);
/// ```
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn le(&self, other: &Rhs) -> bool {
matches!(self.partial_cmp(other), Some(Less | Equal))
}
/// This method tests greater than (for `self` and `other`) and is used by the `>` operator.
///
/// # Examples
///
/// ```
/// let result = 1.0 > 2.0;
/// assert_eq!(result, false);
///
/// let result = 2.0 > 2.0;
/// assert_eq!(result, false);
/// ```
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn gt(&self, other: &Rhs) -> bool {
matches!(self.partial_cmp(other), Some(Greater))
}
/// This method tests greater than or equal to (for `self` and `other`) and is used by the `>=`
/// operator.
///
/// # Examples
///
/// ```
/// let result = 2.0 >= 1.0;
/// assert_eq!(result, true);
///
/// let result = 2.0 >= 2.0;
/// assert_eq!(result, true);
/// ```
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn ge(&self, other: &Rhs) -> bool {
matches!(self.partial_cmp(other), Some(Greater | Equal))
}
}
/// Derive macro generating an impl of the trait `PartialOrd`.
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics)]
pub macro PartialOrd($item:item) {
/* compiler built-in */
}
/// Compares and returns the minimum of two values.
///
/// Returns the first argument if the comparison determines them to be equal.
///
/// Internally uses an alias to [`Ord::min`].
///
/// # Examples
///
/// ```
/// use std::cmp;
///
/// assert_eq!(1, cmp::min(1, 2));
/// assert_eq!(2, cmp::min(2, 2));
/// ```
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn min<T: Ord>(v1: T, v2: T) -> T {
v1.min(v2)
}
/// Returns the minimum of two values with respect to the specified comparison function.
///
/// Returns the first argument if the comparison determines them to be equal.
///
/// # Examples
///
/// ```
/// #![feature(cmp_min_max_by)]
///
/// use std::cmp;
///
/// assert_eq!(cmp::min_by(-2, 1, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), 1);
/// assert_eq!(cmp::min_by(-2, 2, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), -2);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "cmp_min_max_by", issue = "64460")]
pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
match compare(&v1, &v2) {
Ordering::Less | Ordering::Equal => v1,
Ordering::Greater => v2,
}
}
/// Returns the element that gives the minimum value from the specified function.
///
/// Returns the first argument if the comparison determines them to be equal.
///
/// # Examples
///
/// ```
/// #![feature(cmp_min_max_by)]
///
/// use std::cmp;
///
/// assert_eq!(cmp::min_by_key(-2, 1, |x: &i32| x.abs()), 1);
/// assert_eq!(cmp::min_by_key(-2, 2, |x: &i32| x.abs()), -2);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "cmp_min_max_by", issue = "64460")]
pub fn min_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
}
/// Compares and returns the maximum of two values.
///
/// Returns the second argument if the comparison determines them to be equal.
///
/// Internally uses an alias to [`Ord::max`].
///
/// # Examples
///
/// ```
/// use std::cmp;
///
/// assert_eq!(2, cmp::max(1, 2));
/// assert_eq!(2, cmp::max(2, 2));
/// ```
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn max<T: Ord>(v1: T, v2: T) -> T {
v1.max(v2)
}
/// Returns the maximum of two values with respect to the specified comparison function.
///
/// Returns the second argument if the comparison determines them to be equal.
///
/// # Examples
///
/// ```
/// #![feature(cmp_min_max_by)]
///
/// use std::cmp;
///
/// assert_eq!(cmp::max_by(-2, 1, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), -2);
/// assert_eq!(cmp::max_by(-2, 2, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), 2);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "cmp_min_max_by", issue = "64460")]
pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
match compare(&v1, &v2) {
Ordering::Less | Ordering::Equal => v2,
Ordering::Greater => v1,
}
}
/// Returns the element that gives the maximum value from the specified function.
///
/// Returns the second argument if the comparison determines them to be equal.
///
/// # Examples
///
/// ```
/// #![feature(cmp_min_max_by)]
///
/// use std::cmp;
///
/// assert_eq!(cmp::max_by_key(-2, 1, |x: &i32| x.abs()), -2);
/// assert_eq!(cmp::max_by_key(-2, 2, |x: &i32| x.abs()), 2);
/// ```
#[inline]
#[must_use]
#[unstable(feature = "cmp_min_max_by", issue = "64460")]
pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
}
// Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types
mod impls {
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::hint::unreachable_unchecked;
macro_rules! partial_eq_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for $t {
#[inline]
fn eq(&self, other: &$t) -> bool { (*self) == (*other) }
#[inline]
fn ne(&self, other: &$t) -> bool { (*self) != (*other) }
}
)*)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for () {
#[inline]
fn eq(&self, _other: &()) -> bool {
true
}
#[inline]
fn ne(&self, _other: &()) -> bool {
false
}
}
partial_eq_impl! {
bool char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64
}
macro_rules! eq_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
impl Eq for $t {}
)*)
}
eq_impl! { () bool char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
macro_rules! partial_ord_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for $t {
#[inline]
fn partial_cmp(&self, other: &$t) -> Option<Ordering> {
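// For floats, every comparison involving a NaN is false, so NaN
// operands fall into the (false, false) arm below and yield None.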
match (self <= other, self >= other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
#[inline]
fn lt(&self, other: &$t) -> bool { (*self) < (*other) }
#[inline]
fn le(&self, other: &$t) -> bool { (*self) <= (*other) }
#[inline]
fn ge(&self, other: &$t) -> bool { (*self) >= (*other) }
#[inline]
fn gt(&self, other: &$t) -> bool { (*self) > (*other) }
}
)*)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for () {
#[inline]
fn partial_cmp(&self, _: &()) -> Option<Ordering> {
Some(Equal)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for bool {
#[inline]
fn partial_cmp(&self, other: &bool) -> Option<Ordering> {
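// Delegates to u8: `false as u8` is 0 and `true as u8` is 1, so `false < true`.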
(*self as u8).partial_cmp(&(*other as u8))
}
}
partial_ord_impl! { f32 f64 }
macro_rules! ord_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for $t {
#[inline] | fn lt(&self, other: &$t) -> bool { (*self) < (*other) }
#[inline]
fn le(&self, other: &$t) -> bool { (*self) <= (*other) }
#[inline]
fn ge(&self, other: &$t) -> bool { (*self) >= (*other) }
#[inline]
fn gt(&self, other: &$t) -> bool { (*self) > (*other) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for $t {
#[inline]
fn cmp(&self, other: &$t) -> Ordering {
// The order here is important to generate more optimal assembly.
// See <https://github.com/rust-lang/rust/issues/63758> for more info.
if *self < *other { Less }
else if *self == *other { Equal }
else { Greater }
}
}
)*)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for () {
#[inline]
fn cmp(&self, _other: &()) -> Ordering {
Equal
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for bool {
#[inline]
fn cmp(&self, other: &bool) -> Ordering {
// Casting to i8's and converting the difference to an Ordering generates
// more optimal assembly.
// See <https://github.com/rust-lang/rust/issues/66780> for more info.
match (*self as i8) - (*other as i8) {
-1 => Less,
0 => Equal,
1 => Greater,
// SAFETY: bool as i8 returns 0 or 1, so the difference can't be anything else
_ => unsafe { unreachable_unchecked() },
}
}
}
ord_impl! { char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
#[unstable(feature = "never_type", issue = "35121")]
impl PartialEq for ! {
fn eq(&self, _: &!) -> bool {
*self
}
}
#[unstable(feature = "never_type", issue = "35121")]
impl Eq for ! {}
#[unstable(feature = "never_type", issue = "35121")]
impl PartialOrd for ! {
fn partial_cmp(&self, _: &!) -> Option<Ordering> {
*self
}
}
#[unstable(feature = "never_type", issue = "35121")]
impl Ord for ! {
fn cmp(&self, _: &!) -> Ordering {
*self
}
}
// & pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized, B: ?Sized> PartialEq<&B> for &A
where
A: PartialEq<B>,
{
#[inline]
fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
#[inline]
fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized, B: ?Sized> PartialOrd<&B> for &A
where
A: PartialOrd<B>,
{
#[inline]
fn partial_cmp(&self, other: &&B) -> Option<Ordering> {
PartialOrd::partial_cmp(*self, *other)
}
#[inline]
fn lt(&self, other: &&B) -> bool {
PartialOrd::lt(*self, *other)
}
#[inline]
fn le(&self, other: &&B) -> bool {
PartialOrd::le(*self, *other)
}
#[inline]
fn gt(&self, other: &&B) -> bool {
PartialOrd::gt(*self, *other)
}
#[inline]
fn ge(&self, other: &&B) -> bool {
PartialOrd::ge(*self, *other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized> Ord for &A
where
A: Ord,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(*self, *other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized> Eq for &A where A: Eq {}
// &mut pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized, B: ?Sized> PartialEq<&mut B> for &mut A
where
A: PartialEq<B>,
{
#[inline]
fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
#[inline]
fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized, B: ?Sized> PartialOrd<&mut B> for &mut A
where
A: PartialOrd<B>,
{
#[inline]
fn partial_cmp(&self, other: &&mut B) -> Option<Ordering> {
PartialOrd::partial_cmp(*self, *other)
}
#[inline]
fn lt(&self, other: &&mut B) -> bool {
PartialOrd::lt(*self, *other)
}
#[inline]
fn le(&self, other: &&mut B) -> bool {
PartialOrd::le(*self, *other)
}
#[inline]
fn gt(&self, other: &&mut B) -> bool {
PartialOrd::gt(*self, *other)
}
#[inline]
fn ge(&self, other: &&mut B) -> bool {
PartialOrd::ge(*self, *other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized> Ord for &mut A
where
A: Ord,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(*self, *other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized> Eq for &mut A where A: Eq {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized, B: ?Sized> PartialEq<&mut B> for &A
where
A: PartialEq<B>,
{
#[inline]
fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
#[inline]
fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: ?Sized, B: ?Sized> PartialEq<&B> for &mut A
where
A: PartialEq<B>,
{
#[inline]
fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
#[inline]
fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
}
} | fn partial_cmp(&self, other: &$t) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline] |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// TODO Follow 2018 idioms
#![allow(elided_lifetimes_in_paths)]
use {
anyhow::Error,
component_manager_lib::{
builtin_environment::{BuiltinEnvironment, BuiltinEnvironmentBuilder},
config::RuntimeConfig,
klog, startup,
},
fidl_fuchsia_component_internal as finternal, fuchsia_async as fasync,
fuchsia_runtime::{job_default, process_self},
fuchsia_syslog as syslog, fuchsia_trace_provider as trace_provider,
fuchsia_zircon::JobCriticalOptions,
log::*,
std::path::PathBuf,
std::{panic, process, thread, time::Duration},
};
fn main() |
/// Loads component_manager's config.
///
/// This function panics on failure because the logger is not initialized yet.
fn build_runtime_config() -> RuntimeConfig {
let args = match startup::Arguments::from_args() {
Ok(args) => args,
Err(err) => {
panic!("{}\n{}", err, startup::Arguments::usage());
}
};
let path = PathBuf::from(&args.config);
let mut config = match RuntimeConfig::load_from_file(&path) {
Ok(config) => config,
Err(err) => {
panic!("Failed to load runtime config: {}", err);
}
};
match (config.root_component_url.as_ref(), args.root_component_url.as_ref()) {
(Some(_url), None) => config,
(None, Some(url)) => {
config.root_component_url = Some(url.clone());
config
}
(None, None) => {
panic!(
"`root_component_url` not provided. This field must be provided either as a \
command line argument or config file parameter."
);
}
(Some(_), Some(_)) => {
panic!(
"`root_component_url` set in two places: as a command line argument \
and a config file parameter. This field can only be set in one of those places."
);
}
}
}
async fn build_environment(config: RuntimeConfig) -> Result<BuiltinEnvironment, Error> {
BuiltinEnvironmentBuilder::new()
.set_runtime_config(config)
.create_utc_clock()
.await?
.add_elf_runner()?
.include_namespace_resolvers()
.build()
.await
}
| {
// Make sure we exit if there is a panic. Add this hook before we init the
// KernelLogger because it installs its own hook and then calls any
// existing hook.
panic::set_hook(Box::new(|_| {
println!("Panic in component_manager, aborting process.");
// TODO remove after 43671 is resolved
std::thread::spawn(move || {
let mut nap_duration = Duration::from_secs(1);
// Do a short sleep; hopefully, under "normal" circumstances, the
// process will exit before this is printed.
thread::sleep(nap_duration);
println!("component manager abort was started");
// Set a fairly long duration so we don't spam the logs.
nap_duration = Duration::from_secs(30);
loop {
thread::sleep(nap_duration);
println!("component manager alive long after abort");
}
});
process::abort();
}));
// Set ourselves as critical to our job. If we do not fail gracefully, our
// job will be killed.
if let Err(err) =
job_default().set_critical(JobCriticalOptions::RETCODE_NONZERO, &process_self())
{
panic!("Component manager failed to set itself as critical: {}", err);
}
// Enable tracing in Component Manager
trace_provider::trace_provider_create_with_fdio();
let runtime_config = build_runtime_config();
match runtime_config.log_destination {
finternal::LogDestination::Syslog => {
syslog::init().expect("failed to init syslog");
}
finternal::LogDestination::Klog => {
klog::KernelLogger::init();
}
}
info!("Component manager is starting up...");
let num_threads = runtime_config.num_threads;
let fut = async move {
let mut builtin_environment = match build_environment(runtime_config).await {
Ok(environment) => environment,
Err(error) => {
error!("Component manager setup failed: {}", error);
process::exit(1);
}
};
if let Err(error) = builtin_environment.run_root().await {
error!("Failed to bind to root component: {}", error);
process::exit(1);
}
};
let mut executor = fasync::SendExecutor::new(num_threads).expect("error creating executor");
executor.run(fut);
} |
mc-water.rs | // Lumol, an extensible molecular simulation engine
// Copyright (C) Lumol's contributors - BSD license
use lumol::input::Input;
use lumol::units;
use std::path::Path;
use std::sync::Once;
static START: Once = Once::new();
mod utils;
// This test only runs a Monte Carlo simulation of water, but does not test
// anything for now. It should test the g(r) function someday.
#[test]
fn wolf_nvt() {
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("mc-water")
.join("nvt-wolf.toml");
let mut config = Input::new(path).unwrap().read().unwrap();
config.simulation.run(&mut config.system, config.nsteps);
}
#[test]
fn wolf_npt() {
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("mc-water")
.join("npt-wolf.toml");
let mut config = Input::new(path).unwrap().read().unwrap();
let collector = utils::Collector::starting_at((config.nsteps - 5_000) as u64);
let pressures = collector.pressures();
config.simulation.add_output(Box::new(collector));
config.simulation.run(&mut config.system, config.nsteps);
let pressure = utils::mean(pressures);
let expected = units::from(1000.0, "bar").unwrap();
let tolerance = units::from(800.0, "bar").unwrap();
assert!(f64::abs(pressure - expected) < tolerance);
}
// This test only runs a Monte Carlo simulation of water, but does not test
// anything for now. It should test the g(r) function someday.
#[test]
fn ewald_nvt() {
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("mc-water")
.join("nvt-ewald.toml");
let mut config = Input::new(path).unwrap().read().unwrap();
config.simulation.run(&mut config.system, config.nsteps);
}
#[test]
fn ew | {
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("mc-water")
.join("npt-ewald.toml");
let mut config = Input::new(path).unwrap().read().unwrap();
let collector = utils::Collector::starting_at((config.nsteps - 5_000) as u64);
let pressures = collector.pressures();
config.simulation.add_output(Box::new(collector));
config.simulation.run(&mut config.system, config.nsteps);
let pressure = utils::mean(pressures);
let expected = units::from(1000.0, "bar").unwrap();
let tolerance = units::from(800.0, "bar").unwrap();
assert!(f64::abs(pressure - expected) < tolerance);
}
| ald_npt() |
device_handle_outlives_context.rs | extern crate libusb;
fn main() | {
let mut handle = {
let mut context = libusb::Context::new().unwrap();
let devices = context.devices().unwrap(); // ~ERROR: does not live long enough
let mut dev = devices.iter().next().unwrap();
dev.open().unwrap()
};
handle.active_configuration();
} |
|
augmented-assignments-feature-gate.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ops::AddAssign;
struct Int(i32);
impl AddAssign<i32> for Int {
fn add_assign(&mut self, _: i32) {
}
}
fn | () {
let mut x = Int(0);
x += 1;
}
| main |
forecast-compare.component.ts | import { Component, OnInit, ViewChild, AfterViewInit } from '@angular/core';
import { SelectionModel } from '@angular/cdk/collections';
import { MatTableDataSource } from '@angular/material/table';
import { MatPaginator } from '@angular/material/paginator';
import { MatSort } from '@angular/material/sort';
import { TopBtnGroupComponent } from '@app/shared/components/top-btn-group/top-btn-group.component';
import { getGlobalRibbonActions } from '@app/shared/components/top-btn-group/page-actions-map';
import { projectkey } from 'environments/projectkey';
import { ResizeEvent } from 'angular-resizable-element';
export interface PeriodicElement {
Status: string;
SalesManager: string;
Enterprise: string;
Group: string;
BillingEntity: string;
stl: string;
MaterialDescription: string;
Commodity: string;
DataSegment: string;
total: number;
}
const ELEMENT_DATA: PeriodicElement[] = [
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
{ Status: 'Active_New', SalesManager: 'Alexis Hartman', Enterprise: 'Tosca', Group: 'Dole Fresh Vegetables', BillingEntity: 'Dole Fresh Vegetables', stl: 'Dole Soledad', MaterialDescription: 'WW-PALLET', Commodity: 'Produce General', DataSegment: 'Final Forecast', total: 0 },
];
export interface PeriodicElementB {
Status: string;
DataSegment: string;
total: number;
numA: number;
numB: number;
numC: number;
numD: number;
numE: number;
numF: number;
numG: number;
}
const ELEMENT_DATAB: PeriodicElementB[] = [
{ Status: 'Inactive', DataSegment: 'Final Forecast', total: 0, numA: 206516.00, numB: 206516.00, numC: 206516.00, numD: 206516.00, numE: 206516.00, numF: 206516.00, numG: 206516.00 },
{ Status: 'Active_New', DataSegment: 'Final Forecast', total: 0, numA: 206516.00, numB: 206516.00, numC: 206516.00, numD: 206516.00, numE: 206516.00, numF: 206516.00, numG: 206516.00 }
];
export interface Element {
highlighted?: boolean;
}
@Component({
selector: 'app-forecast-compare',
templateUrl: './forecast-compare.component.html',
styleUrls: ['./forecast-compare.component.css']
})
export class | implements OnInit, AfterViewInit {
@ViewChild('btnBar') btnBar: TopBtnGroupComponent;
itemList = [];
selectedItems = [];
settings = {};
itemListB = [];
selectedItemsB = [];
settingsB = {};
count = 6;
actionGroupConfig;
filter: boolean = false;
IsTosca: boolean;
displayedColumns = ['Status', 'SalesManager', 'Enterprise', 'Group', 'BillingEntity',
'stl', 'MaterialDescription', 'Commodity', 'DataSegment', 'total'];
displayedColumnsReplace = ['key_Status', 'key_Salesmanager', 'key_Enterprise', 'key_Group', 'key_BillingEntity',
'key_ShipToLocation', 'key_MaterialDescription', 'key_Commodity', 'key_DataSegment', 'key_Total'];
displayedColumnsB = ['Status', 'DataSegment', 'total', 'numA', 'numB', 'numC', 'numD', 'numE', 'numF', 'numG',];
displayedColumnsReplaceB = ['key_Status', 'key_DataSegment', 'key_Total', '201701', '201702', '201703', '201704', '201705', '201706', '201707'];
dataSource = new MatTableDataSource<PeriodicElement>(ELEMENT_DATA);
selection = new SelectionModel<PeriodicElement>(true, []);
dataSourceB = new MatTableDataSource<PeriodicElementB>(ELEMENT_DATAB);
selectionB = new SelectionModel<PeriodicElementB>(true, []);
getTotalCost() {
return ELEMENT_DATA
.map(t => t.total)
.reduce((acc, value) => acc + value, 0);
}
@ViewChild(MatPaginator) paginator: MatPaginator;
@ViewChild(MatSort) sort: MatSort;
applyFilter(filterValue: string) {
filterValue = filterValue.trim(); // Remove whitespace
filterValue = filterValue.toLowerCase(); // Datasource defaults to lowercase matches
this.dataSource.filter = filterValue;
}
/** Whether the number of selected elements matches the total number of rows. */
isAllSelected() {
const numSelected = this.selection.selected.length;
const numRows = this.dataSource.data.length;
return numSelected === numRows;
}
/** Selects all rows if they are not all selected; otherwise clear selection. */
masterToggle() {
this.isAllSelected() ?
this.selection.clear() :
this.dataSource.data.forEach(row => this.selection.select(row));
}
constructor() { }
ngOnInit(): void {
this.itemList = [
{ "id": 1, "itemName": "option 1" },
{ "id": 2, "itemName": "option 2" },
{ "id": 3, "itemName": "option 3" }
];
this.settings = {
singleSelection: false,
text: "Select",
enableCheckAll: true,
selectAllText: 'Select All',
unSelectAllText: 'UnSelect All',
enableSearchFilter: true,
addNewItemOnFilter: true,
badgeShowLimit: 2,
searchBy: ['itemName'],
//disabled: true
};
this.itemListB = [
{ "id": 1, "itemName": "option 1" },
{ "id": 2, "itemName": "option 2" },
{ "id": 3, "itemName": "option 3" }
];
this.settingsB = {
singleSelection: true,
text: "Select",
//enableCheckAll: true,
//selectAllText: 'Select All',
//unSelectAllText: 'UnSelect All',
enableSearchFilter: true,
searchBy: ['itemName'],
//addNewItemOnFilter: true,
//disabled: true
};
this.IsTosca = projectkey.projectname === "tosca";
this.actionGroupConfig = getGlobalRibbonActions();
}
actionHandler(type) {
if (type === "filter") {
this.filter = !this.filter;
}
}
onResizeEnd(event: ResizeEvent, columnName): void {
if (event.edges.right) {
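// Apply the new width to every cell in the resized column so header and body stay aligned.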
const cssValue = event.rectangle.width + 'px';
const columnElts = document.getElementsByClassName('mat-column-' + columnName);
for (let i = 0; i < columnElts.length; i++) {
const currentEl = columnElts[i] as HTMLDivElement;
currentEl.style.width = cssValue;
}
}
}
highlight(element: Element) {
element.highlighted = !element.highlighted;
}
ngAfterViewInit() {
this.btnBar.hideAction('edit');
this.btnBar.hideAction('delete');
this.btnBar.hideAction('add');
this.btnBar.showAction('filter');
this.btnBar.hideTab('key_Action');
this.btnBar.hideTab('key_View');
}
onAddItem(data: string) {
this.count++;
this.itemList.push({ "id": this.count, "itemName": data });
this.selectedItems.push({ "id": this.count, "itemName": data });
this.itemListB.push({ "id": this.count, "itemName": data });
this.selectedItemsB.push({ "id": this.count, "itemName": data });
}
onItemSelect(item: any) {
console.log(item);
console.log(this.selectedItems);
}
OnItemDeSelect(item: any) {
console.log(item);
console.log(this.selectedItems);
}
onSelectAll(items: any) {
console.log(items);
}
onDeSelectAll(items: any) {
console.log(items);
}
}
| ForecastCompareComponent |
ToastAction.tsx | import { rem, themed } from '@heathmont/moon-utils';
import styled from 'styled-components';
const ToastAction = styled.p<{ actionColor: any }>(
({ actionColor, theme }) => ({ | fontSize: rem(14),
lineHeight: rem(20),
fontWeight: theme.fontWeight.semibold,
})
);
export default ToastAction; | display: 'block',
color: themed('color', actionColor)(theme) || theme.colorNew.hit, |
oslogin_test.go | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"strings"
"testing"
)
func TestFilterGoogleLines(t *testing.T) {
cmpslice := func(a, b []string) bool {
if len(a) != len(b) {
return false
}
for idx := 0; idx < len(a); idx++ {
if a[idx] != b[idx] {
return false
}
}
return true
}
var tests = []struct {
contents, want []string
}{
{
[]string{
"line1",
"line2",
googleComment,
"line3 after google comment",
"line4",
googleBlockStart,
"line5 inside google block",
"line6 inside google block",
googleBlockEnd,
"line7",
},
[]string{
"line1",
"line2",
"line4",
"line7",
},
},
{
[]string{
"line1",
"line2",
googleBlockEnd,
"line3",
"line4",
},
[]string{
"line1",
"line2",
"line3",
"line4",
},
},
{
[]string{
googleBlockStart,
"line1 inside google block",
"line2 inside google block",
googleBlockEnd,
"line3",
},
[]string{
"line3",
},
},
{
[]string{
googleBlockStart,
"line1 inside google block",
googleBlockStart,
"line2 inside google block",
googleBlockEnd,
"line3",
googleBlockEnd,
"line4",
},
[]string{
"line3",
"line4",
},
},
{
[]string{
googleBlockEnd,
googleBlockStart,
"line1 inside google block",
"line2 inside google block",
googleComment,
googleBlockEnd,
"line3",
},
[]string{
"line3",
},
},
}
for idx, tt := range tests {
if res := filterGoogleLines(strings.Join(tt.contents, "\n")); !cmpslice(res, tt.want) {
t.Errorf("test %v\nwant:\n%v\ngot:\n%v\n", idx, tt.want, res)
}
}
}
func TestUpdateNSSwitchConfig(t *testing.T) {
oslogin := " cache_oslogin oslogin"
var tests = []struct {
contents, want []string
enable bool
}{
{
contents: []string{
"line1",
"passwd: line2",
"group: line3",
},
want: []string{
"line1",
"passwd: line2" + oslogin,
"group: line3" + oslogin,
},
enable: true,
},
{
contents: []string{
"line1",
"passwd: line2" + oslogin,
"group: line3" + oslogin,
},
want: []string{
"line1",
"passwd: line2",
"group: line3",
},
enable: false,
},
{
contents: []string{
"line1",
"passwd: line2" + oslogin,
"group: line3" + oslogin,
},
want: []string{
"line1",
"passwd: line2" + oslogin,
"group: line3" + oslogin,
},
enable: true,
},
}
for idx, tt := range tests {
contents := strings.Join(tt.contents, "\n")
want := strings.Join(tt.want, "\n")
if res := updateNSSwitchConfig(contents, tt.enable); res != want {
t.Errorf("test %v\nwant:\n%v\ngot:\n%v\n", idx, want, res)
}
}
}
func | (t *testing.T) {
challengeResponseEnable := "ChallengeResponseAuthentication yes"
authorizedKeysCommand := "AuthorizedKeysCommand /usr/bin/google_authorized_keys"
authorizedKeysUser := "AuthorizedKeysCommandUser root"
twoFactorAuthMethods := "AuthenticationMethods publickey,keyboard-interactive"
matchblock1 := `Match User sa_*`
matchblock2 := ` AuthenticationMethods publickey`
var tests = []struct {
contents, want []string
enable, twofactor bool
}{
{
// Full block is created, any others removed.
contents: []string{
"line1",
googleBlockStart,
"line2",
googleBlockEnd,
},
want: []string{
googleBlockStart,
authorizedKeysCommand,
authorizedKeysUser,
twoFactorAuthMethods,
challengeResponseEnable,
googleBlockEnd,
"line1",
googleBlockStart,
matchblock1,
matchblock2,
googleBlockEnd,
},
enable: true,
twofactor: true,
},
{
// Full block is created, google comments removed.
contents: []string{
"line1",
googleComment,
"line2",
"line3",
},
want: []string{
googleBlockStart,
authorizedKeysCommand,
authorizedKeysUser,
twoFactorAuthMethods,
challengeResponseEnable,
googleBlockEnd,
"line1",
"line3",
googleBlockStart,
matchblock1,
matchblock2,
googleBlockEnd,
},
enable: true,
twofactor: true,
},
{
// Block is created without two-factor options.
contents: []string{
"line1",
"line2",
},
want: []string{
googleBlockStart,
authorizedKeysCommand,
authorizedKeysUser,
googleBlockEnd,
"line1",
"line2",
},
enable: true,
twofactor: false,
},
{
// Existing block is removed.
contents: []string{
"line1",
"line2",
googleBlockStart,
"line3",
googleBlockEnd,
},
want: []string{
"line1",
"line2",
},
enable: false,
twofactor: true,
},
}
for idx, tt := range tests {
contents := strings.Join(tt.contents, "\n")
want := strings.Join(tt.want, "\n")
if res := updateSSHConfig(contents, tt.enable, tt.twofactor); res != want {
t.Errorf("test %v\nwant:\n%v\ngot:\n%v\n", idx, want, res)
}
}
}
func TestUpdatePAMsshd(t *testing.T) {
authOSLogin := "auth [success=done perm_denied=die default=ignore] pam_oslogin_login.so"
authGroup := "auth [default=ignore] pam_group.so"
accountOSLogin := "account [success=ok ignore=ignore default=die] pam_oslogin_login.so"
accountOSLoginAdmin := "account [success=ok default=ignore] pam_oslogin_admin.so"
sessionHomeDir := "session [success=ok default=ignore] pam_mkhomedir.so"
var tests = []struct {
contents, want []string
enable, twofactor bool
}{
{
contents: []string{
"line1",
"line2",
},
want: []string{
googleBlockStart,
authOSLogin,
authGroup,
googleBlockEnd,
"line1",
"line2",
googleBlockStart,
accountOSLogin,
accountOSLoginAdmin,
sessionHomeDir,
googleBlockEnd,
},
enable: true,
twofactor: true,
},
{
contents: []string{
"line1",
"line2",
},
want: []string{
googleBlockStart,
authGroup,
googleBlockEnd,
"line1",
"line2",
googleBlockStart,
accountOSLogin,
accountOSLoginAdmin,
sessionHomeDir,
googleBlockEnd,
},
enable: true,
twofactor: false,
},
{
contents: []string{
googleBlockStart,
"line1",
googleBlockEnd,
"line2",
googleBlockStart,
"line3",
googleBlockEnd,
},
want: []string{
"line2",
},
enable: false,
twofactor: true,
},
}
for idx, tt := range tests {
contents := strings.Join(tt.contents, "\n")
want := strings.Join(tt.want, "\n")
if res := updatePAMsshd(contents, tt.enable, tt.twofactor); res != want {
t.Errorf("test %v\nwant:\n%v\ngot:\n%v\n", idx, want, res)
}
}
}
func TestUpdateGroupConf(t *testing.T) {
config := "sshd;*;*;Al0000-2400;video"
var tests = []struct {
contents, want []string
enable bool
}{
{
contents: []string{
"line1",
"line2",
},
want: []string{
"line1",
"line2",
googleComment,
config,
"",
},
enable: true,
},
{
contents: []string{
"line1",
"line2",
},
want: []string{
"line1",
"line2",
},
enable: false,
},
{
contents: []string{
"line1",
"line2",
googleComment,
"line3", // not the right line
},
want: []string{
"line1",
"line2",
googleComment,
config,
"",
},
enable: true,
},
{
contents: []string{
"line1",
"line2",
googleComment,
"line3",
},
want: []string{
"line1",
"line2",
},
enable: false,
},
}
for idx, tt := range tests {
contents := strings.Join(tt.contents, "\n")
want := strings.Join(tt.want, "\n")
if res := updateGroupConf(contents, tt.enable); res != want {
t.Errorf("test %v\nwant:\n%v\ngot:\n%v\n", idx, want, res)
}
}
}
func TestGetOSLoginEnabled(t *testing.T) {
var tests = []struct {
md string
enable, twofactor bool
}{
{
md: `{"instance": {"attributes": {"enable-oslogin": "true", "enable-oslogin-2fa": "true"}}}`,
enable: true,
twofactor: true,
},
{
md: `{"project": {"attributes": {"enable-oslogin": "true", "enable-oslogin-2fa": "true"}}}`,
enable: true,
twofactor: true,
},
{
// Instance keys take precedence
md: `{"project": {"attributes": {"enable-oslogin": "false", "enable-oslogin-2fa": "false"}}, "instance": {"attributes": {"enable-oslogin": "true", "enable-oslogin-2fa": "true"}}}`,
enable: true,
twofactor: true,
},
{
// Instance keys take precedence
md: `{"project": {"attributes": {"enable-oslogin": "true", "enable-oslogin-2fa": "true"}}, "instance": {"attributes": {"enable-oslogin": "false", "enable-oslogin-2fa": "false"}}}`,
enable: false,
twofactor: false,
},
{
// Handle weird values
md: `{"instance": {"attributes": {"enable-oslogin": "TRUE", "enable-oslogin-2fa": "foobar"}}}`,
enable: true,
twofactor: false,
},
{
// Mixed test
md: `{"project": {"attributes": {"enable-oslogin": "true", "enable-oslogin-2fa": "true"}}, "instance": {"attributes": {"enable-oslogin-2fa": "false"}}}`,
enable: true,
twofactor: false,
},
}
for idx, tt := range tests {
var md metadata
if err := json.Unmarshal([]byte(tt.md), &md); err != nil {
t.Errorf("Failed to unmarshal metadata JSON for test %v: %v", idx, err)
}
enable, twofactor := getOSLoginEnabled(&md)
if enable != tt.enable || twofactor != tt.twofactor {
t.Errorf("Test %v failed. Expected: %v/%v Got: %v/%v", idx, tt.enable, tt.twofactor, enable, twofactor)
}
}
}
| TestUpdateSSHConfig |
main.js | (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
(function (global){
!function(e){if("object"==typeof exports)module.exports=e();else if("function"==typeof define&&define.amd)define(e);else{var f;"undefined"!=typeof window?f=window:"undefined"!=typeof global?f=global:"undefined"!=typeof self&&(f=self),f.feedhenry=e()}}(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);throw new Error("Cannot find module '"+o+"'")}var f=n[o]={exports:{}};t[o][0].call(f.exports,function(e){var n=t[o][1][e];return s(n?n:e)},f,f.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(_dereq_,module,exports){
(function (global){
;__browserify_shim_require__=_dereq_;(function browserifyShim(module, exports, _dereq_, define, browserify_shim__define__module__export__) {
/*
CryptoJS v3.1.2
core.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
/**
* CryptoJS core components.
*/
var CryptoJS = CryptoJS || (function (Math, undefined) {
/**
* CryptoJS namespace.
*/
var C = {};
/**
* Library namespace.
*/
var C_lib = C.lib = {};
/**
* Base object for prototypal inheritance.
*/
var Base = C_lib.Base = (function () {
function F() {}
return {
/**
* Creates a new object that inherits from this object.
*
* @param {Object} overrides Properties to copy into the new object.
*
* @return {Object} The new object.
*
* @static
*
* @example
*
* var MyType = CryptoJS.lib.Base.extend({
* field: 'value',
*
* method: function () {
* }
* });
*/
extend: function (overrides) {
// Spawn
F.prototype = this;
var subtype = new F();
// Augment
if (overrides) {
subtype.mixIn(overrides);
}
// Create default initializer
if (!subtype.hasOwnProperty('init')) {
subtype.init = function () {
subtype.$super.init.apply(this, arguments);
};
}
// Initializer's prototype is the subtype object
subtype.init.prototype = subtype;
// Reference supertype
subtype.$super = this;
return subtype;
},
/**
* Extends this object and runs the init method.
* Arguments to create() will be passed to init().
*
* @return {Object} The new object.
*
* @static
*
* @example
*
* var instance = MyType.create();
*/
create: function () {
var instance = this.extend();
instance.init.apply(instance, arguments);
return instance;
},
/**
* Initializes a newly created object.
* Override this method to add some logic when your objects are created.
*
* @example
*
* var MyType = CryptoJS.lib.Base.extend({
* init: function () {
* // ...
* }
* });
*/
init: function () {
},
/**
* Copies properties into this object.
*
* @param {Object} properties The properties to mix in.
*
* @example
*
* MyType.mixIn({
* field: 'value'
* });
*/
mixIn: function (properties) {
for (var propertyName in properties) {
if (properties.hasOwnProperty(propertyName)) {
this[propertyName] = properties[propertyName];
}
}
// IE won't copy toString using the loop above
if (properties.hasOwnProperty('toString')) {
this.toString = properties.toString;
}
},
/**
* Creates a copy of this object.
*
* @return {Object} The clone.
*
* @example
*
* var clone = instance.clone();
*/
clone: function () {
return this.init.prototype.extend(this);
}
};
}());
/**
* An array of 32-bit words.
*
* @property {Array} words The array of 32-bit words.
* @property {number} sigBytes The number of significant bytes in this word array.
*/
var WordArray = C_lib.WordArray = Base.extend({
/**
* Initializes a newly created word array.
*
* @param {Array} words (Optional) An array of 32-bit words.
* @param {number} sigBytes (Optional) The number of significant bytes in the words.
*
* @example
*
* var wordArray = CryptoJS.lib.WordArray.create();
* var wordArray = CryptoJS.lib.WordArray.create([0x00010203, 0x04050607]);
* var wordArray = CryptoJS.lib.WordArray.create([0x00010203, 0x04050607], 6);
*/
init: function (words, sigBytes) {
words = this.words = words || [];
if (sigBytes != undefined) {
this.sigBytes = sigBytes;
} else {
this.sigBytes = words.length * 4;
}
},
/**
* Converts this word array to a string.
*
* @param {Encoder} encoder (Optional) The encoding strategy to use. Default: CryptoJS.enc.Hex
*
* @return {string} The stringified word array.
*
* @example
*
* var string = wordArray + '';
* var string = wordArray.toString();
* var string = wordArray.toString(CryptoJS.enc.Utf8);
*/
toString: function (encoder) {
return (encoder || Hex).stringify(this);
},
/**
* Concatenates a word array to this word array.
*
* @param {WordArray} wordArray The word array to append.
*
* @return {WordArray} This word array.
*
* @example
*
* wordArray1.concat(wordArray2);
*/
concat: function (wordArray) {
// Shortcuts
var thisWords = this.words;
var thatWords = wordArray.words;
var thisSigBytes = this.sigBytes;
var thatSigBytes = wordArray.sigBytes;
// Clamp excess bits
this.clamp();
// Concat
if (thisSigBytes % 4) {
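// Destination is not word-aligned, so each source byte must be shifted into place individually.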
// Copy one byte at a time
for (var i = 0; i < thatSigBytes; i++) {
var thatByte = (thatWords[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff;
thisWords[(thisSigBytes + i) >>> 2] |= thatByte << (24 - ((thisSigBytes + i) % 4) * 8);
}
} else if (thatWords.length > 0xffff) {
// Copy one word at a time
for (var i = 0; i < thatSigBytes; i += 4) {
thisWords[(thisSigBytes + i) >>> 2] = thatWords[i >>> 2];
}
} else {
// Copy all words at once
thisWords.push.apply(thisWords, thatWords);
}
this.sigBytes += thatSigBytes;
// Chainable
return this;
},
/**
* Removes insignificant bits.
*
* @example
*
* wordArray.clamp();
*/
clamp: function () {
// Shortcuts
var words = this.words;
var sigBytes = this.sigBytes;
// Clamp
words[sigBytes >>> 2] &= 0xffffffff << (32 - (sigBytes % 4) * 8);
words.length = Math.ceil(sigBytes / 4);
},
/**
* Creates a copy of this word array.
*
* @return {WordArray} The clone.
*
* @example
*
* var clone = wordArray.clone();
*/
clone: function () {
var clone = Base.clone.call(this);
clone.words = this.words.slice(0);
return clone;
},
/**
* Creates a word array filled with random bytes.
*
* @param {number} nBytes The number of random bytes to generate.
*
* @return {WordArray} The random word array.
*
* @static
*
* @example
*
* var wordArray = CryptoJS.lib.WordArray.random(16);
*/
random: function (nBytes) {
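// Note: Math.random() is not a cryptographically strong source of randomness;
// this is the implementation CryptoJS 3.1.2 shipped with.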
var words = [];
for (var i = 0; i < nBytes; i += 4) {
words.push((Math.random() * 0x100000000) | 0);
}
return new WordArray.init(words, nBytes);
}
});
/**
* Encoder namespace.
*/
var C_enc = C.enc = {};
/**
* Hex encoding strategy.
*/
var Hex = C_enc.Hex = {
/**
* Converts a word array to a hex string.
*
* @param {WordArray} wordArray The word array.
*
* @return {string} The hex string.
*
* @static
*
* @example
*
* var hexString = CryptoJS.enc.Hex.stringify(wordArray);
*/
stringify: function (wordArray) {
// Shortcuts
var words = wordArray.words;
var sigBytes = wordArray.sigBytes;
// Convert
var hexChars = [];
for (var i = 0; i < sigBytes; i++) {
var bite = (words[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff;
hexChars.push((bite >>> 4).toString(16));
hexChars.push((bite & 0x0f).toString(16));
}
return hexChars.join('');
},
/**
* Converts a hex string to a word array.
*
* @param {string} hexStr The hex string.
*
* @return {WordArray} The word array.
*
* @static
*
* @example
*
* var wordArray = CryptoJS.enc.Hex.parse(hexString);
*/
parse: function (hexStr) {
// Shortcut
var hexStrLength = hexStr.length;
// Convert
var words = [];
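// Two hex chars form one byte; eight hex chars pack big-endian into each 32-bit word.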
for (var i = 0; i < hexStrLength; i += 2) {
words[i >>> 3] |= parseInt(hexStr.substr(i, 2), 16) << (24 - (i % 8) * 4);
}
return new WordArray.init(words, hexStrLength / 2);
}
};
/**
* Latin1 encoding strategy.
*/
var Latin1 = C_enc.Latin1 = {
/**
* Converts a word array to a Latin1 string.
*
* @param {WordArray} wordArray The word array.
*
* @return {string} The Latin1 string.
*
* @static
*
* @example
*
* var latin1String = CryptoJS.enc.Latin1.stringify(wordArray);
*/
stringify: function (wordArray) {
// Shortcuts
var words = wordArray.words;
var sigBytes = wordArray.sigBytes;
// Convert
var latin1Chars = [];
for (var i = 0; i < sigBytes; i++) {
var bite = (words[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff;
latin1Chars.push(String.fromCharCode(bite));
}
return latin1Chars.join('');
},
/**
* Converts a Latin1 string to a word array.
*
* @param {string} latin1Str The Latin1 string.
*
* @return {WordArray} The word array.
*
* @static
*
* @example
*
* var wordArray = CryptoJS.enc.Latin1.parse(latin1String);
*/
parse: function (latin1Str) {
// Shortcut
var latin1StrLength = latin1Str.length;
// Convert
var words = [];
for (var i = 0; i < latin1StrLength; i++) {
words[i >>> 2] |= (latin1Str.charCodeAt(i) & 0xff) << (24 - (i % 4) * 8);
}
return new WordArray.init(words, latin1StrLength);
}
};
/**
* UTF-8 encoding strategy.
*/
var Utf8 = C_enc.Utf8 = {
/**
* Converts a word array to a UTF-8 string.
*
* @param {WordArray} wordArray The word array.
*
* @return {string} The UTF-8 string.
*
* @static
*
* @example
*
* var utf8String = CryptoJS.enc.Utf8.stringify(wordArray);
*/
stringify: function (wordArray) {
try {
return decodeURIComponent(escape(Latin1.stringify(wordArray)));
} catch (e) {
throw new Error('Malformed UTF-8 data');
}
},
/**
* Converts a UTF-8 string to a word array.
*
* @param {string} utf8Str The UTF-8 string.
*
* @return {WordArray} The word array.
*
* @static
*
* @example
*
* var wordArray = CryptoJS.enc.Utf8.parse(utf8String);
*/
parse: function (utf8Str) {
return Latin1.parse(unescape(encodeURIComponent(utf8Str)));
}
};
/**
* Abstract buffered block algorithm template.
*
* The property blockSize must be implemented in a concrete subtype.
*
* @property {number} _minBufferSize The number of blocks that should be kept unprocessed in the buffer. Default: 0
*/
var BufferedBlockAlgorithm = C_lib.BufferedBlockAlgorithm = Base.extend({
/**
* Resets this block algorithm's data buffer to its initial state.
*
* @example
*
* bufferedBlockAlgorithm.reset();
*/
reset: function () {
// Initial values
this._data = new WordArray.init();
this._nDataBytes = 0;
},
/**
* Adds new data to this block algorithm's buffer.
*
* @param {WordArray|string} data The data to append. Strings are converted to a WordArray using UTF-8.
*
* @example
*
* bufferedBlockAlgorithm._append('data');
* bufferedBlockAlgorithm._append(wordArray);
*/
_append: function (data) {
// Convert string to WordArray, else assume WordArray already
if (typeof data == 'string') {
data = Utf8.parse(data);
}
// Append
this._data.concat(data);
this._nDataBytes += data.sigBytes;
},
/**
* Processes available data blocks.
*
* This method invokes _doProcessBlock(offset), which must be implemented by a concrete subtype.
*
* @param {boolean} doFlush Whether all blocks and partial blocks should be processed.
*
* @return {WordArray} The processed data.
*
* @example
*
* var processedData = bufferedBlockAlgorithm._process();
* var processedData = bufferedBlockAlgorithm._process(!!'flush');
*/
_process: function (doFlush) {
// Shortcuts
var data = this._data;
var dataWords = data.words;
var dataSigBytes = data.sigBytes;
var blockSize = this.blockSize;
var blockSizeBytes = blockSize * 4;
// Count blocks ready
var nBlocksReady = dataSigBytes / blockSizeBytes;
if (doFlush) {
// Round up to include partial blocks
nBlocksReady = Math.ceil(nBlocksReady);
} else {
// Round down to include only full blocks,
// less the number of blocks that must remain in the buffer
nBlocksReady = Math.max((nBlocksReady | 0) - this._minBufferSize, 0);
}
// Count words ready
var nWordsReady = nBlocksReady * blockSize;
// Count bytes ready
var nBytesReady = Math.min(nWordsReady * 4, dataSigBytes);
// Process blocks
if (nWordsReady) {
for (var offset = 0; offset < nWordsReady; offset += blockSize) {
// Perform concrete-algorithm logic
this._doProcessBlock(dataWords, offset);
}
// Remove processed words
var processedWords = dataWords.splice(0, nWordsReady);
data.sigBytes -= nBytesReady;
}
// Return processed words
return new WordArray.init(processedWords, nBytesReady);
},
/**
* Creates a copy of this object.
*
* @return {Object} The clone.
*
* @example
*
* var clone = bufferedBlockAlgorithm.clone();
*/
clone: function () {
var clone = Base.clone.call(this);
clone._data = this._data.clone();
return clone;
},
_minBufferSize: 0
});
/**
* Abstract hasher template.
*
* @property {number} blockSize The number of 32-bit words this hasher operates on. Default: 16 (512 bits)
*/
var Hasher = C_lib.Hasher = BufferedBlockAlgorithm.extend({
/**
* Configuration options.
*/
cfg: Base.extend(),
/**
* Initializes a newly created hasher.
*
* @param {Object} cfg (Optional) The configuration options to use for this hash computation.
*
* @example
*
* var hasher = CryptoJS.algo.SHA256.create();
*/
init: function (cfg) {
// Apply config defaults
this.cfg = this.cfg.extend(cfg);
// Set initial values
this.reset();
},
/**
* Resets this hasher to its initial state.
*
* @example
*
* hasher.reset();
*/
reset: function () {
// Reset data buffer
BufferedBlockAlgorithm.reset.call(this);
// Perform concrete-hasher logic
this._doReset();
},
/**
* Updates this hasher with a message.
*
* @param {WordArray|string} messageUpdate The message to append.
*
* @return {Hasher} This hasher.
*
* @example
*
* hasher.update('message');
* hasher.update(wordArray);
*/
update: function (messageUpdate) {
// Append
this._append(messageUpdate);
// Update the hash
this._process();
// Chainable
return this;
},
/**
* Finalizes the hash computation.
* Note that the finalize operation is effectively a destructive, read-once operation.
*
* @param {WordArray|string} messageUpdate (Optional) A final message update.
*
* @return {WordArray} The hash.
*
* @example
*
* var hash = hasher.finalize();
* var hash = hasher.finalize('message');
* var hash = hasher.finalize(wordArray);
*/
finalize: function (messageUpdate) {
// Final message update
if (messageUpdate) {
this._append(messageUpdate);
}
// Perform concrete-hasher logic
var hash = this._doFinalize();
return hash;
},
blockSize: 512/32,
/**
* Creates a shortcut function to a hasher's object interface.
*
* @param {Hasher} hasher The hasher to create a helper for.
*
* @return {Function} The shortcut function.
*
* @static
*
* @example
*
* var SHA256 = CryptoJS.lib.Hasher._createHelper(CryptoJS.algo.SHA256);
*/
_createHelper: function (hasher) {
return function (message, cfg) {
return new hasher.init(cfg).finalize(message);
};
},
/**
* Creates a shortcut function to the HMAC's object interface.
*
* @param {Hasher} hasher The hasher to use in this HMAC helper.
*
* @return {Function} The shortcut function.
*
* @static
*
* @example
*
* var HmacSHA256 = CryptoJS.lib.Hasher._createHmacHelper(CryptoJS.algo.SHA256);
*/
_createHmacHelper: function (hasher) {
return function (message, key) {
return new C_algo.HMAC.init(hasher, key).finalize(message);
};
}
});
/**
* Algorithm namespace.
*/
var C_algo = C.algo = {};
return C;
}(Math));
/*
CryptoJS v3.1.2
enc-base64.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function () {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var WordArray = C_lib.WordArray;
var C_enc = C.enc;
/**
* Base64 encoding strategy.
*/
var Base64 = C_enc.Base64 = {
/**
* Converts a word array to a Base64 string.
*
* @param {WordArray} wordArray The word array.
*
* @return {string} The Base64 string.
*
* @static
*
* @example
*
* var base64String = CryptoJS.enc.Base64.stringify(wordArray);
*/
stringify: function (wordArray) {
// Shortcuts
var words = wordArray.words;
var sigBytes = wordArray.sigBytes;
var map = this._map;
// Clamp excess bits
wordArray.clamp();
// Convert
var base64Chars = [];
for (var i = 0; i < sigBytes; i += 3) {
var byte1 = (words[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff;
var byte2 = (words[(i + 1) >>> 2] >>> (24 - ((i + 1) % 4) * 8)) & 0xff;
var byte3 = (words[(i + 2) >>> 2] >>> (24 - ((i + 2) % 4) * 8)) & 0xff;
var triplet = (byte1 << 16) | (byte2 << 8) | byte3;
for (var j = 0; (j < 4) && (i + j * 0.75 < sigBytes); j++) {
base64Chars.push(map.charAt((triplet >>> (6 * (3 - j))) & 0x3f));
}
}
// Add padding
var paddingChar = map.charAt(64);
if (paddingChar) {
while (base64Chars.length % 4) {
base64Chars.push(paddingChar);
}
}
return base64Chars.join('');
},
/**
* Converts a Base64 string to a word array.
*
* @param {string} base64Str The Base64 string.
*
* @return {WordArray} The word array.
*
* @static
*
* @example
*
* var wordArray = CryptoJS.enc.Base64.parse(base64String);
*/
parse: function (base64Str) {
// Shortcuts
var base64StrLength = base64Str.length;
var map = this._map;
// Ignore padding
var paddingChar = map.charAt(64);
if (paddingChar) {
var paddingIndex = base64Str.indexOf(paddingChar);
if (paddingIndex != -1) {
base64StrLength = paddingIndex;
}
}
// Convert
var words = [];
var nBytes = 0;
for (var i = 0; i < base64StrLength; i++) {
if (i % 4) {
var bits1 = map.indexOf(base64Str.charAt(i - 1)) << ((i % 4) * 2);
var bits2 = map.indexOf(base64Str.charAt(i)) >>> (6 - (i % 4) * 2);
words[nBytes >>> 2] |= (bits1 | bits2) << (24 - (nBytes % 4) * 8);
nBytes++;
}
}
return WordArray.create(words, nBytes);
},
_map: 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
};
}());
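/**
 * Illustrative Base64 round trip using the strategy defined above
 * (a sketch, not part of the library): parse() and stringify() are
 * inverses for any valid Base64 input in the standard character map.
 *
 * @example
 *
 *     var wordArray = CryptoJS.enc.Base64.parse('SGVsbG8=');        // "Hello"
 *     var base64String = CryptoJS.enc.Base64.stringify(wordArray);  // 'SGVsbG8='
 */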
/*
CryptoJS v3.1.2
cipher-core
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
/**
* Cipher core components.
*/
CryptoJS.lib.Cipher || (function (undefined) {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var Base = C_lib.Base;
var WordArray = C_lib.WordArray;
var BufferedBlockAlgorithm = C_lib.BufferedBlockAlgorithm;
var C_enc = C.enc;
var Utf8 = C_enc.Utf8;
var Base64 = C_enc.Base64;
var C_algo = C.algo;
var EvpKDF = C_algo.EvpKDF;
/**
* Abstract base cipher template.
*
* @property {number} keySize This cipher's key size. Default: 4 (128 bits)
* @property {number} ivSize This cipher's IV size. Default: 4 (128 bits)
* @property {number} _ENC_XFORM_MODE A constant representing encryption mode.
* @property {number} _DEC_XFORM_MODE A constant representing decryption mode.
*/
var Cipher = C_lib.Cipher = BufferedBlockAlgorithm.extend({
/**
* Configuration options.
*
* @property {WordArray} iv The IV to use for this operation.
*/
cfg: Base.extend(),
/**
* Creates this cipher in encryption mode.
*
* @param {WordArray} key The key.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
* @return {Cipher} A cipher instance.
*
* @static
*
* @example
*
* var cipher = CryptoJS.algo.AES.createEncryptor(keyWordArray, { iv: ivWordArray });
*/
createEncryptor: function (key, cfg) {
return this.create(this._ENC_XFORM_MODE, key, cfg);
},
/**
* Creates this cipher in decryption mode.
*
* @param {WordArray} key The key.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
* @return {Cipher} A cipher instance.
*
* @static
*
* @example
*
* var cipher = CryptoJS.algo.AES.createDecryptor(keyWordArray, { iv: ivWordArray });
*/
createDecryptor: function (key, cfg) {
return this.create(this._DEC_XFORM_MODE, key, cfg);
},
/**
* Initializes a newly created cipher.
*
* @param {number} xformMode Either the encryption or decryption transformation mode constant.
* @param {WordArray} key The key.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
* @example
*
* var cipher = CryptoJS.algo.AES.create(CryptoJS.algo.AES._ENC_XFORM_MODE, keyWordArray, { iv: ivWordArray });
*/
init: function (xformMode, key, cfg) {
// Apply config defaults
this.cfg = this.cfg.extend(cfg);
// Store transform mode and key
this._xformMode = xformMode;
this._key = key;
// Set initial values
this.reset();
},
/**
* Resets this cipher to its initial state.
*
* @example
*
* cipher.reset();
*/
reset: function () {
// Reset data buffer
BufferedBlockAlgorithm.reset.call(this);
// Perform concrete-cipher logic
this._doReset();
},
/**
* Adds data to be encrypted or decrypted.
*
* @param {WordArray|string} dataUpdate The data to encrypt or decrypt.
*
* @return {WordArray} The data after processing.
*
* @example
*
* var encrypted = cipher.process('data');
* var encrypted = cipher.process(wordArray);
*/
process: function (dataUpdate) {
// Append
this._append(dataUpdate);
// Process available blocks
return this._process();
},
/**
* Finalizes the encryption or decryption process.
* Note that the finalize operation is effectively a destructive, read-once operation.
*
* @param {WordArray|string} dataUpdate The final data to encrypt or decrypt.
*
* @return {WordArray} The data after final processing.
*
* @example
*
* var encrypted = cipher.finalize();
* var encrypted = cipher.finalize('data');
* var encrypted = cipher.finalize(wordArray);
*/
finalize: function (dataUpdate) {
// Final data update
if (dataUpdate) {
this._append(dataUpdate);
}
// Perform concrete-cipher logic
var finalProcessedData = this._doFinalize();
return finalProcessedData;
},
keySize: 128/32,
ivSize: 128/32,
_ENC_XFORM_MODE: 1,
_DEC_XFORM_MODE: 2,
/**
* Creates shortcut functions to a cipher's object interface.
*
* @param {Cipher} cipher The cipher to create a helper for.
*
* @return {Object} An object with encrypt and decrypt shortcut functions.
*
* @static
*
* @example
*
* var AES = CryptoJS.lib.Cipher._createHelper(CryptoJS.algo.AES);
*/
_createHelper: (function () {
function selectCipherStrategy(key) {
if (typeof key == 'string') {
return PasswordBasedCipher;
} else {
return SerializableCipher;
}
}
return function (cipher) {
return {
encrypt: function (message, key, cfg) {
return selectCipherStrategy(key).encrypt(cipher, message, key, cfg);
},
decrypt: function (ciphertext, key, cfg) {
return selectCipherStrategy(key).decrypt(cipher, ciphertext, key, cfg);
}
};
};
}())
});
/**
* Abstract base stream cipher template.
*
* @property {number} blockSize The number of 32-bit words this cipher operates on. Default: 1 (32 bits)
*/
var StreamCipher = C_lib.StreamCipher = Cipher.extend({
_doFinalize: function () {
// Process partial blocks
var finalProcessedBlocks = this._process(!!'flush');
return finalProcessedBlocks;
},
blockSize: 1
});
/**
* Mode namespace.
*/
var C_mode = C.mode = {};
/**
* Abstract base block cipher mode template.
*/
var BlockCipherMode = C_lib.BlockCipherMode = Base.extend({
/**
* Creates this mode for encryption.
*
* @param {Cipher} cipher A block cipher instance.
* @param {Array} iv The IV words.
*
* @static
*
* @example
*
* var mode = CryptoJS.mode.CBC.createEncryptor(cipher, iv.words);
*/
createEncryptor: function (cipher, iv) {
return this.Encryptor.create(cipher, iv);
},
/**
* Creates this mode for decryption.
*
* @param {Cipher} cipher A block cipher instance.
* @param {Array} iv The IV words.
*
* @static
*
* @example
*
* var mode = CryptoJS.mode.CBC.createDecryptor(cipher, iv.words);
*/
createDecryptor: function (cipher, iv) {
return this.Decryptor.create(cipher, iv);
},
/**
* Initializes a newly created mode.
*
* @param {Cipher} cipher A block cipher instance.
* @param {Array} iv The IV words.
*
* @example
*
* var mode = CryptoJS.mode.CBC.Encryptor.create(cipher, iv.words);
*/
init: function (cipher, iv) {
this._cipher = cipher;
this._iv = iv;
}
});
/**
* Cipher Block Chaining mode.
*/
var CBC = C_mode.CBC = (function () {
/**
* Abstract base CBC mode.
*/
var CBC = BlockCipherMode.extend();
/**
* CBC encryptor.
*/
CBC.Encryptor = CBC.extend({
/**
* Processes the data block at offset.
*
* @param {Array} words The data words to operate on.
* @param {number} offset The offset where the block starts.
*
* @example
*
* mode.processBlock(data.words, offset);
*/
processBlock: function (words, offset) {
// Shortcuts
var cipher = this._cipher;
var blockSize = cipher.blockSize;
// XOR and encrypt
xorBlock.call(this, words, offset, blockSize);
cipher.encryptBlock(words, offset);
// Remember this block to use with next block
this._prevBlock = words.slice(offset, offset + blockSize);
}
});
/**
* CBC decryptor.
*/
CBC.Decryptor = CBC.extend({
/**
* Processes the data block at offset.
*
* @param {Array} words The data words to operate on.
* @param {number} offset The offset where the block starts.
*
* @example
*
* mode.processBlock(data.words, offset);
*/
processBlock: function (words, offset) {
// Shortcuts
var cipher = this._cipher;
var blockSize = cipher.blockSize;
// Remember this block to use with next block
var thisBlock = words.slice(offset, offset + blockSize);
// Decrypt and XOR
cipher.decryptBlock(words, offset);
xorBlock.call(this, words, offset, blockSize);
// This block becomes the previous block
this._prevBlock = thisBlock;
}
});
function xorBlock(words, offset, blockSize) {
// Shortcut
var iv = this._iv;
// Choose mixing block
if (iv) {
var block = iv;
// Remove IV for subsequent blocks
this._iv = undefined;
} else {
var block = this._prevBlock;
}
// XOR blocks
for (var i = 0; i < blockSize; i++) {
words[offset + i] ^= block[i];
}
}
return CBC;
}());
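/**
 * CBC chaining relation implemented above (informational comment):
 * for plaintext blocks P_i and ciphertext blocks C_i,
 *
 *     C_i = Encrypt(P_i XOR C_(i-1)),  with C_0 = IV
 *     P_i = Decrypt(C_i) XOR C_(i-1)
 *
 * which is why the encryptor XORs before encryptBlock, the decryptor
 * XORs after decryptBlock, and both sides remember _prevBlock.
 */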
/**
* Padding namespace.
*/
var C_pad = C.pad = {};
/**
* PKCS #5/7 padding strategy.
*/
var Pkcs7 = C_pad.Pkcs7 = {
/**
* Pads data using the algorithm defined in PKCS #5/7.
*
* @param {WordArray} data The data to pad.
* @param {number} blockSize The multiple that the data should be padded to.
*
* @static
*
* @example
*
* CryptoJS.pad.Pkcs7.pad(wordArray, 4);
*/
pad: function (data, blockSize) {
// Shortcut
var blockSizeBytes = blockSize * 4;
// Count padding bytes
var nPaddingBytes = blockSizeBytes - data.sigBytes % blockSizeBytes;
// Create padding word
var paddingWord = (nPaddingBytes << 24) | (nPaddingBytes << 16) | (nPaddingBytes << 8) | nPaddingBytes;
// Create padding
var paddingWords = [];
for (var i = 0; i < nPaddingBytes; i += 4) {
paddingWords.push(paddingWord);
}
var padding = WordArray.create(paddingWords, nPaddingBytes);
// Add padding
data.concat(padding);
},
/**
* Unpads data that had been padded using the algorithm defined in PKCS #5/7.
*
* @param {WordArray} data The data to unpad.
*
* @static
*
* @example
*
* CryptoJS.pad.Pkcs7.unpad(wordArray);
*/
unpad: function (data) {
// Get number of padding bytes from last byte
var nPaddingBytes = data.words[(data.sigBytes - 1) >>> 2] & 0xff;
// Remove padding
data.sigBytes -= nPaddingBytes;
}
};
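/**
 * Worked PKCS #7 example (informational comment, not part of the library):
 * with a blockSize of 4 words (16 bytes), a 13-byte message needs 3 padding
 * bytes, each with value 0x03; a message that is already a multiple of
 * 16 bytes gains a full extra block of 16 bytes, each with value 0x10,
 * so that unpad() can always read the padding count from the last byte.
 */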
/**
* Abstract base block cipher template.
*
* @property {number} blockSize The number of 32-bit words this cipher operates on. Default: 4 (128 bits)
*/
var BlockCipher = C_lib.BlockCipher = Cipher.extend({
/**
* Configuration options.
*
* @property {Mode} mode The block mode to use. Default: CBC
* @property {Padding} padding The padding strategy to use. Default: Pkcs7
*/
cfg: Cipher.cfg.extend({
mode: CBC,
padding: Pkcs7
}),
reset: function () {
// Reset cipher
Cipher.reset.call(this);
// Shortcuts
var cfg = this.cfg;
var iv = cfg.iv;
var mode = cfg.mode;
// Reset block mode
if (this._xformMode == this._ENC_XFORM_MODE) {
var modeCreator = mode.createEncryptor;
} else /* if (this._xformMode == this._DEC_XFORM_MODE) */ {
var modeCreator = mode.createDecryptor;
// Keep at least one block in the buffer for unpadding
this._minBufferSize = 1;
}
this._mode = modeCreator.call(mode, this, iv && iv.words);
},
_doProcessBlock: function (words, offset) {
this._mode.processBlock(words, offset);
},
_doFinalize: function () {
// Shortcut
var padding = this.cfg.padding;
// Finalize
if (this._xformMode == this._ENC_XFORM_MODE) {
// Pad data
padding.pad(this._data, this.blockSize);
// Process final blocks
var finalProcessedBlocks = this._process(!!'flush');
} else /* if (this._xformMode == this._DEC_XFORM_MODE) */ {
// Process final blocks
var finalProcessedBlocks = this._process(!!'flush');
// Unpad data
padding.unpad(finalProcessedBlocks);
}
return finalProcessedBlocks;
},
blockSize: 128/32
});
/**
* A collection of cipher parameters.
*
* @property {WordArray} ciphertext The raw ciphertext.
* @property {WordArray} key The key to this ciphertext.
* @property {WordArray} iv The IV used in the ciphering operation.
* @property {WordArray} salt The salt used with a key derivation function.
* @property {Cipher} algorithm The cipher algorithm.
* @property {Mode} mode The block mode used in the ciphering operation.
* @property {Padding} padding The padding scheme used in the ciphering operation.
* @property {number} blockSize The block size of the cipher.
* @property {Format} formatter The default formatting strategy to convert this cipher params object to a string.
*/
var CipherParams = C_lib.CipherParams = Base.extend({
/**
* Initializes a newly created cipher params object.
*
* @param {Object} cipherParams An object with any of the possible cipher parameters.
*
* @example
*
* var cipherParams = CryptoJS.lib.CipherParams.create({
* ciphertext: ciphertextWordArray,
* key: keyWordArray,
* iv: ivWordArray,
* salt: saltWordArray,
* algorithm: CryptoJS.algo.AES,
* mode: CryptoJS.mode.CBC,
* padding: CryptoJS.pad.Pkcs7,
* blockSize: 4,
* formatter: CryptoJS.format.OpenSSL
* });
*/
init: function (cipherParams) {
this.mixIn(cipherParams);
},
/**
* Converts this cipher params object to a string.
*
* @param {Format} formatter (Optional) The formatting strategy to use.
*
* @return {string} The stringified cipher params.
*
* @throws Error If neither the formatter nor the default formatter is set.
*
* @example
*
* var string = cipherParams + '';
* var string = cipherParams.toString();
* var string = cipherParams.toString(CryptoJS.format.OpenSSL);
*/
toString: function (formatter) {
return (formatter || this.formatter).stringify(this);
}
});
/**
* Format namespace.
*/
var C_format = C.format = {};
/**
* OpenSSL formatting strategy.
*/
var OpenSSLFormatter = C_format.OpenSSL = {
/**
* Converts a cipher params object to an OpenSSL-compatible string.
*
* @param {CipherParams} cipherParams The cipher params object.
*
* @return {string} The OpenSSL-compatible string.
*
* @static
*
* @example
*
* var openSSLString = CryptoJS.format.OpenSSL.stringify(cipherParams);
*/
stringify: function (cipherParams) {
// Shortcuts
var ciphertext = cipherParams.ciphertext;
var salt = cipherParams.salt;
// Format
if (salt) {
var wordArray = WordArray.create([0x53616c74, 0x65645f5f]).concat(salt).concat(ciphertext);
} else {
var wordArray = ciphertext;
}
return wordArray.toString(Base64);
},
/**
* Converts an OpenSSL-compatible string to a cipher params object.
*
* @param {string} openSSLStr The OpenSSL-compatible string.
*
* @return {CipherParams} The cipher params object.
*
* @static
*
* @example
*
* var cipherParams = CryptoJS.format.OpenSSL.parse(openSSLString);
*/
parse: function (openSSLStr) {
// Parse base64
var ciphertext = Base64.parse(openSSLStr);
// Shortcut
var ciphertextWords = ciphertext.words;
// Test for salt
if (ciphertextWords[0] == 0x53616c74 && ciphertextWords[1] == 0x65645f5f) {
// Extract salt
var salt = WordArray.create(ciphertextWords.slice(2, 4));
// Remove salt from ciphertext
ciphertextWords.splice(0, 4);
ciphertext.sigBytes -= 16;
}
return CipherParams.create({ ciphertext: ciphertext, salt: salt });
}
};
/**
* A cipher wrapper that returns ciphertext as a serializable cipher params object.
*/
var SerializableCipher = C_lib.SerializableCipher = Base.extend({
/**
* Configuration options.
*
* @property {Formatter} format The formatting strategy to convert cipher param objects to and from a string. Default: OpenSSL
*/
cfg: Base.extend({
format: OpenSSLFormatter
}),
/**
* Encrypts a message.
*
* @param {Cipher} cipher The cipher algorithm to use.
* @param {WordArray|string} message The message to encrypt.
* @param {WordArray} key The key.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
* @return {CipherParams} A cipher params object.
*
* @static
*
* @example
*
* var ciphertextParams = CryptoJS.lib.SerializableCipher.encrypt(CryptoJS.algo.AES, message, key);
* var ciphertextParams = CryptoJS.lib.SerializableCipher.encrypt(CryptoJS.algo.AES, message, key, { iv: iv });
* var ciphertextParams = CryptoJS.lib.SerializableCipher.encrypt(CryptoJS.algo.AES, message, key, { iv: iv, format: CryptoJS.format.OpenSSL });
*/
encrypt: function (cipher, message, key, cfg) {
// Apply config defaults
cfg = this.cfg.extend(cfg);
// Encrypt
var encryptor = cipher.createEncryptor(key, cfg);
var ciphertext = encryptor.finalize(message);
// Shortcut
var cipherCfg = encryptor.cfg;
// Create and return serializable cipher params
return CipherParams.create({
ciphertext: ciphertext,
key: key,
iv: cipherCfg.iv,
algorithm: cipher,
mode: cipherCfg.mode,
padding: cipherCfg.padding,
blockSize: cipher.blockSize,
formatter: cfg.format
});
},
/**
* Decrypts serialized ciphertext.
*
* @param {Cipher} cipher The cipher algorithm to use.
* @param {CipherParams|string} ciphertext The ciphertext to decrypt.
* @param {WordArray} key The key.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
* @return {WordArray} The plaintext.
*
* @static
*
* @example
*
* var plaintext = CryptoJS.lib.SerializableCipher.decrypt(CryptoJS.algo.AES, formattedCiphertext, key, { iv: iv, format: CryptoJS.format.OpenSSL });
* var plaintext = CryptoJS.lib.SerializableCipher.decrypt(CryptoJS.algo.AES, ciphertextParams, key, { iv: iv, format: CryptoJS.format.OpenSSL });
*/
decrypt: function (cipher, ciphertext, key, cfg) {
// Apply config defaults
cfg = this.cfg.extend(cfg);
// Convert string to CipherParams
ciphertext = this._parse(ciphertext, cfg.format);
// Decrypt
var plaintext = cipher.createDecryptor(key, cfg).finalize(ciphertext.ciphertext);
return plaintext;
},
/**
* Converts serialized ciphertext to a CipherParams object;
* if the ciphertext is already a CipherParams object, it is returned unchanged.
*
* @param {CipherParams|string} ciphertext The ciphertext.
* @param {Formatter} format The formatting strategy to use to parse serialized ciphertext.
*
* @return {CipherParams} The unserialized ciphertext.
*
* @static
*
* @example
*
* var ciphertextParams = CryptoJS.lib.SerializableCipher._parse(ciphertextStringOrParams, format);
*/
_parse: function (ciphertext, format) {
if (typeof ciphertext == 'string') {
return format.parse(ciphertext, this);
} else {
return ciphertext;
}
}
});
/**
* Key derivation function namespace.
*/
var C_kdf = C.kdf = {};
/**
* OpenSSL key derivation function.
*/
var OpenSSLKdf = C_kdf.OpenSSL = {
/**
* Derives a key and IV from a password.
*
* @param {string} password The password to derive from.
* @param {number} keySize The size in words of the key to generate.
* @param {number} ivSize The size in words of the IV to generate.
* @param {WordArray|string} salt (Optional) A 64-bit salt to use. If omitted, a salt will be generated randomly.
*
* @return {CipherParams} A cipher params object with the key, IV, and salt.
*
* @static
*
* @example
*
* var derivedParams = CryptoJS.kdf.OpenSSL.execute('Password', 256/32, 128/32);
* var derivedParams = CryptoJS.kdf.OpenSSL.execute('Password', 256/32, 128/32, 'saltsalt');
*/
execute: function (password, keySize, ivSize, salt) {
// Generate random salt
if (!salt) {
salt = WordArray.random(64/8);
}
// Derive key and IV
var key = EvpKDF.create({ keySize: keySize + ivSize }).compute(password, salt);
// Separate key and IV
var iv = WordArray.create(key.words.slice(keySize), ivSize * 4);
key.sigBytes = keySize * 4;
// Return params
return CipherParams.create({ key: key, iv: iv, salt: salt });
}
};
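/**
 * Illustrative key derivation (a sketch, not part of the library):
 * for AES-256-CBC the key is 256/32 = 8 words and the IV 128/32 = 4 words,
 * so EvpKDF is asked for 12 words and the result is split as key || IV.
 *
 * @example
 *
 *     var derivedParams = CryptoJS.kdf.OpenSSL.execute('password', 256/32, 128/32);
 *     // derivedParams.key  -> 8 words (32 bytes)
 *     // derivedParams.iv   -> 4 words (16 bytes)
 *     // derivedParams.salt -> random 64-bit salt (since none was supplied)
 */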
/**
* A serializable cipher wrapper that derives the key from a password,
* and returns ciphertext as a serializable cipher params object.
*/
var PasswordBasedCipher = C_lib.PasswordBasedCipher = SerializableCipher.extend({
/**
* Configuration options.
*
* @property {KDF} kdf The key derivation function to use to generate a key and IV from a password. Default: OpenSSL
*/
cfg: SerializableCipher.cfg.extend({
kdf: OpenSSLKdf
}),
/**
* Encrypts a message using a password.
*
* @param {Cipher} cipher The cipher algorithm to use.
* @param {WordArray|string} message The message to encrypt.
* @param {string} password The password.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
* @return {CipherParams} A cipher params object.
*
* @static
*
* @example
*
* var ciphertextParams = CryptoJS.lib.PasswordBasedCipher.encrypt(CryptoJS.algo.AES, message, 'password');
* var ciphertextParams = CryptoJS.lib.PasswordBasedCipher.encrypt(CryptoJS.algo.AES, message, 'password', { format: CryptoJS.format.OpenSSL });
*/
encrypt: function (cipher, message, password, cfg) {
// Apply config defaults
cfg = this.cfg.extend(cfg);
// Derive key and other params
var derivedParams = cfg.kdf.execute(password, cipher.keySize, cipher.ivSize);
// Add IV to config
cfg.iv = derivedParams.iv;
// Encrypt
var ciphertext = SerializableCipher.encrypt.call(this, cipher, message, derivedParams.key, cfg);
// Mix in derived params
ciphertext.mixIn(derivedParams);
return ciphertext;
},
/**
* Decrypts serialized ciphertext using a password.
*
* @param {Cipher} cipher The cipher algorithm to use.
* @param {CipherParams|string} ciphertext The ciphertext to decrypt.
* @param {string} password The password.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
* @return {WordArray} The plaintext.
*
* @static
*
* @example
*
* var plaintext = CryptoJS.lib.PasswordBasedCipher.decrypt(CryptoJS.algo.AES, formattedCiphertext, 'password', { format: CryptoJS.format.OpenSSL });
* var plaintext = CryptoJS.lib.PasswordBasedCipher.decrypt(CryptoJS.algo.AES, ciphertextParams, 'password', { format: CryptoJS.format.OpenSSL });
*/
decrypt: function (cipher, ciphertext, password, cfg) {
// Apply config defaults
cfg = this.cfg.extend(cfg);
// Convert string to CipherParams
ciphertext = this._parse(ciphertext, cfg.format);
// Derive key and other params
var derivedParams = cfg.kdf.execute(password, cipher.keySize, cipher.ivSize, ciphertext.salt);
// Add IV to config
cfg.iv = derivedParams.iv;
// Decrypt
var plaintext = SerializableCipher.decrypt.call(this, cipher, ciphertext, derivedParams.key, cfg);
return plaintext;
}
});
}());
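/*
Informational note (not part of the library): the _createHelper factory
above routes calls by key type -- a string key selects PasswordBasedCipher
(key and IV derived from the password via the OpenSSL KDF), while a
WordArray key selects SerializableCipher (the caller supplies key and iv).
Example, assuming the AES helper registered in the next section:

    // password-based: salt plus derived key/IV, OpenSSL-compatible output
    var ct1 = CryptoJS.AES.encrypt('message', 'secret passphrase');

    // explicit key: caller controls the key material and IV
    var ct2 = CryptoJS.AES.encrypt('message', keyWordArray, { iv: ivWordArray });
*/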
/*
CryptoJS v3.1.2
aes.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function () {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var BlockCipher = C_lib.BlockCipher;
var C_algo = C.algo;
// Lookup tables
var SBOX = [];
var INV_SBOX = [];
var SUB_MIX_0 = [];
var SUB_MIX_1 = [];
var SUB_MIX_2 = [];
var SUB_MIX_3 = [];
var INV_SUB_MIX_0 = [];
var INV_SUB_MIX_1 = [];
var INV_SUB_MIX_2 = [];
var INV_SUB_MIX_3 = [];
// Compute lookup tables
(function () {
// Compute double table
var d = [];
for (var i = 0; i < 256; i++) {
if (i < 128) {
d[i] = i << 1;
} else {
d[i] = (i << 1) ^ 0x11b;
}
}
// Walk GF(2^8)
var x = 0;
var xi = 0;
for (var i = 0; i < 256; i++) {
// Compute sbox
var sx = xi ^ (xi << 1) ^ (xi << 2) ^ (xi << 3) ^ (xi << 4);
sx = (sx >>> 8) ^ (sx & 0xff) ^ 0x63;
SBOX[x] = sx;
INV_SBOX[sx] = x;
// Compute multiplication
var x2 = d[x];
var x4 = d[x2];
var x8 = d[x4];
// Compute sub bytes, mix columns tables
var t = (d[sx] * 0x101) ^ (sx * 0x1010100);
SUB_MIX_0[x] = (t << 24) | (t >>> 8);
SUB_MIX_1[x] = (t << 16) | (t >>> 16);
SUB_MIX_2[x] = (t << 8) | (t >>> 24);
SUB_MIX_3[x] = t;
// Compute inv sub bytes, inv mix columns tables
var t = (x8 * 0x1010101) ^ (x4 * 0x10001) ^ (x2 * 0x101) ^ (x * 0x1010100);
INV_SUB_MIX_0[sx] = (t << 24) | (t >>> 8);
INV_SUB_MIX_1[sx] = (t << 16) | (t >>> 16);
INV_SUB_MIX_2[sx] = (t << 8) | (t >>> 24);
INV_SUB_MIX_3[sx] = t;
// Compute next counter
if (!x) {
x = xi = 1;
} else {
x = x2 ^ d[d[d[x8 ^ x2]]];
xi ^= d[d[xi]];
}
}
}());
// Precomputed Rcon lookup
var RCON = [0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36];
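// RCON[i] holds the round constant x^(i-1) in GF(2^8) (x = 0x02, RCON[0] is
// unused); it is XORed into the high byte of the key schedule's core
// transformation below.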
/**
* AES block cipher algorithm.
*/
var AES = C_algo.AES = BlockCipher.extend({
_doReset: function () {
// Shortcuts
var key = this._key;
var keyWords = key.words;
var keySize = key.sigBytes / 4;
// Compute number of rounds
var nRounds = this._nRounds = keySize + 6;
// Compute number of key schedule rows
var ksRows = (nRounds + 1) * 4;
// Compute key schedule
var keySchedule = this._keySchedule = [];
for (var ksRow = 0; ksRow < ksRows; ksRow++) {
if (ksRow < keySize) {
keySchedule[ksRow] = keyWords[ksRow];
} else {
var t = keySchedule[ksRow - 1];
if (!(ksRow % keySize)) {
// Rot word
t = (t << 8) | (t >>> 24);
// Sub word
t = (SBOX[t >>> 24] << 24) | (SBOX[(t >>> 16) & 0xff] << 16) | (SBOX[(t >>> 8) & 0xff] << 8) | SBOX[t & 0xff];
// Mix Rcon
t ^= RCON[(ksRow / keySize) | 0] << 24;
} else if (keySize > 6 && ksRow % keySize == 4) {
// Sub word
t = (SBOX[t >>> 24] << 24) | (SBOX[(t >>> 16) & 0xff] << 16) | (SBOX[(t >>> 8) & 0xff] << 8) | SBOX[t & 0xff];
}
keySchedule[ksRow] = keySchedule[ksRow - keySize] ^ t;
}
}
// Compute inv key schedule
var invKeySchedule = this._invKeySchedule = [];
for (var invKsRow = 0; invKsRow < ksRows; invKsRow++) {
var ksRow = ksRows - invKsRow;
if (invKsRow % 4) {
var t = keySchedule[ksRow];
} else {
var t = keySchedule[ksRow - 4];
}
if (invKsRow < 4 || ksRow <= 4) {
invKeySchedule[invKsRow] = t;
} else {
invKeySchedule[invKsRow] = INV_SUB_MIX_0[SBOX[t >>> 24]] ^ INV_SUB_MIX_1[SBOX[(t >>> 16) & 0xff]] ^
INV_SUB_MIX_2[SBOX[(t >>> 8) & 0xff]] ^ INV_SUB_MIX_3[SBOX[t & 0xff]];
}
}
},
encryptBlock: function (M, offset) {
this._doCryptBlock(M, offset, this._keySchedule, SUB_MIX_0, SUB_MIX_1, SUB_MIX_2, SUB_MIX_3, SBOX);
},
decryptBlock: function (M, offset) {
// Swap 2nd and 4th rows
var t = M[offset + 1];
M[offset + 1] = M[offset + 3];
M[offset + 3] = t;
this._doCryptBlock(M, offset, this._invKeySchedule, INV_SUB_MIX_0, INV_SUB_MIX_1, INV_SUB_MIX_2, INV_SUB_MIX_3, INV_SBOX);
// Inv swap 2nd and 4th rows
var t = M[offset + 1];
M[offset + 1] = M[offset + 3];
M[offset + 3] = t;
},
_doCryptBlock: function (M, offset, keySchedule, SUB_MIX_0, SUB_MIX_1, SUB_MIX_2, SUB_MIX_3, SBOX) {
// Shortcut
var nRounds = this._nRounds;
// Get input, add round key
var s0 = M[offset] ^ keySchedule[0];
var s1 = M[offset + 1] ^ keySchedule[1];
var s2 = M[offset + 2] ^ keySchedule[2];
var s3 = M[offset + 3] ^ keySchedule[3];
// Key schedule row counter
var ksRow = 4;
// Rounds
for (var round = 1; round < nRounds; round++) {
// Shift rows, sub bytes, mix columns, add round key
var t0 = SUB_MIX_0[s0 >>> 24] ^ SUB_MIX_1[(s1 >>> 16) & 0xff] ^ SUB_MIX_2[(s2 >>> 8) & 0xff] ^ SUB_MIX_3[s3 & 0xff] ^ keySchedule[ksRow++];
var t1 = SUB_MIX_0[s1 >>> 24] ^ SUB_MIX_1[(s2 >>> 16) & 0xff] ^ SUB_MIX_2[(s3 >>> 8) & 0xff] ^ SUB_MIX_3[s0 & 0xff] ^ keySchedule[ksRow++];
var t2 = SUB_MIX_0[s2 >>> 24] ^ SUB_MIX_1[(s3 >>> 16) & 0xff] ^ SUB_MIX_2[(s0 >>> 8) & 0xff] ^ SUB_MIX_3[s1 & 0xff] ^ keySchedule[ksRow++];
var t3 = SUB_MIX_0[s3 >>> 24] ^ SUB_MIX_1[(s0 >>> 16) & 0xff] ^ SUB_MIX_2[(s1 >>> 8) & 0xff] ^ SUB_MIX_3[s2 & 0xff] ^ keySchedule[ksRow++];
// Update state
s0 = t0;
s1 = t1;
s2 = t2;
s3 = t3;
}
// Shift rows, sub bytes, add round key
var t0 = ((SBOX[s0 >>> 24] << 24) | (SBOX[(s1 >>> 16) & 0xff] << 16) | (SBOX[(s2 >>> 8) & 0xff] << 8) | SBOX[s3 & 0xff]) ^ keySchedule[ksRow++];
var t1 = ((SBOX[s1 >>> 24] << 24) | (SBOX[(s2 >>> 16) & 0xff] << 16) | (SBOX[(s3 >>> 8) & 0xff] << 8) | SBOX[s0 & 0xff]) ^ keySchedule[ksRow++];
var t2 = ((SBOX[s2 >>> 24] << 24) | (SBOX[(s3 >>> 16) & 0xff] << 16) | (SBOX[(s0 >>> 8) & 0xff] << 8) | SBOX[s1 & 0xff]) ^ keySchedule[ksRow++];
var t3 = ((SBOX[s3 >>> 24] << 24) | (SBOX[(s0 >>> 16) & 0xff] << 16) | (SBOX[(s1 >>> 8) & 0xff] << 8) | SBOX[s2 & 0xff]) ^ keySchedule[ksRow++];
// Set output
M[offset] = t0;
M[offset + 1] = t1;
M[offset + 2] = t2;
M[offset + 3] = t3;
},
keySize: 256/32
});
/**
* Shortcut functions to the cipher's object interface.
*
* @example
*
* var ciphertext = CryptoJS.AES.encrypt(message, key, cfg);
* var plaintext = CryptoJS.AES.decrypt(ciphertext, key, cfg);
*/
C.AES = BlockCipher._createHelper(AES);
}());
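/*
Illustrative AES round trip (a sketch, not part of the library), using the
helper exported just above and the Utf8 encoder from the library core:

    var encrypted = CryptoJS.AES.encrypt('attack at dawn', 'secret passphrase');
    var decrypted = CryptoJS.AES.decrypt(encrypted, 'secret passphrase');
    var plaintext = decrypted.toString(CryptoJS.enc.Utf8);  // 'attack at dawn'
*/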
/*
CryptoJS v3.1.2
md5.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function (Math) {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var WordArray = C_lib.WordArray;
var Hasher = C_lib.Hasher;
var C_algo = C.algo;
// Constants table
var T = [];
// Compute constants
(function () {
for (var i = 0; i < 64; i++) {
T[i] = (Math.abs(Math.sin(i + 1)) * 0x100000000) | 0;
}
}());
/**
* MD5 hash algorithm.
*/
var MD5 = C_algo.MD5 = Hasher.extend({
_doReset: function () {
this._hash = new WordArray.init([
0x67452301, 0xefcdab89,
0x98badcfe, 0x10325476
]);
},
_doProcessBlock: function (M, offset) {
// Swap endian
for (var i = 0; i < 16; i++) {
// Shortcuts
var offset_i = offset + i;
var M_offset_i = M[offset_i];
M[offset_i] = (
(((M_offset_i << 8) | (M_offset_i >>> 24)) & 0x00ff00ff) |
(((M_offset_i << 24) | (M_offset_i >>> 8)) & 0xff00ff00)
);
}
// Shortcuts
var H = this._hash.words;
var M_offset_0 = M[offset + 0];
var M_offset_1 = M[offset + 1];
var M_offset_2 = M[offset + 2];
var M_offset_3 = M[offset + 3];
var M_offset_4 = M[offset + 4];
var M_offset_5 = M[offset + 5];
var M_offset_6 = M[offset + 6];
var M_offset_7 = M[offset + 7];
var M_offset_8 = M[offset + 8];
var M_offset_9 = M[offset + 9];
var M_offset_10 = M[offset + 10];
var M_offset_11 = M[offset + 11];
var M_offset_12 = M[offset + 12];
var M_offset_13 = M[offset + 13];
var M_offset_14 = M[offset + 14];
var M_offset_15 = M[offset + 15];
// Working variables
var a = H[0];
var b = H[1];
var c = H[2];
var d = H[3];
// Computation
a = FF(a, b, c, d, M_offset_0, 7, T[0]);
d = FF(d, a, b, c, M_offset_1, 12, T[1]);
c = FF(c, d, a, b, M_offset_2, 17, T[2]);
b = FF(b, c, d, a, M_offset_3, 22, T[3]);
a = FF(a, b, c, d, M_offset_4, 7, T[4]);
d = FF(d, a, b, c, M_offset_5, 12, T[5]);
c = FF(c, d, a, b, M_offset_6, 17, T[6]);
b = FF(b, c, d, a, M_offset_7, 22, T[7]);
a = FF(a, b, c, d, M_offset_8, 7, T[8]);
d = FF(d, a, b, c, M_offset_9, 12, T[9]);
c = FF(c, d, a, b, M_offset_10, 17, T[10]);
b = FF(b, c, d, a, M_offset_11, 22, T[11]);
a = FF(a, b, c, d, M_offset_12, 7, T[12]);
d = FF(d, a, b, c, M_offset_13, 12, T[13]);
c = FF(c, d, a, b, M_offset_14, 17, T[14]);
b = FF(b, c, d, a, M_offset_15, 22, T[15]);
a = GG(a, b, c, d, M_offset_1, 5, T[16]);
d = GG(d, a, b, c, M_offset_6, 9, T[17]);
c = GG(c, d, a, b, M_offset_11, 14, T[18]);
b = GG(b, c, d, a, M_offset_0, 20, T[19]);
a = GG(a, b, c, d, M_offset_5, 5, T[20]);
d = GG(d, a, b, c, M_offset_10, 9, T[21]);
c = GG(c, d, a, b, M_offset_15, 14, T[22]);
b = GG(b, c, d, a, M_offset_4, 20, T[23]);
a = GG(a, b, c, d, M_offset_9, 5, T[24]);
d = GG(d, a, b, c, M_offset_14, 9, T[25]);
c = GG(c, d, a, b, M_offset_3, 14, T[26]);
b = GG(b, c, d, a, M_offset_8, 20, T[27]);
a = GG(a, b, c, d, M_offset_13, 5, T[28]);
d = GG(d, a, b, c, M_offset_2, 9, T[29]);
c = GG(c, d, a, b, M_offset_7, 14, T[30]);
b = GG(b, c, d, a, M_offset_12, 20, T[31]);
a = HH(a, b, c, d, M_offset_5, 4, T[32]);
d = HH(d, a, b, c, M_offset_8, 11, T[33]);
c = HH(c, d, a, b, M_offset_11, 16, T[34]);
b = HH(b, c, d, a, M_offset_14, 23, T[35]);
a = HH(a, b, c, d, M_offset_1, 4, T[36]);
d = HH(d, a, b, c, M_offset_4, 11, T[37]);
c = HH(c, d, a, b, M_offset_7, 16, T[38]);
b = HH(b, c, d, a, M_offset_10, 23, T[39]);
a = HH(a, b, c, d, M_offset_13, 4, T[40]);
d = HH(d, a, b, c, M_offset_0, 11, T[41]);
c = HH(c, d, a, b, M_offset_3, 16, T[42]);
b = HH(b, c, d, a, M_offset_6, 23, T[43]);
a = HH(a, b, c, d, M_offset_9, 4, T[44]);
d = HH(d, a, b, c, M_offset_12, 11, T[45]);
c = HH(c, d, a, b, M_offset_15, 16, T[46]);
b = HH(b, c, d, a, M_offset_2, 23, T[47]);
a = II(a, b, c, d, M_offset_0, 6, T[48]);
d = II(d, a, b, c, M_offset_7, 10, T[49]);
c = II(c, d, a, b, M_offset_14, 15, T[50]);
b = II(b, c, d, a, M_offset_5, 21, T[51]);
a = II(a, b, c, d, M_offset_12, 6, T[52]);
d = II(d, a, b, c, M_offset_3, 10, T[53]);
c = II(c, d, a, b, M_offset_10, 15, T[54]);
b = II(b, c, d, a, M_offset_1, 21, T[55]);
a = II(a, b, c, d, M_offset_8, 6, T[56]);
d = II(d, a, b, c, M_offset_15, 10, T[57]);
c = II(c, d, a, b, M_offset_6, 15, T[58]);
b = II(b, c, d, a, M_offset_13, 21, T[59]);
a = II(a, b, c, d, M_offset_4, 6, T[60]);
d = II(d, a, b, c, M_offset_11, 10, T[61]);
c = II(c, d, a, b, M_offset_2, 15, T[62]);
b = II(b, c, d, a, M_offset_9, 21, T[63]);
// Intermediate hash value
H[0] = (H[0] + a) | 0;
H[1] = (H[1] + b) | 0;
H[2] = (H[2] + c) | 0;
H[3] = (H[3] + d) | 0;
},
_doFinalize: function () {
// Shortcuts
var data = this._data;
var dataWords = data.words;
var nBitsTotal = this._nDataBytes * 8;
var nBitsLeft = data.sigBytes * 8;
// Add padding
dataWords[nBitsLeft >>> 5] |= 0x80 << (24 - nBitsLeft % 32);
var nBitsTotalH = Math.floor(nBitsTotal / 0x100000000);
var nBitsTotalL = nBitsTotal;
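// MD5 appends the total message length as a 64-bit little-endian value,
// so each 32-bit half is byte-swapped and the low half occupies the
// earlier word (+14) while the high half occupies the later word (+15).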
dataWords[(((nBitsLeft + 64) >>> 9) << 4) + 15] = (
(((nBitsTotalH << 8) | (nBitsTotalH >>> 24)) & 0x00ff00ff) |
(((nBitsTotalH << 24) | (nBitsTotalH >>> 8)) & 0xff00ff00)
);
dataWords[(((nBitsLeft + 64) >>> 9) << 4) + 14] = (
(((nBitsTotalL << 8) | (nBitsTotalL >>> 24)) & 0x00ff00ff) |
(((nBitsTotalL << 24) | (nBitsTotalL >>> 8)) & 0xff00ff00)
);
data.sigBytes = (dataWords.length + 1) * 4;
// Hash final blocks
this._process();
// Shortcuts
var hash = this._hash;
var H = hash.words;
// Swap endian
for (var i = 0; i < 4; i++) {
// Shortcut
var H_i = H[i];
H[i] = (((H_i << 8) | (H_i >>> 24)) & 0x00ff00ff) |
(((H_i << 24) | (H_i >>> 8)) & 0xff00ff00);
}
// Return final computed hash
return hash;
},
clone: function () {
var clone = Hasher.clone.call(this);
clone._hash = this._hash.clone();
return clone;
}
});
function FF(a, b, c, d, x, s, t) {
var n = a + ((b & c) | (~b & d)) + x + t;
return ((n << s) | (n >>> (32 - s))) + b;
}
function GG(a, b, c, d, x, s, t) {
var n = a + ((b & d) | (c & ~d)) + x + t;
return ((n << s) | (n >>> (32 - s))) + b;
}
function HH(a, b, c, d, x, s, t) {
var n = a + (b ^ c ^ d) + x + t;
return ((n << s) | (n >>> (32 - s))) + b;
}
function II(a, b, c, d, x, s, t) {
var n = a + (c ^ (b | ~d)) + x + t;
return ((n << s) | (n >>> (32 - s))) + b;
}
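/**
 * Round function summary (informational comment): the four MD5 auxiliary
 * functions used above are
 *
 *     F(b,c,d) = (b & c) | (~b & d)
 *     G(b,c,d) = (b & d) | (c & ~d)
 *     H(b,c,d) = b ^ c ^ d
 *     I(b,c,d) = c ^ (b | ~d)
 *
 * and each step computes a = b + rotl(a + f(b,c,d) + x + t, s).
 */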
/**
* Shortcut function to the hasher's object interface.
*
* @param {WordArray|string} message The message to hash.
*
* @return {WordArray} The hash.
*
* @static
*
* @example
*
* var hash = CryptoJS.MD5('message');
* var hash = CryptoJS.MD5(wordArray);
*/
C.MD5 = Hasher._createHelper(MD5);
/**
* Shortcut function to the HMAC's object interface.
*
* @param {WordArray|string} message The message to hash.
* @param {WordArray|string} key The secret key.
*
* @return {WordArray} The HMAC.
*
* @static
*
* @example
*
* var hmac = CryptoJS.HmacMD5(message, key);
*/
C.HmacMD5 = Hasher._createHmacHelper(MD5);
}(Math));
/*
CryptoJS v3.1.2
sha1.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function () {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var WordArray = C_lib.WordArray;
var Hasher = C_lib.Hasher;
var C_algo = C.algo;
// Reusable object
var W = [];
/**
* SHA-1 hash algorithm.
*/
var SHA1 = C_algo.SHA1 = Hasher.extend({
_doReset: function () {
this._hash = new WordArray.init([
0x67452301, 0xefcdab89,
0x98badcfe, 0x10325476,
0xc3d2e1f0
]);
},
_doProcessBlock: function (M, offset) {
// Shortcut
var H = this._hash.words;
// Working variables
var a = H[0];
var b = H[1];
var c = H[2];
var d = H[3];
var e = H[4];
// Computation
for (var i = 0; i < 80; i++) {
if (i < 16) {
W[i] = M[offset + i] | 0;
} else {
var n = W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16];
W[i] = (n << 1) | (n >>> 31);
}
var t = ((a << 5) | (a >>> 27)) + e + W[i];
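// The round constants below are the standard SHA-1 values; the last two
// are written as negative offsets because JavaScript bitwise arithmetic
// is 32-bit signed: -0x70e44324 === (0x8f1bbcdc | 0) and
// -0x359d3e2a === (0xca62c1d6 | 0).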
if (i < 20) {
t += ((b & c) | (~b & d)) + 0x5a827999;
} else if (i < 40) {
t += (b ^ c ^ d) + 0x6ed9eba1;
} else if (i < 60) {
t += ((b & c) | (b & d) | (c & d)) - 0x70e44324;
} else /* if (i < 80) */ {
t += (b ^ c ^ d) - 0x359d3e2a;
}
e = d;
d = c;
c = (b << 30) | (b >>> 2);
b = a;
a = t;
}
// Intermediate hash value
H[0] = (H[0] + a) | 0;
H[1] = (H[1] + b) | 0;
H[2] = (H[2] + c) | 0;
H[3] = (H[3] + d) | 0;
H[4] = (H[4] + e) | 0;
},
_doFinalize: function () {
// Shortcuts
var data = this._data;
var dataWords = data.words;
var nBitsTotal = this._nDataBytes * 8;
var nBitsLeft = data.sigBytes * 8;
// Add padding
dataWords[nBitsLeft >>> 5] |= 0x80 << (24 - nBitsLeft % 32);
dataWords[(((nBitsLeft + 64) >>> 9) << 4) + 14] = Math.floor(nBitsTotal / 0x100000000);
dataWords[(((nBitsLeft + 64) >>> 9) << 4) + 15] = nBitsTotal;
data.sigBytes = dataWords.length * 4;
// Hash final blocks
this._process();
// Return final computed hash
return this._hash;
},
clone: function () {
var clone = Hasher.clone.call(this);
clone._hash = this._hash.clone();
return clone;
}
});
/**
* Shortcut function to the hasher's object interface.
*
* @param {WordArray|string} message The message to hash.
*
* @return {WordArray} The hash.
*
* @static
*
* @example
*
* var hash = CryptoJS.SHA1('message');
* var hash = CryptoJS.SHA1(wordArray);
*/
C.SHA1 = Hasher._createHelper(SHA1);
/**
* Shortcut function to the HMAC's object interface.
*
* @param {WordArray|string} message The message to hash.
* @param {WordArray|string} key The secret key.
*
* @return {WordArray} The HMAC.
*
* @static
*
* @example
*
* var hmac = CryptoJS.HmacSHA1(message, key);
*/
C.HmacSHA1 = Hasher._createHmacHelper(SHA1);
}());
/*
CryptoJS v3.1.2
x64-core.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function (undefined) {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var Base = C_lib.Base;
var X32WordArray = C_lib.WordArray;
/**
* x64 namespace.
*/
var C_x64 = C.x64 = {};
/**
* A 64-bit word.
*/
var X64Word = C_x64.Word = Base.extend({
/**
* Initializes a newly created 64-bit word.
*
* @param {number} high The high 32 bits.
* @param {number} low The low 32 bits.
*
* @example
*
* var x64Word = CryptoJS.x64.Word.create(0x00010203, 0x04050607);
*/
init: function (high, low) {
this.high = high;
this.low = low;
}
/**
* Bitwise NOTs this word.
*
* @return {X64Word} A new x64-Word object after negating.
*
* @example
*
* var negated = x64Word.not();
*/
// not: function () {
// var high = ~this.high;
// var low = ~this.low;
// return X64Word.create(high, low);
// },
/**
* Bitwise ANDs this word with the passed word.
*
* @param {X64Word} word The x64-Word to AND with this word.
*
* @return {X64Word} A new x64-Word object after ANDing.
*
* @example
*
* var anded = x64Word.and(anotherX64Word);
*/
// and: function (word) {
// var high = this.high & word.high;
// var low = this.low & word.low;
// return X64Word.create(high, low);
// },
/**
* Bitwise ORs this word with the passed word.
*
* @param {X64Word} word The x64-Word to OR with this word.
*
* @return {X64Word} A new x64-Word object after ORing.
*
* @example
*
* var ored = x64Word.or(anotherX64Word);
*/
// or: function (word) {
// var high = this.high | word.high;
// var low = this.low | word.low;
// return X64Word.create(high, low);
// },
/**
* Bitwise XORs this word with the passed word.
*
* @param {X64Word} word The x64-Word to XOR with this word.
*
* @return {X64Word} A new x64-Word object after XORing.
*
* @example
*
* var xored = x64Word.xor(anotherX64Word);
*/
// xor: function (word) {
// var high = this.high ^ word.high;
// var low = this.low ^ word.low;
// return X64Word.create(high, low);
// },
/**
* Shifts this word n bits to the left.
*
* @param {number} n The number of bits to shift.
*
* @return {X64Word} A new x64-Word object after shifting.
*
* @example
*
* var shifted = x64Word.shiftL(25);
*/
// shiftL: function (n) {
// if (n < 32) {
// var high = (this.high << n) | (this.low >>> (32 - n));
// var low = this.low << n;
// } else {
// var high = this.low << (n - 32);
// var low = 0;
// }
// return X64Word.create(high, low);
// },
/**
* Shifts this word n bits to the right.
*
* @param {number} n The number of bits to shift.
*
* @return {X64Word} A new x64-Word object after shifting.
*
* @example
*
* var shifted = x64Word.shiftR(7);
*/
// shiftR: function (n) {
// if (n < 32) {
// var low = (this.low >>> n) | (this.high << (32 - n));
// var high = this.high >>> n;
// } else {
// var low = this.high >>> (n - 32);
// var high = 0;
// }
// return X64Word.create(high, low);
// },
/**
* Rotates this word n bits to the left.
*
* @param {number} n The number of bits to rotate.
*
* @return {X64Word} A new x64-Word object after rotating.
*
* @example
*
* var rotated = x64Word.rotL(25);
*/
// rotL: function (n) {
// return this.shiftL(n).or(this.shiftR(64 - n));
// },
/**
* Rotates this word n bits to the right.
*
* @param {number} n The number of bits to rotate.
*
* @return {X64Word} A new x64-Word object after rotating.
*
* @example
*
* var rotated = x64Word.rotR(7);
*/
// rotR: function (n) {
// return this.shiftR(n).or(this.shiftL(64 - n));
// },
/**
* Adds this word with the passed word.
*
* @param {X64Word} word The x64-Word to add with this word.
*
* @return {X64Word} A new x64-Word object after adding.
*
* @example
*
* var added = x64Word.add(anotherX64Word);
*/
// add: function (word) {
// var low = (this.low + word.low) | 0;
// var carry = (low >>> 0) < (this.low >>> 0) ? 1 : 0;
// var high = (this.high + word.high + carry) | 0;
// return X64Word.create(high, low);
// }
});
/**
* An array of 64-bit words.
*
* @property {Array} words The array of CryptoJS.x64.Word objects.
* @property {number} sigBytes The number of significant bytes in this word array.
*/
var X64WordArray = C_x64.WordArray = Base.extend({
/**
* Initializes a newly created word array.
*
* @param {Array} words (Optional) An array of CryptoJS.x64.Word objects.
* @param {number} sigBytes (Optional) The number of significant bytes in the words.
*
* @example
*
* var wordArray = CryptoJS.x64.WordArray.create();
*
* var wordArray = CryptoJS.x64.WordArray.create([
* CryptoJS.x64.Word.create(0x00010203, 0x04050607),
* CryptoJS.x64.Word.create(0x18191a1b, 0x1c1d1e1f)
* ]);
*
* var wordArray = CryptoJS.x64.WordArray.create([
* CryptoJS.x64.Word.create(0x00010203, 0x04050607),
* CryptoJS.x64.Word.create(0x18191a1b, 0x1c1d1e1f)
* ], 10);
*/
init: function (words, sigBytes) {
words = this.words = words || [];
if (sigBytes != undefined) {
this.sigBytes = sigBytes;
} else {
this.sigBytes = words.length * 8;
}
},
/**
* Converts this 64-bit word array to a 32-bit word array.
*
* @return {CryptoJS.lib.WordArray} This word array's data as a 32-bit word array.
*
* @example
*
* var x32WordArray = x64WordArray.toX32();
*/
toX32: function () {
// Shortcuts
var x64Words = this.words;
var x64WordsLength = x64Words.length;
// Convert
var x32Words = [];
for (var i = 0; i < x64WordsLength; i++) {
var x64Word = x64Words[i];
x32Words.push(x64Word.high);
x32Words.push(x64Word.low);
}
return X32WordArray.create(x32Words, this.sigBytes);
},
/**
* Creates a copy of this word array.
*
* @return {X64WordArray} The clone.
*
* @example
*
* var clone = x64WordArray.clone();
*/
clone: function () {
var clone = Base.clone.call(this);
// Clone "words" array
var words = clone.words = this.words.slice(0);
// Clone each X64Word object
var wordsLength = words.length;
for (var i = 0; i < wordsLength; i++) {
words[i] = words[i].clone();
}
return clone;
}
});
}());
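/*
Informational note (not part of the library): X64Word stores a 64-bit
value as two signed 32-bit halves. The SHA-512 implementation below adds
such values with an explicit carry test on the unsigned low halves:

    var low   = (aLow + bLow) | 0;
    var carry = (low >>> 0) < (aLow >>> 0) ? 1 : 0;  // unsigned overflow test
    var high  = (aHigh + bHigh + carry) | 0;

The `>>> 0` coercions reinterpret the signed halves as unsigned before
comparing, which detects 32-bit overflow without native 64-bit integers.
*/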
/*
CryptoJS v3.1.2
sha256.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function (Math) {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var WordArray = C_lib.WordArray;
var Hasher = C_lib.Hasher;
var C_algo = C.algo;
// Initialization and round constants tables
var H = [];
var K = [];
// Compute constants
(function () {
function isPrime(n) {
var sqrtN = Math.sqrt(n);
for (var factor = 2; factor <= sqrtN; factor++) {
if (!(n % factor)) {
return false;
}
}
return true;
}
function getFractionalBits(n) {
return ((n - (n | 0)) * 0x100000000) | 0;
}
var n = 2;
var nPrime = 0;
while (nPrime < 64) {
if (isPrime(n)) {
if (nPrime < 8) {
H[nPrime] = getFractionalBits(Math.pow(n, 1 / 2));
}
K[nPrime] = getFractionalBits(Math.pow(n, 1 / 3));
nPrime++;
}
n++;
}
}());
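// Worked example of the derivation above (informational): the initial hash
// values are the fractional parts of the square roots of the first 8 primes
// and the round constants those of the cube roots of the first 64 primes,
// e.g. H[0] = frac(sqrt(2)) -> 0x6a09e667 and K[0] = frac(cbrt(2)) -> 0x428a2f98.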
// Reusable object
var W = [];
/**
* SHA-256 hash algorithm.
*/
var SHA256 = C_algo.SHA256 = Hasher.extend({
_doReset: function () {
this._hash = new WordArray.init(H.slice(0));
},
_doProcessBlock: function (M, offset) {
// Shortcut
var H = this._hash.words;
// Working variables
var a = H[0];
var b = H[1];
var c = H[2];
var d = H[3];
var e = H[4];
var f = H[5];
var g = H[6];
var h = H[7];
// Computation
for (var i = 0; i < 64; i++) {
if (i < 16) {
W[i] = M[offset + i] | 0;
} else {
var gamma0x = W[i - 15];
var gamma0 = ((gamma0x << 25) | (gamma0x >>> 7)) ^
((gamma0x << 14) | (gamma0x >>> 18)) ^
(gamma0x >>> 3);
var gamma1x = W[i - 2];
var gamma1 = ((gamma1x << 15) | (gamma1x >>> 17)) ^
((gamma1x << 13) | (gamma1x >>> 19)) ^
(gamma1x >>> 10);
W[i] = gamma0 + W[i - 7] + gamma1 + W[i - 16];
}
var ch = (e & f) ^ (~e & g);
var maj = (a & b) ^ (a & c) ^ (b & c);
var sigma0 = ((a << 30) | (a >>> 2)) ^ ((a << 19) | (a >>> 13)) ^ ((a << 10) | (a >>> 22));
var sigma1 = ((e << 26) | (e >>> 6)) ^ ((e << 21) | (e >>> 11)) ^ ((e << 7) | (e >>> 25));
var t1 = h + sigma1 + ch + K[i] + W[i];
var t2 = sigma0 + maj;
h = g;
g = f;
f = e;
e = (d + t1) | 0;
d = c;
c = b;
b = a;
a = (t1 + t2) | 0;
}
// Intermediate hash value
H[0] = (H[0] + a) | 0;
H[1] = (H[1] + b) | 0;
H[2] = (H[2] + c) | 0;
H[3] = (H[3] + d) | 0;
H[4] = (H[4] + e) | 0;
H[5] = (H[5] + f) | 0;
H[6] = (H[6] + g) | 0;
H[7] = (H[7] + h) | 0;
},
_doFinalize: function () {
// Shortcuts
var data = this._data;
var dataWords = data.words;
var nBitsTotal = this._nDataBytes * 8;
var nBitsLeft = data.sigBytes * 8;
// Add padding
dataWords[nBitsLeft >>> 5] |= 0x80 << (24 - nBitsLeft % 32);
dataWords[(((nBitsLeft + 64) >>> 9) << 4) + 14] = Math.floor(nBitsTotal / 0x100000000);
dataWords[(((nBitsLeft + 64) >>> 9) << 4) + 15] = nBitsTotal;
data.sigBytes = dataWords.length * 4;
// Hash final blocks
this._process();
// Return final computed hash
return this._hash;
},
clone: function () {
var clone = Hasher.clone.call(this);
clone._hash = this._hash.clone();
return clone;
}
});
/**
* Shortcut function to the hasher's object interface.
*
* @param {WordArray|string} message The message to hash.
*
* @return {WordArray} The hash.
*
* @static
*
* @example
*
* var hash = CryptoJS.SHA256('message');
* var hash = CryptoJS.SHA256(wordArray);
*/
C.SHA256 = Hasher._createHelper(SHA256);
/**
* Shortcut function to the HMAC's object interface.
*
* @param {WordArray|string} message The message to hash.
* @param {WordArray|string} key The secret key.
*
* @return {WordArray} The HMAC.
*
* @static
*
* @example
*
* var hmac = CryptoJS.HmacSHA256(message, key);
*/
C.HmacSHA256 = Hasher._createHmacHelper(SHA256);
}(Math));
/*
CryptoJS v3.1.2
sha512.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function () {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var Hasher = C_lib.Hasher;
var C_x64 = C.x64;
var X64Word = C_x64.Word;
var X64WordArray = C_x64.WordArray;
var C_algo = C.algo;
function X64Word_create() {
return X64Word.create.apply(X64Word, arguments);
}
// Constants
var K = [
X64Word_create(0x428a2f98, 0xd728ae22), X64Word_create(0x71374491, 0x23ef65cd),
X64Word_create(0xb5c0fbcf, 0xec4d3b2f), X64Word_create(0xe9b5dba5, 0x8189dbbc),
X64Word_create(0x3956c25b, 0xf348b538), X64Word_create(0x59f111f1, 0xb605d019),
X64Word_create(0x923f82a4, 0xaf194f9b), X64Word_create(0xab1c5ed5, 0xda6d8118),
X64Word_create(0xd807aa98, 0xa3030242), X64Word_create(0x12835b01, 0x45706fbe),
X64Word_create(0x243185be, 0x4ee4b28c), X64Word_create(0x550c7dc3, 0xd5ffb4e2),
X64Word_create(0x72be5d74, 0xf27b896f), X64Word_create(0x80deb1fe, 0x3b1696b1),
X64Word_create(0x9bdc06a7, 0x25c71235), X64Word_create(0xc19bf174, 0xcf692694),
X64Word_create(0xe49b69c1, 0x9ef14ad2), X64Word_create(0xefbe4786, 0x384f25e3),
X64Word_create(0x0fc19dc6, 0x8b8cd5b5), X64Word_create(0x240ca1cc, 0x77ac9c65),
X64Word_create(0x2de92c6f, 0x592b0275), X64Word_create(0x4a7484aa, 0x6ea6e483),
X64Word_create(0x5cb0a9dc, 0xbd41fbd4), X64Word_create(0x76f988da, 0x831153b5),
X64Word_create(0x983e5152, 0xee66dfab), X64Word_create(0xa831c66d, 0x2db43210),
X64Word_create(0xb00327c8, 0x98fb213f), X64Word_create(0xbf597fc7, 0xbeef0ee4),
X64Word_create(0xc6e00bf3, 0x3da88fc2), X64Word_create(0xd5a79147, 0x930aa725),
X64Word_create(0x06ca6351, 0xe003826f), X64Word_create(0x14292967, 0x0a0e6e70),
X64Word_create(0x27b70a85, 0x46d22ffc), X64Word_create(0x2e1b2138, 0x5c26c926),
X64Word_create(0x4d2c6dfc, 0x5ac42aed), X64Word_create(0x53380d13, 0x9d95b3df),
X64Word_create(0x650a7354, 0x8baf63de), X64Word_create(0x766a0abb, 0x3c77b2a8),
X64Word_create(0x81c2c92e, 0x47edaee6), X64Word_create(0x92722c85, 0x1482353b),
X64Word_create(0xa2bfe8a1, 0x4cf10364), X64Word_create(0xa81a664b, 0xbc423001),
X64Word_create(0xc24b8b70, 0xd0f89791), X64Word_create(0xc76c51a3, 0x0654be30),
X64Word_create(0xd192e819, 0xd6ef5218), X64Word_create(0xd6990624, 0x5565a910),
X64Word_create(0xf40e3585, 0x5771202a), X64Word_create(0x106aa070, 0x32bbd1b8),
X64Word_create(0x19a4c116, 0xb8d2d0c8), X64Word_create(0x1e376c08, 0x5141ab53),
X64Word_create(0x2748774c, 0xdf8eeb99), X64Word_create(0x34b0bcb5, 0xe19b48a8),
X64Word_create(0x391c0cb3, 0xc5c95a63), X64Word_create(0x4ed8aa4a, 0xe3418acb),
X64Word_create(0x5b9cca4f, 0x7763e373), X64Word_create(0x682e6ff3, 0xd6b2b8a3),
X64Word_create(0x748f82ee, 0x5defb2fc), X64Word_create(0x78a5636f, 0x43172f60),
X64Word_create(0x84c87814, 0xa1f0ab72), X64Word_create(0x8cc70208, 0x1a6439ec),
X64Word_create(0x90befffa, 0x23631e28), X64Word_create(0xa4506ceb, 0xde82bde9),
X64Word_create(0xbef9a3f7, 0xb2c67915), X64Word_create(0xc67178f2, 0xe372532b),
X64Word_create(0xca273ece, 0xea26619c), X64Word_create(0xd186b8c7, 0x21c0c207),
X64Word_create(0xeada7dd6, 0xcde0eb1e), X64Word_create(0xf57d4f7f, 0xee6ed178),
X64Word_create(0x06f067aa, 0x72176fba), X64Word_create(0x0a637dc5, 0xa2c898a6),
X64Word_create(0x113f9804, 0xbef90dae), X64Word_create(0x1b710b35, 0x131c471b),
X64Word_create(0x28db77f5, 0x23047d84), X64Word_create(0x32caab7b, 0x40c72493),
X64Word_create(0x3c9ebe0a, 0x15c9bebc), X64Word_create(0x431d67c4, 0x9c100d4c),
X64Word_create(0x4cc5d4be, 0xcb3e42b6), X64Word_create(0x597f299c, 0xfc657e2a),
X64Word_create(0x5fcb6fab, 0x3ad6faec), X64Word_create(0x6c44198c, 0x4a475817)
];
// Reusable objects
var W = [];
(function () {
for (var i = 0; i < 80; i++) {
W[i] = X64Word_create();
}
}());
/**
* SHA-512 hash algorithm.
*/
var SHA512 = C_algo.SHA512 = Hasher.extend({
_doReset: function () {
this._hash = new X64WordArray.init([
new X64Word.init(0x6a09e667, 0xf3bcc908), new X64Word.init(0xbb67ae85, 0x84caa73b),
new X64Word.init(0x3c6ef372, 0xfe94f82b), new X64Word.init(0xa54ff53a, 0x5f1d36f1),
new X64Word.init(0x510e527f, 0xade682d1), new X64Word.init(0x9b05688c, 0x2b3e6c1f),
new X64Word.init(0x1f83d9ab, 0xfb41bd6b), new X64Word.init(0x5be0cd19, 0x137e2179)
]);
},
_doProcessBlock: function (M, offset) {
// Shortcuts
var H = this._hash.words;
var H0 = H[0];
var H1 = H[1];
var H2 = H[2];
var H3 = H[3];
var H4 = H[4];
var H5 = H[5];
var H6 = H[6];
var H7 = H[7];
var H0h = H0.high;
var H0l = H0.low;
var H1h = H1.high;
var H1l = H1.low;
var H2h = H2.high;
var H2l = H2.low;
var H3h = H3.high;
var H3l = H3.low;
var H4h = H4.high;
var H4l = H4.low;
var H5h = H5.high;
var H5l = H5.low;
var H6h = H6.high;
var H6l = H6.low;
var H7h = H7.high;
var H7l = H7.low;
// Working variables
var ah = H0h;
var al = H0l;
var bh = H1h;
var bl = H1l;
var ch = H2h;
var cl = H2l;
var dh = H3h;
var dl = H3l;
var eh = H4h;
var el = H4l;
var fh = H5h;
var fl = H5l;
var gh = H6h;
var gl = H6l;
var hh = H7h;
var hl = H7l;
// Rounds
for (var i = 0; i < 80; i++) {
// Shortcut
var Wi = W[i];
// Extend message
if (i < 16) {
var Wih = Wi.high = M[offset + i * 2] | 0;
var Wil = Wi.low = M[offset + i * 2 + 1] | 0;
} else {
// Gamma0
var gamma0x = W[i - 15];
var gamma0xh = gamma0x.high;
var gamma0xl = gamma0x.low;
var gamma0h = ((gamma0xh >>> 1) | (gamma0xl << 31)) ^ ((gamma0xh >>> 8) | (gamma0xl << 24)) ^ (gamma0xh >>> 7);
var gamma0l = ((gamma0xl >>> 1) | (gamma0xh << 31)) ^ ((gamma0xl >>> 8) | (gamma0xh << 24)) ^ ((gamma0xl >>> 7) | (gamma0xh << 25));
// Gamma1
var gamma1x = W[i - 2];
var gamma1xh = gamma1x.high;
var gamma1xl = gamma1x.low;
var gamma1h = ((gamma1xh >>> 19) | (gamma1xl << 13)) ^ ((gamma1xh << 3) | (gamma1xl >>> 29)) ^ (gamma1xh >>> 6);
var gamma1l = ((gamma1xl >>> 19) | (gamma1xh << 13)) ^ ((gamma1xl << 3) | (gamma1xh >>> 29)) ^ ((gamma1xl >>> 6) | (gamma1xh << 26));
// W[i] = gamma0 + W[i - 7] + gamma1 + W[i - 16]
var Wi7 = W[i - 7];
var Wi7h = Wi7.high;
var Wi7l = Wi7.low;
var Wi16 = W[i - 16];
var Wi16h = Wi16.high;
var Wi16l = Wi16.low;
var Wil = gamma0l + Wi7l;
var Wih = gamma0h + Wi7h + ((Wil >>> 0) < (gamma0l >>> 0) ? 1 : 0);
var Wil = Wil + gamma1l;
var Wih = Wih + gamma1h + ((Wil >>> 0) < (gamma1l >>> 0) ? 1 : 0);
var Wil = Wil + Wi16l;
var Wih = Wih + Wi16h + ((Wil >>> 0) < (Wi16l >>> 0) ? 1 : 0);
Wi.high = Wih;
Wi.low = Wil;
}
var chh = (eh & fh) ^ (~eh & gh);
var chl = (el & fl) ^ (~el & gl);
var majh = (ah & bh) ^ (ah & ch) ^ (bh & ch);
var majl = (al & bl) ^ (al & cl) ^ (bl & cl);
var sigma0h = ((ah >>> 28) | (al << 4)) ^ ((ah << 30) | (al >>> 2)) ^ ((ah << 25) | (al >>> 7));
var sigma0l = ((al >>> 28) | (ah << 4)) ^ ((al << 30) | (ah >>> 2)) ^ ((al << 25) | (ah >>> 7));
var sigma1h = ((eh >>> 14) | (el << 18)) ^ ((eh >>> 18) | (el << 14)) ^ ((eh << 23) | (el >>> 9));
var sigma1l = ((el >>> 14) | (eh << 18)) ^ ((el >>> 18) | (eh << 14)) ^ ((el << 23) | (eh >>> 9));
// t1 = h + sigma1 + ch + K[i] + W[i]
var Ki = K[i];
var Kih = Ki.high;
var Kil = Ki.low;
var t1l = hl + sigma1l;
var t1h = hh + sigma1h + ((t1l >>> 0) < (hl >>> 0) ? 1 : 0);
var t1l = t1l + chl;
var t1h = t1h + chh + ((t1l >>> 0) < (chl >>> 0) ? 1 : 0);
var t1l = t1l + Kil;
var t1h = t1h + Kih + ((t1l >>> 0) < (Kil >>> 0) ? 1 : 0);
var t1l = t1l + Wil;
var t1h = t1h + Wih + ((t1l >>> 0) < (Wil >>> 0) ? 1 : 0);
// t2 = sigma0 + maj
var t2l = sigma0l + majl;
var t2h = sigma0h + majh + ((t2l >>> 0) < (sigma0l >>> 0) ? 1 : 0);
// Update working variables
hh = gh;
hl = gl;
gh = fh;
gl = fl;
fh = eh;
fl = el;
el = (dl + t1l) | 0;
eh = (dh + t1h + ((el >>> 0) < (dl >>> 0) ? 1 : 0)) | 0;
dh = ch;
dl = cl;
ch = bh;
cl = bl;
bh = ah;
bl = al;
al = (t1l + t2l) | 0;
ah = (t1h + t2h + ((al >>> 0) < (t1l >>> 0) ? 1 : 0)) | 0;
}
// Intermediate hash value
H0l = H0.low = (H0l + al);
H0.high = (H0h + ah + ((H0l >>> 0) < (al >>> 0) ? 1 : 0));
H1l = H1.low = (H1l + bl);
H1.high = (H1h + bh + ((H1l >>> 0) < (bl >>> 0) ? 1 : 0));
H2l = H2.low = (H2l + cl);
H2.high = (H2h + ch + ((H2l >>> 0) < (cl >>> 0) ? 1 : 0));
H3l = H3.low = (H3l + dl);
H3.high = (H3h + dh + ((H3l >>> 0) < (dl >>> 0) ? 1 : 0));
H4l = H4.low = (H4l + el);
H4.high = (H4h + eh + ((H4l >>> 0) < (el >>> 0) ? 1 : 0));
H5l = H5.low = (H5l + fl);
H5.high = (H5h + fh + ((H5l >>> 0) < (fl >>> 0) ? 1 : 0));
H6l = H6.low = (H6l + gl);
H6.high = (H6h + gh + ((H6l >>> 0) < (gl >>> 0) ? 1 : 0));
H7l = H7.low = (H7l + hl);
H7.high = (H7h + hh + ((H7l >>> 0) < (hl >>> 0) ? 1 : 0));
},
_doFinalize: function () {
// Shortcuts
var data = this._data;
var dataWords = data.words;
var nBitsTotal = this._nDataBytes * 8;
var nBitsLeft = data.sigBytes * 8;
// Add padding
dataWords[nBitsLeft >>> 5] |= 0x80 << (24 - nBitsLeft % 32);
dataWords[(((nBitsLeft + 128) >>> 10) << 5) + 30] = Math.floor(nBitsTotal / 0x100000000);
dataWords[(((nBitsLeft + 128) >>> 10) << 5) + 31] = nBitsTotal;
data.sigBytes = dataWords.length * 4;
// Hash final blocks
this._process();
// Convert hash to 32-bit word array before returning
var hash = this._hash.toX32();
// Return final computed hash
return hash;
},
clone: function () {
var clone = Hasher.clone.call(this);
clone._hash = this._hash.clone();
return clone;
},
blockSize: 1024/32
});
/**
* Shortcut function to the hasher's object interface.
*
* @param {WordArray|string} message The message to hash.
*
* @return {WordArray} The hash.
*
* @static
*
* @example
*
* var hash = CryptoJS.SHA512('message');
* var hash = CryptoJS.SHA512(wordArray);
*/
C.SHA512 = Hasher._createHelper(SHA512);
/**
* Shortcut function to the HMAC's object interface.
*
* @param {WordArray|string} message The message to hash.
* @param {WordArray|string} key The secret key.
*
* @return {WordArray} The HMAC.
*
* @static
*
* @example
*
* var hmac = CryptoJS.HmacSHA512(message, key);
*/
C.HmacSHA512 = Hasher._createHmacHelper(SHA512);
}());
/*
CryptoJS v3.1.2
sha3.js
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function (Math) {
// Shortcuts
var C = CryptoJS;
var C_lib = C.lib;
var WordArray = C_lib.WordArray;
var Hasher = C_lib.Hasher;
var C_x64 = C.x64;
var X64Word = C_x64.Word;
var C_algo = C.algo;
// Constants tables
var RHO_OFFSETS = [];
var PI_INDEXES = [];
var ROUND_CONSTANTS = [];
// Compute Constants
(function () {
// Compute rho offset constants
var x = 1, y = 0;
for (var t = 0; t < 24; t++) {
RHO_OFFSETS[x + 5 * y] = ((t + 1) * (t + 2) / 2) % 64;
var newX = y % 5;
var newY = (2 * x + 3 * y) % 5;
x = newX;
y = newY;
}
// Compute pi index constants
for (var x = 0; x < 5; x++) {
for (var y = 0; y < 5; y++) {
PI_INDEXES[x + 5 * y] = y + ((2 * x + 3 * y) % 5) * 5;
}
}
// Compute round constants
var LFSR = 0x01;
for (var i = 0; i < 24; i++) {
var roundConstantMsw = 0;
var roundConstantLsw = 0;
for (var j = 0; j < 7; j++) {
if (LFSR & 0x01) {
var bitPosition = (1 << j) - 1;
if (bitPosition < 32) {
roundConstantLsw ^= 1 << bitPosition;
} else /* if (bitPosition >= 32) */ {
roundConstantMsw ^= 1 << (bitPosition - 32);
}
}
// Compute next LFSR
if (LFSR & 0x80) {
// Primitive polynomial over GF(2): x^8 + x^6 + x^5 + x^4 + 1
LFSR = (LFSR << 1) ^ 0x71;
} else {
LFSR <<= 1;
}
}
ROUND_CONSTANTS[i] = X64Word.create(roundConstantMsw, roundConstantLsw);
}
}());
// Reusable objects for temporary values
var T = [];
(function () {
for (var i = 0; i < 25; i++) {
T[i] = X64Word.create();
}
}());
/**
* SHA-3 hash algorithm.
*/
var SHA3 = C_algo.SHA3 = Hasher.extend({
/**
* Configuration options.
*
* @property {number} outputLength
* The desired number of bits in the output hash.
* Only values permitted are: 224, 256, 384, 512.
* Default: 512
*/
cfg: Hasher.cfg.extend({
outputLength: 512
}),
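// Illustrative cfg usage: the CryptoJS.SHA3 helper defined below passes a
// cfg object through to the hasher, so a 256-bit digest can be requested as
//
//   var hash256 = CryptoJS.SHA3('message', { outputLength: 256 });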
_doReset: function () {
var state = this._state = [];
for (var i = 0; i < 25; i++) {
state[i] = new X64Word.init();
}
this.blockSize = (1600 - 2 * this.cfg.outputLength) / 32;
},
_doProcessBlock: function (M, offset) {
// Shortcuts
var state = this._state;
var nBlockSizeLanes = this.blockSize / 2;
// Absorb
for (var i = 0; i < nBlockSizeLanes; i++) {
// Shortcuts
var M2i = M[offset + 2 * i];
var M2i1 = M[offset + 2 * i + 1];
// Swap endian
M2i = (
(((M2i << 8) | (M2i >>> 24)) & 0x00ff00ff) |
(((M2i << 24) | (M2i >>> 8)) & 0xff00ff00)
);
M2i1 = (
(((M2i1 << 8) | (M2i1 >>> 24)) & 0x00ff00ff) |
(((M2i1 << 24) | (M2i1 >>> 8)) & 0xff00ff00)
);
// Absorb message into state
var lane = state[i];
lane.high ^= M2i1;
lane.low ^= M2i;
}
// Rounds
for (var round = 0; round < 24; round++) {
// Theta
for (var x = 0; x < 5; x++) {
// Mix column lanes
var tMsw = 0, tLsw = 0;
for (var y = 0; y < 5; y++) {
var lane = state[x + 5 * y];
tMsw ^= lane.high;
tLsw ^= lane.low;
}
// Temporary values
var Tx = T[x];
Tx.high = tMsw;
Tx.low = tLsw;
}
for (var x = 0; x < 5; x++) {
// Shortcuts
var Tx4 = T[(x + 4) % 5];
var Tx1 = T[(x + 1) % 5];
var Tx1Msw = Tx1.high;
var Tx1Lsw = Tx1.low;
// Mix surrounding columns
var tMsw = Tx4.high ^ ((Tx1Msw << 1) | (Tx1Lsw >>> 31));
var tLsw = Tx4.low ^ ((Tx1Lsw << 1) | (Tx1Msw >>> 31));
for (var y = 0; y < 5; y++) {
var lane = state[x + 5 * y];
lane.high ^= tMsw;
lane.low ^= tLsw;
}
}
// Rho Pi
for (var laneIndex = 1; laneIndex < 25; laneIndex++) {
// Shortcuts
var lane = state[laneIndex];
var laneMsw = lane.high;
var laneLsw = lane.low;
var rhoOffset = RHO_OFFSETS[laneIndex];
// Rotate lanes
if (rhoOffset < 32) {
var tMsw = (laneMsw << rhoOffset) | (laneLsw >>> (32 - rhoOffset));
var tLsw = (laneLsw << rhoOffset) | (laneMsw >>> (32 - rhoOffset));
} else /* if (rhoOffset >= 32) */ {
var tMsw = (laneLsw << (rhoOffset - 32)) | (laneMsw >>> (64 - rhoOffset));
var tLsw = (laneMsw << (rhoOffset - 32)) | (laneLsw >>> (64 - rhoOffset));
}
// Transpose lanes
var TPiLane = T[PI_INDEXES[laneIndex]];
TPiLane.high = tMsw;
TPiLane.low = tLsw;
}
// Rho pi at x = y = 0
var T0 = T[0];
var state0 = state[0];
T0.high = state0.high;
T0.low = state0.low;
// Chi
for (var x = 0; x < 5; x++) {
for (var y = 0; y < 5; y++) {
// Shortcuts
var laneIndex = x + 5 * y;
var lane = state[laneIndex];
var TLane = T[laneIndex];
var Tx1Lane = T[((x + 1) % 5) + 5 * y];
var Tx2Lane = T[((x + 2) % 5) + 5 * y];
// Mix rows
lane.high = TLane.high ^ (~Tx1Lane.high & Tx2Lane.high);
lane.low = TLane.low ^ (~Tx1Lane.low & Tx2Lane.low);
}
}
// Iota
var lane = state[0];
var roundConstant = ROUND_CONSTANTS[round];
lane.high ^= roundConstant.high;
lane.low ^= roundConstant.low;
}
},
_doFinalize: function () {
// Shortcuts
var data = this._data;
var dataWords = data.words;
var nBitsTotal = this._nDataBytes * 8;
var nBitsLeft = data.sigBytes * 8;
var blockSizeBits = this.blockSize * 32;
// Add padding
dataWords[nBitsLeft >>> 5] |= 0x1 << (24 - nBitsLeft % 32);
dataWords[((Math.ceil((nBitsLeft + 1) / blockSizeBits) * blockSizeBits) >>> 5) - 1] |= 0x80;
data.sigBytes = dataWords.length * 4;
// Hash final blocks
this._process();
// Shortcuts
var state = this._state;
var outputLengthBytes = this.cfg.outputLength / 8;
var outputLengthLanes = outputLengthBytes / 8;
// Squeeze
var hashWords = [];
for (var i = 0; i < outputLengthLanes; i++) {
// Shortcuts
var lane = state[i];
var laneMsw = lane.high;
var laneLsw = lane.low;
// Swap endian
laneMsw = (
(((laneMsw << 8) | (laneMsw >>> 24)) & 0x00ff00ff) |
(((laneMsw << 24) | (laneMsw >>> 8)) & 0xff00ff00)
);
laneLsw = (
(((laneLsw << 8) | (laneLsw >>> 24)) & 0x00ff00ff) |
(((laneLsw << 24) | (laneLsw >>> 8)) & 0xff00ff00)
);
// Squeeze state to retrieve hash
hashWords.push(laneLsw);
hashWords.push(laneMsw);
}
// Return final computed hash
return new WordArray.init(hashWords, outputLengthBytes);
},
clone: function () {
var clone = Hasher.clone.call(this);
var state = clone._state = this._state.slice(0);
for (var i = 0; i < 25; i++) {
state[i] = state[i].clone();
}
return clone;
}
});
/**
* Shortcut function to the hasher's object interface.
*
* @param {WordArray|string} message The message to hash.
*
* @return {WordArray} The hash.
*
* @static
*
* @example
*
* var hash = CryptoJS.SHA3('message');
* var hash = CryptoJS.SHA3(wordArray);
*/
C.SHA3 = Hasher._createHelper(SHA3);
/**
* Shortcut function to the HMAC's object interface.
*
* @param {WordArray|string} message The message to hash.
* @param {WordArray|string} key The secret key.
*
* @return {WordArray} The HMAC.
*
* @static
*
* @example
*
* var hmac = CryptoJS.HmacSHA3(message, key);
*/
C.HmacSHA3 = Hasher._createHmacHelper(SHA3);
}(Math));
; browserify_shim__define__module__export__(typeof CryptoJS != "undefined" ? CryptoJS : window.CryptoJS);
}).call(global, undefined, undefined, undefined, undefined, function defineExport(ex) { module.exports = ex; });
}).call(this,typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}],2:[function(_dereq_,module,exports){
(function (global){
;__browserify_shim_require__=_dereq_;(function browserifyShim(module, exports, _dereq_, define, browserify_shim__define__module__export__) {
/**
* Lawnchair!
* ---
* clientside json store
*
*/
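// Illustrative usage (assumes at least one of the adapters registered
// below is valid in this environment):
//
//   new Lawnchair({ name: 'notes' }, function (store) {
//     store.save({ key: 'greeting', text: 'hello' }, function (obj) {
//       store.get('greeting', function (rec) { console.log(rec.text) })
//     })
//   })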
var Lawnchair = function (options, callback) {
// ensure Lawnchair was called as a constructor
if (!(this instanceof Lawnchair)) return new Lawnchair(options, callback);
// lawnchair requires json
if (!JSON) throw 'JSON unavailable! Include http://www.json.org/json2.js to fix.'
// options are optional; callback is not
if (arguments.length <= 2 && arguments.length > 0) {
callback = (typeof arguments[0] === 'function') ? arguments[0] : arguments[1];
options = (typeof arguments[0] === 'function') ? {} : arguments[0];
} else {
throw 'Incorrect # of ctor args!'
}
// TODO perhaps allow for pub/sub instead?
if (typeof callback !== 'function') throw 'No callback was provided';
// default configuration
this.record = options.record || 'record' // default for records
this.name = options.name || 'records' // default name for underlying store
// mixin first valid adapter
var adapter
// if the adapter is passed in we try to load that only
if (options.adapter) {
// the argument passed should be an array of preferred adapters
// if it is not, we convert it
if(typeof(options.adapter) === 'string'){
options.adapter = [options.adapter];
}
// iterates over the array of passed adapters
for(var j = 0, k = options.adapter.length; j < k; j++){
// iterates over the array of available adapters
for (var i = Lawnchair.adapters.length-1; i >= 0; i--) {
if (Lawnchair.adapters[i].adapter === options.adapter[j]) {
adapter = Lawnchair.adapters[i].valid() ? Lawnchair.adapters[i] : undefined;
if (adapter) break
}
}
if (adapter) break
}
// otherwise find the first valid adapter for this env
}
else {
for (var i = 0, l = Lawnchair.adapters.length; i < l; i++) {
adapter = Lawnchair.adapters[i].valid() ? Lawnchair.adapters[i] : undefined
if (adapter) break
}
}
// we have failed
if (!adapter) throw 'No valid adapter.'
// yay! mixin the adapter
for (var j in adapter)
this[j] = adapter[j]
// call init for each mixed in plugin
for (var i = 0, l = Lawnchair.plugins.length; i < l; i++)
Lawnchair.plugins[i].call(this)
// init the adapter
this.init(options, callback)
}
Lawnchair.adapters = []
/**
* queues an adapter for mixin
* ===
* - ensures an adapter conforms to a specific interface
*
*/
Lawnchair.adapter = function (id, obj) {
// add the adapter id to the adapter obj
// ugly here for a cleaner dsl for implementing adapters
obj['adapter'] = id
// methods required to implement a lawnchair adapter
var implementing = 'adapter valid init keys save batch get exists all remove nuke'.split(' ')
, indexOf = this.prototype.indexOf
// mix in the adapter
for (var i in obj) {
if (indexOf(implementing, i) === -1) throw 'Invalid adapter! Nonstandard method: ' + i
}
// if we made it this far the adapter interface is valid
// insert the new adapter as the preferred adapter
Lawnchair.adapters.splice(0,0,obj)
}
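// Illustrative registration sketch (a hypothetical adapter; only method
// names from the 'implementing' list above are accepted, anything else
// throws 'Invalid adapter!'):
//
//   Lawnchair.adapter('my-store', {
//     valid: function () { return true },
//     init: function (options, callback) { this.fn(this.name, callback).call(this, this) },
//     keys: function (cb) { return this },
//     save: function (obj, cb) { return this },
//     batch: function (objs, cb) { return this },
//     get: function (key, cb) { return this },
//     exists: function (key, cb) { return this },
//     all: function (cb) { return this },
//     remove: function (key, cb) { return this },
//     nuke: function (cb) { return this }
//   })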
Lawnchair.plugins = []
/**
* generic shallow extension for plugins
* ===
* - if an init method is found it registers it to be called when the lawnchair is inited
* - yes we could use hasOwnProp but nobody here is an asshole
*/
Lawnchair.plugin = function (obj) {
for (var i in obj)
i === 'init' ? Lawnchair.plugins.push(obj[i]) : this.prototype[i] = obj[i]
}
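// Illustrative plugin sketch (hypothetical 'count' helper; an 'init'
// member would instead be queued and run for each new store):
//
//   Lawnchair.plugin({
//     count: function (cb) {
//       this.all(function (r) { this.lambda(cb).call(this, r.length) })
//     }
//   })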
/**
* helpers
*
*/
Lawnchair.prototype = {
isArray: Array.isArray || function(o) { return Object.prototype.toString.call(o) === '[object Array]' },
/**
* this code exists for ie8... for more background see:
* http://www.flickr.com/photos/westcoastlogic/5955365742/in/photostream
*/
indexOf: function(ary, item, i, l) {
if (ary.indexOf) return ary.indexOf(item)
for (i = 0, l = ary.length; i < l; i++) if (ary[i] === item) return i
return -1
},
// awesome shorthand callbacks as strings. this is shameless theft from dojo.
lambda: function (callback) {
return this.fn(this.record, callback)
},
// first stab at named parameters for terse callbacks; dojo: first != best // ;D
fn: function (name, callback) {
return typeof callback == 'string' ? new Function(name, callback) : callback
},
// returns a unique identifier (by way of Backbone.localStorage.js)
// TODO investigate smaller UUIDs to cut on storage cost
uuid: function () {
var S4 = function () {
return (((1+Math.random())*0x10000)|0).toString(16).substring(1);
}
return (S4()+S4()+"-"+S4()+"-"+S4()+"-"+S4()+"-"+S4()+S4()+S4());
},
// a classic iterator
each: function (callback) {
var cb = this.lambda(callback)
// iterate from chain
if (this.__results) {
for (var i = 0, l = this.__results.length; i < l; i++) cb.call(this, this.__results[i], i)
}
// otherwise iterate the entire collection
else {
this.all(function(r) {
for (var i = 0, l = r.length; i < l; i++) cb.call(this, r[i], i)
})
}
return this
}
// --
};
// window.name code courtesy Remy Sharp: http://24ways.org/2009/breaking-out-the-edges-of-the-browser
Lawnchair.adapter('window-name', (function() {
if (typeof window==='undefined') {
window = { top: { } }; // node/optimizer compatibility
}
// edited from the original here by elsigh
// Some sites store JSON data in window.top.name, but some folks (twitter on iPad)
// put simple strings in there - we should make sure not to cause a SyntaxError.
var data = {}
try {
data = JSON.parse(window.top.name)
} catch (e) {}
return {
valid: function () {
return typeof window.top.name != 'undefined'
},
init: function (options, callback) {
data[this.name] = data[this.name] || {index:[],store:{}}
this.index = data[this.name].index
this.store = data[this.name].store
this.fn(this.name, callback).call(this, this)
return this
},
keys: function (callback) {
this.fn('keys', callback).call(this, this.index)
return this
},
save: function (obj, cb) {
// data[key] = value + ''; // force to string
// window.top.name = JSON.stringify(data);
var key = obj.key || this.uuid()
this.exists(key, function(exists) {
if (!exists) {
if (obj.key) delete obj.key
this.index.push(key)
}
this.store[key] = obj
try {
window.top.name = JSON.stringify(data) // TODO wow, this is the only diff from the memory adapter
} catch(e) {
// restore index/store to previous value before JSON exception
if (!exists) {
this.index.pop();
delete this.store[key];
}
throw e;
}
if (cb) {
obj.key = key
this.lambda(cb).call(this, obj)
}
})
return this
},
batch: function (objs, cb) {
var r = []
for (var i = 0, l = objs.length; i < l; i++) {
this.save(objs[i], function(record) {
r.push(record)
})
}
if (cb) this.lambda(cb).call(this, r)
return this
},
get: function (keyOrArray, cb) {
var r;
if (this.isArray(keyOrArray)) {
r = []
for (var i = 0, l = keyOrArray.length; i < l; i++) {
r.push(this.store[keyOrArray[i]])
}
} else {
r = this.store[keyOrArray]
if (r) r.key = keyOrArray
}
if (cb) this.lambda(cb).call(this, r)
return this
},
exists: function (key, cb) {
this.lambda(cb).call(this, !!(this.store[key]))
return this
},
all: function (cb) {
var r = []
for (var i = 0, l = this.index.length; i < l; i++) {
var obj = this.store[this.index[i]]
obj.key = this.index[i]
r.push(obj)
}
this.fn(this.name, cb).call(this, r)
return this
},
remove: function (keyOrArray, cb) {
var del = this.isArray(keyOrArray) ? keyOrArray : [keyOrArray]
for (var i = 0, l = del.length; i < l; i++) {
var key = del[i].key ? del[i].key : del[i]
var where = this.indexOf(this.index, key)
if (where < 0) continue /* key not present */
delete this.store[key]
this.index.splice(where, 1)
}
window.top.name = JSON.stringify(data)
if (cb) this.lambda(cb).call(this)
return this
},
nuke: function (cb) {
this.store = data[this.name].store = {}
this.index = data[this.name].index = []
window.top.name = JSON.stringify(data)
if (cb) this.lambda(cb).call(this)
return this
}
}
/////
})())
/**
* dom storage adapter
* ===
* - originally authored by Joseph Pecoraro
*
*/
//
// TODO does it make sense to be chainable all over the place?
// chainable: nuke, remove, all, get, save, all
// not chainable: valid, keys
//
Lawnchair.adapter('dom', (function() {
var storage = null;
try{
storage = window.localStorage;
}catch(e){
}
// the indexer is an encapsulation of the helpers needed to keep an ordered index of the keys
var indexer = function(name) {
return {
// the key
key: name + '._index_',
// returns the index
all: function() {
var a = storage.getItem(this.key)
if (a) {
a = JSON.parse(a)
}
if (a === null) storage.setItem(this.key, JSON.stringify([])) // lazy init
return JSON.parse(storage.getItem(this.key))
},
// adds a key to the index
add: function (key) {
var a = this.all()
a.push(key)
storage.setItem(this.key, JSON.stringify(a))
},
// deletes a key from the index
del: function (key) {
var a = this.all(), r = []
// FIXME this is crazy inefficient but I'm in a strata meeting and half concentrating
for (var i = 0, l = a.length; i < l; i++) {
if (a[i] != key) r.push(a[i])
}
storage.setItem(this.key, JSON.stringify(r))
},
// returns index for a key
find: function (key) {
var a = this.all()
for (var i = 0, l = a.length; i < l; i++) {
if (key === a[i]) return i
}
return false
}
}
}
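// Note: for a store named 'records' the ordered key index above lives in
// localStorage under 'records._index_', alongside the 'records.<key>'
// entries written by save() below.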
// adapter api
return {
// ensure we are in an env with localStorage
valid: function () {
return !!storage && function() {
// in mobile safari if safe browsing is enabled, window.storage
// is defined but setItem calls throw exceptions.
var success = true
var value = Math.random()
try {
storage.setItem(value, value)
} catch (e) {
success = false
}
storage.removeItem(value)
return success
}()
},
init: function (options, callback) {
this.indexer = indexer(this.name)
if (callback) this.fn(this.name, callback).call(this, this)
},
save: function (obj, callback) {
var key = obj.key ? this.name + '.' + obj.key : this.name + '.' + this.uuid()
// now we kill the key and use it in the store collection
delete obj.key;
storage.setItem(key, JSON.stringify(obj))
// if the key is not in the index push it on
if (this.indexer.find(key) === false) this.indexer.add(key)
obj.key = key.slice(this.name.length + 1)
if (callback) {
this.lambda(callback).call(this, obj)
}
return this
},
batch: function (ary, callback) {
var saved = []
// not particularly efficient but this is more for sqlite situations
for (var i = 0, l = ary.length; i < l; i++) {
this.save(ary[i], function(r){
saved.push(r)
})
}
if (callback) this.lambda(callback).call(this, saved)
return this
},
// accepts [options], callback
keys: function(callback) {
if (callback) {
var name = this.name
var indices = this.indexer.all();
var keys = [];
//Checking for the support of map.
if(Array.prototype.map) {
keys = indices.map(function(r){ return r.replace(name + '.', '') })
} else {
// fallback without map: walk the index values themselves, not their positions
for (var i = 0, l = indices.length; i < l; i++) {
keys.push(indices[i].replace(name + '.', ''));
}
}
this.fn('keys', callback).call(this, keys)
}
return this // TODO options for limit/offset, return promise
},
get: function (key, callback) {
if (this.isArray(key)) {
var r = []
for (var i = 0, l = key.length; i < l; i++) {
var k = this.name + '.' + key[i]
var obj = storage.getItem(k)
if (obj) {
obj = JSON.parse(obj)
obj.key = key[i]
}
r.push(obj)
}
if (callback) this.lambda(callback).call(this, r)
} else {
var k = this.name + '.' + key
var obj = storage.getItem(k)
if (obj) {
obj = JSON.parse(obj)
obj.key = key
}
if (callback) this.lambda(callback).call(this, obj)
}
return this
},
exists: function (key, cb) {
var exists = this.indexer.find(this.name+'.'+key) !== false;
this.lambda(cb).call(this, exists);
return this;
},
// NOTE adapters cannot set this.__results but plugins do
// this probably should be reviewed
all: function (callback) {
var idx = this.indexer.all()
, r = []
, o
, k
for (var i = 0, l = idx.length; i < l; i++) {
k = idx[i] //v
o = JSON.parse(storage.getItem(k))
o.key = k.replace(this.name + '.', '')
r.push(o)
}
if (callback) this.fn(this.name, callback).call(this, r)
return this
},
remove: function (keyOrArray, callback) {
var self = this;
if (this.isArray(keyOrArray)) {
// batch remove
var i, done = keyOrArray.length;
var removeOne = function(i) {
self.remove(keyOrArray[i], function() {
if ((--done) > 0) { return; }
if (callback) {
self.lambda(callback).call(self);
}
});
};
for (i=0; i < keyOrArray.length; i++)
removeOne(i);
return this;
}
var key = this.name + '.' +
((keyOrArray.key) ? keyOrArray.key : keyOrArray)
this.indexer.del(key)
storage.removeItem(key)
if (callback) this.lambda(callback).call(this)
return this
},
nuke: function (callback) {
this.all(function(r) {
for (var i = 0, l = r.length; i < l; i++) {
this.remove(r[i]);
}
if (callback) this.lambda(callback).call(this)
})
return this
}
}})());
Lawnchair.adapter('webkit-sqlite', (function() {
// private methods
var fail = function(e, i) {
if (console) {
console.log('error in sqlite adapter!', e, i)
}
}, now = function() {
return new Date()
} // FIXME need to use better date fn
// not entirely sure if this is needed...
// public methods
return {
valid: function() {
return !!(window.openDatabase)
},
init: function(options, callback) {
var that = this,
cb = that.fn(that.name, callback),
create = "CREATE TABLE IF NOT EXISTS " + this.record + " (id NVARCHAR(32) UNIQUE PRIMARY KEY, value TEXT, timestamp REAL)",
win = function() {
return cb.call(that, that);
}
// open a connection and create the db if it doesn't exist
//FEEDHENRY CHANGE TO ALLOW ERROR CALLBACK
if (options && 'function' === typeof options.fail) fail = options.fail
//END CHANGE
this.db = openDatabase(this.name, '1.0.0', this.name, 65536)
this.db.transaction(function(t) {
t.executeSql(create, [], win, fail)
})
},
keys: function(callback) {
var cb = this.lambda(callback),
that = this,
keys = "SELECT id FROM " + this.record + " ORDER BY timestamp DESC"
this.db.readTransaction(function(t) {
var win = function(xxx, results) {
if (results.rows.length == 0) {
cb.call(that, [])
} else {
var r = [];
for (var i = 0, l = results.rows.length; i < l; i++) {
r.push(results.rows.item(i).id);
}
cb.call(that, r)
}
}
t.executeSql(keys, [], win, fail)
})
return this
},
// you think that's air you're breathing now?
save: function(obj, callback, error) {
var that = this,
objs = (this.isArray(obj) ? obj : [obj]).map(function(o) {
if (!o.key) {
o.key = that.uuid()
}
return o
}),
ins = "INSERT OR REPLACE INTO " + this.record + " (value, timestamp, id) VALUES (?,?,?)",
win = function() {
if (callback) {
that.lambda(callback).call(that, that.isArray(obj) ? objs : objs[0])
}
}, error = error || function() {}, insvals = [],
ts = now()
try {
for (var i = 0, l = objs.length; i < l; i++) {
insvals[i] = [JSON.stringify(objs[i]), ts, objs[i].key];
}
} catch (e) {
fail(e)
throw e;
}
that.db.transaction(function(t) {
for (var i = 0, l = objs.length; i < l; i++)
t.executeSql(ins, insvals[i])
}, function(e, i) {
fail(e, i)
}, win)
return this
},
batch: function(objs, callback) {
return this.save(objs, callback)
},
get: function(keyOrArray, cb) {
var that = this,
sql = '',
args = this.isArray(keyOrArray) ? keyOrArray : [keyOrArray];
// batch selects support
sql = 'SELECT id, value FROM ' + this.record + " WHERE id IN (" +
args.map(function() {
return '?'
}).join(",") + ")"
// FIXME
// will always loop the results but cleans it up if not a batch return at the end..
// in other words, this could be faster
var win = function(xxx, results) {
var o, r, lookup = {}
// map from results to keys
for (var i = 0, l = results.rows.length; i < l; i++) {
o = JSON.parse(results.rows.item(i).value)
o.key = results.rows.item(i).id
lookup[o.key] = o;
}
r = args.map(function(key) {
return lookup[key];
});
if (!that.isArray(keyOrArray)) r = r.length ? r[0] : null
if (cb) that.lambda(cb).call(that, r)
}
this.db.readTransaction(function(t) {
t.executeSql(sql, args, win, fail)
})
return this
},
exists: function(key, cb) {
var is = "SELECT * FROM " + this.record + " WHERE id = ?",
that = this,
win = function(xxx, results) {
if (cb) that.fn('exists', cb).call(that, (results.rows.length > 0))
}
this.db.readTransaction(function(t) {
t.executeSql(is, [key], win, fail)
})
return this
},
all: function(callback) {
var that = this,
all = "SELECT * FROM " + this.record,
r = [],
cb = this.fn(this.name, callback) || undefined,
win = function(xxx, results) {
if (results.rows.length != 0) {
for (var i = 0, l = results.rows.length; i < l; i++) {
var obj = JSON.parse(results.rows.item(i).value)
obj.key = results.rows.item(i).id
r.push(obj)
}
}
if (cb) cb.call(that, r)
}
this.db.readTransaction(function(t) {
t.executeSql(all, [], win, fail)
})
return this
},
remove: function(keyOrArray, cb) {
var that = this,
args, sql = "DELETE FROM " + this.record + " WHERE id ",
win = function() {
if (cb) that.lambda(cb).call(that)
}
if (!this.isArray(keyOrArray)) {
sql += '= ?';
args = [keyOrArray];
} else {
args = keyOrArray;
sql += "IN (" +
args.map(function() {
return '?'
}).join(',') +
")";
}
args = args.map(function(obj) {
return obj.key ? obj.key : obj;
});
this.db.transaction(function(t) {
t.executeSql(sql, args, win, fail);
});
return this;
},
nuke: function(cb) {
var nuke = "DELETE FROM " + this.record,
that = this,
win = cb ? function() {
that.lambda(cb).call(that)
} : function() {}
this.db.transaction(function(t) {
t.executeSql(nuke, [], win, fail)
})
return this
}
}
})());
Lawnchair.adapter('indexed-db', (function(){
function fail(e, i) {
if(console) { console.log('error in indexed-db adapter! ' + e.message, e, i); }
}
function getIDB(){
return window.indexedDB || window.webkitIndexedDB || window.mozIndexedDB || window.oIndexedDB || window.msIndexedDB;
};
return {
valid: function() { return !!getIDB(); },
init:function(options, callback) {
this.idb = getIDB();
this.waiting = [];
var request = this.idb.open(this.name, 2);
var self = this;
var cb = self.fn(self.name, callback);
var win = function(){ return cb.call(self, self); }
//FEEDHENRY CHANGE TO ALLOW ERROR CALLBACK
if(options && 'function' === typeof options.fail) fail = options.fail
//END CHANGE
request.onupgradeneeded = function(event){
self.store = request.result.createObjectStore("teststore", { autoIncrement: true} );
for (var i = 0; i < self.waiting.length; i++) {
self.waiting[i].call(self);
}
self.waiting = [];
win();
}
request.onsuccess = function(event) {
self.db = request.result;
if(self.db.version != "2.0") {
if(typeof self.db.setVersion == 'function'){
var setVrequest = self.db.setVersion("2.0");
// onsuccess is the only place we can create Object Stores
setVrequest.onsuccess = function(e) {
self.store = self.db.createObjectStore("teststore", { autoIncrement: true} );
for (var i = 0; i < self.waiting.length; i++) {
self.waiting[i].call(self);
}
self.waiting = [];
win();
};
setVrequest.onerror = function(e) {
// console.log("Failed to create objectstore " + e);
fail(e);
}
}
} else {
self.store = {};
for (var i = 0; i < self.waiting.length; i++) {
self.waiting[i].call(self);
}
self.waiting = [];
win();
}
}
request.onerror = fail;
},
save:function(obj, callback) {
if(!this.store) {
this.waiting.push(function() {
this.save(obj, callback);
});
return;
}
var self = this;
var win = function (e) { if (callback) { obj.key = e.target.result; self.lambda(callback).call(self, obj) }};
var accessType = "readwrite";
var trans = this.db.transaction(["teststore"],accessType);
var store = trans.objectStore("teststore");
var request = obj.key ? store.put(obj, obj.key) : store.put(obj);
request.onsuccess = win;
request.onerror = fail;
return this;
},
// FIXME this should be a batch insert / just getting the test to pass...
batch: function (objs, cb) {
var results = []
, done = false
, self = this
var updateProgress = function(obj) {
results.push(obj)
done = results.length === objs.length
}
var checkProgress = setInterval(function() {
if (done) {
if (cb) self.lambda(cb).call(self, results)
clearInterval(checkProgress)
}
}, 200)
for (var i = 0, l = objs.length; i < l; i++)
this.save(objs[i], updateProgress)
return this
},
get:function(key, callback) {
if(!this.store || !this.db) {
this.waiting.push(function() {
this.get(key, callback);
});
return;
}
var self = this;
var win = function (e) { if (callback) { self.lambda(callback).call(self, e.target.result) }};
if (!this.isArray(key)){
var req = this.db.transaction("teststore").objectStore("teststore").get(key);
req.onsuccess = win;
req.onerror = function(event) {
//console.log("Failed to find " + key);
fail(event);
};
// FIXME: again the setInterval solution to async callbacks..
} else {
// note: these are hosted.
var results = []
, done = false
, keys = key
var updateProgress = function(obj) {
results.push(obj)
done = results.length === keys.length
}
var checkProgress = setInterval(function() {
if (done) {
if (callback) self.lambda(callback).call(self, results)
clearInterval(checkProgress)
}
}, 200)
for (var i = 0, l = keys.length; i < l; i++)
this.get(keys[i], updateProgress)
}
return this;
},
all:function(callback) {
if(!this.store) {
this.waiting.push(function() {
this.all(callback);
});
return;
}
var cb = this.fn(this.name, callback) || undefined;
var self = this;
var objectStore = this.db.transaction("teststore").objectStore("teststore");
var toReturn = [];
objectStore.openCursor().onsuccess = function(event) {
var cursor = event.target.result;
if (cursor) {
toReturn.push(cursor.value);
cursor.continue();
}
else {
if (cb) cb.call(self, toReturn);
}
};
return this;
},
remove:function(keyOrObj, callback) {
if(!this.store) {
this.waiting.push(function() {
this.remove(keyOrObj, callback);
});
return;
}
if (typeof keyOrObj == "object") {
keyOrObj = keyOrObj.key;
}
var self = this;
var win = function () { if (callback) self.lambda(callback).call(self) };
var request = this.db.transaction(["teststore"], "readwrite").objectStore("teststore").delete(keyOrObj);
request.onsuccess = win;
request.onerror = fail;
return this;
},
nuke:function(callback) {
if(!this.store) {
this.waiting.push(function() {
this.nuke(callback);
});
return;
}
var self = this
, win = callback ? function() { self.lambda(callback).call(self) } : function(){};
try {
this.db
.transaction(["teststore"], "readwrite")
.objectStore("teststore").clear().onsuccess = win;
} catch(e) {
fail();
}
return this;
}
};
})());
Lawnchair.adapter('html5-filesystem', (function(global){
var fail = function( e ) {
if ( console ) console.error(e, e.name);
};
var ls = function( reader, callback, entries ) {
var result = entries || [];
reader.readEntries(function( results ) {
if ( !results.length ) {
if ( callback ) callback( result.map(function(entry) { return entry.name; }) );
} else {
ls( reader, callback, result.concat( Array.prototype.slice.call( results ) ) );
}
}, fail );
};
var filesystems = {};
var root = function( store, callback ) {
var directory = filesystems[store.name];
if ( directory ) {
callback( directory );
} else {
setTimeout(function() {
root( store, callback );
}, 10 );
}
};
var isPhoneGap = function() {
//http://stackoverflow.com/questions/10347539/detect-between-a-mobile-browser-or-a-phonegap-application
//may break.
return document.URL.indexOf('http://') === -1 && document.URL.indexOf('https://') === -1;
}
var createBlobOrString = function(contentstr) {
var retVal;
if (isPhoneGap()) { // phonegap filewriter works with strings, later versions also work with binary arrays, and if passed a blob will just convert to binary array anyway
retVal = contentstr;
} else {
var targetContentType = 'application/json';
try {
retVal = new Blob( [contentstr], { type: targetContentType }); // Blob doesn't exist on all androids
}
catch (e){
// TypeError old chrome and FF
var blobBuilder = window.BlobBuilder ||
window.WebKitBlobBuilder ||
window.MozBlobBuilder ||
window.MSBlobBuilder;
if (e.name == 'TypeError' && blobBuilder) {
var bb = new blobBuilder();
bb.append(contentstr); // contentstr is a JSON string; append it directly
retVal = bb.getBlob(targetContentType);
} else {
// We can't make a Blob, so just return the stringified content
retVal = contentstr;
}
}
}
return retVal;
}
return {
// boolean; true if the adapter is valid for the current environment
valid: function() {
var fs = global.requestFileSystem || global.webkitRequestFileSystem || global.moz_requestFileSystem;
return !!fs;
},
// constructor call and callback. 'name' is the most common option
init: function( options, callback ) {
var me = this;
var error = function(e) { fail(e); if ( callback ) me.fn( me.name, callback ).call( me, me ); };
var size = options.size || 100*1024*1024;
var name = this.name;
//disable file backup to icloud
me.backup = false;
if(typeof options.backup !== 'undefined'){
me.backup = options.backup;
}
function requestFileSystem(amount) {
// console.log('in requestFileSystem');
var fs = global.requestFileSystem || global.webkitRequestFileSystem || global.moz_requestFileSystem;
var mode = window.PERSISTENT;
if(typeof LocalFileSystem !== "undefined" && typeof LocalFileSystem.PERSISTENT !== "undefined"){
mode = LocalFileSystem.PERSISTENT;
}
fs(mode, amount, function(fs) {
// console.log('got FS ', fs);
fs.root.getDirectory( name, {create:true}, function( directory ) {
// console.log('got DIR ', directory);
filesystems[name] = directory;
if ( callback ) me.fn( me.name, callback ).call( me, me );
}, function( e ) {
// console.log('error getting dir :: ', e);
error(e);
});
}, function( e ) {
// console.log('error getting FS :: ', e);
error(e);
});
};
// When in the browser we need to use the html5 file system rather than
// the one cordova supplies, but it needs to request a quota first.
if (typeof navigator.webkitPersistentStorage !== 'undefined') {
navigator.webkitPersistentStorage.requestQuota(size, requestFileSystem, function() {
if (console) console.warn('User declined file storage');
error('User declined file storage');
});
} else {
// Amount is 0 because we pretty much have free reign over the
// amount of storage we use on an android device.
requestFileSystem(0);
}
},
// returns all the keys in the store
keys: function( callback ) {
var me = this;
root( this, function( store ) {
ls( store.createReader(), function( entries ) {
if ( callback ) me.fn( 'keys', callback ).call( me, entries );
});
});
return this;
},
// save an object
save: function( obj, callback ) {
var me = this;
var key = obj.key || this.uuid();
obj.key = key;
var error = function(e) { fail(e); if ( callback ) me.lambda( callback ).call( me ); };
root( this, function( store ) {
var writeContent = function(file, error){
file.createWriter(function( writer ) {
writer.onerror = error;
writer.onwriteend = function() {
// Clear the onWriteEnd handler so the truncate does not call it and cause an infinite loop
this.onwriteend = null;
// Truncate the file at the end of the written contents. This ensures that if we are updating
// a file which was previously longer, we will not be left with old contents beyond the end of
// the current buffer.
this.truncate(this.position);
if ( callback ) me.lambda( callback ).call( me, obj );
};
var contentStr = JSON.stringify(obj);
var writerContent = createBlobOrString(contentStr);
writer.write(writerContent);
}, error );
}
store.getFile( key, {create:true}, function( file ) {
if(typeof file.setMetadata === 'function' && (me.backup === false || me.backup === 'false')){
//set meta data on the file to make sure it won't be backed up by icloud
file.setMetadata(function(){
writeContent(file, error);
}, function(){
writeContent(file, error);
}, {'com.apple.MobileBackup': 1});
} else {
writeContent(file, error);
}
}, error );
});
return this;
},
// batch save array of objs
batch: function( objs, callback ) {
var me = this;
var saved = [];
for ( var i = 0, il = objs.length; i < il; i++ ) {
me.save( objs[i], function( obj ) {
saved.push( obj );
if ( saved.length === il && callback ) {
me.lambda( callback ).call( me, saved );
}
});
}
return this;
},
// retrieve obj (or array of objs) and apply callback to each
get: function( key /* or array */, callback ) {
var me = this;
if ( this.isArray( key ) ) {
var values = [];
for ( var i = 0, il = key.length; i < il; i++ ) {
me.get( key[i], function( result ) {
if ( result ) values.push( result );
if ( values.length === il && callback ) {
me.lambda( callback ).call( me, values );
}
});
}
} else {
var error = function(e) {
fail( e );
if ( callback ) {
me.lambda( callback ).call( me );
}
};
root( this, function( store ) {
store.getFile( key, {create:false}, function( entry ) {
entry.file(function( file ) {
var reader = new FileReader();
reader.onerror = error;
reader.onload = function(e) {
var res = {};
try {
res = JSON.parse( e.target.result);
res.key = key;
} catch (e) {
res = {key:key};
}
if ( callback ) me.lambda( callback ).call( me, res );
};
reader.readAsText( file );
}, error );
}, error );
});
}
return this;
},
// check if an obj exists in the collection
exists: function( key, callback ) {
var me = this;
root( this, function( store ) {
store.getFile( key, {create:false}, function() {
if ( callback ) me.lambda( callback ).call( me, true );
}, function() {
if ( callback ) me.lambda( callback ).call( me, false );
});
});
return this;
},
// returns all the objs to the callback as an array
all: function( callback ) {
var me = this;
if ( callback ) {
this.keys(function( keys ) {
if ( !keys.length ) {
me.fn( me.name, callback ).call( me, [] );
} else {
me.get( keys, function( values ) {
me.fn( me.name, callback ).call( me, values );
});
}
});
}
return this;
},
// remove a doc or collection of em
remove: function( key /* or object */, callback ) {
var me = this;
var error = function(e) { fail( e ); if ( callback ) me.lambda( callback ).call( me ); };
root( this, function( store ) {
store.getFile( (typeof key === 'string' ? key : key.key ), {create:false}, function( file ) {
file.remove(function() {
if ( callback ) me.lambda( callback ).call( me );
}, error );
}, error );
});
return this;
},
// destroy everything
nuke: function( callback ) {
var me = this;
var count = 0;
this.keys(function( keys ) {
if ( !keys.length ) {
if ( callback ) me.lambda( callback ).call( me );
} else {
for ( var i = 0, il = keys.length; i < il; i++ ) {
me.remove( keys[i], function() {
count++;
if ( count === il && callback ) {
me.lambda( callback ).call( me );
}
});
}
}
});
return this;
}
};
}(this)));
Lawnchair.adapter('memory', (function(){
var data = {}
return {
valid: function() { return true },
init: function (options, callback) {
data[this.name] = data[this.name] || {index:[],store:{}}
this.index = data[this.name].index
this.store = data[this.name].store
var cb = this.fn(this.name, callback)
if (cb) cb.call(this, this)
return this
},
keys: function (callback) {
this.fn('keys', callback).call(this, this.index)
return this
},
save: function(obj, cb) {
var key = obj.key || this.uuid()
this.exists(key, function(exists) {
if (!exists) {
if (obj.key) delete obj.key
this.index.push(key)
}
this.store[key] = obj
if (cb) {
obj.key = key
this.lambda(cb).call(this, obj)
}
})
return this
},
batch: function (objs, cb) {
var r = []
for (var i = 0, l = objs.length; i < l; i++) {
this.save(objs[i], function(record) {
r.push(record)
})
}
if (cb) this.lambda(cb).call(this, r)
return this
},
get: function (keyOrArray, cb) {
var r;
if (this.isArray(keyOrArray)) {
r = []
for (var i = 0, l = keyOrArray.length; i < l; i++) {
r.push(this.store[keyOrArray[i]])
}
} else {
r = this.store[keyOrArray]
if (r) r.key = keyOrArray
}
if (cb) this.lambda(cb).call(this, r)
return this
},
exists: function (key, cb) {
this.lambda(cb).call(this, !!(this.store[key]))
return this
},
all: function (cb) {
var r = []
for (var i = 0, l = this.index.length; i < l; i++) {
var obj = this.store[this.index[i]]
obj.key = this.index[i]
r.push(obj)
}
this.fn(this.name, cb).call(this, r)
return this
},
remove: function (keyOrArray, cb) {
var del = this.isArray(keyOrArray) ? keyOrArray : [keyOrArray]
for (var i = 0, l = del.length; i < l; i++) {
var key = del[i].key ? del[i].key : del[i]
var where = this.indexOf(this.index, key)
if (where < 0) continue /* key not present */
delete this.store[key]
this.index.splice(where, 1)
}
if (cb) this.lambda(cb).call(this)
return this
},
nuke: function (cb) {
this.store = data[this.name].store = {}
this.index = data[this.name].index = []
if (cb) this.lambda(cb).call(this)
return this
}
}
/////
})());
; browserify_shim__define__module__export__(typeof Lawnchair != "undefined" ? Lawnchair : window.Lawnchair);
}).call(global, undefined, undefined, undefined, undefined, function defineExport(ex) { module.exports = ex; });
}).call(this,typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}],3:[function(_dereq_,module,exports){
// Copyright (c) 2005 Tom Wu
// All Rights Reserved.
// See "LICENSE" for details.
// Basic JavaScript BN library - subset useful for RSA encryption.
// Bits per digit
var dbits;
// JavaScript engine analysis
var canary = 0xdeadbeefcafe;
var j_lm = ((canary&0xffffff)==0xefcafe);
// (public) Constructor
function BigInteger(a,b,c) {
if(a != null)
if("number" == typeof a) this.fromNumber(a,b,c);
else if(b == null && "string" != typeof a) this.fromString(a,256);
else this.fromString(a,b);
}
// return new, unset BigInteger
function nbi() { return new BigInteger(null); }
// am: Compute w_j += (x*this_i), propagate carries,
// c is initial carry, returns final carry.
// c < 3*dvalue, x < 2*dvalue, this_i < dvalue
// We need to select the fastest one that works in this environment.
// am1: use a single mult and divide to get the high bits,
// max digit bits should be 26 because
// max internal value = 2*dvalue^2-2*dvalue (< 2^53)
function am1(i,x,w,j,c,n) {
while(--n >= 0) {
var v = x*this[i++]+w[j]+c;
c = Math.floor(v/0x4000000);
w[j++] = v&0x3ffffff;
}
return c;
}
// am2 avoids a big mult-and-extract completely.
// Max digit bits should be <= 30 because we do bitwise ops
// on values up to 2*hdvalue^2-hdvalue-1 (< 2^31)
function am2(i,x,w,j,c,n) {
var xl = x&0x7fff, xh = x>>15;
while(--n >= 0) {
var l = this[i]&0x7fff;
var h = this[i++]>>15;
var m = xh*l+h*xl;
l = xl*l+((m&0x7fff)<<15)+w[j]+(c&0x3fffffff);
c = (l>>>30)+(m>>>15)+xh*h+(c>>>30);
w[j++] = l&0x3fffffff;
}
return c;
}
// Alternately, set max digit bits to 28 since some
// browsers slow down when dealing with 32-bit numbers.
function am3(i,x,w,j,c,n) {
var xl = x&0x3fff, xh = x>>14;
while(--n >= 0) {
var l = this[i]&0x3fff;
var h = this[i++]>>14;
var m = xh*l+h*xl;
l = xl*l+((m&0x3fff)<<14)+w[j]+c;
c = (l>>28)+(m>>14)+xh*h;
w[j++] = l&0xfffffff;
}
return c;
}
if(j_lm && (navigator.appName == "Microsoft Internet Explorer")) {
BigInteger.prototype.am = am2;
dbits = 30;
}
else if(j_lm && (navigator.appName != "Netscape")) {
BigInteger.prototype.am = am1;
dbits = 26;
}
else { // Mozilla/Netscape seems to prefer am3
BigInteger.prototype.am = am3;
dbits = 28;
}
BigInteger.prototype.DB = dbits;
BigInteger.prototype.DM = ((1<<dbits)-1);
BigInteger.prototype.DV = (1<<dbits);
var BI_FP = 52;
BigInteger.prototype.FV = Math.pow(2,BI_FP);
BigInteger.prototype.F1 = BI_FP-dbits;
BigInteger.prototype.F2 = 2*dbits-BI_FP;
// Digit conversions
var BI_RM = "0123456789abcdefghijklmnopqrstuvwxyz";
var BI_RC = new Array();
var rr,vv;
rr = "0".charCodeAt(0);
for(vv = 0; vv <= 9; ++vv) BI_RC[rr++] = vv;
rr = "a".charCodeAt(0);
for(vv = 10; vv < 36; ++vv) BI_RC[rr++] = vv;
rr = "A".charCodeAt(0);
for(vv = 10; vv < 36; ++vv) BI_RC[rr++] = vv;
function int2char(n) { return BI_RM.charAt(n); }
function intAt(s,i) {
var c = BI_RC[s.charCodeAt(i)];
return (c==null)?-1:c;
}
// (protected) copy this to r
function bnpCopyTo(r) {
for(var i = this.t-1; i >= 0; --i) r[i] = this[i];
r.t = this.t;
r.s = this.s;
}
// (protected) set from integer value x, -DV <= x < DV
function bnpFromInt(x) {
this.t = 1;
this.s = (x<0)?-1:0;
if(x > 0) this[0] = x;
else if(x < -1) this[0] = x+this.DV;
else this.t = 0;
}
// return bigint initialized to value
function nbv(i) { var r = nbi(); r.fromInt(i); return r; }
// (protected) set from string and radix
function bnpFromString(s,b) {
var k;
if(b == 16) k = 4;
else if(b == 8) k = 3;
else if(b == 256) k = 8; // byte array
else if(b == 2) k = 1;
else if(b == 32) k = 5;
else if(b == 4) k = 2;
else { this.fromRadix(s,b); return; }
this.t = 0;
this.s = 0;
var i = s.length, mi = false, sh = 0;
while(--i >= 0) {
var x = (k==8)?s[i]&0xff:intAt(s,i);
if(x < 0) {
if(s.charAt(i) == "-") mi = true;
continue;
}
mi = false;
if(sh == 0)
this[this.t++] = x;
else if(sh+k > this.DB) {
this[this.t-1] |= (x&((1<<(this.DB-sh))-1))<<sh;
this[this.t++] = (x>>(this.DB-sh));
}
else
this[this.t-1] |= x<<sh;
sh += k;
if(sh >= this.DB) sh -= this.DB;
}
if(k == 8 && (s[0]&0x80) != 0) {
this.s = -1;
if(sh > 0) this[this.t-1] |= ((1<<(this.DB-sh))-1)<<sh;
}
this.clamp();
if(mi) BigInteger.ZERO.subTo(this,this);
}
// (protected) clamp off excess high words
function bnpClamp() {
var c = this.s&this.DM;
while(this.t > 0 && this[this.t-1] == c) --this.t;
}
// (public) return string representation in given radix
function bnToString(b) {
if(this.s < 0) return "-"+this.negate().toString(b);
var k;
if(b == 16) k = 4;
else if(b == 8) k = 3;
else if(b == 2) k = 1;
else if(b == 32) k = 5;
else if(b == 4) k = 2;
else return this.toRadix(b);
var km = (1<<k)-1, d, m = false, r = "", i = this.t;
var p = this.DB-(i*this.DB)%k;
if(i-- > 0) {
if(p < this.DB && (d = this[i]>>p) > 0) { m = true; r = int2char(d); }
while(i >= 0) {
if(p < k) {
d = (this[i]&((1<<p)-1))<<(k-p);
d |= this[--i]>>(p+=this.DB-k);
}
else {
d = (this[i]>>(p-=k))&km;
if(p <= 0) { p += this.DB; --i; }
}
if(d > 0) m = true;
if(m) r += int2char(d);
}
}
return m?r:"0";
}
// (public) -this
function bnNegate() { var r = nbi(); BigInteger.ZERO.subTo(this,r); return r; }
// (public) |this|
function bnAbs() { return (this.s<0)?this.negate():this; }
// (public) return + if this > a, - if this < a, 0 if equal
function bnCompareTo(a) {
var r = this.s-a.s;
if(r != 0) return r;
var i = this.t;
r = i-a.t;
if(r != 0) return (this.s<0)?-r:r;
while(--i >= 0) if((r=this[i]-a[i]) != 0) return r;
return 0;
}
// returns bit length of the integer x
function nbits(x) {
var r = 1, t;
if((t=x>>>16) != 0) { x = t; r += 16; }
if((t=x>>8) != 0) { x = t; r += 8; }
if((t=x>>4) != 0) { x = t; r += 4; }
if((t=x>>2) != 0) { x = t; r += 2; }
if((t=x>>1) != 0) { x = t; r += 1; }
return r;
}
// (public) return the number of bits in "this"
function bnBitLength() {
if(this.t <= 0) return 0;
return this.DB*(this.t-1)+nbits(this[this.t-1]^(this.s&this.DM));
}
// (protected) r = this << n*DB
function bnpDLShiftTo(n,r) {
var i;
for(i = this.t-1; i >= 0; --i) r[i+n] = this[i];
for(i = n-1; i >= 0; --i) r[i] = 0;
r.t = this.t+n;
r.s = this.s;
}
// (protected) r = this >> n*DB
function bnpDRShiftTo(n,r) {
for(var i = n; i < this.t; ++i) r[i-n] = this[i];
r.t = Math.max(this.t-n,0);
r.s = this.s;
}
// (protected) r = this << n
function bnpLShiftTo(n,r) {
var bs = n%this.DB;
var cbs = this.DB-bs;
var bm = (1<<cbs)-1;
var ds = Math.floor(n/this.DB), c = (this.s<<bs)&this.DM, i;
for(i = this.t-1; i >= 0; --i) {
r[i+ds+1] = (this[i]>>cbs)|c;
c = (this[i]&bm)<<bs;
}
for(i = ds-1; i >= 0; --i) r[i] = 0;
r[ds] = c;
r.t = this.t+ds+1;
r.s = this.s;
r.clamp();
}
// (protected) r = this >> n
function bnpRShiftTo(n,r) {
r.s = this.s;
var ds = Math.floor(n/this.DB);
if(ds >= this.t) { r.t = 0; return; }
var bs = n%this.DB;
var cbs = this.DB-bs;
var bm = (1<<bs)-1;
r[0] = this[ds]>>bs;
for(var i = ds+1; i < this.t; ++i) {
r[i-ds-1] |= (this[i]&bm)<<cbs;
r[i-ds] = this[i]>>bs;
}
if(bs > 0) r[this.t-ds-1] |= (this.s&bm)<<cbs;
r.t = this.t-ds;
r.clamp();
}
// (protected) r = this - a
function bnpSubTo(a,r) {
var i = 0, c = 0, m = Math.min(a.t,this.t);
while(i < m) {
c += this[i]-a[i];
r[i++] = c&this.DM;
c >>= this.DB;
}
if(a.t < this.t) {
c -= a.s;
while(i < this.t) {
c += this[i];
r[i++] = c&this.DM;
c >>= this.DB;
}
c += this.s;
}
else {
c += this.s;
while(i < a.t) {
c -= a[i];
r[i++] = c&this.DM;
c >>= this.DB;
}
c -= a.s;
}
r.s = (c<0)?-1:0;
if(c < -1) r[i++] = this.DV+c;
else if(c > 0) r[i++] = c;
r.t = i;
r.clamp();
}
// (protected) r = this * a, r != this,a (HAC 14.12)
// "this" should be the larger one if appropriate.
function bnpMultiplyTo(a,r) {
var x = this.abs(), y = a.abs();
var i = x.t;
r.t = i+y.t;
while(--i >= 0) r[i] = 0;
for(i = 0; i < y.t; ++i) r[i+x.t] = x.am(0,y[i],r,i,0,x.t);
r.s = 0;
r.clamp();
if(this.s != a.s) BigInteger.ZERO.subTo(r,r);
}
// (protected) r = this^2, r != this (HAC 14.16)
function bnpSquareTo(r) {
var x = this.abs();
var i = r.t = 2*x.t;
while(--i >= 0) r[i] = 0;
for(i = 0; i < x.t-1; ++i) {
var c = x.am(i,x[i],r,2*i,0,1);
if((r[i+x.t]+=x.am(i+1,2*x[i],r,2*i+1,c,x.t-i-1)) >= x.DV) {
r[i+x.t] -= x.DV;
r[i+x.t+1] = 1;
}
}
if(r.t > 0) r[r.t-1] += x.am(i,x[i],r,2*i,0,1);
r.s = 0;
r.clamp();
}
// (protected) divide this by m, quotient and remainder to q, r (HAC 14.20)
// r != q, this != m. q or r may be null.
function bnpDivRemTo(m,q,r) {
var pm = m.abs();
if(pm.t <= 0) return;
var pt = this.abs();
if(pt.t < pm.t) {
if(q != null) q.fromInt(0);
if(r != null) this.copyTo(r);
return;
}
if(r == null) r = nbi();
var y = nbi(), ts = this.s, ms = m.s;
var nsh = this.DB-nbits(pm[pm.t-1]); // normalize modulus
if(nsh > 0) { pm.lShiftTo(nsh,y); pt.lShiftTo(nsh,r); }
else { pm.copyTo(y); pt.copyTo(r); }
var ys = y.t;
var y0 = y[ys-1];
if(y0 == 0) return;
var yt = y0*(1<<this.F1)+((ys>1)?y[ys-2]>>this.F2:0);
var d1 = this.FV/yt, d2 = (1<<this.F1)/yt, e = 1<<this.F2;
var i = r.t, j = i-ys, t = (q==null)?nbi():q;
y.dlShiftTo(j,t);
if(r.compareTo(t) >= 0) {
r[r.t++] = 1;
r.subTo(t,r);
}
BigInteger.ONE.dlShiftTo(ys,t);
t.subTo(y,y); // "negative" y so we can replace sub with am later
while(y.t < ys) y[y.t++] = 0;
while(--j >= 0) {
// Estimate quotient digit
var qd = (r[--i]==y0)?this.DM:Math.floor(r[i]*d1+(r[i-1]+e)*d2);
if((r[i]+=y.am(0,qd,r,j,0,ys)) < qd) { // Try it out
y.dlShiftTo(j,t);
r.subTo(t,r);
while(r[i] < --qd) r.subTo(t,r);
}
}
if(q != null) {
r.drShiftTo(ys,q);
if(ts != ms) BigInteger.ZERO.subTo(q,q);
}
r.t = ys;
r.clamp();
if(nsh > 0) r.rShiftTo(nsh,r); // Denormalize remainder
if(ts < 0) BigInteger.ZERO.subTo(r,r);
}
// (public) this mod a
function bnMod(a) {
var r = nbi();
this.abs().divRemTo(a,null,r);
if(this.s < 0 && r.compareTo(BigInteger.ZERO) > 0) a.subTo(r,r);
return r;
}
// Modular reduction using "classic" algorithm
function Classic(m) { this.m = m; }
function cConvert(x) {
if(x.s < 0 || x.compareTo(this.m) >= 0) return x.mod(this.m);
else return x;
}
function cRevert(x) { return x; }
function cReduce(x) { x.divRemTo(this.m,null,x); }
function cMulTo(x,y,r) { x.multiplyTo(y,r); this.reduce(r); }
function cSqrTo(x,r) { x.squareTo(r); this.reduce(r); }
Classic.prototype.convert = cConvert;
Classic.prototype.revert = cRevert;
Classic.prototype.reduce = cReduce;
Classic.prototype.mulTo = cMulTo;
Classic.prototype.sqrTo = cSqrTo;
// (protected) return "-1/this % 2^DB"; useful for Mont. reduction
// justification:
// xy == 1 (mod m)
// xy = 1+km
// xy(2-xy) = (1+km)(1-km)
// x[y(2-xy)] = 1-k^2m^2
// x[y(2-xy)] == 1 (mod m^2)
// if y is 1/x mod m, then y(2-xy) is 1/x mod m^2
// should reduce x and y(2-xy) by m^2 at each step to keep size bounded.
// JS multiply "overflows" differently from C/C++, so care is needed here.
function bnpInvDigit() {
if(this.t < 1) return 0;
var x = this[0];
if((x&1) == 0) return 0;
var y = x&3; // y == 1/x mod 2^2
y = (y*(2-(x&0xf)*y))&0xf; // y == 1/x mod 2^4
y = (y*(2-(x&0xff)*y))&0xff; // y == 1/x mod 2^8
y = (y*(2-(((x&0xffff)*y)&0xffff)))&0xffff; // y == 1/x mod 2^16
// last step - calculate inverse mod DV directly;
// assumes 16 < DB <= 32 and assumes ability to handle 48-bit ints
y = (y*(2-x*y%this.DV))%this.DV; // y == 1/x mod 2^dbits
// we really want the negative inverse, and -DV < y < DV
return (y>0)?this.DV-y:-y;
}
// Montgomery reduction
function Montgomery(m) {
this.m = m;
this.mp = m.invDigit();
this.mpl = this.mp&0x7fff;
this.mph = this.mp>>15;
this.um = (1<<(m.DB-15))-1;
this.mt2 = 2*m.t;
}
// xR mod m
function montConvert(x) {
var r = nbi();
x.abs().dlShiftTo(this.m.t,r);
r.divRemTo(this.m,null,r);
if(x.s < 0 && r.compareTo(BigInteger.ZERO) > 0) this.m.subTo(r,r);
return r;
}
// x/R mod m
function montRevert(x) {
var r = nbi();
x.copyTo(r);
this.reduce(r);
return r;
}
// x = x/R mod m (HAC 14.32)
function montReduce(x) {
while(x.t <= this.mt2) // pad x so am has enough room later
x[x.t++] = 0;
for(var i = 0; i < this.m.t; ++i) {
// faster way of calculating u0 = x[i]*mp mod DV
var j = x[i]&0x7fff;
var u0 = (j*this.mpl+(((j*this.mph+(x[i]>>15)*this.mpl)&this.um)<<15))&x.DM;
// use am to combine the multiply-shift-add into one call
j = i+this.m.t;
x[j] += this.m.am(0,u0,x,i,0,this.m.t);
// propagate carry
while(x[j] >= x.DV) { x[j] -= x.DV; x[++j]++; }
}
x.clamp();
x.drShiftTo(this.m.t,x);
if(x.compareTo(this.m) >= 0) x.subTo(this.m,x);
}
// r = "x^2/R mod m"; x != r
function montSqrTo(x,r) { x.squareTo(r); this.reduce(r); }
// r = "xy/R mod m"; x,y != r
function montMulTo(x,y,r) { x.multiplyTo(y,r); this.reduce(r); }
Montgomery.prototype.convert = montConvert;
Montgomery.prototype.revert = montRevert;
Montgomery.prototype.reduce = montReduce;
Montgomery.prototype.mulTo = montMulTo;
Montgomery.prototype.sqrTo = montSqrTo;
// (protected) true iff this is even
function bnpIsEven() { return ((this.t>0)?(this[0]&1):this.s) == 0; }
// (protected) this^e, e < 2^32, doing sqr and mul with "r" (HAC 14.79)
function bnpExp(e,z) {
if(e > 0xffffffff || e < 1) return BigInteger.ONE;
var r = nbi(), r2 = nbi(), g = z.convert(this), i = nbits(e)-1;
g.copyTo(r);
while(--i >= 0) {
z.sqrTo(r,r2);
if((e&(1<<i)) > 0) z.mulTo(r2,g,r);
else { var t = r; r = r2; r2 = t; }
}
return z.revert(r);
}
// (public) this^e % m, 0 <= e < 2^32
function bnModPowInt(e,m) {
var z;
if(e < 256 || m.isEven()) z = new Classic(m); else z = new Montgomery(m);
return this.exp(e,z);
}
// protected
BigInteger.prototype.copyTo = bnpCopyTo;
BigInteger.prototype.fromInt = bnpFromInt;
BigInteger.prototype.fromString = bnpFromString;
BigInteger.prototype.clamp = bnpClamp;
BigInteger.prototype.dlShiftTo = bnpDLShiftTo;
BigInteger.prototype.drShiftTo = bnpDRShiftTo;
BigInteger.prototype.lShiftTo = bnpLShiftTo;
BigInteger.prototype.rShiftTo = bnpRShiftTo;
BigInteger.prototype.subTo = bnpSubTo;
BigInteger.prototype.multiplyTo = bnpMultiplyTo;
BigInteger.prototype.squareTo = bnpSquareTo;
BigInteger.prototype.divRemTo = bnpDivRemTo;
BigInteger.prototype.invDigit = bnpInvDigit;
BigInteger.prototype.isEven = bnpIsEven;
BigInteger.prototype.exp = bnpExp;
// public
BigInteger.prototype.toString = bnToString;
BigInteger.prototype.negate = bnNegate;
BigInteger.prototype.abs = bnAbs;
BigInteger.prototype.compareTo = bnCompareTo;
BigInteger.prototype.bitLength = bnBitLength;
BigInteger.prototype.mod = bnMod;
BigInteger.prototype.modPowInt = bnModPowInt;
// "constants"
BigInteger.ZERO = nbv(0);
BigInteger.ONE = nbv(1);
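// Illustrative use of the subset above (hex values are placeholders):
//
//   var n = new BigInteger('c0ffee', 16); // parse a hex modulus
//   var m = new BigInteger('02', 16);
//   var c = m.modPowInt(0x10001, n);      // m^65537 mod n
//   var hex = c.toString(16);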
// prng4.js - uses Arcfour as a PRNG
function Arcfour() {
this.i = 0;
this.j = 0;
this.S = new Array();
}
// Initialize arcfour context from key, an array of ints, each from [0..255]
function ARC4init(key) {
var i, j, t;
for(i = 0; i < 256; ++i)
this.S[i] = i;
j = 0;
for(i = 0; i < 256; ++i) {
j = (j + this.S[i] + key[i % key.length]) & 255;
t = this.S[i];
this.S[i] = this.S[j];
this.S[j] = t;
}
this.i = 0;
this.j = 0;
}
function ARC4next() {
var t;
this.i = (this.i + 1) & 255;
this.j = (this.j + this.S[this.i]) & 255;
t = this.S[this.i];
this.S[this.i] = this.S[this.j];
this.S[this.j] = t;
return this.S[(t + this.S[this.i]) & 255];
}
Arcfour.prototype.init = ARC4init;
Arcfour.prototype.next = ARC4next;
// Plug in your RNG constructor here
function prng_newstate() {
return new Arcfour();
}
// Pool size must be a multiple of 4 and greater than 32.
// An array of bytes the size of the pool will be passed to init()
var rng_psize = 256;
// Random number generator - requires a PRNG backend, e.g. prng4.js
// For best results, put code like
// <body onClick='rng_seed_time();' onKeyPress='rng_seed_time();'>
// in your main HTML document.
var rng_state;
var rng_pool;
var rng_pptr;
// Mix in a 32-bit integer into the pool
function rng_seed_int(x) {
rng_pool[rng_pptr++] ^= x & 255;
rng_pool[rng_pptr++] ^= (x >> 8) & 255;
rng_pool[rng_pptr++] ^= (x >> 16) & 255;
rng_pool[rng_pptr++] ^= (x >> 24) & 255;
if(rng_pptr >= rng_psize) rng_pptr -= rng_psize;
}
// Mix in the current time (w/milliseconds) into the pool
function rng_seed_time() {
rng_seed_int(new Date().getTime());
}
// Initialize the pool with junk if needed.
if(rng_pool == null) {
rng_pool = new Array();
rng_pptr = 0;
var t;
if(navigator.appName == "Netscape" && navigator.appVersion < "5" && window.crypto) {
// Extract entropy (256 bits) from NS4 RNG if available
var z = window.crypto.random(32);
for(t = 0; t < z.length; ++t)
rng_pool[rng_pptr++] = z.charCodeAt(t) & 255;
}
while(rng_pptr < rng_psize) { // extract some randomness from Math.random()
t = Math.floor(65536 * Math.random());
rng_pool[rng_pptr++] = t >>> 8;
rng_pool[rng_pptr++] = t & 255;
}
rng_pptr = 0;
rng_seed_time();
//rng_seed_int(window.screenX);
//rng_seed_int(window.screenY);
}
function rng_get_byte() {
if(rng_state == null) {
rng_seed_time();
rng_state = prng_newstate();
rng_state.init(rng_pool);
for(rng_pptr = 0; rng_pptr < rng_pool.length; ++rng_pptr)
rng_pool[rng_pptr] = 0;
rng_pptr = 0;
//rng_pool = null;
}
// TODO: allow reseeding after first request
return rng_state.next();
}
function rng_get_bytes(ba) {
var i;
for(i = 0; i < ba.length; ++i) ba[i] = rng_get_byte();
}
function SecureRandom() {}
SecureRandom.prototype.nextBytes = rng_get_bytes;
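// Illustrative use (fills an array in place with PRNG output):
//
//   var rng = new SecureRandom();
//   var ba = new Array(16);
//   rng.nextBytes(ba); // ba now holds 16 pseudo-random byte values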
//Depends on jsbn.js and rng.js
//Version 1.1: support utf-8 encoding in pkcs1pad2
//convert a (hex) string to a bignum object
function parseBigInt(str,r) {
return new BigInteger(str,r);
}
function linebrk(s,n) {
var ret = "";
var i = 0;
while(i + n < s.length) {
ret += s.substring(i,i+n) + "\n";
i += n;
}
return ret + s.substring(i,s.length);
}
function byte2Hex(b) {
if(b < 0x10)
return "0" + b.toString(16);
else
return b.toString(16);
}
//PKCS#1 (type 2, random) pad input string s to n bytes, and return a bigint
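//The resulting n-byte block has the PKCS#1 v1.5 type-2 layout:
//  0x00 || 0x02 || PS (random non-zero bytes) || 0x00 || UTF-8 message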
function pkcs1pad2(s,n) {
if(n < s.length + 11) { // TODO: fix for utf-8
alert("Message too long for RSA");
return null;
}
var ba = new Array();
var i = s.length - 1;
while(i >= 0 && n > 0) {
var c = s.charCodeAt(i--);
if(c < 128) { // encode using utf-8
ba[--n] = c;
}
else if((c > 127) && (c < 2048)) {
ba[--n] = (c & 63) | 128;
ba[--n] = (c >> 6) | 192;
}
else {
ba[--n] = (c & 63) | 128;
ba[--n] = ((c >> 6) & 63) | 128;
ba[--n] = (c >> 12) | 224;
}
}
ba[--n] = 0;
var rng = new SecureRandom();
var x = new Array();
while(n > 2) { // random non-zero pad
x[0] = 0;
while(x[0] == 0) rng.nextBytes(x);
ba[--n] = x[0];
}
ba[--n] = 2;
ba[--n] = 0;
return new BigInteger(ba);
}
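// For reference, the block built above follows the PKCS#1 v1.5 type-2
// layout (the leading 0x00 is implicit in the resulting BigInteger):
//   0x00 | 0x02 | nonzero random pad bytes | 0x00 | UTF-8 message bytes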
//"empty" RSA key constructor
function RSAKey() {
this.n = null;
this.e = 0;
this.d = null;
this.p = null;
this.q = null;
this.dmp1 = null;
this.dmq1 = null;
this.coeff = null;
}
//Set the public key fields N and e from hex strings
function RSASetPublic(N,E) {
if(N != null && E != null && N.length > 0 && E.length > 0) {
this.n = parseBigInt(N,16);
this.e = parseInt(E,16);
}
else
alert("Invalid RSA public key");
}
//Perform raw public operation on "x": return x^e (mod n)
function RSADoPublic(x) {
return x.modPowInt(this.e, this.n);
}
//Return the PKCS#1 RSA encryption of "text" as an even-length hex string
function RSAEncrypt(text) {
var m = pkcs1pad2(text,(this.n.bitLength()+7)>>3);
if(m == null) return null;
var c = this.doPublic(m);
if(c == null) return null;
var h = c.toString(16);
if((h.length & 1) == 0) return h; else return "0" + h;
}
//Return the PKCS#1 RSA encryption of "text" as a Base64-encoded string
//function RSAEncryptB64(text) {
//var h = this.encrypt(text);
//if(h) return hex2b64(h); else return null;
//}
//protected
RSAKey.prototype.doPublic = RSADoPublic;
//public
RSAKey.prototype.setPublic = RSASetPublic;
RSAKey.prototype.encrypt = RSAEncrypt;
//RSAKey.prototype.encrypt_b64 = RSAEncryptB64;
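// Illustrative use of RSAKey (the hex strings below are placeholders, not a
// real key; a real modulus is typically 1024 bits or more):
//   var rsa = new RSAKey();
//   rsa.setPublic("a5261939...", "10001");   // n and e as hex strings
//   var cipherHex = rsa.encrypt("secret");   // PKCS#1 v1.5 pad, hex output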
module.exports = {
SecureRandom: SecureRandom,
byte2Hex: byte2Hex,
RSAKey: RSAKey
}
},{}],4:[function(_dereq_,module,exports){
// http://wiki.commonjs.org/wiki/Unit_Testing/1.0
//
// THIS IS NOT TESTED NOR LIKELY TO WORK OUTSIDE V8!
//
// Originally from narwhal.js (http://narwhaljs.org)
// Copyright (c) 2009 Thomas Robinson <280north.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the 'Software'), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// when used in node, this will actually load the util module we depend on
// versus loading the builtin util module as happens otherwise
// this is a bug in node module loading as far as I am concerned
var util = _dereq_('util/');
var pSlice = Array.prototype.slice;
var hasOwn = Object.prototype.hasOwnProperty;
// 1. The assert module provides functions that throw
// AssertionError's when particular conditions are not met. The
// assert module must conform to the following interface.
var assert = module.exports = ok;
// 2. The AssertionError is defined in assert.
// new assert.AssertionError({ message: message,
// actual: actual,
// expected: expected })
assert.AssertionError = function AssertionError(options) {
this.name = 'AssertionError';
this.actual = options.actual;
this.expected = options.expected;
this.operator = options.operator;
if (options.message) {
this.message = options.message;
this.generatedMessage = false;
} else {
this.message = getMessage(this);
this.generatedMessage = true;
}
var stackStartFunction = options.stackStartFunction || fail;
if (Error.captureStackTrace) {
Error.captureStackTrace(this, stackStartFunction);
}
else {
// non v8 browsers so we can have a stacktrace
var err = new Error();
if (err.stack) {
var out = err.stack;
// try to strip useless frames
var fn_name = stackStartFunction.name;
var idx = out.indexOf('\n' + fn_name);
if (idx >= 0) {
// once we have located the function frame
// we need to strip out everything before it (and its line)
var next_line = out.indexOf('\n', idx + 1);
out = out.substring(next_line + 1);
}
this.stack = out;
}
}
};
// assert.AssertionError instanceof Error
util.inherits(assert.AssertionError, Error);
function replacer(key, value) {
if (util.isUndefined(value)) {
return '' + value;
}
if (util.isNumber(value) && (isNaN(value) || !isFinite(value))) {
return value.toString();
}
if (util.isFunction(value) || util.isRegExp(value)) {
return value.toString();
}
return value;
}
function truncate(s, n) {
if (util.isString(s)) {
return s.length < n ? s : s.slice(0, n);
} else {
return s;
}
}
function getMessage(self) {
return truncate(JSON.stringify(self.actual, replacer), 128) + ' ' +
self.operator + ' ' +
truncate(JSON.stringify(self.expected, replacer), 128);
}
// At present only the three keys mentioned above are used and
// understood by the spec. Implementations or sub modules can pass
// other keys to the AssertionError's constructor - they will be
// ignored.
// 3. All of the following functions must throw an AssertionError
// when a corresponding condition is not met, with a message that
// may be undefined if not provided. All assertion methods provide
// both the actual and expected values to the assertion error for
// display purposes.
function fail(actual, expected, message, operator, stackStartFunction) {
throw new assert.AssertionError({
message: message,
actual: actual,
expected: expected,
operator: operator,
stackStartFunction: stackStartFunction
});
}
// EXTENSION! allows for well behaved errors defined elsewhere.
assert.fail = fail;
// 4. Pure assertion tests whether a value is truthy, as determined
// by !!guard.
// assert.ok(guard, message_opt);
// This statement is equivalent to assert.equal(true, !!guard,
// message_opt);. To test strictly for the value true, use
// assert.strictEqual(true, guard, message_opt);.
function ok(value, message) {
if (!value) fail(value, true, message, '==', assert.ok);
}
assert.ok = ok;
// 5. The equality assertion tests shallow, coercive equality with
// ==.
// assert.equal(actual, expected, message_opt);
assert.equal = function equal(actual, expected, message) {
if (actual != expected) fail(actual, expected, message, '==', assert.equal);
};
// 6. The non-equality assertion tests for whether two objects are not equal
// with != assert.notEqual(actual, expected, message_opt);
assert.notEqual = function notEqual(actual, expected, message) {
if (actual == expected) {
fail(actual, expected, message, '!=', assert.notEqual);
}
};
// 7. The equivalence assertion tests a deep equality relation.
// assert.deepEqual(actual, expected, message_opt);
assert.deepEqual = function deepEqual(actual, expected, message) {
if (!_deepEqual(actual, expected)) {
fail(actual, expected, message, 'deepEqual', assert.deepEqual);
}
};
function _deepEqual(actual, expected) {
// 7.1. All identical values are equivalent, as determined by ===.
if (actual === expected) {
return true;
} else if (util.isBuffer(actual) && util.isBuffer(expected)) {
if (actual.length != expected.length) return false;
for (var i = 0; i < actual.length; i++) {
if (actual[i] !== expected[i]) return false;
}
return true;
// 7.2. If the expected value is a Date object, the actual value is
// equivalent if it is also a Date object that refers to the same time.
} else if (util.isDate(actual) && util.isDate(expected)) {
return actual.getTime() === expected.getTime();
// 7.3 If the expected value is a RegExp object, the actual value is
// equivalent if it is also a RegExp object with the same source and
// properties (`global`, `multiline`, `lastIndex`, `ignoreCase`).
} else if (util.isRegExp(actual) && util.isRegExp(expected)) {
return actual.source === expected.source &&
actual.global === expected.global &&
actual.multiline === expected.multiline &&
actual.lastIndex === expected.lastIndex &&
actual.ignoreCase === expected.ignoreCase;
// 7.4. Other pairs that do not both pass typeof value == 'object',
// equivalence is determined by ==.
} else if (!util.isObject(actual) && !util.isObject(expected)) {
return actual == expected;
// 7.5 For all other Object pairs, including Array objects, equivalence is
// determined by having the same number of owned properties (as verified
// with Object.prototype.hasOwnProperty.call), the same set of keys
// (although not necessarily the same order), equivalent values for every
// corresponding key, and an identical 'prototype' property. Note: this
// accounts for both named and indexed properties on Arrays.
} else {
return objEquiv(actual, expected);
}
}
function isArguments(object) {
return Object.prototype.toString.call(object) == '[object Arguments]';
}
function objEquiv(a, b) {
if (util.isNullOrUndefined(a) || util.isNullOrUndefined(b))
return false;
// an identical 'prototype' property.
if (a.prototype !== b.prototype) return false;
//~~~I've managed to break Object.keys through screwy arguments passing.
// Converting to array solves the problem.
if (isArguments(a)) {
if (!isArguments(b)) {
return false;
}
a = pSlice.call(a);
b = pSlice.call(b);
return _deepEqual(a, b);
}
try {
var ka = objectKeys(a),
kb = objectKeys(b),
key, i;
} catch (e) {//happens when one is a string literal and the other isn't
return false;
}
// having the same number of owned properties (keys incorporates
// hasOwnProperty)
if (ka.length != kb.length)
return false;
//the same set of keys (although not necessarily the same order),
ka.sort();
kb.sort();
//~~~cheap key test
for (i = ka.length - 1; i >= 0; i--) {
if (ka[i] != kb[i])
return false;
}
//equivalent values for every corresponding key, and
//~~~possibly expensive deep test
for (i = ka.length - 1; i >= 0; i--) {
key = ka[i];
if (!_deepEqual(a[key], b[key])) return false;
}
return true;
}
// 8. The non-equivalence assertion tests for any deep inequality.
// assert.notDeepEqual(actual, expected, message_opt);
assert.notDeepEqual = function notDeepEqual(actual, expected, message) {
if (_deepEqual(actual, expected)) {
fail(actual, expected, message, 'notDeepEqual', assert.notDeepEqual);
}
};
// 9. The strict equality assertion tests strict equality, as determined by ===.
// assert.strictEqual(actual, expected, message_opt);
assert.strictEqual = function strictEqual(actual, expected, message) {
if (actual !== expected) {
fail(actual, expected, message, '===', assert.strictEqual);
}
};
// 10. The strict non-equality assertion tests for strict inequality, as
// determined by !==. assert.notStrictEqual(actual, expected, message_opt);
assert.notStrictEqual = function notStrictEqual(actual, expected, message) {
if (actual === expected) {
fail(actual, expected, message, '!==', assert.notStrictEqual);
}
};
function expectedException(actual, expected) {
if (!actual || !expected) {
return false;
}
if (Object.prototype.toString.call(expected) == '[object RegExp]') {
return expected.test(actual);
} else if (actual instanceof expected) {
return true;
} else if (expected.call({}, actual) === true) {
return true;
}
return false;
}
function _throws(shouldThrow, block, expected, message) {
var actual;
if (util.isString(expected)) {
message = expected;
expected = null;
}
try {
block();
} catch (e) {
actual = e;
}
message = (expected && expected.name ? ' (' + expected.name + ').' : '.') +
(message ? ' ' + message : '.');
if (shouldThrow && !actual) {
fail(actual, expected, 'Missing expected exception' + message);
}
if (!shouldThrow && expectedException(actual, expected)) {
fail(actual, expected, 'Got unwanted exception' + message);
}
if ((shouldThrow && actual && expected &&
!expectedException(actual, expected)) || (!shouldThrow && actual)) {
throw actual;
}
}
// 11. Expected to throw an error:
// assert.throws(block, Error_opt, message_opt);
assert.throws = function(block, /*optional*/error, /*optional*/message) {
_throws.apply(this, [true].concat(pSlice.call(arguments)));
};
// EXTENSION! This is annoying to write outside this module.
assert.doesNotThrow = function(block, /*optional*/message) {
_throws.apply(this, [false].concat(pSlice.call(arguments)));
};
assert.ifError = function(err) { if (err) {throw err;}};
var objectKeys = Object.keys || function (obj) {
var keys = [];
for (var key in obj) {
if (hasOwn.call(obj, key)) keys.push(key);
}
return keys;
};
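// Illustrative use of this assert shim (values chosen for the example):
//   assert.ok(1 === 1, "truthy check");
//   assert.deepEqual({a: [1, 2]}, {a: [1, 2]});        // passes
//   assert.throws(function () { throw new TypeError("boom"); }, TypeError);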
},{"util/":18}],5:[function(_dereq_,module,exports){
// shim for using process in browser
var process = module.exports = {};
process.nextTick = (function () {
var canSetImmediate = typeof window !== 'undefined'
&& window.setImmediate;
var canPost = typeof window !== 'undefined'
&& window.postMessage && window.addEventListener
;
if (canSetImmediate) {
return function (f) { return window.setImmediate(f) };
}
if (canPost) {
var queue = [];
window.addEventListener('message', function (ev) {
var source = ev.source;
if ((source === window || source === null) && ev.data === 'process-tick') {
ev.stopPropagation();
if (queue.length > 0) {
var fn = queue.shift();
fn();
}
}
}, true);
return function nextTick(fn) {
queue.push(fn);
window.postMessage('process-tick', '*');
};
}
return function nextTick(fn) {
setTimeout(fn, 0);
};
})();
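// Illustrative: defers a callback to the next turn of the event loop using
// setImmediate, postMessage, or setTimeout, whichever the shim picked above:
//   process.nextTick(function () { console.log("runs on the next turn"); });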
process.title = 'browser';
process.browser = true;
process.env = {};
process.argv = [];
function noop() {}
process.on = noop;
process.addListener = noop;
process.once = noop;
process.off = noop;
process.removeListener = noop;
process.removeAllListeners = noop;
process.emit = noop;
process.binding = function (name) {
throw new Error('process.binding is not supported');
}
// TODO(shtylman)
process.cwd = function () { return '/' };
process.chdir = function (dir) {
throw new Error('process.chdir is not supported');
};
},{}],6:[function(_dereq_,module,exports){
(function (global){
/*global window, global*/
var util = _dereq_("util")
var assert = _dereq_("assert")
var slice = Array.prototype.slice
var console
var times = {}
if (typeof global !== "undefined" && global.console) {
console = global.console
} else if (typeof window !== "undefined" && window.console) {
console = window.console
} else {
console = {}
}
var functions = [
[log, "log"]
, [info, "info"]
, [warn, "warn"]
, [error, "error"]
, [time, "time"]
, [timeEnd, "timeEnd"]
, [trace, "trace"]
, [dir, "dir"]
, [assert, "assert"]
]
for (var i = 0; i < functions.length; i++) {
var tuple = functions[i]
var f = tuple[0]
var name = tuple[1]
if (!console[name]) {
console[name] = f
}
}
module.exports = console
function log() {}
function info() {
console.log.apply(console, arguments)
}
function warn() {
console.log.apply(console, arguments)
}
function error() {
console.warn.apply(console, arguments)
}
function time(label) {
times[label] = Date.now()
}
function timeEnd(label) {
var time = times[label]
if (!time) {
throw new Error("No such label: " + label)
}
var duration = Date.now() - time
console.log(label + ": " + duration + "ms")
}
function trace() {
var err = new Error()
err.name = "Trace"
err.message = util.format.apply(null, arguments)
console.error(err.stack)
}
function dir(object) {
console.log(util.inspect(object) + "\n")
}
function assert(expression) {
if (!expression) {
var arr = slice.call(arguments, 1)
assert.ok(false, util.format.apply(null, arr))
}
}
}).call(this,typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"assert":4,"util":18}],7:[function(_dereq_,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
function EventEmitter() {
this._events = this._events || {};
this._maxListeners = this._maxListeners || undefined;
}
module.exports = EventEmitter;
// Backwards-compat with node 0.10.x
EventEmitter.EventEmitter = EventEmitter;
EventEmitter.prototype._events = undefined;
EventEmitter.prototype._maxListeners = undefined;
// By default EventEmitters will print a warning if more than 10 listeners are
// added to it. This is a useful default which helps finding memory leaks.
EventEmitter.defaultMaxListeners = 10;
// Obviously not all Emitters should be limited to 10. This function allows
// that to be increased. Set to zero for unlimited.
EventEmitter.prototype.setMaxListeners = function(n) {
if (!isNumber(n) || n < 0 || isNaN(n))
throw TypeError('n must be a positive number');
this._maxListeners = n;
return this;
};
EventEmitter.prototype.emit = function(type) {
var er, handler, len, args, i, listeners;
if (!this._events)
this._events = {};
// If there is no 'error' event listener then throw.
if (type === 'error') {
if (!this._events.error ||
(isObject(this._events.error) && !this._events.error.length)) {
er = arguments[1];
if (er instanceof Error) {
throw er; // Unhandled 'error' event
}
throw TypeError('Uncaught, unspecified "error" event.');
}
}
handler = this._events[type];
if (isUndefined(handler))
return false;
if (isFunction(handler)) {
switch (arguments.length) {
// fast cases
case 1:
handler.call(this);
break;
case 2:
handler.call(this, arguments[1]);
break;
case 3:
handler.call(this, arguments[1], arguments[2]);
break;
// slower
default:
len = arguments.length;
args = new Array(len - 1);
for (i = 1; i < len; i++)
args[i - 1] = arguments[i];
handler.apply(this, args);
}
} else if (isObject(handler)) {
len = arguments.length;
args = new Array(len - 1);
for (i = 1; i < len; i++)
args[i - 1] = arguments[i];
listeners = handler.slice();
len = listeners.length;
for (i = 0; i < len; i++)
listeners[i].apply(this, args);
}
return true;
};
EventEmitter.prototype.addListener = function(type, listener) {
var m;
if (!isFunction(listener))
throw TypeError('listener must be a function');
if (!this._events)
this._events = {};
// To avoid recursion in the case that type === "newListener"! Before
// adding it to the listeners, first emit "newListener".
if (this._events.newListener)
this.emit('newListener', type,
isFunction(listener.listener) ?
listener.listener : listener);
if (!this._events[type])
// Optimize the case of one listener. Don't need the extra array object.
this._events[type] = listener;
else if (isObject(this._events[type]))
// If we've already got an array, just append.
this._events[type].push(listener);
else
// Adding the second element, need to change to array.
this._events[type] = [this._events[type], listener];
// Check for listener leak
if (isObject(this._events[type]) && !this._events[type].warned) {
if (!isUndefined(this._maxListeners)) {
m = this._maxListeners;
} else {
m = EventEmitter.defaultMaxListeners;
}
if (m && m > 0 && this._events[type].length > m) {
this._events[type].warned = true;
console.error('(node) warning: possible EventEmitter memory ' +
'leak detected. %d listeners added. ' +
'Use emitter.setMaxListeners() to increase limit.',
this._events[type].length);
if (typeof console.trace === 'function') {
// not supported in IE 10
console.trace();
}
}
}
return this;
};
EventEmitter.prototype.on = EventEmitter.prototype.addListener;
EventEmitter.prototype.once = function(type, listener) {
if (!isFunction(listener))
throw TypeError('listener must be a function');
var fired = false;
function g() {
this.removeListener(type, g);
if (!fired) {
fired = true;
listener.apply(this, arguments);
}
}
g.listener = listener;
this.on(type, g);
return this;
};
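// Illustrative use of once (event name and payload are made up):
//   var ee = new EventEmitter();
//   ee.once("ready", function (msg) { console.log(msg); });
//   ee.emit("ready", "fires once");   // listener runs, then is removed
//   ee.emit("ready", "ignored");      // no listener left for "ready"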
// emits a 'removeListener' event iff the listener was removed
EventEmitter.prototype.removeListener = function(type, listener) {
var list, position, length, i;
if (!isFunction(listener))
throw TypeError('listener must be a function');
if (!this._events || !this._events[type])
return this;
list = this._events[type];
length = list.length;
position = -1;
if (list === listener ||
(isFunction(list.listener) && list.listener === listener)) {
delete this._events[type];
if (this._events.removeListener)
this.emit('removeListener', type, listener);
} else if (isObject(list)) {
for (i = length; i-- > 0;) {
if (list[i] === listener ||
(list[i].listener && list[i].listener === listener)) {
position = i;
break;
}
}
if (position < 0)
return this;
if (list.length === 1) {
list.length = 0;
delete this._events[type];
} else {
list.splice(position, 1);
}
if (this._events.removeListener)
this.emit('removeListener', type, listener);
}
return this;
};
EventEmitter.prototype.removeAllListeners = function(type) {
var key, listeners;
if (!this._events)
return this;
// not listening for removeListener, no need to emit
if (!this._events.removeListener) {
if (arguments.length === 0)
this._events = {};
else if (this._events[type])
delete this._events[type];
return this;
}
// emit removeListener for all listeners on all events
if (arguments.length === 0) {
for (key in this._events) {
if (key === 'removeListener') continue;
this.removeAllListeners(key);
}
this.removeAllListeners('removeListener');
this._events = {};
return this;
}
listeners = this._events[type];
if (isFunction(listeners)) {
this.removeListener(type, listeners);
} else {
// LIFO order
while (listeners.length)
this.removeListener(type, listeners[listeners.length - 1]);
}
delete this._events[type];
return this;
};
EventEmitter.prototype.listeners = function(type) {
var ret;
if (!this._events || !this._events[type])
ret = [];
else if (isFunction(this._events[type]))
ret = [this._events[type]];
else
ret = this._events[type].slice();
return ret;
};
EventEmitter.listenerCount = function(emitter, type) {
var ret;
if (!emitter._events || !emitter._events[type])
ret = 0;
else if (isFunction(emitter._events[type]))
ret = 1;
else
ret = emitter._events[type].length;
return ret;
};
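// Illustrative: for the emitter in the once example above,
// EventEmitter.listenerCount(ee, "ready") returns 0 after the
// once-listener has fired and been removed.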
function isFunction(arg) {
return typeof arg === 'function';
}
function isNumber(arg) {
return typeof arg === 'number';
}
function isObject(arg) {
return typeof arg === 'object' && arg !== null;
}
function isUndefined(arg) {
return arg === void 0;
}
},{}],8:[function(_dereq_,module,exports){
/*
* loglevel - https://github.com/pimterry/loglevel
*
* Copyright (c) 2013 Tim Perry
* Licensed under the MIT license.
*/
;(function (undefined) {
var undefinedType = "undefined";
(function (name, definition) {
if (typeof module !== 'undefined') {
module.exports = definition();
} else if (typeof define === 'function' && typeof define.amd === 'object') {
define(definition);
} else {
this[name] = definition();
}
}('log', function () {
var self = {};
var noop = function() {};
function realMethod(methodName) {
if (typeof console === undefinedType) {
return noop;
} else if (console[methodName] === undefined) {
if (console.log !== undefined) {
return boundToConsole(console, 'log');
} else {
return noop;
}
} else {
return boundToConsole(console, methodName);
}
}
function boundToConsole(console, methodName) {
var method = console[methodName];
if (method.bind === undefined) {
if (Function.prototype.bind === undefined) {
return functionBindingWrapper(method, console);
} else {
try {
return Function.prototype.bind.call(console[methodName], console);
} catch (e) {
// In IE8 + Modernizr, the bind shim will reject the above, so we fall back to wrapping
return functionBindingWrapper(method, console);
}
}
} else {
return console[methodName].bind(console);
}
}
function functionBindingWrapper(f, context) {
return function() {
Function.prototype.apply.apply(f, [context, arguments]);
};
}
var logMethods = [
"trace",
"debug",
"info",
"warn",
"error"
];
function replaceLoggingMethods(methodFactory) {
for (var ii = 0; ii < logMethods.length; ii++) {
self[logMethods[ii]] = methodFactory(logMethods[ii]);
}
}
function cookiesAvailable() {
return (typeof window !== undefinedType &&
window.document !== undefined &&
window.document.cookie !== undefined);
}
function localStorageAvailable() {
try {
return (typeof window !== undefinedType &&
window.localStorage !== undefined);
} catch (e) {
return false;
}
}
function persistLevelIfPossible(levelNum) {
var localStorageFail = false,
levelName;
for (var key in self.levels) {
if (self.levels.hasOwnProperty(key) && self.levels[key] === levelNum) {
levelName = key;
break;
}
}
if (localStorageAvailable()) {
/*
* Setting localStorage can create a DOM 22 Exception if running in Private mode
* in Safari, so even if it is available we need to catch any errors when trying
* to write to it
*/
try {
window.localStorage['loglevel'] = levelName;
} catch (e) {
localStorageFail = true;
}
} else {
localStorageFail = true;
}
if (localStorageFail && cookiesAvailable()) {
window.document.cookie = "loglevel=" + levelName + ";";
}
}
var cookieRegex = /loglevel=([^;]+)/;
function loadPersistedLevel() {
var storedLevel;
if (localStorageAvailable()) {
storedLevel = window.localStorage['loglevel'];
}
if (storedLevel === undefined && cookiesAvailable()) {
var cookieMatch = cookieRegex.exec(window.document.cookie) || [];
storedLevel = cookieMatch[1];
}
if (self.levels[storedLevel] === undefined) {
storedLevel = "WARN";
}
self.setLevel(self.levels[storedLevel]);
}
/*
*
* Public API
*
*/
self.levels = { "TRACE": 0, "DEBUG": 1, "INFO": 2, "WARN": 3,
"ERROR": 4, "SILENT": 5};
self.setLevel = function (level) {
if (typeof level === "number" && level >= 0 && level <= self.levels.SILENT) {
persistLevelIfPossible(level);
if (level === self.levels.SILENT) {
replaceLoggingMethods(function () {
return noop;
});
return;
} else if (typeof console === undefinedType) {
replaceLoggingMethods(function (methodName) {
return function () {
if (typeof console !== undefinedType) {
self.setLevel(level);
self[methodName].apply(self, arguments);
}
};
});
return "No console available for logging";
} else {
replaceLoggingMethods(function (methodName) {
if (level <= self.levels[methodName.toUpperCase()]) {
return realMethod(methodName);
} else {
return noop;
}
});
}
} else if (typeof level === "string" && self.levels[level.toUpperCase()] !== undefined) {
self.setLevel(self.levels[level.toUpperCase()]);
} else {
throw "log.setLevel() called with invalid level: " + level;
}
};
self.enableAll = function() {
self.setLevel(self.levels.TRACE);
};
self.disableAll = function() {
self.setLevel(self.levels.SILENT);
};
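// Illustrative use of the logger defined above (exposed as `log` in the
// browser or via module.exports; level names come from self.levels):
//   log.setLevel("INFO");             // or log.setLevel(log.levels.INFO)
//   log.info("shown at INFO");
//   log.debug("suppressed at INFO");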
loadPersistedLevel();
return self;
}));
})();
},{}],9:[function(_dereq_,module,exports){
(function (global){
/*! http://mths.be/punycode v1.2.4 by @mathias */
;(function(root) {
/** Detect free variables */
var freeExports = typeof exports == 'object' && exports;
var freeModule = typeof module == 'object' && module &&
module.exports == freeExports && module;
var freeGlobal = typeof global == 'object' && global;
if (freeGlobal.global === freeGlobal || freeGlobal.window === freeGlobal) {
root = freeGlobal;
}
/**
* The `punycode` object.
* @name punycode
* @type Object
*/
var punycode,
/** Highest positive signed 32-bit float value */
maxInt = 2147483647, // aka. 0x7FFFFFFF or 2^31-1
/** Bootstring parameters */
base = 36,
tMin = 1,
tMax = 26,
skew = 38,
damp = 700,
initialBias = 72,
initialN = 128, // 0x80
delimiter = '-', // '\x2D'
/** Regular expressions */
regexPunycode = /^xn--/,
regexNonASCII = /[^ -~]/, // unprintable ASCII chars + non-ASCII chars
regexSeparators = /\x2E|\u3002|\uFF0E|\uFF61/g, // RFC 3490 separators
/** Error messages */
errors = {
'overflow': 'Overflow: input needs wider integers to process',
'not-basic': 'Illegal input >= 0x80 (not a basic code point)',
'invalid-input': 'Invalid input'
},
/** Convenience shortcuts */
baseMinusTMin = base - tMin,
floor = Math.floor,
stringFromCharCode = String.fromCharCode,
/** Temporary variable */
key;
/*--------------------------------------------------------------------------*/
/**
* A generic error utility function.
* @private
* @param {String} type The error type.
* @returns {Error} Throws a `RangeError` with the applicable error message.
*/
function error(type) {
throw RangeError(errors[type]);
}
/**
* A generic `Array#map` utility function.
* @private
* @param {Array} array The array to iterate over.
 * @param {Function} fn The function that gets called for every array
 * item.
* @returns {Array} A new array of values returned by the callback function.
*/
function map(array, fn) {
var length = array.length;
while (length--) {
array[length] = fn(array[length]);
}
return array;
}
/**
* A simple `Array#map`-like wrapper to work with domain name strings.
* @private
 * @param {String} string The domain name.
 * @param {Function} fn The function that gets called for every
 * character.
 * @returns {String} A new string of characters returned by the callback
 * function.
*/
function mapDomain(string, fn) {
return map(string.split(regexSeparators), fn).join('.');
}
/**
* Creates an array containing the numeric code points of each Unicode
* character in the string. While JavaScript uses UCS-2 internally,
* this function will convert a pair of surrogate halves (each of which
* UCS-2 exposes as separate characters) into a single code point,
* matching UTF-16.
* @see `punycode.ucs2.encode`
* @see <http://mathiasbynens.be/notes/javascript-encoding>
* @memberOf punycode.ucs2
* @name decode
* @param {String} string The Unicode input string (UCS-2).
* @returns {Array} The new array of code points.
*/
function ucs2decode(string) {
var output = [],
counter = 0,
length = string.length,
value,
extra;
while (counter < length) {
value = string.charCodeAt(counter++);
if (value >= 0xD800 && value <= 0xDBFF && counter < length) {
// high surrogate, and there is a next character
extra = string.charCodeAt(counter++);
if ((extra & 0xFC00) == 0xDC00) { // low surrogate
output.push(((value & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000);
} else {
// unmatched surrogate; only append this code unit, in case the next
// code unit is the high surrogate of a surrogate pair
output.push(value);
counter--;
}
} else {
output.push(value);
}
}
return output;
}
/**
* Creates a string based on an array of numeric code points.
* @see `punycode.ucs2.decode`
* @memberOf punycode.ucs2
* @name encode
* @param {Array} codePoints The array of numeric code points.
* @returns {String} The new Unicode string (UCS-2).
*/
function ucs2encode(array) {
return map(array, function(value) {
var output = '';
if (value > 0xFFFF) {
value -= 0x10000;
output += stringFromCharCode(value >>> 10 & 0x3FF | 0xD800);
value = 0xDC00 | value & 0x3FF;
}
output += stringFromCharCode(value);
return output;
}).join('');
}
/**
* Converts a basic code point into a digit/integer.
* @see `digitToBasic()`
* @private
* @param {Number} codePoint The basic numeric code point value.
* @returns {Number} The numeric value of a basic code point (for use in
* representing integers) in the range `0` to `base - 1`, or `base` if
* the code point does not represent a value.
*/
function basicToDigit(codePoint) {
if (codePoint - 48 < 10) {
return codePoint - 22;
}
if (codePoint - 65 < 26) {
return codePoint - 65;
}
if (codePoint - 97 < 26) {
return codePoint - 97;
}
return base;
}
/**
* Converts a digit/integer into a basic code point.
* @see `basicToDigit()`
* @private
* @param {Number} digit The numeric value of a basic code point.
* @returns {Number} The basic code point whose value (when used for
* representing integers) is `digit`, which needs to be in the range
* `0` to `base - 1`. If `flag` is non-zero, the uppercase form is
* used; else, the lowercase form is used. The behavior is undefined
* if `flag` is non-zero and `digit` has no uppercase form.
*/
function digitToBasic(digit, flag) {
// 0..25 map to ASCII a..z or A..Z
// 26..35 map to ASCII 0..9
return digit + 22 + 75 * (digit < 26) - ((flag != 0) << 5);
}
/**
* Bias adaptation function as per section 3.4 of RFC 3492.
* http://tools.ietf.org/html/rfc3492#section-3.4
* @private
*/
function adapt(delta, numPoints, firstTime) {
var k = 0;
delta = firstTime ? floor(delta / damp) : delta >> 1;
delta += floor(delta / numPoints);
for (/* no initialization */; delta > baseMinusTMin * tMax >> 1; k += base) {
delta = floor(delta / baseMinusTMin);
}
return floor(k + (baseMinusTMin + 1) * delta / (delta + skew));
}
/**
* Converts a Punycode string of ASCII-only symbols to a string of Unicode
* symbols.
* @memberOf punycode
* @param {String} input The Punycode string of ASCII-only symbols.
* @returns {String} The resulting string of Unicode symbols.
*/
function decode(input) {
// Don't use UCS-2
var output = [],
inputLength = input.length,
out,
i = 0,
n = initialN,
bias = initialBias,
basic,
j,
index,
oldi,
w,
k,
digit,
t,
/** Cached calculation results */
baseMinusT;
// Handle the basic code points: let `basic` be the number of input code
// points before the last delimiter, or `0` if there is none, then copy
// the first basic code points to the output.
basic = input.lastIndexOf(delimiter);
if (basic < 0) {
basic = 0;
}
for (j = 0; j < basic; ++j) {
// if it's not a basic code point
if (input.charCodeAt(j) >= 0x80) {
error('not-basic');
}
output.push(input.charCodeAt(j));
}
// Main decoding loop: start just after the last delimiter if any basic code
// points were copied; start at the beginning otherwise.
for (index = basic > 0 ? basic + 1 : 0; index < inputLength; /* no final expression */) {
// `index` is the index of the next character to be consumed.
// Decode a generalized variable-length integer into `delta`,
// which gets added to `i`. The overflow checking is easier
// if we increase `i` as we go, then subtract off its starting
// value at the end to obtain `delta`.
for (oldi = i, w = 1, k = base; /* no condition */; k += base) {
if (index >= inputLength) {
error('invalid-input');
}
digit = basicToDigit(input.charCodeAt(index++));
if (digit >= base || digit > floor((maxInt - i) / w)) {
error('overflow');
}
i += digit * w;
t = k <= bias ? tMin : (k >= bias + tMax ? tMax : k - bias);
if (digit < t) {
break;
}
baseMinusT = base - t;
if (w > floor(maxInt / baseMinusT)) {
error('overflow');
}
w *= baseMinusT;
}
out = output.length + 1;
bias = adapt(i - oldi, out, oldi == 0);
// `i` was supposed to wrap around from `out` to `0`,
// incrementing `n` each time, so we'll fix that now:
if (floor(i / out) > maxInt - n) {
error('overflow');
}
n += floor(i / out);
i %= out;
// Insert `n` at position `i` of the output
output.splice(i++, 0, n);
}
return ucs2encode(output);
}
/**
* Converts a string of Unicode symbols to a Punycode string of ASCII-only
* symbols.
* @memberOf punycode
* @param {String} input The string of Unicode symbols.
* @returns {String} The resulting Punycode string of ASCII-only symbols.
*/
function encode(input) {
var n,
delta,
handledCPCount,
basicLength,
bias,
j,
m,
q,
k,
t,
currentValue,
output = [],
/** `inputLength` will hold the number of code points in `input`. */
inputLength,
/** Cached calculation results */
handledCPCountPlusOne,
baseMinusT,
qMinusT;
// Convert the input in UCS-2 to Unicode
input = ucs2decode(input);
// Cache the length
inputLength = input.length;
// Initialize the state
n = initialN;
delta = 0;
bias = initialBias;
// Handle the basic code points
for (j = 0; j < inputLength; ++j) {
currentValue = input[j];
if (currentValue < 0x80) {
output.push(stringFromCharCode(currentValue));
}
}
handledCPCount = basicLength = output.length;
// `handledCPCount` is the number of code points that have been handled;
// `basicLength` is the number of basic code points.
// Finish the basic string - if it is not empty - with a delimiter
if (basicLength) {
output.push(delimiter);
}
// Main encoding loop:
while (handledCPCount < inputLength) {
// All non-basic code points < n have been handled already. Find the next
// larger one:
for (m = maxInt, j = 0; j < inputLength; ++j) {
currentValue = input[j];
if (currentValue >= n && currentValue < m) {
m = currentValue;
}
}
// Increase `delta` enough to advance the decoder's <n,i> state to <m,0>,
// but guard against overflow
handledCPCountPlusOne = handledCPCount + 1;
if (m - n > floor((maxInt - delta) / handledCPCountPlusOne)) {
error('overflow');
}
delta += (m - n) * handledCPCountPlusOne;
n = m;
for (j = 0; j < inputLength; ++j) {
currentValue = input[j];
if (currentValue < n && ++delta > maxInt) {
error('overflow');
}
if (currentValue == n) {
// Represent delta as a generalized variable-length integer
for (q = delta, k = base; /* no condition */; k += base) {
t = k <= bias ? tMin : (k >= bias + tMax ? tMax : k - bias);
if (q < t) {
break;
}
qMinusT = q - t;
baseMinusT = base - t;
output.push(
stringFromCharCode(digitToBasic(t + qMinusT % baseMinusT, 0))
);
q = floor(qMinusT / baseMinusT);
}
output.push(stringFromCharCode(digitToBasic(q, 0)));
bias = adapt(delta, handledCPCountPlusOne, handledCPCount == basicLength);
delta = 0;
++handledCPCount;
}
}
++delta;
++n;
}
return output.join('');
}
/**
* Converts a Punycode string representing a domain name to Unicode. Only the
* Punycoded parts of the domain name will be converted, i.e. it doesn't
* matter if you call it on a string that has already been converted to
* Unicode.
* @memberOf punycode
* @param {String} domain The Punycode domain name to convert to Unicode.
* @returns {String} The Unicode representation of the given Punycode
* string.
*/
function toUnicode(domain) {
return mapDomain(domain, function(string) {
return regexPunycode.test(string)
? decode(string.slice(4).toLowerCase())
: string;
});
}
/**
* Converts a Unicode string representing a domain name to Punycode. Only the
* non-ASCII parts of the domain name will be converted, i.e. it doesn't
* matter if you call it with a domain that's already in ASCII.
* @memberOf punycode
* @param {String} domain The domain name to convert, as a Unicode string.
* @returns {String} The Punycode representation of the given domain name.
*/
function toASCII(domain) {
return mapDomain(domain, function(string) {
return regexNonASCII.test(string)
? 'xn--' + encode(string)
: string;
});
}
/*--------------------------------------------------------------------------*/
/** Define the public API */
punycode = {
/**
* A string representing the current Punycode.js version number.
* @memberOf punycode
* @type String
*/
'version': '1.2.4',
/**
* An object of methods to convert from JavaScript's internal character
* representation (UCS-2) to Unicode code points, and back.
* @see <http://mathiasbynens.be/notes/javascript-encoding>
* @memberOf punycode
* @type Object
*/
'ucs2': {
'decode': ucs2decode,
'encode': ucs2encode
},
'decode': decode,
'encode': encode,
'toASCII': toASCII,
'toUnicode': toUnicode
};
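// Illustrative conversions with the API above (the classic example domain):
//   punycode.toASCII('mañana.com');          // => 'xn--maana-pta.com'
//   punycode.toUnicode('xn--maana-pta.com'); // => 'mañana.com'
//   punycode.ucs2.decode('\u2603');          // => [9731]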
/** Expose `punycode` */
// Some AMD build optimizers, like r.js, check for specific condition patterns
// like the following:
if (
typeof define == 'function' &&
typeof define.amd == 'object' &&
define.amd
) {
define('punycode', function() {
return punycode;
});
} else if (freeExports && !freeExports.nodeType) {
if (freeModule) { // in Node.js or RingoJS v0.8.0+
freeModule.exports = punycode;
} else { // in Narwhal or RingoJS v0.7.0-
for (key in punycode) {
punycode.hasOwnProperty(key) && (freeExports[key] = punycode[key]);
}
}
} else { // in Rhino or a web browser
root.punycode = punycode;
}
}(this));
}).call(this,typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}],10:[function(_dereq_,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
// If obj.hasOwnProperty has been overridden, then calling
// obj.hasOwnProperty(prop) will break.
// See: https://github.com/joyent/node/issues/1707
function hasOwnProperty(obj, prop) {
return Object.prototype.hasOwnProperty.call(obj, prop);
}
module.exports = function(qs, sep, eq, options) {
sep = sep || '&';
eq = eq || '=';
var obj = {};
if (typeof qs !== 'string' || qs.length === 0) {
return obj;
}
var regexp = /\+/g;
qs = qs.split(sep);
var maxKeys = 1000;
if (options && typeof options.maxKeys === 'number') {
maxKeys = options.maxKeys;
}
var len = qs.length;
// maxKeys <= 0 means that we should not limit keys count
if (maxKeys > 0 && len > maxKeys) {
len = maxKeys;
}
for (var i = 0; i < len; ++i) {
var x = qs[i].replace(regexp, '%20'),
idx = x.indexOf(eq),
kstr, vstr, k, v;
if (idx >= 0) {
kstr = x.substr(0, idx);
vstr = x.substr(idx + 1);
} else {
kstr = x;
vstr = '';
}
k = decodeURIComponent(kstr);
v = decodeURIComponent(vstr);
if (!hasOwnProperty(obj, k)) {
obj[k] = v;
} else if (isArray(obj[k])) {
obj[k].push(v);
} else {
obj[k] = [obj[k], v];
}
}
return obj;
};
var isArray = Array.isArray || function (xs) {
return Object.prototype.toString.call(xs) === '[object Array]';
};
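// Illustrative: with the default separators, parsing "a=1&a=2&b=x%20y"
// yields { a: ['1', '2'], b: 'x y' } (repeated keys collect into an array).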
},{}],11:[function(_dereq_,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict';
var stringifyPrimitive = function(v) {
switch (typeof v) {
case 'string':
return v;
case 'boolean':
return v ? 'true' : 'false';
case 'number':
return isFinite(v) ? v : '';
default:
return '';
}
};
module.exports = function(obj, sep, eq, name) {
sep = sep || '&';
eq = eq || '=';
if (obj === null) {
obj = undefined;
}
if (typeof obj === 'object') {
return map(objectKeys(obj), function(k) {
var ks = encodeURIComponent(stringifyPrimitive(k)) + eq;
if (isArray(obj[k])) {
return obj[k].map(function(v) {
return ks + encodeURIComponent(stringifyPrimitive(v));
}).join(sep);
} else {
return ks + encodeURIComponent(stringifyPrimitive(obj[k]));
}
}).join(sep);
}
if (!name) return '';
return encodeURIComponent(stringifyPrimitive(name)) + eq +
encodeURIComponent(stringifyPrimitive(obj));
};
var isArray = Array.isArray || function (xs) {
return Object.prototype.toString.call(xs) === '[object Array]';
};
function map (xs, f) {
if (xs.map) return xs.map(f);
var res = [];
for (var i = 0; i < xs.length; i++) {
res.push(f(xs[i], i));
}
return res;
}
var objectKeys = Object.keys || function (obj) {
var res = [];
for (var key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) res.push(key);
}
return res;
};
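// Illustrative: with the default separators, stringifying
// { a: ['1', '2'], b: 'x y' } yields "a=1&a=2&b=x%20y".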
},{}],12:[function(_dereq_,module,exports){
'use strict';
exports.decode = exports.parse = _dereq_('./decode');
exports.encode = exports.stringify = _dereq_('./encode');
},{"./decode":10,"./encode":11}],13:[function(_dereq_,module,exports){
var toString = Object.prototype.toString
module.exports = function(val){
switch (toString.call(val)) {
case '[object Function]': return 'function'
case '[object Date]': return 'date'
case '[object RegExp]': return 'regexp'
case '[object Arguments]': return 'arguments'
case '[object Array]': return 'array'
case '[object String]': return 'string'
}
if (typeof val == 'object' && val && typeof val.length == 'number') {
try {
if (typeof val.callee == 'function') return 'arguments';
} catch (ex) {
if (ex instanceof TypeError) {
return 'arguments';
}
}
}
if (val === null) return 'null'
if (val === undefined) return 'undefined'
if (val && val.nodeType === 1) return 'element'
if (val === Object(val)) return 'object'
return typeof val
}
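// Illustrative return values from the exported type function:
//   type([])          // => 'array'
//   type(/x/)         // => 'regexp'
//   type(null)        // => 'null'
//   type(undefined)   // => 'undefined'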
},{}],14:[function(_dereq_,module,exports){
// Underscore.js 1.6.0
// http://underscorejs.org
// (c) 2009-2014 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
// Underscore may be freely distributed under the MIT license.
(function() {
// Baseline setup
// --------------
// Establish the root object, `window` in the browser, or `exports` on the server.
var root = this;
// Save the previous value of the `_` variable.
var previousUnderscore = root._;
// Establish the object that gets returned to break out of a loop iteration.
var breaker = {};
// Save bytes in the minified (but not gzipped) version:
var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype;
// Create quick reference variables for speed access to core prototypes.
var
push = ArrayProto.push,
slice = ArrayProto.slice,
concat = ArrayProto.concat,
toString = ObjProto.toString,
hasOwnProperty = ObjProto.hasOwnProperty;
// All **ECMAScript 5** native function implementations that we hope to use
// are declared here.
var
nativeForEach = ArrayProto.forEach,
nativeMap = ArrayProto.map,
nativeReduce = ArrayProto.reduce,
nativeReduceRight = ArrayProto.reduceRight,
nativeFilter = ArrayProto.filter,
nativeEvery = ArrayProto.every,
nativeSome = ArrayProto.some,
nativeIndexOf = ArrayProto.indexOf,
nativeLastIndexOf = ArrayProto.lastIndexOf,
nativeIsArray = Array.isArray,
nativeKeys = Object.keys,
nativeBind = FuncProto.bind;
// Create a safe reference to the Underscore object for use below.
var _ = function(obj) {
if (obj instanceof _) return obj;
if (!(this instanceof _)) return new _(obj);
this._wrapped = obj;
};
// Export the Underscore object for **Node.js**, with
// backwards-compatibility for the old `require()` API. If we're in
// the browser, add `_` as a global object via a string identifier,
// for Closure Compiler "advanced" mode.
if (typeof exports !== 'undefined') {
if (typeof module !== 'undefined' && module.exports) {
exports = module.exports = _;
}
exports._ = _;
} else {
root._ = _;
}
// Current version.
_.VERSION = '1.6.0';
// Collection Functions
// --------------------
// The cornerstone, an `each` implementation, aka `forEach`.
// Handles objects with the built-in `forEach`, arrays, and raw objects.
// Delegates to **ECMAScript 5**'s native `forEach` if available.
var each = _.each = _.forEach = function(obj, iterator, context) {
if (obj == null) return obj;
if (nativeForEach && obj.forEach === nativeForEach) {
obj.forEach(iterator, context);
} else if (obj.length === +obj.length) {
for (var i = 0, length = obj.length; i < length; i++) {
if (iterator.call(context, obj[i], i, obj) === breaker) return;
}
} else {
var keys = _.keys(obj);
for (var i = 0, length = keys.length; i < length; i++) {
if (iterator.call(context, obj[keys[i]], keys[i], obj) === breaker) return;
}
}
return obj;
};
// Return the results of applying the iterator to each element.
// Delegates to **ECMAScript 5**'s native `map` if available.
_.map = _.collect = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (nativeMap && obj.map === nativeMap) return obj.map(iterator, context);
each(obj, function(value, index, list) {
results.push(iterator.call(context, value, index, list));
});
return results;
};
var reduceError = 'Reduce of empty array with no initial value';
// **Reduce** builds up a single result from a list of values, aka `inject`,
// or `foldl`. Delegates to **ECMAScript 5**'s native `reduce` if available.
_.reduce = _.foldl = _.inject = function(obj, iterator, memo, context) {
var initial = arguments.length > 2;
if (obj == null) obj = [];
if (nativeReduce && obj.reduce === nativeReduce) {
if (context) iterator = _.bind(iterator, context);
return initial ? obj.reduce(iterator, memo) : obj.reduce(iterator);
}
each(obj, function(value, index, list) {
if (!initial) {
memo = value;
initial = true;
} else {
memo = iterator.call(context, memo, value, index, list);
}
});
if (!initial) throw new TypeError(reduceError);
return memo;
};
// The right-associative version of reduce, also known as `foldr`.
// Delegates to **ECMAScript 5**'s native `reduceRight` if available.
_.reduceRight = _.foldr = function(obj, iterator, memo, context) {
var initial = arguments.length > 2;
if (obj == null) obj = [];
if (nativeReduceRight && obj.reduceRight === nativeReduceRight) {
if (context) iterator = _.bind(iterator, context);
return initial ? obj.reduceRight(iterator, memo) : obj.reduceRight(iterator);
}
var length = obj.length;
if (length !== +length) {
var keys = _.keys(obj);
length = keys.length;
}
each(obj, function(value, index, list) {
index = keys ? keys[--length] : --length;
if (!initial) {
memo = obj[index];
initial = true;
} else {
memo = iterator.call(context, memo, obj[index], index, list);
}
});
if (!initial) throw new TypeError(reduceError);
return memo;
};
// Return the first value which passes a truth test. Aliased as `detect`.
_.find = _.detect = function(obj, predicate, context) {
var result;
any(obj, function(value, index, list) {
if (predicate.call(context, value, index, list)) {
result = value;
return true;
}
});
return result;
};
// Return all the elements that pass a truth test.
// Delegates to **ECMAScript 5**'s native `filter` if available.
// Aliased as `select`.
_.filter = _.select = function(obj, predicate, context) {
var results = [];
if (obj == null) return results;
if (nativeFilter && obj.filter === nativeFilter) return obj.filter(predicate, context);
each(obj, function(value, index, list) {
if (predicate.call(context, value, index, list)) results.push(value);
});
return results;
};
// Return all the elements for which a truth test fails.
_.reject = function(obj, predicate, context) {
return _.filter(obj, function(value, index, list) {
return !predicate.call(context, value, index, list);
}, context);
};
// Determine whether all of the elements match a truth test.
// Delegates to **ECMAScript 5**'s native `every` if available.
// Aliased as `all`.
_.every = _.all = function(obj, predicate, context) {
predicate || (predicate = _.identity);
var result = true;
if (obj == null) return result;
if (nativeEvery && obj.every === nativeEvery) return obj.every(predicate, context);
each(obj, function(value, index, list) {
if (!(result = result && predicate.call(context, value, index, list))) return breaker;
});
return !!result;
};
// Determine if at least one element in the object matches a truth test.
// Delegates to **ECMAScript 5**'s native `some` if available.
// Aliased as `any`.
var any = _.some = _.any = function(obj, predicate, context) {
predicate || (predicate = _.identity);
var result = false;
if (obj == null) return result;
if (nativeSome && obj.some === nativeSome) return obj.some(predicate, context);
each(obj, function(value, index, list) {
if (result || (result = predicate.call(context, value, index, list))) return breaker;
});
return !!result;
};
// Determine if the array or object contains a given value (using `===`).
// Aliased as `include`.
_.contains = _.include = function(obj, target) {
if (obj == null) return false;
if (nativeIndexOf && obj.indexOf === nativeIndexOf) return obj.indexOf(target) != -1;
return any(obj, function(value) {
return value === target;
});
};
// Invoke a method (with arguments) on every item in a collection.
_.invoke = function(obj, method) {
var args = slice.call(arguments, 2);
var isFunc = _.isFunction(method);
return _.map(obj, function(value) {
return (isFunc ? method : value[method]).apply(value, args);
});
};
// Convenience version of a common use case of `map`: fetching a property.
_.pluck = function(obj, key) {
return _.map(obj, _.property(key));
};
// Convenience version of a common use case of `filter`: selecting only objects
// containing specific `key:value` pairs.
_.where = function(obj, attrs) {
return _.filter(obj, _.matches(attrs));
};
// Convenience version of a common use case of `find`: getting the first object
// containing specific `key:value` pairs.
_.findWhere = function(obj, attrs) {
return _.find(obj, _.matches(attrs));
};
// Return the maximum element or (element-based computation).
// Can't optimize arrays of integers longer than 65,535 elements.
// See [WebKit Bug 80797](https://bugs.webkit.org/show_bug.cgi?id=80797)
_.max = function(obj, iterator, context) {
if (!iterator && _.isArray(obj) && obj[0] === +obj[0] && obj.length < 65535) {
return Math.max.apply(Math, obj);
}
var result = -Infinity, lastComputed = -Infinity;
each(obj, function(value, index, list) {
var computed = iterator ? iterator.call(context, value, index, list) : value;
if (computed > lastComputed) {
result = value;
lastComputed = computed;
}
});
return result;
};
// Return the minimum element (or element-based computation).
_.min = function(obj, iterator, context) {
if (!iterator && _.isArray(obj) && obj[0] === +obj[0] && obj.length < 65535) {
return Math.min.apply(Math, obj);
}
var result = Infinity, lastComputed = Infinity;
each(obj, function(value, index, list) {
var computed = iterator ? iterator.call(context, value, index, list) : value;
if (computed < lastComputed) {
result = value;
lastComputed = computed;
}
});
return result;
};
// Shuffle an array, using the modern version of the
// [Fisher–Yates shuffle](http://en.wikipedia.org/wiki/Fisher–Yates_shuffle).
_.shuffle = function(obj) {
var rand;
var index = 0;
var shuffled = [];
each(obj, function(value) {
rand = _.random(index++);
shuffled[index - 1] = shuffled[rand];
shuffled[rand] = value;
});
return shuffled;
};
// Sample **n** random values from a collection.
// If **n** is not specified, returns a single random element.
// The internal `guard` argument allows it to work with `map`.
_.sample = function(obj, n, guard) {
if (n == null || guard) {
if (obj.length !== +obj.length) obj = _.values(obj);
return obj[_.random(obj.length - 1)];
}
return _.shuffle(obj).slice(0, Math.max(0, n));
};
// An internal function to generate lookup iterators.
var lookupIterator = function(value) {
if (value == null) return _.identity;
if (_.isFunction(value)) return value;
return _.property(value);
};
// Sort the object's values by a criterion produced by an iterator.
_.sortBy = function(obj, iterator, context) {
iterator = lookupIterator(iterator);
return _.pluck(_.map(obj, function(value, index, list) {
return {
value: value,
index: index,
criteria: iterator.call(context, value, index, list)
};
}).sort(function(left, right) {
var a = left.criteria;
var b = right.criteria;
if (a !== b) {
if (a > b || a === void 0) return 1;
if (a < b || b === void 0) return -1;
}
return left.index - right.index;
}), 'value');
};
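// Illustrative: _.sortBy([{n: 3}, {n: 1}, {n: 2}], 'n') returns the objects
// ordered by their n property: [{n: 1}, {n: 2}, {n: 3}].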
// An internal function used for aggregate "group by" operations.
var group = function(behavior) {
return function(obj, iterator, context) {
var result = {};
iterator = lookupIterator(iterator);
each(obj, function(value, index) {
var key = iterator.call(context, value, index, obj);
behavior(result, key, value);
});
return result;
};
};
// Groups the object's values by a criterion. Pass either a string attribute
// to group by, or a function that returns the criterion.
_.groupBy = group(function(result, key, value) {
_.has(result, key) ? result[key].push(value) : result[key] = [value];
});
// Indexes the object's values by a criterion, similar to `groupBy`, but for
// when you know that your index values will be unique.
_.indexBy = group(function(result, key, value) {
result[key] = value;
});
// Counts instances of an object that group by a certain criterion. Pass
// either a string attribute to count by, or a function that returns the
// criterion.
_.countBy = group(function(result, key) {
_.has(result, key) ? result[key]++ : result[key] = 1;
});
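// Example usage (illustrative sketch): the same `group` core powers all
// three aggregators, e.g.
//   _.groupBy([1.3, 2.1, 2.4], Math.floor);  // => {1: [1.3], 2: [2.1, 2.4]}
//   _.countBy([1.3, 2.1, 2.4], Math.floor);  // => {1: 1, 2: 2}
//   _.indexBy([{id: 'a'}, {id: 'b'}], 'id'); // => {a: {id: 'a'}, b: {id: 'b'}}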
// Use a comparator function to figure out the smallest index at which
// an object should be inserted so as to maintain order. Uses binary search.
_.sortedIndex = function(array, obj, iterator, context) {
iterator = lookupIterator(iterator);
var value = iterator.call(context, obj);
var low = 0, high = array.length;
while (low < high) {
var mid = (low + high) >>> 1;
iterator.call(context, array[mid]) < value ? low = mid + 1 : high = mid;
}
return low;
};
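// Example usage (illustrative sketch): binary search keeps this O(log n), e.g.
//   _.sortedIndex([10, 20, 30, 40, 50], 35); // => 3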
// Safely create a real, live array from anything iterable.
_.toArray = function(obj) {
if (!obj) return [];
if (_.isArray(obj)) return slice.call(obj);
if (obj.length === +obj.length) return _.map(obj, _.identity);
return _.values(obj);
};
// Return the number of elements in an object.
_.size = function(obj) {
if (obj == null) return 0;
return (obj.length === +obj.length) ? obj.length : _.keys(obj).length;
};
// Array Functions
// ---------------
// Get the first element of an array. Passing **n** will return the first N
// values in the array. Aliased as `head` and `take`. The **guard** check
// allows it to work with `_.map`.
_.first = _.head = _.take = function(array, n, guard) {
if (array == null) return void 0;
if ((n == null) || guard) return array[0];
if (n < 0) return [];
return slice.call(array, 0, n);
};
// Returns everything but the last entry of the array. Especially useful on
// the arguments object. Passing **n** will return all the values in
// the array, excluding the last N. The **guard** check allows it to work with
// `_.map`.
_.initial = function(array, n, guard) {
return slice.call(array, 0, array.length - ((n == null) || guard ? 1 : n));
};
// Get the last element of an array. Passing **n** will return the last N
// values in the array. The **guard** check allows it to work with `_.map`.
_.last = function(array, n, guard) {
if (array == null) return void 0;
if ((n == null) || guard) return array[array.length - 1];
return slice.call(array, Math.max(array.length - n, 0));
};
// Returns everything but the first entry of the array. Aliased as `tail` and `drop`.
// Especially useful on the arguments object. Passing an **n** will return
// the rest N values in the array. The **guard**
// check allows it to work with `_.map`.
_.rest = _.tail = _.drop = function(array, n, guard) {
return slice.call(array, (n == null) || guard ? 1 : n);
};
// Trim out all falsy values from an array.
_.compact = function(array) {
return _.filter(array, _.identity);
};
// Internal implementation of a recursive `flatten` function.
var flatten = function(input, shallow, output) {
if (shallow && _.every(input, _.isArray)) {
return concat.apply(output, input);
}
each(input, function(value) {
if (_.isArray(value) || _.isArguments(value)) {
shallow ? push.apply(output, value) : flatten(value, shallow, output);
} else {
output.push(value);
}
});
return output;
};
// Flatten out an array, either recursively (by default), or just one level.
_.flatten = function(array, shallow) {
return flatten(array, shallow, []);
};
// Return a version of the array that does not contain the specified value(s).
_.without = function(array) {
return _.difference(array, slice.call(arguments, 1));
};
// Split an array into two arrays: one whose elements all satisfy the given
// predicate, and one whose elements all do not satisfy the predicate.
_.partition = function(array, predicate) {
var pass = [], fail = [];
each(array, function(elem) {
(predicate(elem) ? pass : fail).push(elem);
});
return [pass, fail];
};
// Produce a duplicate-free version of the array. If the array has already
// been sorted, you have the option of using a faster algorithm.
// Aliased as `unique`.
_.uniq = _.unique = function(array, isSorted, iterator, context) {
if (_.isFunction(isSorted)) {
context = iterator;
iterator = isSorted;
isSorted = false;
}
var initial = iterator ? _.map(array, iterator, context) : array;
var results = [];
var seen = [];
each(initial, function(value, index) {
if (isSorted ? (!index || seen[seen.length - 1] !== value) : !_.contains(seen, value)) {
seen.push(value);
results.push(array[index]);
}
});
return results;
};
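// Example usage (illustrative sketch): pass `true` for already-sorted input
// to use the faster adjacent-comparison path, e.g.
//   _.uniq([1, 1, 2, 2, 3], true); // => [1, 2, 3]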
// Produce an array that contains the union: each distinct element from all of
// the passed-in arrays.
_.union = function() {
return _.uniq(_.flatten(arguments, true));
};
// Produce an array that contains every item shared between all the
// passed-in arrays.
_.intersection = function(array) {
var rest = slice.call(arguments, 1);
return _.filter(_.uniq(array), function(item) {
return _.every(rest, function(other) {
return _.contains(other, item);
});
});
};
// Take the difference between one array and a number of other arrays.
// Only the elements present in just the first array will remain.
_.difference = function(array) {
var rest = concat.apply(ArrayProto, slice.call(arguments, 1));
return _.filter(array, function(value){ return !_.contains(rest, value); });
};
// Zip together multiple lists into a single array -- elements that share
// an index go together.
_.zip = function() {
var length = _.max(_.pluck(arguments, 'length').concat(0));
var results = new Array(length);
for (var i = 0; i < length; i++) {
results[i] = _.pluck(arguments, '' + i);
}
return results;
};
// Converts lists into objects. Pass either a single array of `[key, value]`
// pairs, or two parallel arrays of the same length -- one of keys, and one of
// the corresponding values.
_.object = function(list, values) {
if (list == null) return {};
var result = {};
for (var i = 0, length = list.length; i < length; i++) {
if (values) {
result[list[i]] = values[i];
} else {
result[list[i][0]] = list[i][1];
}
}
return result;
};
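// Example usage (illustrative sketch): both call styles produce the same map, e.g.
//   _.object(['moe', 'larry'], [30, 40]);   // => {moe: 30, larry: 40}
//   _.object([['moe', 30], ['larry', 40]]); // => {moe: 30, larry: 40}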
// If the browser doesn't supply us with indexOf (I'm looking at you, **MSIE**),
// we need this function. Return the position of the first occurrence of an
// item in an array, or -1 if the item is not included in the array.
// Delegates to **ECMAScript 5**'s native `indexOf` if available.
// If the array is large and already in sort order, pass `true`
// for **isSorted** to use binary search.
_.indexOf = function(array, item, isSorted) {
if (array == null) return -1;
var i = 0, length = array.length;
if (isSorted) {
if (typeof isSorted == 'number') {
i = (isSorted < 0 ? Math.max(0, length + isSorted) : isSorted);
} else {
i = _.sortedIndex(array, item);
return array[i] === item ? i : -1;
}
}
if (nativeIndexOf && array.indexOf === nativeIndexOf) return array.indexOf(item, isSorted);
for (; i < length; i++) if (array[i] === item) return i;
return -1;
};
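// Example usage (illustrative sketch): pass `true` when the array is sorted
// to switch from a linear scan to binary search, e.g.
//   _.indexOf([10, 20, 30], 20, true); // => 1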
// Delegates to **ECMAScript 5**'s native `lastIndexOf` if available.
_.lastIndexOf = function(array, item, from) {
if (array == null) return -1;
var hasIndex = from != null;
if (nativeLastIndexOf && array.lastIndexOf === nativeLastIndexOf) {
return hasIndex ? array.lastIndexOf(item, from) : array.lastIndexOf(item);
}
var i = (hasIndex ? from : array.length);
while (i--) if (array[i] === item) return i;
return -1;
};
// Generate an integer Array containing an arithmetic progression. A port of
// the native Python `range()` function. See
// [the Python documentation](http://docs.python.org/library/functions.html#range).
_.range = function(start, stop, step) {
if (arguments.length <= 1) {
stop = start || 0;
start = 0;
}
step = arguments[2] || 1;
var length = Math.max(Math.ceil((stop - start) / step), 0);
var idx = 0;
var range = new Array(length);
while(idx < length) {
range[idx++] = start;
start += step;
}
return range;
};
// Function (ahem) Functions
// ------------------
// Reusable constructor function for prototype setting.
var ctor = function(){};
// Create a function bound to a given object (assigning `this`, and arguments,
// optionally). Delegates to **ECMAScript 5**'s native `Function.bind` if
// available.
_.bind = function(func, context) {
var args, bound;
if (nativeBind && func.bind === nativeBind) return nativeBind.apply(func, slice.call(arguments, 1));
if (!_.isFunction(func)) throw new TypeError;
args = slice.call(arguments, 2);
return bound = function() {
if (!(this instanceof bound)) return func.apply(context, args.concat(slice.call(arguments)));
ctor.prototype = func.prototype;
var self = new ctor;
ctor.prototype = null;
var result = func.apply(self, args.concat(slice.call(arguments)));
if (Object(result) === result) return result;
return self;
};
};
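// Example usage (illustrative sketch): pre-fills `this` and leading
// arguments; the `instanceof` dance above keeps `new boundFn()` working, e.g.
//   var greet = function(greeting) { return greeting + ', ' + this.name; };
//   var hiMoe = _.bind(greet, {name: 'moe'}, 'hi');
//   hiMoe(); // => 'hi, moe'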
// Partially apply a function by creating a version that has had some of its
// arguments pre-filled, without changing its dynamic `this` context. _ acts
// as a placeholder, allowing any combination of arguments to be pre-filled.
_.partial = function(func) {
var boundArgs = slice.call(arguments, 1);
return function() {
var position = 0;
var args = boundArgs.slice();
for (var i = 0, length = args.length; i < length; i++) {
if (args[i] === _) args[i] = arguments[position++];
}
while (position < arguments.length) args.push(arguments[position++]);
return func.apply(this, args);
};
};
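// Example usage (illustrative sketch): `_` itself is the placeholder, e.g.
//   var subtract = function(a, b) { return a - b; };
//   var subFrom20 = _.partial(subtract, _, 20); // pre-fill the second argument
//   subFrom20(5); // => -15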
// Bind a number of an object's methods to that object. Remaining arguments
// are the method names to be bound. Useful for ensuring that all callbacks
// defined on an object belong to it.
_.bindAll = function(obj) {
var funcs = slice.call(arguments, 1);
if (funcs.length === 0) throw new Error('bindAll must be passed function names');
each(funcs, function(f) { obj[f] = _.bind(obj[f], obj); });
return obj;
};
// Memoize an expensive function by storing its results.
_.memoize = function(func, hasher) {
var memo = {};
hasher || (hasher = _.identity);
return function() {
var key = hasher.apply(this, arguments);
return _.has(memo, key) ? memo[key] : (memo[key] = func.apply(this, arguments));
};
};
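// Example usage (illustrative sketch): note that with the default identity
// hasher only the first argument distinguishes cache entries, e.g.
//   var fib = _.memoize(function(n) { return n < 2 ? n : fib(n - 1) + fib(n - 2); });
//   fib(30); // each distinct n is computed once, then served from `memo`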
// Delays a function for the given number of milliseconds, and then calls
// it with the arguments supplied.
_.delay = function(func, wait) {
var args = slice.call(arguments, 2);
return setTimeout(function(){ return func.apply(null, args); }, wait);
};
// Defers a function, scheduling it to run after the current call stack has
// cleared.
_.defer = function(func) {
return _.delay.apply(_, [func, 1].concat(slice.call(arguments, 1)));
};
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time. Normally, the throttled function will run
// as much as it can, without ever going more than once per `wait` duration;
// but if you'd like to disable the execution on the leading edge, pass
// `{leading: false}`. To disable execution on the trailing edge, ditto.
_.throttle = function(func, wait, options) {
var context, args, result;
var timeout = null;
var previous = 0;
options || (options = {});
var later = function() {
previous = options.leading === false ? 0 : _.now();
timeout = null;
result = func.apply(context, args);
context = args = null;
};
return function() {
var now = _.now();
if (!previous && options.leading === false) previous = now;
var remaining = wait - (now - previous);
context = this;
args = arguments;
if (remaining <= 0) {
clearTimeout(timeout);
timeout = null;
previous = now;
result = func.apply(context, args);
context = args = null;
} else if (!timeout && options.trailing !== false) {
timeout = setTimeout(later, remaining);
}
return result;
};
};
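// Example usage (illustrative sketch; `updateLayout` is a hypothetical
// handler): at most one call per `wait` window, firing on the leading edge
// and scheduling a trailing call by default, e.g.
//   var onScroll = _.throttle(updateLayout, 100);
//   var lazy = _.throttle(updateLayout, 100, {leading: false});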
// Returns a function, that, as long as it continues to be invoked, will not
// be triggered. The function will be called after it stops being called for
// N milliseconds. If `immediate` is passed, trigger the function on the
// leading edge, instead of the trailing.
_.debounce = function(func, wait, immediate) {
var timeout, args, context, timestamp, result;
var later = function() {
var last = _.now() - timestamp;
if (last < wait) {
timeout = setTimeout(later, wait - last);
} else {
timeout = null;
if (!immediate) {
result = func.apply(context, args);
context = args = null;
}
}
};
return function() {
context = this;
args = arguments;
timestamp = _.now();
var callNow = immediate && !timeout;
if (!timeout) {
timeout = setTimeout(later, wait);
}
if (callNow) {
result = func.apply(context, args);
context = args = null;
}
return result;
};
};
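// Example usage (illustrative sketch; `persistDraft` and `togglePanel` are
// hypothetical): fires only after calls stop for `wait` ms; pass `true` to
// fire on the leading edge instead, e.g.
//   var save = _.debounce(persistDraft, 250);
//   var open = _.debounce(togglePanel, 250, true); // fire now, then cool down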
// Returns a function that will be executed at most one time, no matter how
// often you call it. Useful for lazy initialization.
_.once = function(func) {
var ran = false, memo;
return function() {
if (ran) return memo;
ran = true;
memo = func.apply(this, arguments);
func = null;
return memo;
};
};
// Returns the first function passed as an argument to the second,
// allowing you to adjust arguments, run code before and after, and
// conditionally execute the original function.
_.wrap = function(func, wrapper) {
return _.partial(wrapper, func);
};
// Returns a function that is the composition of a list of functions, each
// consuming the return value of the function that follows.
_.compose = function() {
var funcs = arguments;
return function() {
var args = arguments;
for (var i = funcs.length - 1; i >= 0; i--) {
args = [funcs[i].apply(this, args)];
}
return args[0];
};
};
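// Example usage (illustrative sketch): _.compose(f, g)(x) === f(g(x)), e.g.
//   var greet = function(name) { return 'hi: ' + name; };
//   var exclaim = function(s) { return s + '!'; };
//   _.compose(exclaim, greet)('moe'); // => 'hi: moe!'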
// Returns a function that will only be executed after being called N times.
_.after = function(times, func) {
return function() {
if (--times < 1) {
return func.apply(this, arguments);
}
};
};
// Object Functions
// ----------------
// Retrieve the names of an object's properties.
// Delegates to **ECMAScript 5**'s native `Object.keys`
_.keys = function(obj) {
if (!_.isObject(obj)) return [];
if (nativeKeys) return nativeKeys(obj);
var keys = [];
for (var key in obj) if (_.has(obj, key)) keys.push(key);
return keys;
};
// Retrieve the values of an object's properties.
_.values = function(obj) {
var keys = _.keys(obj);
var length = keys.length;
var values = new Array(length);
for (var i = 0; i < length; i++) {
values[i] = obj[keys[i]];
}
return values;
};
// Convert an object into a list of `[key, value]` pairs.
_.pairs = function(obj) {
var keys = _.keys(obj);
var length = keys.length;
var pairs = new Array(length);
for (var i = 0; i < length; i++) {
pairs[i] = [keys[i], obj[keys[i]]];
}
return pairs;
};
// Invert the keys and values of an object. The values must be serializable.
_.invert = function(obj) {
var result = {};
var keys = _.keys(obj);
for (var i = 0, length = keys.length; i < length; i++) {
result[obj[keys[i]]] = keys[i];
}
return result;
};
// Return a sorted list of the function names available on the object.
// Aliased as `methods`
_.functions = _.methods = function(obj) {
var names = [];
for (var key in obj) {
if (_.isFunction(obj[key])) names.push(key);
}
return names.sort();
};
// Extend a given object with all the properties in passed-in object(s).
_.extend = function(obj) {
each(slice.call(arguments, 1), function(source) {
if (source) {
for (var prop in source) {
obj[prop] = source[prop];
}
}
});
return obj;
};
// Return a copy of the object only containing the whitelisted properties.
_.pick = function(obj) {
var copy = {};
var keys = concat.apply(ArrayProto, slice.call(arguments, 1));
each(keys, function(key) {
if (key in obj) copy[key] = obj[key];
});
return copy;
};
// Return a copy of the object without the blacklisted properties.
_.omit = function(obj) {
var copy = {};
var keys = concat.apply(ArrayProto, slice.call(arguments, 1));
for (var key in obj) {
if (!_.contains(keys, key)) copy[key] = obj[key];
}
return copy;
};
// Fill in a given object with default properties.
_.defaults = function(obj) {
each(slice.call(arguments, 1), function(source) {
if (source) {
for (var prop in source) {
if (obj[prop] === void 0) obj[prop] = source[prop];
}
}
});
return obj;
};
// Create a (shallow-cloned) duplicate of an object.
_.clone = function(obj) {
if (!_.isObject(obj)) return obj;
return _.isArray(obj) ? obj.slice() : _.extend({}, obj);
};
// Invokes interceptor with the obj, and then returns obj.
// The primary purpose of this method is to "tap into" a method chain, in
// order to perform operations on intermediate results within the chain.
_.tap = function(obj, interceptor) {
interceptor(obj);
return obj;
};
// Internal recursive comparison function for `isEqual`.
var eq = function(a, b, aStack, bStack) {
// Identical objects are equal. `0 === -0`, but they aren't identical.
// See the [Harmony `egal` proposal](http://wiki.ecmascript.org/doku.php?id=harmony:egal).
if (a === b) return a !== 0 || 1 / a == 1 / b;
// A strict comparison is necessary because `null == undefined`.
if (a == null || b == null) return a === b;
// Unwrap any wrapped objects.
if (a instanceof _) a = a._wrapped;
if (b instanceof _) b = b._wrapped;
// Compare `[[Class]]` names.
var className = toString.call(a);
if (className != toString.call(b)) return false;
switch (className) {
// Strings, numbers, dates, and booleans are compared by value.
case '[object String]':
// Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
// equivalent to `new String("5")`.
return a == String(b);
case '[object Number]':
// `NaN`s are equivalent, but non-reflexive. An `egal` comparison is performed for
// other numeric values.
return a != +a ? b != +b : (a == 0 ? 1 / a == 1 / b : a == +b);
case '[object Date]':
case '[object Boolean]':
// Coerce dates and booleans to numeric primitive values. Dates are compared by their
// millisecond representations. Note that invalid dates with millisecond representations
// of `NaN` are not equivalent.
return +a == +b;
// RegExps are compared by their source patterns and flags.
case '[object RegExp]':
return a.source == b.source &&
a.global == b.global &&
a.multiline == b.multiline &&
a.ignoreCase == b.ignoreCase;
}
if (typeof a != 'object' || typeof b != 'object') return false;
// Assume equality for cyclic structures. The algorithm for detecting cyclic
// structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
var length = aStack.length;
while (length--) {
// Linear search. Performance is inversely proportional to the number of
// unique nested structures.
if (aStack[length] == a) return bStack[length] == b;
}
// Objects with different constructors are not equivalent, but `Object`s
// from different frames are.
var aCtor = a.constructor, bCtor = b.constructor;
if (aCtor !== bCtor && !(_.isFunction(aCtor) && (aCtor instanceof aCtor) &&
_.isFunction(bCtor) && (bCtor instanceof bCtor))
&& ('constructor' in a && 'constructor' in b)) {
return false;
}
// Add the first object to the stack of traversed objects.
aStack.push(a);
bStack.push(b);
var size = 0, result = true;
// Recursively compare objects and arrays.
if (className == '[object Array]') {
// Compare array lengths to determine if a deep comparison is necessary.
size = a.length;
result = size == b.length;
if (result) {
// Deep compare the contents, ignoring non-numeric properties.
while (size--) {
if (!(result = eq(a[size], b[size], aStack, bStack))) break;
}
}
} else {
// Deep compare objects.
for (var key in a) {
if (_.has(a, key)) {
// Count the expected number of properties.
size++;
// Deep compare each member.
if (!(result = _.has(b, key) && eq(a[key], b[key], aStack, bStack))) break;
}
}
// Ensure that both objects contain the same number of properties.
if (result) {
for (key in b) {
if (_.has(b, key) && !(size--)) break;
}
result = !size;
}
}
// Remove the first object from the stack of traversed objects.
aStack.pop();
bStack.pop();
return result;
};
// Perform a deep comparison to check if two objects are equal.
_.isEqual = function(a, b) {
return eq(a, b, [], []);
};
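// Example usage (illustrative sketch): structural equality with cycle
// detection; NaN equals NaN, but 0 and -0 are distinct, e.g.
//   _.isEqual({a: [1, 2]}, {a: [1, 2]}); // => true
//   _.isEqual(0, -0);                    // => false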
// Is a given array, string, or object empty?
// An "empty" object has no enumerable own-properties.
_.isEmpty = function(obj) {
if (obj == null) return true;
if (_.isArray(obj) || _.isString(obj)) return obj.length === 0;
for (var key in obj) if (_.has(obj, key)) return false;
return true;
};
// Is a given value a DOM element?
_.isElement = function(obj) {
return !!(obj && obj.nodeType === 1);
};
// Is a given value an array?
// Delegates to ECMA5's native Array.isArray
_.isArray = nativeIsArray || function(obj) {
return toString.call(obj) == '[object Array]';
};
// Is a given variable an object?
_.isObject = function(obj) {
return obj === Object(obj);
};
// Add some isType methods: isArguments, isFunction, isString, isNumber, isDate, isRegExp.
each(['Arguments', 'Function', 'String', 'Number', 'Date', 'RegExp'], function(name) {
_['is' + name] = function(obj) {
return toString.call(obj) == '[object ' + name + ']';
};
});
// Define a fallback version of the method in browsers (ahem, IE), where
// there isn't any inspectable "Arguments" type.
if (!_.isArguments(arguments)) {
_.isArguments = function(obj) {
return !!(obj && _.has(obj, 'callee'));
};
}
// Optimize `isFunction` if appropriate.
if (typeof (/./) !== 'function') {
_.isFunction = function(obj) {
return typeof obj === 'function';
};
}
// Is a given object a finite number?
_.isFinite = function(obj) {
return isFinite(obj) && !isNaN(parseFloat(obj));
};
// Is the given value `NaN`? (NaN is the only number which does not equal itself).
_.isNaN = function(obj) {
return _.isNumber(obj) && obj != +obj;
};
// Is a given value a boolean?
_.isBoolean = function(obj) {
return obj === true || obj === false || toString.call(obj) == '[object Boolean]';
};
// Is a given value equal to null?
_.isNull = function(obj) {
return obj === null;
};
// Is a given variable undefined?
_.isUndefined = function(obj) {
return obj === void 0;
};
// Shortcut function for checking if an object has a given property directly
// on itself (in other words, not on a prototype).
_.has = function(obj, key) {
return hasOwnProperty.call(obj, key);
};
// Utility Functions
// -----------------
// Run Underscore.js in *noConflict* mode, returning the `_` variable to its
// previous owner. Returns a reference to the Underscore object.
_.noConflict = function() {
root._ = previousUnderscore;
return this;
};
// Keep the identity function around for default iterators.
_.identity = function(value) {
return value;
};
_.constant = function(value) {
return function () {
return value;
};
};
_.property = function(key) {
return function(obj) {
return obj[key];
};
};
// Returns a predicate for checking whether an object has a given set of `key:value` pairs.
_.matches = function(attrs) {
return function(obj) {
if (obj === attrs) return true; //avoid comparing an object to itself.
for (var key in attrs) {
if (attrs[key] !== obj[key])
return false;
}
return true;
}
};
// Run a function **n** times.
_.times = function(n, iterator, context) {
var accum = Array(Math.max(0, n));
for (var i = 0; i < n; i++) accum[i] = iterator.call(context, i);
return accum;
};
// Return a random integer between min and max (inclusive).
_.random = function(min, max) {
if (max == null) {
max = min;
min = 0;
}
return min + Math.floor(Math.random() * (max - min + 1));
};
// A (possibly faster) way to get the current timestamp as an integer.
_.now = Date.now || function() { return new Date().getTime(); };
// List of HTML entities for escaping.
var entityMap = {
escape: {
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
"'": '&#x27;'
}
};
entityMap.unescape = _.invert(entityMap.escape);
// Regexes containing the keys and values listed immediately above.
var entityRegexes = {
escape: new RegExp('[' + _.keys(entityMap.escape).join('') + ']', 'g'),
unescape: new RegExp('(' + _.keys(entityMap.unescape).join('|') + ')', 'g')
};
// Functions for escaping and unescaping strings to/from HTML interpolation.
_.each(['escape', 'unescape'], function(method) {
_[method] = function(string) {
if (string == null) return '';
return ('' + string).replace(entityRegexes[method], function(match) {
return entityMap[method][match];
});
};
});
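// Example usage (illustrative sketch): the two methods are exact inverses
// over the entity map above, e.g.
//   _.escape('Curly & Moe');       // => 'Curly &amp; Moe'
//   _.unescape('Curly &amp; Moe'); // => 'Curly & Moe'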
// If the value of the named `property` is a function then invoke it with the
// `object` as context; otherwise, return it.
_.result = function(object, property) {
if (object == null) return void 0;
var value = object[property];
return _.isFunction(value) ? value.call(object) : value;
};
// Add your own custom functions to the Underscore object.
_.mixin = function(obj) {
each(_.functions(obj), function(name) {
var func = _[name] = obj[name];
_.prototype[name] = function() {
var args = [this._wrapped];
push.apply(args, arguments);
return result.call(this, func.apply(_, args));
};
});
};
// Generate a unique integer id (unique within the entire client session).
// Useful for temporary DOM ids.
var idCounter = 0;
_.uniqueId = function(prefix) {
var id = ++idCounter + '';
return prefix ? prefix + id : id;
};
// By default, Underscore uses ERB-style template delimiters, change the
// following template settings to use alternative delimiters.
_.templateSettings = {
evaluate : /<%([\s\S]+?)%>/g,
interpolate : /<%=([\s\S]+?)%>/g,
escape : /<%-([\s\S]+?)%>/g
};
// When customizing `templateSettings`, if you don't want to define an
// interpolation, evaluation or escaping regex, we need one that is
// guaranteed not to match.
var noMatch = /(.)^/;
// Certain characters need to be escaped so that they can be put into a
// string literal.
var escapes = {
"'": "'",
'\\': '\\',
'\r': 'r',
'\n': 'n',
'\t': 't',
'\u2028': 'u2028',
'\u2029': 'u2029'
};
var escaper = /\\|'|\r|\n|\t|\u2028|\u2029/g;
// JavaScript micro-templating, similar to John Resig's implementation.
// Underscore templating handles arbitrary delimiters, preserves whitespace,
// and correctly escapes quotes within interpolated code.
_.template = function(text, data, settings) {
var render;
settings = _.defaults({}, settings, _.templateSettings);
// Combine delimiters into one regular expression via alternation.
var matcher = new RegExp([
(settings.escape || noMatch).source,
(settings.interpolate || noMatch).source,
(settings.evaluate || noMatch).source
].join('|') + '|$', 'g');
// Compile the template source, escaping string literals appropriately.
var index = 0;
var source = "__p+='";
text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
source += text.slice(index, offset)
.replace(escaper, function(match) { return '\\' + escapes[match]; });
if (escape) {
source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
}
if (interpolate) {
source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
}
if (evaluate) {
source += "';\n" + evaluate + "\n__p+='";
}
index = offset + match.length;
return match;
});
source += "';\n";
// If a variable is not specified, place data values in local scope.
if (!settings.variable) source = 'with(obj||{}){\n' + source + '}\n';
source = "var __t,__p='',__j=Array.prototype.join," +
"print=function(){__p+=__j.call(arguments,'');};\n" +
source + "return __p;\n";
try {
render = new Function(settings.variable || 'obj', '_', source);
} catch (e) {
e.source = source;
throw e;
}
if (data) return render(data, _);
var template = function(data) {
return render.call(this, data, _);
};
// Provide the compiled function source as a convenience for precompilation.
template.source = 'function(' + (settings.variable || 'obj') + '){\n' + source + '}';
return template;
};
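// Example usage (illustrative sketch): with the default delimiters,
// <%= %> interpolates, <%- %> escapes, and <% %> evaluates, e.g.
//   var hello = _.template('hello: <%= name %>');
//   hello({name: 'moe'}); // => 'hello: moe'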
// Add a "chain" function, which will delegate to the wrapper.
_.chain = function(obj) {
return _(obj).chain();
};
// OOP
// ---------------
// If Underscore is called as a function, it returns a wrapped object that
// can be used OO-style. This wrapper holds altered versions of all the
// underscore functions. Wrapped objects may be chained.
// Helper function to continue chaining intermediate results.
var result = function(obj) {
return this._chain ? _(obj).chain() : obj;
};
// Add all of the Underscore functions to the wrapper object.
_.mixin(_);
// Add all mutator Array functions to the wrapper.
each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
var obj = this._wrapped;
method.apply(obj, arguments);
if ((name == 'shift' || name == 'splice') && obj.length === 0) delete obj[0];
return result.call(this, obj);
};
});
// Add all accessor Array functions to the wrapper.
each(['concat', 'join', 'slice'], function(name) {
var method = ArrayProto[name];
_.prototype[name] = function() {
return result.call(this, method.apply(this._wrapped, arguments));
};
});
_.extend(_.prototype, {
// Start chaining a wrapped Underscore object.
chain: function() {
this._chain = true;
return this;
},
// Extracts the result from a wrapped and chained object.
value: function() {
return this._wrapped;
}
});
// AMD registration happens at the end for compatibility with AMD loaders
// that may not enforce next-turn semantics on modules. Even though general
// practice for AMD registration is to be anonymous, underscore registers
// as a named module because, like jQuery, it is a base library that is
// popular enough to be bundled in a third party lib, but not be part of
// an AMD load request. Those cases could generate an error when an
// anonymous define() is called outside of a loader request.
if (typeof define === 'function' && define.amd) {
define('underscore', [], function() {
return _;
});
}
}).call(this);
},{}],15:[function(_dereq_,module,exports){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
var punycode = _dereq_('punycode');
exports.parse = urlParse;
exports.resolve = urlResolve;
exports.resolveObject = urlResolveObject;
exports.format = urlFormat;
exports.Url = Url;
function Url() {
this.protocol = null;
this.slashes = null;
this.auth = null;
this.host = null;
this.port = null;
this.hostname = null;
this.hash = null;
this.search = null;
this.query = null;
this.pathname = null;
this.path = null;
this.href = null;
}
// Reference: RFC 3986, RFC 1808, RFC 2396
// define these here so at least they only have to be
// compiled once on the first module load.
var protocolPattern = /^([a-z0-9.+-]+:)/i,
portPattern = /:[0-9]*$/,
// RFC 2396: characters reserved for delimiting URLs.
// We actually just auto-escape these.
delims = ['<', '>', '"', '`', ' ', '\r', '\n', '\t'],
// RFC 2396: characters not allowed for various reasons.
unwise = ['{', '}', '|', '\\', '^', '`'].concat(delims),
// Allowed by RFCs, but cause of XSS attacks. Always escape these.
autoEscape = ['\''].concat(unwise),
// Characters that are never ever allowed in a hostname.
// Note that any invalid chars are also handled, but these
// are the ones that are *expected* to be seen, so we fast-path
// them.
nonHostChars = ['%', '/', '?', ';', '#'].concat(autoEscape),
hostEndingChars = ['/', '?', '#'],
hostnameMaxLen = 255,
hostnamePartPattern = /^[a-z0-9A-Z_-]{0,63}$/,
hostnamePartStart = /^([a-z0-9A-Z_-]{0,63})(.*)$/,
// protocols that can allow "unsafe" and "unwise" chars.
unsafeProtocol = {
'javascript': true,
'javascript:': true
},
// protocols that never have a hostname.
hostlessProtocol = {
'javascript': true,
'javascript:': true
},
// protocols that always contain a // bit.
slashedProtocol = {
'http': true,
'https': true,
'ftp': true,
'gopher': true,
'file': true,
'http:': true,
'https:': true,
'ftp:': true,
'gopher:': true,
'file:': true
},
querystring = _dereq_('querystring');
function urlParse(url, parseQueryString, slashesDenoteHost) {
if (url && isObject(url) && url instanceof Url) return url;
var u = new Url;
u.parse(url, parseQueryString, slashesDenoteHost);
return u;
}
Url.prototype.parse = function(url, parseQueryString, slashesDenoteHost) {
if (!isString(url)) {
throw new TypeError("Parameter 'url' must be a string, not " + typeof url);
}
var rest = url;
// trim before proceeding.
// This is to support parsing input like " http://foo.com \n"
rest = rest.trim();
var proto = protocolPattern.exec(rest);
if (proto) {
proto = proto[0];
var lowerProto = proto.toLowerCase();
this.protocol = lowerProto;
rest = rest.substr(proto.length);
}
// figure out if it's got a host
// user@server is *always* interpreted as a hostname, and url
// resolution will treat //foo/bar as host=foo,path=bar because that's
// how the browser resolves relative URLs.
if (slashesDenoteHost || proto || rest.match(/^\/\/[^@\/]+@[^@\/]+/)) {
var slashes = rest.substr(0, 2) === '//';
if (slashes && !(proto && hostlessProtocol[proto])) {
rest = rest.substr(2);
this.slashes = true;
}
}
if (!hostlessProtocol[proto] &&
(slashes || (proto && !slashedProtocol[proto]))) {
// there's a hostname.
// the first instance of /, ?, ;, or # ends the host.
//
// If there is an @ in the hostname, then non-host chars *are* allowed
// to the left of the last @ sign, unless some host-ending character
// comes *before* the @-sign.
// URLs are obnoxious.
//
// ex:
// http://a@b@c/ => user:a@b host:c
// http://a@b?@c => user:a host:c path:/?@c
// v0.12 TODO(isaacs): This is not quite how Chrome does things.
// Review our test case against browsers more comprehensively.
// find the first instance of any hostEndingChars
var hostEnd = -1;
for (var i = 0; i < hostEndingChars.length; i++) {
var hec = rest.indexOf(hostEndingChars[i]);
if (hec !== -1 && (hostEnd === -1 || hec < hostEnd))
hostEnd = hec;
}
// at this point, either we have an explicit point where the
// auth portion cannot go past, or the last @ char is the decider.
var auth, atSign;
if (hostEnd === -1) {
// atSign can be anywhere.
atSign = rest.lastIndexOf('@');
} else {
// atSign must be in auth portion.
// http://a@b/c@d => host:b auth:a path:/c@d
atSign = rest.lastIndexOf('@', hostEnd);
}
// Now we have a portion which is definitely the auth.
// Pull that off.
if (atSign !== -1) {
auth = rest.slice(0, atSign);
rest = rest.slice(atSign + 1);
this.auth = decodeURIComponent(auth);
}
// the host is the remaining to the left of the first non-host char
hostEnd = -1;
for (var i = 0; i < nonHostChars.length; i++) {
var hec = rest.indexOf(nonHostChars[i]);
if (hec !== -1 && (hostEnd === -1 || hec < hostEnd))
hostEnd = hec;
}
// if we still have not hit it, then the entire thing is a host.
if (hostEnd === -1)
hostEnd = rest.length;
this.host = rest.slice(0, hostEnd);
rest = rest.slice(hostEnd);
// pull out port.
this.parseHost();
// we've indicated that there is a hostname,
// so even if it's empty, it has to be present.
this.hostname = this.hostname || '';
// if hostname begins with [ and ends with ]
// assume that it's an IPv6 address.
var ipv6Hostname = this.hostname[0] === '[' &&
this.hostname[this.hostname.length - 1] === ']';
// validate a little.
if (!ipv6Hostname) {
var hostparts = this.hostname.split(/\./);
for (var i = 0, l = hostparts.length; i < l; i++) {
var part = hostparts[i];
if (!part) continue;
if (!part.match(hostnamePartPattern)) {
var newpart = '';
for (var j = 0, k = part.length; j < k; j++) {
if (part.charCodeAt(j) > 127) {
// we replace the non-ASCII char with a temporary placeholder;
// we need this to make sure the size of the hostname part is not
// skewed by simply dropping the non-ASCII char
newpart += 'x';
} else {
newpart += part[j];
}
}
// we test again with ASCII char only
if (!newpart.match(hostnamePartPattern)) {
var validParts = hostparts.slice(0, i);
var notHost = hostparts.slice(i + 1);
var bit = part.match(hostnamePartStart);
if (bit) {
validParts.push(bit[1]);
notHost.unshift(bit[2]);
}
if (notHost.length) {
rest = '/' + notHost.join('.') + rest;
}
this.hostname = validParts.join('.');
break;
}
}
}
}
if (this.hostname.length > hostnameMaxLen) {
this.hostname = '';
} else {
// hostnames are always lower case.
this.hostname = this.hostname.toLowerCase();
}
if (!ipv6Hostname) {
// IDNA Support: Returns a punycoded representation of "domain".
// It only converts the parts of the domain name that
// have non-ASCII characters, i.e. it doesn't matter if
// you call it with a domain that is already in ASCII.
var domainArray = this.hostname.split('.');
var newOut = [];
for (var i = 0; i < domainArray.length; ++i) {
var s = domainArray[i];
newOut.push(s.match(/[^A-Za-z0-9_-]/) ?
'xn--' + punycode.encode(s) : s);
}
this.hostname = newOut.join('.');
}
var p = this.port ? ':' + this.port : '';
var h = this.hostname || '';
this.host = h + p;
this.href += this.host;
// strip [ and ] from the hostname
// the host field still retains them, though
if (ipv6Hostname) {
this.hostname = this.hostname.substr(1, this.hostname.length - 2);
if (rest[0] !== '/') {
rest = '/' + rest;
}
}
}
// now rest is set to the post-host stuff.
// chop off any delim chars.
if (!unsafeProtocol[lowerProto]) {
// First, make 100% sure that any "autoEscape" chars get
// escaped, even if encodeURIComponent doesn't think they
// need to be.
for (var i = 0, l = autoEscape.length; i < l; i++) {
var ae = autoEscape[i];
var esc = encodeURIComponent(ae);
if (esc === ae) {
esc = escape(ae);
}
rest = rest.split(ae).join(esc);
}
}
// chop off from the tail first.
var hash = rest.indexOf('#');
if (hash !== -1) {
// got a fragment string.
this.hash = rest.substr(hash);
rest = rest.slice(0, hash);
}
var qm = rest.indexOf('?');
if (qm !== -1) {
this.search = rest.substr(qm);
this.query = rest.substr(qm + 1);
if (parseQueryString) {
this.query = querystring.parse(this.query);
}
rest = rest.slice(0, qm);
} else if (parseQueryString) {
// no query string, but parseQueryString still requested
this.search = '';
this.query = {};
}
if (rest) this.pathname = rest;
if (slashedProtocol[lowerProto] &&
this.hostname && !this.pathname) {
this.pathname = '/';
}
//to support http.request
if (this.pathname || this.search) {
var p = this.pathname || '';
var s = this.search || '';
this.path = p + s;
}
// finally, reconstruct the href based on what has been validated.
this.href = this.format();
return this;
};
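// Example usage (illustrative sketch): parsing splits a URL into its parts, e.g.
//   urlParse('http://user@host.com:8080/p/a/t/h?query=string#hash');
//   // => protocol 'http:', auth 'user', host 'host.com:8080', port '8080',
//   //    hostname 'host.com', pathname '/p/a/t/h', search '?query=string',
//   //    hash '#hash', and href reconstructed via format()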
// format a parsed object into a url string
function urlFormat(obj) {
// ensure it's an object, and not a string url.
// If it's an obj, this is a no-op.
// this way, you can call url_format() on strings
// to clean up potentially wonky urls.
if (isString(obj)) obj = urlParse(obj);
if (!(obj instanceof Url)) return Url.prototype.format.call(obj);
return obj.format();
}
Url.prototype.format = function() {
var auth = this.auth || '';
if (auth) {
auth = encodeURIComponent(auth);
auth = auth.replace(/%3A/i, ':');
auth += '@';
}
var protocol = this.protocol || '',
pathname = this.pathname || '',
hash = this.hash || '',
host = false,
query = '';
if (this.host) {
host = auth + this.host;
} else if (this.hostname) {
host = auth + (this.hostname.indexOf(':') === -1 ?
this.hostname :
'[' + this.hostname + ']');
if (this.port) {
host += ':' + this.port;
}
}
if (this.query &&
isObject(this.query) &&
Object.keys(this.query).length) {
query = querystring.stringify(this.query);
}
var search = this.search || (query && ('?' + query)) || '';
if (protocol && protocol.substr(-1) !== ':') protocol += ':';
// only the slashedProtocols get the //. Not mailto:, xmpp:, etc.
// unless they had them to begin with.
if (this.slashes ||
(!protocol || slashedProtocol[protocol]) && host !== false) {
host = '//' + (host || '');
if (pathname && pathname.charAt(0) !== '/') pathname = '/' + pathname;
} else if (!host) {
host = '';
}
if (hash && hash.charAt(0) !== '#') hash = '#' + hash;
if (search && search.charAt(0) !== '?') search = '?' + search;
pathname = pathname.replace(/[?#]/g, function(match) {
return encodeURIComponent(match);
});
search = search.replace('#', '%23');
return protocol + host + pathname + search + hash;
};
function urlResolve(source, relative) {
return urlParse(source, false, true).resolve(relative);
}
Url.prototype.resolve = function(relative) {
return this.resolveObject(urlParse(relative, false, true)).format();
};
function urlResolveObject(source, relative) {
if (!source) return relative;
return urlParse(source, false, true).resolveObject(relative);
}
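// Example usage (illustrative sketch): resolution follows browser-style
// relative-URL rules, e.g.
//   urlResolve('/one/two/three', 'four');         // => '/one/two/four'
//   urlResolve('http://example.com/', '/one');    // => 'http://example.com/one'
//   urlResolve('http://example.com/one', '/two'); // => 'http://example.com/two'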
Url.prototype.resolveObject = function(relative) {
if (isString(relative)) {
var rel = new Url();
rel.parse(relative, false, true);
relative = rel;
}
var result = new Url();
Object.keys(this).forEach(function(k) {
result[k] = this[k];
}, this);
// hash is always overridden, no matter what.
// even href="" will remove it.
result.hash = relative.hash;
// if the relative url is empty, then there's nothing left to do here.
if (relative.href === '') {
result.href = result.format();
return result;
}
// hrefs like //foo/bar always cut to the protocol.
if (relative.slashes && !relative.protocol) {
// take everything except the protocol from relative
Object.keys(relative).forEach(function(k) {
if (k !== 'protocol')
result[k] = relative[k];
});
//urlParse appends trailing / to urls like http://www.example.com
if (slashedProtocol[result.protocol] &&
result.hostname && !result.pathname) {
result.path = result.pathname = '/';
}
result.href = result.format();
return result;
}
if (relative.protocol && relative.protocol !== result.protocol) {
// if it's a known url protocol, then changing
// the protocol does weird things
// first, if it's not file:, then we MUST have a host,
// and if there was a path
// to begin with, then we MUST have a path.
// if it is file:, then the host is dropped,
// because that's known to be hostless.
// anything else is assumed to be absolute.
if (!slashedProtocol[relative.protocol]) {
Object.keys(relative).forEach(function(k) {
result[k] = relative[k];
});
result.href = result.format();
return result;
}
result.protocol = relative.protocol;
if (!relative.host && !hostlessProtocol[relative.protocol]) {
var relPath = (relative.pathname || '').split('/');
while (relPath.length && !(relative.host = relPath.shift()));
if (!relative.host) relative.host = '';
if (!relative.hostname) relative.hostname = '';
if (relPath[0] !== '') relPath.unshift('');
if (relPath.length < 2) relPath.unshift('');
result.pathname = relPath.join('/');
} else {
result.pathname = relative.pathname;
}
result.search = relative.search;
result.query = relative.query;
result.host = relative.host || '';
result.auth = relative.auth;
result.hostname = relative.hostname || relative.host;
result.port = relative.port;
// to support http.request
if (result.pathname || result.search) {
var p = result.pathname || '';
var s = result.search || '';
result.path = p + s;
}
result.slashes = result.slashes || relative.slashes;
result.href = result.format();
return result;
}
var isSourceAbs = (result.pathname && result.pathname.charAt(0) === '/'),
isRelAbs = (
relative.host ||
relative.pathname && relative.pathname.charAt(0) === '/'
),
mustEndAbs = (isRelAbs || isSourceAbs ||
(result.host && relative.pathname)),
removeAllDots = mustEndAbs,
srcPath = result.pathname && result.pathname.split('/') || [],
relPath = relative.pathname && relative.pathname.split('/') || [],
psychotic = result.protocol && !slashedProtocol[result.protocol];
// if the url is a non-slashed url, then relative
// links like ../.. should be able
// to crawl up to the hostname, as well. This is strange.
// result.protocol has already been set by now.
// Later on, put the first path part into the host field.
if (psychotic) {
result.hostname = '';
result.port = null;
if (result.host) {
if (srcPath[0] === '') srcPath[0] = result.host;
else srcPath.unshift(result.host);
}
result.host = '';
if (relative.protocol) {
relative.hostname = null;
relative.port = null;
if (relative.host) {
if (relPath[0] === '') relPath[0] = relative.host;
else relPath.unshift(relative.host);
}
relative.host = null;
}
mustEndAbs = mustEndAbs && (relPath[0] === '' || srcPath[0] === '');
}
if (isRelAbs) {
// it's absolute.
result.host = (relative.host || relative.host === '') ?
relative.host : result.host;
result.hostname = (relative.hostname || relative.hostname === '') ?
relative.hostname : result.hostname;
result.search = relative.search;
result.query = relative.query;
srcPath = relPath;
// fall through to the dot-handling below.
} else if (relPath.length) {
// it's relative
// throw away the existing file, and take the new path instead.
if (!srcPath) srcPath = [];
srcPath.pop();
srcPath = srcPath.concat(relPath);
result.search = relative.search;
result.query = relative.query;
} else if (!isNullOrUndefined(relative.search)) {
// just pull out the search.
// like href='?foo'.
// Put this after the other two cases because it simplifies the booleans
if (psychotic) {
result.hostname = result.host = srcPath.shift();
// occasionally the auth can get stuck only in host;
// this especially happens in cases like
//url.resolveObject('mailto:local1@domain1', 'local2@domain2')
var authInHost = result.host && result.host.indexOf('@') > 0 ?
result.host.split('@') : false;
if (authInHost) {
result.auth = authInHost.shift();
result.host = result.hostname = authInHost.shift();
}
}
result.search = relative.search;
result.query = relative.query;
//to support http.request
if (!isNull(result.pathname) || !isNull(result.search)) {
result.path = (result.pathname ? result.pathname : '') +
(result.search ? result.search : '');
}
result.href = result.format();
return result;
}
if (!srcPath.length) {
// no path at all. easy.
// we've already handled the other stuff above.
result.pathname = null;
//to support http.request
if (result.search) {
result.path = '/' + result.search;
} else {
result.path = null;
}
result.href = result.format();
return result;
}
// if a url ENDs in . or .., then it must get a trailing slash.
// however, if it ends in anything else non-slashy,
// then it must NOT get a trailing slash.
var last = srcPath.slice(-1)[0];
var hasTrailingSlash = (
(result.host || relative.host) && (last === '.' || last === '..') ||
last === '');
// strip single dots, resolve double dots to parent dir
// if the path tries to go above the root, `up` ends up > 0
var up = 0;
for (var i = srcPath.length; i >= 0; i--) {
last = srcPath[i];
if (last == '.') {
srcPath.splice(i, 1);
} else if (last === '..') {
srcPath.splice(i, 1);
up++;
} else if (up) {
srcPath.splice(i, 1);
up--;
}
}
// if the path is allowed to go above the root, restore leading ..s
if (!mustEndAbs && !removeAllDots) {
for (; up--; up) {
srcPath.unshift('..');
}
}
if (mustEndAbs && srcPath[0] !== '' &&
(!srcPath[0] || srcPath[0].charAt(0) !== '/')) {
srcPath.unshift('');
}
if (hasTrailingSlash && (srcPath.join('/').substr(-1) !== '/')) {
srcPath.push('');
}
var isAbsolute = srcPath[0] === '' ||
(srcPath[0] && srcPath[0].charAt(0) === '/');
// put the host back
if (psychotic) {
result.hostname = result.host = isAbsolute ? '' :
srcPath.length ? srcPath.shift() : '';
// occasionally the auth can get stuck only in host;
// this especially happens in cases like
//url.resolveObject('mailto:local1@domain1', 'local2@domain2')
var authInHost = result.host && result.host.indexOf('@') > 0 ?
result.host.split('@') : false;
if (authInHost) {
result.auth = authInHost.shift();
result.host = result.hostname = authInHost.shift();
}
}
mustEndAbs = mustEndAbs || (result.host && srcPath.length);
if (mustEndAbs && !isAbsolute) {
srcPath.unshift('');
}
if (!srcPath.length) {
result.pathname = null;
result.path = null;
} else {
result.pathname = srcPath.join('/');
}
// to support http.request
if (!isNull(result.pathname) || !isNull(result.search)) {
result.path = (result.pathname ? result.pathname : '') +
(result.search ? result.search : '');
}
result.auth = relative.auth || result.auth;
result.slashes = result.slashes || relative.slashes;
result.href = result.format();
return result;
};
Url.prototype.parseHost = function() {
var host = this.host;
var port = portPattern.exec(host);
if (port) {
port = port[0];
if (port !== ':') {
this.port = port.substr(1);
}
host = host.substr(0, host.length - port.length);
}
if (host) this.hostname = host;
};
function isString(arg) {
return typeof arg === "string";
}
function isObject(arg) {
return typeof arg === 'object' && arg !== null;
}
function isNull(arg) {
return arg === null;
}
function isNullOrUndefined(arg) {
return arg == null;
}
},{"punycode":9,"querystring":12}],16:[function(_dereq_,module,exports){
if (typeof Object.create === 'function') {
// implementation from standard node.js 'util' module
module.exports = function inherits(ctor, superCtor) {
ctor.super_ = superCtor
ctor.prototype = Object.create(superCtor.prototype, {
constructor: {
value: ctor,
enumerable: false,
writable: true,
configurable: true
}
});
};
} else {
// old school shim for old browsers
module.exports = function inherits(ctor, superCtor) {
ctor.super_ = superCtor
var TempCtor = function () {}
TempCtor.prototype = superCtor.prototype
ctor.prototype = new TempCtor()
ctor.prototype.constructor = ctor
}
}
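// Example usage (illustrative sketch): classic prototypal inheritance with a
// `super_` back-reference, e.g.
//   function Animal() {}
//   function Dog() { Animal.call(this); }
//   inherits(Dog, Animal);
//   new Dog() instanceof Animal; // => true, and Dog.super_ === Animal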
},{}],17:[function(_dereq_,module,exports){
module.exports = function isBuffer(arg) {
return arg && typeof arg === 'object'
&& typeof arg.copy === 'function'
&& typeof arg.fill === 'function'
&& typeof arg.readUInt8 === 'function';
}
},{}],18:[function(_dereq_,module,exports){
(function (process,global){
// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.
var formatRegExp = /%[sdj%]/g;
exports.format = function(f) {
if (!isString(f)) {
var objects = [];
for (var i = 0; i < arguments.length; i++) {
objects.push(inspect(arguments[i]));
}
return objects.join(' ');
}
var i = 1;
var args = arguments;
var len = args.length;
var str = String(f).replace(formatRegExp, function(x) {
if (x === '%%') return '%';
if (i >= len) return x;
switch (x) {
case '%s': return String(args[i++]);
case '%d': return Number(args[i++]);
case '%j':
try {
return JSON.stringify(args[i++]);
} catch (_) {
return '[Circular]';
}
default:
return x;
}
});
for (var x = args[i]; i < len; x = args[++i]) {
if (isNull(x) || !isObject(x)) {
str += ' ' + x;
} else {
str += ' ' + inspect(x);
}
}
return str;
};
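// Example usage (illustrative sketch): printf-style with %s, %d, and %j;
// extra arguments are inspected and appended, e.g.
//   exports.format('%s has %d items: %j', 'cart', 2, [1, 2]);
//   // => 'cart has 2 items: [1,2]'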
// Mark that a method should not be used.
// Returns a modified function which warns once by default.
// If --no-deprecation is set, then it is a no-op.
exports.deprecate = function(fn, msg) {
// Allow for deprecating things in the process of starting up.
if (isUndefined(global.process)) {
return function() {
return exports.deprecate(fn, msg).apply(this, arguments);
};
}
if (process.noDeprecation === true) {
return fn;
}
var warned = false;
function deprecated() {
if (!warned) {
if (process.throwDeprecation) {
throw new Error(msg);
} else if (process.traceDeprecation) {
console.trace(msg);
} else {
console.error(msg);
}
warned = true;
}
return fn.apply(this, arguments);
}
return deprecated;
};
var debugs = {};
var debugEnviron;
exports.debuglog = function(set) {
if (isUndefined(debugEnviron))
debugEnviron = process.env.NODE_DEBUG || '';
set = set.toUpperCase();
if (!debugs[set]) {
if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) {
var pid = process.pid;
debugs[set] = function() {
var msg = exports.format.apply(exports, arguments);
console.error('%s %d: %s', set, pid, msg);
};
} else {
debugs[set] = function() {};
}
}
return debugs[set];
};
/**
* Echoes the value of a value. Tries to print the value out
* in the best way possible given the different types.
*
* @param {Object} obj The object to print out.
* @param {Object} opts Optional options object that alters the output.
*/
/* legacy: obj, showHidden, depth, colors*/
function inspect(obj, opts) {
// default options
var ctx = {
seen: [],
stylize: stylizeNoColor
};
// legacy...
if (arguments.length >= 3) ctx.depth = arguments[2];
if (arguments.length >= 4) ctx.colors = arguments[3];
if (isBoolean(opts)) {
// legacy...
ctx.showHidden = opts;
} else if (opts) {
// got an "options" object
exports._extend(ctx, opts);
}
// set default options
if (isUndefined(ctx.showHidden)) ctx.showHidden = false;
if (isUndefined(ctx.depth)) ctx.depth = 2;
if (isUndefined(ctx.colors)) ctx.colors = false;
if (isUndefined(ctx.customInspect)) ctx.customInspect = true;
if (ctx.colors) ctx.stylize = stylizeWithColor;
return formatValue(ctx, obj, ctx.depth);
}
exports.inspect = inspect;
// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics
inspect.colors = {
'bold' : [1, 22],
'italic' : [3, 23],
'underline' : [4, 24],
'inverse' : [7, 27],
'white' : [37, 39],
'grey' : [90, 39],
'black' : [30, 39],
'blue' : [34, 39],
'cyan' : [36, 39],
'green' : [32, 39],
'magenta' : [35, 39],
'red' : [31, 39],
'yellow' : [33, 39]
};
// Don't use 'blue'; it's not visible on cmd.exe
inspect.styles = {
'special': 'cyan',
'number': 'yellow',
'boolean': 'yellow',
'undefined': 'grey',
'null': 'bold',
'string': 'green',
'date': 'magenta',
// "name": intentionally not styling
'regexp': 'red'
};
function stylizeWithColor(str, styleType) {
var style = inspect.styles[styleType];
if (style) {
return '\u001b[' + inspect.colors[style][0] + 'm' + str +
'\u001b[' + inspect.colors[style][1] + 'm';
} else {
return str;
}
}
function stylizeNoColor(str, styleType) {
return str;
}
function arrayToHash(array) {
var hash = {};
array.forEach(function(val, idx) {
hash[val] = true;
});
return hash;
}
function formatValue(ctx, value, recurseTimes) {
// Provide a hook for user-specified inspect functions.
// Check that value is an object with an inspect function on it
if (ctx.customInspect &&
value &&
isFunction(value.inspect) &&
// Filter out the util module; its inspect function is special
value.inspect !== exports.inspect &&
// Also filter out any prototype objects using the circular check.
!(value.constructor && value.constructor.prototype === value)) {
var ret = value.inspect(recurseTimes, ctx);
if (!isString(ret)) {
ret = formatValue(ctx, ret, recurseTimes);
}
return ret;
}
// Primitive types cannot have properties
var primitive = formatPrimitive(ctx, value);
if (primitive) {
return primitive;
}
// Look up the keys of the object.
var keys = Object.keys(value);
var visibleKeys = arrayToHash(keys);
if (ctx.showHidden) {
keys = Object.getOwnPropertyNames(value);
}
// IE doesn't make error fields non-enumerable
// http://msdn.microsoft.com/en-us/library/ie/dww52sbt(v=vs.94).aspx
if (isError(value)
&& (keys.indexOf('message') >= 0 || keys.indexOf('description') >= 0)) {
return formatError(value);
}
// Some types of object without properties can be short-circuited.
if (keys.length === 0) {
if (isFunction(value)) {
var name = value.name ? ': ' + value.name : '';
return ctx.stylize('[Function' + name + ']', 'special');
}
if (isRegExp(value)) {
return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp');
}
if (isDate(value)) {
return ctx.stylize(Date.prototype.toString.call(value), 'date');
}
if (isError(value)) {
return formatError(value);
}
}
var base = '', array = false, braces = ['{', '}'];
  // Make arrays say that they are arrays
if (isArray(value)) {
array = true;
braces = ['[', ']'];
}
// Make functions say that they are functions
if (isFunction(value)) {
var n = value.name ? ': ' + value.name : '';
base = ' [Function' + n + ']';
}
// Make RegExps say that they are RegExps
if (isRegExp(value)) {
base = ' ' + RegExp.prototype.toString.call(value);
}
// Make dates with properties first say the date
if (isDate(value)) {
base = ' ' + Date.prototype.toUTCString.call(value);
}
// Make error with message first say the error
if (isError(value)) {
base = ' ' + formatError(value);
}
if (keys.length === 0 && (!array || value.length == 0)) {
return braces[0] + base + braces[1];
}
if (recurseTimes < 0) {
if (isRegExp(value)) {
return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp');
} else {
return ctx.stylize('[Object]', 'special');
}
}
ctx.seen.push(value);
var output;
if (array) {
output = formatArray(ctx, value, recurseTimes, visibleKeys, keys);
} else {
output = keys.map(function(key) {
return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array);
});
}
ctx.seen.pop();
return reduceToSingleString(output, base, braces);
}
function formatPrimitive(ctx, value) {
if (isUndefined(value))
return ctx.stylize('undefined', 'undefined');
if (isString(value)) {
var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '')
.replace(/'/g, "\\'")
.replace(/\\"/g, '"') + '\'';
return ctx.stylize(simple, 'string');
}
if (isNumber(value))
return ctx.stylize('' + value, 'number');
if (isBoolean(value))
return ctx.stylize('' + value, 'boolean');
// For some reason typeof null is "object", so special case here.
if (isNull(value))
return ctx.stylize('null', 'null');
}
function formatError(value) {
return '[' + Error.prototype.toString.call(value) + ']';
}
function formatArray(ctx, value, recurseTimes, visibleKeys, keys) {
var output = [];
for (var i = 0, l = value.length; i < l; ++i) {
if (hasOwnProperty(value, String(i))) {
output.push(formatProperty(ctx, value, recurseTimes, visibleKeys,
String(i), true));
} else {
output.push('');
}
}
keys.forEach(function(key) {
if (!key.match(/^\d+$/)) {
output.push(formatProperty(ctx, value, recurseTimes, visibleKeys,
key, true));
}
});
return output;
}
function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) {
var name, str, desc;
desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] };
if (desc.get) {
if (desc.set) {
str = ctx.stylize('[Getter/Setter]', 'special');
} else {
str = ctx.stylize('[Getter]', 'special');
}
} else {
if (desc.set) {
str = ctx.stylize('[Setter]', 'special');
}
}
if (!hasOwnProperty(visibleKeys, key)) {
name = '[' + key + ']';
}
if (!str) {
if (ctx.seen.indexOf(desc.value) < 0) {
if (isNull(recurseTimes)) {
str = formatValue(ctx, desc.value, null);
} else {
str = formatValue(ctx, desc.value, recurseTimes - 1);
}
if (str.indexOf('\n') > -1) {
if (array) {
str = str.split('\n').map(function(line) {
return ' ' + line;
}).join('\n').substr(2);
} else {
str = '\n' + str.split('\n').map(function(line) {
return ' ' + line;
}).join('\n');
}
}
} else {
str = ctx.stylize('[Circular]', 'special');
}
}
if (isUndefined(name)) {
if (array && key.match(/^\d+$/)) {
return str;
}
name = JSON.stringify('' + key);
if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) {
name = name.substr(1, name.length - 2);
name = ctx.stylize(name, 'name');
} else {
name = name.replace(/'/g, "\\'")
.replace(/\\"/g, '"')
.replace(/(^"|"$)/g, "'");
name = ctx.stylize(name, 'string');
}
}
return name + ': ' + str;
}
function reduceToSingleString(output, base, braces) {
var numLinesEst = 0;
var length = output.reduce(function(prev, cur) {
numLinesEst++;
if (cur.indexOf('\n') >= 0) numLinesEst++;
return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1;
}, 0);
if (length > 60) {
return braces[0] +
(base === '' ? '' : base + '\n ') +
' ' +
output.join(',\n ') +
' ' +
braces[1];
}
return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1];
}
// NOTE: These type checking functions intentionally don't use `instanceof`
// because it is fragile and can be easily faked with `Object.create()`.
function isArray(ar) {
return Array.isArray(ar);
}
exports.isArray = isArray;
function isBoolean(arg) {
return typeof arg === 'boolean';
}
exports.isBoolean = isBoolean;
function isNull(arg) {
return arg === null;
}
exports.isNull = isNull;
function isNullOrUndefined(arg) {
return arg == null;
}
exports.isNullOrUndefined = isNullOrUndefined;
function isNumber(arg) {
return typeof arg === 'number';
}
exports.isNumber = isNumber;
function isString(arg) {
return typeof arg === 'string';
}
exports.isString = isString;
function isSymbol(arg) {
return typeof arg === 'symbol';
}
exports.isSymbol = isSymbol;
function isUndefined(arg) {
return arg === void 0;
}
exports.isUndefined = isUndefined;
function isRegExp(re) {
return isObject(re) && objectToString(re) === '[object RegExp]';
}
exports.isRegExp = isRegExp;
function isObject(arg) {
return typeof arg === 'object' && arg !== null;
}
exports.isObject = isObject;
function isDate(d) {
return isObject(d) && objectToString(d) === '[object Date]';
}
exports.isDate = isDate;
function isError(e) {
return isObject(e) &&
(objectToString(e) === '[object Error]' || e instanceof Error);
}
exports.isError = isError;
function isFunction(arg) {
return typeof arg === 'function';
}
exports.isFunction = isFunction;
function isPrimitive(arg) {
return arg === null ||
typeof arg === 'boolean' ||
typeof arg === 'number' ||
typeof arg === 'string' ||
typeof arg === 'symbol' || // ES6 symbol
typeof arg === 'undefined';
}
exports.isPrimitive = isPrimitive;
exports.isBuffer = _dereq_('./support/isBuffer');
function objectToString(o) {
return Object.prototype.toString.call(o);
}
function pad(n) {
return n < 10 ? '0' + n.toString(10) : n.toString(10);
}
var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec'];
// 26 Feb 16:19:34
function timestamp() {
var d = new Date();
var time = [pad(d.getHours()),
pad(d.getMinutes()),
pad(d.getSeconds())].join(':');
return [d.getDate(), months[d.getMonth()], time].join(' ');
}
// log is just a thin wrapper to console.log that prepends a timestamp
exports.log = function() {
console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments));
};
/**
* Inherit the prototype methods from one constructor into another.
*
* The Function.prototype.inherits from lang.js rewritten as a standalone
* function (not on Function.prototype). NOTE: If this file is to be loaded
* during bootstrapping this function needs to be rewritten using some native
* functions as prototype setup using normal JavaScript does not work as
* expected during bootstrapping (see mirror.js in r114903).
*
* @param {function} ctor Constructor function which needs to inherit the
* prototype.
* @param {function} superCtor Constructor function to inherit prototype from.
*/
exports.inherits = _dereq_('inherits');
exports._extend = function(origin, add) {
// Don't do anything if add isn't an object
if (!add || !isObject(add)) return origin;
var keys = Object.keys(add);
var i = keys.length;
while (i--) {
origin[keys[i]] = add[keys[i]];
}
return origin;
};
function hasOwnProperty(obj, prop) {
return Object.prototype.hasOwnProperty.call(obj, prop);
}
}).call(this,_dereq_("FWaASH"),typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"./support/isBuffer":17,"FWaASH":5,"inherits":16}],19:[function(_dereq_,module,exports){
var constants = _dereq_("./modules/constants");
var events = _dereq_("./modules/events");
var logger = _dereq_("./modules/logger");
var ajax = _dereq_("./modules/ajax");
var cloud = _dereq_("./modules/waitForCloud");
var api_act = _dereq_("./modules/api_act");
var api_auth = _dereq_("./modules/api_auth");
var api_sec = _dereq_("./modules/api_sec");
var api_hash = _dereq_("./modules/api_hash");
var api_sync = _dereq_("./modules/sync-cli");
var api_mbaas = _dereq_("./modules/api_mbaas");
var api_cloud = _dereq_("./modules/api_cloud");
var api_push = _dereq_("./modules/api_push");
var fhparams = _dereq_("./modules/fhparams");
var appProps = _dereq_("./modules/appProps");
var device = _dereq_("./modules/device");
var defaultFail = function(msg, error) {
logger.error(msg + ":" + JSON.stringify(error));
};
var addListener = function(type, listener) {
events.addListener(type, listener);
if (type === constants.INIT_EVENT) {
    //for the fhinit event, we need to check the status of the cloud and may need to fire the listener immediately.
if (cloud.isReady()) {
listener(null, {
host: cloud.getCloudHostUrl()
});
} else if (cloud.getInitError()) {
listener(cloud.getInitError());
}
}
};
var once = function(type, listener) {
if (type === constants.INIT_EVENT && cloud.isReady()) {
listener(null, {
host: cloud.getCloudHostUrl()
});
} else if (type === constants.INIT_EVENT && cloud.getInitError()) {
listener(cloud.getInitError());
} else {
events.once(type, listener);
}
};
//Legacy shim. Init happens based on fhconfig.json or, for v2, a global var called fh_app_props which is injected as part of the index.html wrapper
var init = function(opts, success, fail) {
logger.warn("$fh.init will be deprecated soon");
cloud.ready(function(err, host) {
if (err) {
if (typeof fail === "function") {
return fail(err);
}
} else {
if (typeof success === "function") {
success(host.host);
}
}
});
};
var fh = window.$fh || {};
fh.init = init;
fh.act = api_act;
fh.auth = api_auth;
fh.cloud = api_cloud;
fh.sec = api_sec;
fh.hash = api_hash;
fh.sync = api_sync;
fh.push = api_push;
fh.ajax = fh.__ajax = ajax;
fh.mbaas = api_mbaas;
fh._getDeviceId = device.getDeviceId;
fh.fh_timeout = 60000; //kept for backward compatibility
fh.getCloudURL = function() {
return cloud.getCloudHostUrl();
};
fh.getFHParams = function() {
return fhparams.buildFHParams();
};
fh.getFHHeaders = function() {
return fhparams.getFHHeaders();
};
//events
fh.addListener = addListener;
fh.on = addListener;
fh.once = once;
var methods = ["removeListener", "removeAllListeners", "setMaxListeners", "listeners", "emit"];
for (var i = 0; i < methods.length; i++) {
fh[methods[i]] = events[methods[i]];
}
//keep backward compatibility
fh.on(constants.INIT_EVENT, function(err, host) {
if (err) {
fh.cloud_props = {};
fh.app_props = {};
} else {
fh.cloud_props = {
hosts: {
url: host.host
}
};
fh.app_props = appProps.getAppProps();
}
});
//keep backward compatibility
fh.on(constants.INTERNAL_CONFIG_LOADED_EVENT, function(err, host) {
if (err) {
fh.app_props = {};
} else {
fh.app_props = appProps.getAppProps();
}
// Emit config loaded event - appprops set at this point
// V2 legacy SDK uses this to know when to fire $fh.ready (i.e. appprops is now set)
events.emit(constants.CONFIG_LOADED_EVENT, null);
});
//for test
fh.reset = cloud.reset;
//we should really stop polluting the global name space. Ideally we should ask browserify to use "$fh" when umd-fying the module. However, "$" is not allowed as a standard module name.
//So, we assign $fh to the window name space directly here. (Otherwise, we would have to fork the grunt browserify plugin, then fork browserify and the dependent umd module - really not worth the effort.)
window.$fh = fh;
module.exports = fh;
},{"./modules/ajax":21,"./modules/api_act":22,"./modules/api_auth":23,"./modules/api_cloud":24,"./modules/api_hash":25,"./modules/api_mbaas":26,"./modules/api_push":27,"./modules/api_sec":28,"./modules/appProps":29,"./modules/constants":31,"./modules/device":34,"./modules/events":35,"./modules/fhparams":36,"./modules/logger":42,"./modules/sync-cli":50,"./modules/waitForCloud":52}],20:[function(_dereq_,module,exports){
var urlparser = _dereq_('url');
var XDomainRequestWrapper = function(xdr){
this.xdr = xdr;
this.isWrapper = true;
this.readyState = 0;
this.onreadystatechange = null;
this.status = 0;
this.statusText = "";
this.responseText = "";
this.headers = {};
var self = this;
this.xdr.onload = function(){
self.readyState = 4;
self.status = 200;
self.statusText = "";
self.responseText = self.xdr.responseText;
if(self.onreadystatechange){
self.onreadystatechange();
}
};
this.xdr.onerror = function(){
if(self.onerror){
self.onerror();
}
self.readyState = 4;
self.status = 0;
self.statusText = "";
if(self.onreadystatechange){
self.onreadystatechange();
}
};
this.xdr.ontimeout = function(){
self.readyState = 4;
self.status = 408;
self.statusText = "timeout";
if(self.onreadystatechange){
self.onreadystatechange();
}
};
};
XDomainRequestWrapper.prototype.open = function(method, url, asyn){
var parsedUrl = urlparser.parse(url, true);
parsedUrl.query = parsedUrl.query || {};
parsedUrl.query.fh_headers = this.headers;
this.xdr.open(method, urlparser.format(parsedUrl));
};
XDomainRequestWrapper.prototype.send = function(data){
this.xdr.send(data);
};
XDomainRequestWrapper.prototype.abort = function(){
this.xdr.abort();
};
XDomainRequestWrapper.prototype.setRequestHeader = function(n, v){
//not supported by xdr
  //Good doc on the limitations of XDomainRequest: http://blogs.msdn.com/b/ieinternals/archive/2010/05/13/xdomainrequest-restrictions-limitations-and-workarounds.aspx
  //XDomainRequest doesn't allow setting custom request headers, but it is the only available option for CORS requests in IE8 & 9. In IE10, the standard XMLHttpRequest is finally used.
  //To support FH auth tokens in IE8 & 9, we append them as query parameters under the key "fh_headers"
this.headers[n] = v;
};
XDomainRequestWrapper.prototype.getResponseHeader = function(n){
//not supported by xdr
};
module.exports = XDomainRequestWrapper;
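//Minimal usage sketch (comment only; XDomainRequest only exists in IE8/9):
//  var req = new XDomainRequestWrapper(new XDomainRequest());
//  req.onreadystatechange = function(){
//    if(req.readyState === 4){ /* req.status, req.responseText */ }
//  };
//  req.open("GET", "http://example.com/api");
//  req.send(null);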
},{"url":15}],21:[function(_dereq_,module,exports){
//a shameless copy from https://github.com/ForbesLindesay/ajax/blob/master/index.js.
//it has the same methods and config options as jQuery/zeptojs but is very lightweight. see http://api.jquery.com/jQuery.ajax/
//a few small changes are made to support IE 8 and other features:
//1. use the getXhr function to replace the default XMLHttpRequest implementation to support IE8
//2. integrate with the events emitter. To subscribe to ajax events, you can do $fh.on("ajaxStart", handler). See http://api.jquery.com/Ajax_Events/ for the full list of events
//3. allow passing an xhr factory method through options: e.g. $fh.ajax({xhr: function(){/*own implementation of xhr*/}});
//4. use the fh_timeout value as the default timeout
//5. an extra option called "tryJSONP" to allow retrying the same call with JSONP if a normal CORS request failed - should only be used internally
//6. for jsonp, allow specifying the callback query param name using the "jsonp" option
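//Illustrative call (comment only; the url is a placeholder) showing the jQuery-like options this module accepts:
//  ajax({
//    url: "http://example.com/api",
//    type: "POST",
//    dataType: "json",
//    contentType: "application/json",
//    data: JSON.stringify({hello: "world"}),
//    timeout: 10000,
//    success: function(data){ /* parsed JSON */ },
//    error: function(xhr, statusText, error){ /* handle failure */ }
//  });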
var eventsHandler = _dereq_("./events");
var XDomainRequestWrapper = _dereq_("./XDomainRequestWrapper");
var logger = _dereq_("./logger");
var type
try {
type = _dereq_('type-of')
} catch (ex) {
//hide from browserify
var r = _dereq_
type = r('type')
}
var jsonpID = 0,
document = window.document,
key,
name,
rscript = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
scriptTypeRE = /^(?:text|application)\/javascript/i,
xmlTypeRE = /^(?:text|application)\/xml/i,
jsonType = 'application/json',
htmlType = 'text/html',
blankRE = /^\s*$/;
var ajax = module.exports = function (options) {
var settings = extend({}, options || {})
//keep backward compatibility
if(window && window.$fh && typeof window.$fh.fh_timeout === "number"){
ajax.settings.timeout = window.$fh.fh_timeout;
}
for (key in ajax.settings)
if (settings[key] === undefined) settings[key] = ajax.settings[key]
ajaxStart(settings)
if (!settings.crossDomain) {
settings.crossDomain = /^([\w-]+:)?\/\/([^\/]+)/.test(settings.url) && (RegExp.$1 != window.location.protocol || RegExp.$2 != window.location.host)
}
var dataType = settings.dataType,
hasPlaceholder = /=\?/.test(settings.url)
if (dataType == 'jsonp' || hasPlaceholder) {
if (!hasPlaceholder) {
settings.url = appendQuery(settings.url, (settings.jsonp? settings.jsonp: '_callback') + '=?');
}
return ajax.JSONP(settings)
}
if (!settings.url) settings.url = window.location.toString()
serializeData(settings)
var mime = settings.accepts[dataType],
baseHeaders = {},
protocol = /^([\w-]+:)\/\//.test(settings.url) ? RegExp.$1 : window.location.protocol,
xhr = settings.xhr(settings.crossDomain),
abortTimeout = null;
if (!settings.crossDomain) baseHeaders['X-Requested-With'] = 'XMLHttpRequest'
if (mime) {
baseHeaders['Accept'] = mime
if (mime.indexOf(',') > -1) mime = mime.split(',', 2)[0]
xhr.overrideMimeType && xhr.overrideMimeType(mime)
}
if (settings.contentType || (settings.data && !settings.formdata && settings.type.toUpperCase() != 'GET'))
baseHeaders['Content-Type'] = (settings.contentType || 'application/x-www-form-urlencoded')
settings.headers = extend(baseHeaders, settings.headers || {})
if (typeof Titanium !== 'undefined') {
xhr.onerror = function(){
if (!abortTimeout){
return;
}
clearTimeout(abortTimeout);
ajaxError(null, 'error', xhr, settings);
};
}
xhr.onreadystatechange = function () {
if (xhr.readyState == 4) {
clearTimeout(abortTimeout)
abortTimeout = undefined;
var result, error = false
if(settings.tryJSONP){
        //check if the request has failed. In some cases, we may want to try jsonp as well. Again, FH only...
if(xhr.status === 0 && settings.crossDomain && !xhr.isTimeout && protocol != 'file:'){
logger.debug("retry ajax call with jsonp")
settings.type = "GET";
settings.dataType = "jsonp";
if (settings.data) {
settings.data = "_jsonpdata=" + JSON.stringify(
_dereq_("./fhparams").addFHParams(JSON.parse(settings.data))
);
} else {
settings.data = "_jsonpdata=" + settings.data;
}
return ajax(settings);
}
}
if ((xhr.status >= 200 && xhr.status < 300) || xhr.status == 304 || (xhr.status == 0 && protocol == 'file:')) {
dataType = dataType || mimeToDataType(xhr.getResponseHeader('content-type'))
result = xhr.responseText
logger.debug("ajax response :: status = " + xhr.status + " :: body = " + result)
try {
if (dataType == 'script')(1, eval)(result)
else if (dataType == 'xml') result = xhr.responseXML
else if (dataType == 'json') result = blankRE.test(result) ? null : JSON.parse(result)
} catch (e) {
error = e
}
if (error) {
logger.debug("ajax error", error);
ajaxError(error, 'parsererror', xhr, settings)
}
else ajaxSuccess(result, xhr, settings)
} else {
ajaxError(null, 'error', xhr, settings)
}
}
}
var async = 'async' in settings ? settings.async : true
logger.debug("ajax call settings", settings)
xhr.open(settings.type, settings.url, async)
for (name in settings.headers) xhr.setRequestHeader(name, settings.headers[name])
if (ajaxBeforeSend(xhr, settings) === false) {
logger.debug("ajax call is aborted due to ajaxBeforeSend")
xhr.abort()
return false
}
if (settings.timeout > 0) abortTimeout = setTimeout(function () {
logger.debug("ajax call timed out")
xhr.onreadystatechange = empty
xhr.abort()
xhr.isTimeout = true
ajaxError(null, 'timeout', xhr, settings)
}, settings.timeout)
// avoid sending empty string (#319)
xhr.send(settings.data ? settings.data : null)
return xhr
}
// trigger a custom event and return true
function triggerAndReturn(context, eventName, data) {
eventsHandler.emit(eventName, data);
return true;
}
// trigger an Ajax "global" event
function triggerGlobal(settings, context, eventName, data) {
if (settings.global) return triggerAndReturn(context || document, eventName, data)
}
// Number of active Ajax requests
ajax.active = 0
function ajaxStart(settings) {
if (settings.global && ajax.active++ === 0) triggerGlobal(settings, null, 'ajaxStart')
}
function ajaxStop(settings) {
if (settings.global && !(--ajax.active)) triggerGlobal(settings, null, 'ajaxStop')
}
// triggers an extra global event "ajaxBeforeSend" that's like "ajaxSend" but cancelable
function ajaxBeforeSend(xhr, settings) {
var context = settings.context
if (settings.beforeSend.call(context, xhr, settings) === false)
return false
triggerGlobal(settings, context, 'ajaxSend', [xhr, settings])
}
function ajaxSuccess(data, xhr, settings) {
var context = settings.context,
status = 'success'
settings.success.call(context, data, status, xhr)
triggerGlobal(settings, context, 'ajaxSuccess', [xhr, settings, data])
ajaxComplete(status, xhr, settings)
}
// type: "timeout", "error", "abort", "parsererror"
function ajaxError(error, type, xhr, settings) {
var context = settings.context
settings.error.call(context, xhr, type, error)
triggerGlobal(settings, context, 'ajaxError', [xhr, settings, error])
ajaxComplete(type, xhr, settings)
}
// status: "success", "notmodified", "error", "timeout", "abort", "parsererror"
function ajaxComplete(status, xhr, settings) {
var context = settings.context
settings.complete.call(context, xhr, status)
triggerGlobal(settings, context, 'ajaxComplete', [xhr, settings])
ajaxStop(settings)
}
// Empty function, used as default callback
function empty() {}
ajax.JSONP = function (options) {
if (!('type' in options)) return ajax(options)
var callbackName = 'jsonp' + (++jsonpID),
script = document.createElement('script'),
abort = function () {
//todo: remove script
//$(script).remove()
if (callbackName in window) window[callbackName] = empty
ajaxComplete('abort', xhr, options)
},
xhr = {
abort: abort
}, abortTimeout,
head = document.getElementsByTagName("head")[0] || document.documentElement
if (options.error) script.onerror = function () {
xhr.abort()
options.error()
}
window[callbackName] = function (data) {
clearTimeout(abortTimeout)
abortTimeout = undefined;
//todo: remove script
//$(script).remove()
delete window[callbackName]
ajaxSuccess(data, xhr, options)
}
serializeData(options)
script.src = options.url.replace(/=\?/, '=' + callbackName)
// Use insertBefore instead of appendChild to circumvent an IE6 bug.
// This arises when a base node is used (see jQuery bugs #2709 and #4378).
head.insertBefore(script, head.firstChild);
if (options.timeout > 0) abortTimeout = setTimeout(function () {
xhr.abort()
ajaxComplete('timeout', xhr, options)
}, options.timeout)
return xhr
}
function isIE(){
var ie = false;
if(navigator.userAgent && navigator.userAgent.indexOf("MSIE") >=0 ){
ie = true;
}
return ie;
}
function getXhr(crossDomain){
var xhr = null;
//always use XMLHttpRequest if available
if(window.XMLHttpRequest){
xhr = new XMLHttpRequest();
}
//for IE8 only. Need to make sure it's not used when running inside Cordova.
if(isIE() && (crossDomain === true) && typeof window.XDomainRequest !== "undefined" && typeof window.cordova === "undefined"){
xhr = new XDomainRequestWrapper(new XDomainRequest());
}
// For Titanium SDK
if (typeof Titanium !== 'undefined'){
var params = {};
if(ajax.settings && ajax.settings.timeout){
params.timeout = ajax.settings.timeout;
}
xhr = Titanium.Network.createHTTPClient(params);
}
return xhr;
}
ajax.settings = {
// Default type of request
type: 'GET',
// Callback that is executed before request
beforeSend: empty,
// Callback that is executed if the request succeeds
success: empty,
  // Callback that is executed if the server returns an error
error: empty,
// Callback that is executed on request complete (both: error and success)
complete: empty,
// The context for the callbacks
context: null,
// Whether to trigger "global" Ajax events
global: true,
// Transport
xhr: getXhr,
// MIME types mapping
accepts: {
script: 'text/javascript, application/javascript',
json: jsonType,
xml: 'application/xml, text/xml',
html: htmlType,
text: 'text/plain'
},
// Whether the request is to another domain
crossDomain: false
}
function mimeToDataType(mime) {
return mime && (mime == htmlType ? 'html' :
mime == jsonType ? 'json' :
scriptTypeRE.test(mime) ? 'script' :
xmlTypeRE.test(mime) && 'xml') || 'text'
}
function appendQuery(url, query) {
return (url + '&' + query).replace(/[&?]{1,2}/, '?')
}
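// e.g. appendQuery('http://example.com/api', 'a=1') -> 'http://example.com/api?a=1'
// appendQuery('http://example.com/api?a=1', 'b=2') -> 'http://example.com/api?a=1&b=2'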
// serialize payload and append it to the URL for GET requests
function serializeData(options) {
if (type(options.data) === 'object') {
if(typeof options.data.append === "function"){
//we are dealing with FormData, do not serialize
options.formdata = true;
} else {
options.data = param(options.data)
}
}
if (options.data && (!options.type || options.type.toUpperCase() == 'GET'))
options.url = appendQuery(options.url, options.data)
}
ajax.get = function (url, success) {
return ajax({
url: url,
success: success
})
}
ajax.post = function (url, data, success, dataType) {
if (type(data) === 'function') dataType = dataType || success, success = data, data = null
return ajax({
type: 'POST',
url: url,
data: data,
success: success,
dataType: dataType
})
}
ajax.getJSON = function (url, success) {
return ajax({
url: url,
success: success,
dataType: 'json'
})
}
var escape = encodeURIComponent;
function serialize(params, obj, traditional, scope) {
var array = type(obj) === 'array';
for (var key in obj) {
var value = obj[key];
if (scope) key = traditional ? scope : scope + '[' + (array ? '' : key) + ']'
// handle data in serializeArray() format
if (!scope && array) params.add(value.name, value.value)
// recurse into nested objects
else if (traditional ? (type(value) === 'array') : (type(value) === 'object'))
serialize(params, value, traditional, key)
else params.add(key, value)
}
}
function param(obj, traditional) {
var params = []
params.add = function (k, v) {
this.push(escape(k) + '=' + escape(v))
}
serialize(params, obj, traditional)
return params.join('&').replace('%20', '+')
}
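// e.g. param({a: 1, b: {c: 2}}) -> 'a=1&b%5Bc%5D=2' (nested keys are flattened to b[c] and URL-encoded)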
function extend(target) {
var slice = Array.prototype.slice;
slice.call(arguments, 1).forEach(function (source) {
for (key in source)
if (source[key] !== undefined)
target[key] = source[key]
})
return target
}
},{"./XDomainRequestWrapper":20,"./events":35,"./fhparams":36,"./logger":42,"type-of":13}],22:[function(_dereq_,module,exports){
var logger =_dereq_("./logger");
var cloud = _dereq_("./waitForCloud");
var fhparams = _dereq_("./fhparams");
var ajax = _dereq_("./ajax");
var handleError = _dereq_("./handleError");
var appProps = _dereq_("./appProps");
var _ = _dereq_('underscore');
function doActCall(opts, success, fail){
var cloud_host = cloud.getCloudHost();
var url = cloud_host.getActUrl(opts.act);
var params = opts.req || {};
var headers = fhparams.getFHHeaders();
if (opts.headers) {
headers = _.extend(headers, opts.headers);
}
return ajax({
"url": url,
"tryJSONP": true,
"type": "POST",
"dataType": "json",
"data": JSON.stringify(params),
"headers": headers,
"contentType": "application/json",
"timeout": opts.timeout || appProps.timeout,
"success": success,
"error": function(req, statusText, error){
return handleError(fail, req, statusText, error);
}
});
}
module.exports = function(opts, success, fail){
logger.debug("act is called");
if(!fail){
fail = function(msg, error){
logger.debug(msg + ":" + JSON.stringify(error));
};
}
if(!opts.act){
return fail('act_no_action', {});
}
cloud.ready(function(err, cloudHost){
logger.debug("Calling fhact now");
if(err){
return fail(err.message, err);
} else {
doActCall(opts, success, fail);
}
});
};
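//Illustrative usage (comment only; "getTasks" is a hypothetical cloud action name):
//  $fh.act({act: "getTasks", req: {limit: 10}}, function(res){ /* success */ }, function(msg, err){ /* failure */ });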
},{"./ajax":21,"./appProps":29,"./fhparams":36,"./handleError":37,"./logger":42,"./waitForCloud":52,"underscore":14}],23:[function(_dereq_,module,exports){
var logger = _dereq_("./logger");
var cloud = _dereq_("./waitForCloud");
var fhparams = _dereq_("./fhparams");
var ajax = _dereq_("./ajax");
var handleError = _dereq_("./handleError");
var device = _dereq_("./device");
var constants = _dereq_("./constants");
var checkAuth = _dereq_("./checkAuth");
var appProps = _dereq_("./appProps");
var data = _dereq_('./data');
function callAuthEndpoint(endpoint, data, opts, success, fail){
var app_props = appProps.getAppProps();
var path = app_props.host + constants.boxprefix + "admin/authpolicy/" + endpoint;
if (app_props.local) {
path = cloud.getCloudHostUrl() + constants.boxprefix + "admin/authpolicy/" + endpoint;
}
ajax({
"url": path,
"type": "POST",
"tryJSONP": true,
"data": JSON.stringify(data),
"dataType": "json",
"contentType": "application/json",
"timeout": opts.timeout || app_props.timeout,
"headers": fhparams.getFHHeaders(),
success: function(res){
if(success){
return success(res);
}
},
error: function(req, statusText, error){
logger.error('got error when calling ' + endpoint, req.responseText || req, error);
if(fail){
fail(req, statusText, error);
}
}
});
}
var auth = function(opts, success, fail) {
if (!fail) {
fail = function(msg, error) {
logger.debug(msg + ":" + JSON.stringify(error));
};
}
if (!opts.policyId) {
return fail('auth_no_policyId', {});
}
if (!opts.clientToken) {
return fail('auth_no_clientToken', {});
}
cloud.ready(function(err, data) {
if (err) {
return fail(err.message, err);
} else {
var req = {};
req.policyId = opts.policyId;
req.clientToken = opts.clientToken;
var cloudHost = cloud.getCloudHost();
if(cloudHost.getEnv()){
req.environment = cloudHost.getEnv();
}
if (opts.endRedirectUrl) {
req.endRedirectUrl = opts.endRedirectUrl;
if (opts.authCallback) {
req.endRedirectUrl += (/\?/.test(req.endRedirectUrl) ? "&" : "?") + "_fhAuthCallback=" + opts.authCallback;
}
}
req.params = {};
if (opts.params) {
req.params = opts.params;
}
var endurl = opts.endRedirectUrl || "status=complete";
req.device = device.getDeviceId();
req = fhparams.addFHParams(req);
callAuthEndpoint('auth', req, opts, function(res){
auth.authenticateHandler(endurl, res, success, fail);
}, function(req, statusText, error){
handleError(fail, req, statusText, error);
});
}
});
};
auth.hasSession = function(cb){
data.sessionManager.exists(cb);
};
auth.clearSession = function(cb){
data.sessionManager.read(function(err, session){
if(err){
return cb(err);
}
if(session){
      //try our best to revoke the remote session
callAuthEndpoint('revokesession', session, {});
}
data.sessionManager.remove(cb);
fhparams.setAuthSessionToken(undefined);
});
};
auth.authenticateHandler = checkAuth.handleAuthResponse;
auth.verify = function(cb){
data.sessionManager.read(function(err, session){
if(err){
return cb(err);
}
if(session){
      //check with the remote server whether the session is still valid
callAuthEndpoint('verifysession', session, {}, function(res){
return cb(null, res.isValid);
}, function(req, statusText, error){
return cb('network_error');
});
} else {
return cb('no_session');
}
});
};
module.exports = auth;
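//Illustrative usage (comment only; the policyId and clientToken values are placeholders configured in the platform):
//  auth({policyId: "MY_AUTH_POLICY", clientToken: "MY_CLIENT_TOKEN"},
//    function(res){ /* res.sessionToken is set on success */ },
//    function(msg, err){ /* failure */ });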
},{"./ajax":21,"./appProps":29,"./checkAuth":30,"./constants":31,"./data":33,"./device":34,"./fhparams":36,"./handleError":37,"./logger":42,"./waitForCloud":52}],24:[function(_dereq_,module,exports){
var logger =_dereq_("./logger");
var cloud = _dereq_("./waitForCloud");
var fhparams = _dereq_("./fhparams");
var ajax = _dereq_("./ajax");
var handleError = _dereq_("./handleError");
var appProps = _dereq_("./appProps");
var _ = _dereq_('underscore');
function doCloudCall(opts, success, fail){
var cloud_host = cloud.getCloudHost();
var url = cloud_host.getCloudUrl(opts.path);
var params = opts.data || {};
var type = opts.method || "POST";
var data;
if (["POST", "PUT", "PATCH", "DELETE"].indexOf(type.toUpperCase()) !== -1) {
data = JSON.stringify(params);
} else {
data = params;
}
var headers = fhparams.getFHHeaders();
if (opts.headers) {
headers = _.extend(headers, opts.headers);
}
return ajax({
"url": url,
"type": type,
"dataType": opts.dataType || "json",
"data": data,
"contentType": opts.contentType || "application/json",
"timeout": opts.timeout || appProps.timeout,
"headers": headers,
"success": success,
"error": function(req, statusText, error){
return handleError(fail, req, statusText, error);
}
});
}
module.exports = function(opts, success, fail){
logger.debug("cloud is called");
if(!fail){
fail = function(msg, error){
logger.debug(msg + ":" + JSON.stringify(error));
};
}
cloud.ready(function(err, cloudHost){
logger.debug("Calling fhact now");
if(err){
return fail(err.message, err);
} else {
doCloudCall(opts, success, fail);
}
});
};
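//Illustrative usage (comment only; "/tasks" is a hypothetical cloud route):
//  $fh.cloud({path: "/tasks", method: "GET", data: {limit: 10}}, function(res){ /* success */ }, function(msg, err){ /* failure */ });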
},{"./ajax":21,"./appProps":29,"./fhparams":36,"./handleError":37,"./logger":42,"./waitForCloud":52,"underscore":14}],25:[function(_dereq_,module,exports){
var hashImpl = _dereq_("./security/hash");
module.exports = function(p, s, f){
var params = {};
if(typeof p.algorithm === "undefined"){
p.algorithm = "MD5";
}
params.act = "hash";
params.params = p;
hashImpl(params, s, f);
};
},{"./security/hash":48}],26:[function(_dereq_,module,exports){
var logger =_dereq_("./logger");
var cloud = _dereq_("./waitForCloud");
var fhparams = _dereq_("./fhparams");
var ajax = _dereq_("./ajax");
var handleError = _dereq_("./handleError");
var consts = _dereq_("./constants");
var appProps = _dereq_("./appProps");
module.exports = function(opts, success, fail){
logger.debug("mbaas is called.");
if(!fail){
fail = function(msg, error){
      logger.debug(msg + ":" + JSON.stringify(error));
};
}
var mbaas = opts.service;
var params = opts.params;
cloud.ready(function(err, cloudHost){
logger.debug("Calling mbaas now");
if(err){
return fail(err.message, err);
} else {
var cloud_host = cloud.getCloudHost();
var url = cloud_host.getMBAASUrl(mbaas);
params = fhparams.addFHParams(params);
return ajax({
"url": url,
"tryJSONP": true,
"type": "POST",
"dataType": "json",
"data": JSON.stringify(params),
"headers": fhparams.getFHHeaders(),
"contentType": "application/json",
"timeout": opts.timeout || appProps.timeout,
"success": success,
"error": function(req, statusText, error){
return handleError(fail, req, statusText, error);
}
});
}
});
};
},{"./ajax":21,"./appProps":29,"./constants":31,"./fhparams":36,"./handleError":37,"./logger":42,"./waitForCloud":52}],27:[function(_dereq_,module,exports){
var logger = _dereq_("./logger");
var appProps = _dereq_("./appProps");
var cloud = _dereq_("./waitForCloud");
module.exports = function (onNotification, success, fail, config) {
if (!fail) {
fail = function (msg, error) {
logger.debug(msg + ":" + JSON.stringify(error));
};
}
cloud.ready(function(err, cloudHost){
logger.debug("push is called");
if(err){
return fail(err.message, err);
} else {
if (window.push) {
var props = appProps.getAppProps();
props.pushServerURL = props.host + '/api/v2/ag-push';
if (config) {
for(var key in config) {
props[key] = config[key];
}
}
window.push.register(onNotification, success, fail, props);
} else {
fail('push plugin not installed');
}
}
});
};
},{"./appProps":29,"./logger":42,"./waitForCloud":52}],28:[function(_dereq_,module,exports){
var keygen = _dereq_("./security/aes-keygen");
var aes = _dereq_("./security/aes-node");
var rsa = _dereq_("./security/rsa-node");
var hash = _dereq_("./security/hash");
module.exports = function(p, s, f){
if (!p.act) {
f('bad_act', {}, p);
return;
}
if (!p.params) {
f('no_params', {}, p);
return;
}
if (!p.params.algorithm) {
f('no_params_algorithm', {}, p);
return;
}
p.params.algorithm = p.params.algorithm.toLowerCase();
if(p.act === "hash"){
return hash(p, s, f);
} else if(p.act === "encrypt"){
if(p.params.algorithm === "aes"){
return aes.encrypt(p, s, f);
} else if(p.params.algorithm === "rsa"){
return rsa.encrypt(p, s, f);
} else {
return f('encrypt_bad_algorithm:' + p.params.algorithm, {}, p);
}
} else if(p.act === "decrypt"){
if(p.params.algorithm === "aes"){
return aes.decrypt(p, s, f);
} else {
return f('decrypt_bad_algorithm:' + p.params.algorithm, {}, p);
}
} else if(p.act === "keygen"){
if(p.params.algorithm === "aes"){
return keygen(p, s, f);
} else {
return f('keygen_bad_algorithm:' + p.params.algorithm, {}, p);
}
}
};
},{"./security/aes-keygen":46,"./security/aes-node":47,"./security/hash":48,"./security/rsa-node":49}],29:[function(_dereq_,module,exports){
var consts = _dereq_("./constants");
var ajax = _dereq_("./ajax");
var logger = _dereq_("./logger");
var qs = _dereq_("./queryMap");
var _ = _dereq_('underscore');
var app_props = null;
var load = function(cb) {
var doc_url = document.location.href;
var url_params = qs(doc_url.replace(/#.*?$/g, ''));
var url_props = {};
//only use fh_ prefixed params
for(var key in url_params){
if(url_params.hasOwnProperty(key) ){
if(key.indexOf('fh_') === 0){
url_props[key.substr(3)] = decodeURI(url_params[key]);
}
}
}
//default properties
app_props = {
appid: "000000000000000000000000",
appkey: "0000000000000000000000000000000000000000",
projectid: "000000000000000000000000",
connectiontag: "0.0.1"
};
function setProps(props){
_.extend(app_props, props, url_props);
if(typeof url_params.url !== 'undefined'){
app_props.host = url_params.url;
}
app_props.local = !!(url_props.host || url_params.url || (props.local && props.host));
cb(null, app_props);
}
var config_url = url_params.fhconfig || consts.config_js;
ajax({
url: config_url,
dataType: "json",
success: function(data) {
logger.debug("fhconfig = " + JSON.stringify(data));
      //when the config file is loaded on a device, the file:// protocol is used, so the fail callback will never be called. The success callback will be called but the data value will be null.
if (null == data) {
//fh v2 only
if(window.fh_app_props){
return setProps(window.fh_app_props);
}
return cb(new Error("app_config_missing"));
} else {
setProps(data);
}
},
error: function(req, statusText, error) {
//fh v2 only
if(window.fh_app_props){
return setProps(window.fh_app_props);
}
logger.error(consts.config_js + " Not Found");
cb(new Error("app_config_missing"));
}
});
};
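//An illustrative fhconfig.json shape (comment only; all values are placeholders - the real file is generated per app):
//  {
//    "host": "https://myapp.example.com",
//    "appid": "000000000000000000000000",
//    "appkey": "0000000000000000000000000000000000000000",
//    "projectid": "000000000000000000000000",
//    "connectiontag": "0.0.1"
//  }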
var setAppProps = function(props) {
app_props = props;
};
var getAppProps = function() {
return app_props;
};
module.exports = {
load: load,
getAppProps: getAppProps,
setAppProps: setAppProps
};
},{"./ajax":21,"./constants":31,"./logger":42,"./queryMap":44,"underscore":14}],30:[function(_dereq_,module,exports){
var logger = _dereq_("./logger");
var queryMap = _dereq_("./queryMap");
var fhparams = _dereq_("./fhparams");
var data = _dereq_('./data');
var checkAuth = function(url) {
if (/\_fhAuthCallback/.test(url)) {
var qmap = queryMap(url);
if (qmap) {
var fhCallback = qmap["_fhAuthCallback"];
if (fhCallback) {
if (qmap['result'] && qmap['result'] === 'success') {
var sucRes = {'sessionToken': qmap['fh_auth_session'], 'authResponse' : JSON.parse(decodeURIComponent(decodeURIComponent(qmap['authResponse'])))};
fhparams.setAuthSessionToken(qmap['fh_auth_session']);
data.sessionManager.save(qmap['fh_auth_session']);
window[fhCallback](null, sucRes);
} else {
window[fhCallback]({'message':qmap['message']});
}
}
}
}
};
var handleAuthResponse = function(endurl, res, success, fail){
if(res.status && res.status === "ok"){
var onComplete = function(res){
if(res.sessionToken){
fhparams.setAuthSessionToken(res.sessionToken);
data.sessionManager.save(res.sessionToken, function(){
return success(res);
});
} else {
return success(res);
}
};
    //for OAuth, a url will be returned, which means the user should be directed to that url to authenticate.
    //we try to use the ChildBrowser plugin if it can be found. Otherwise send the url to the success function to allow the developer to handle it.
if(res.url){
var inappBrowserWindow = null;
var locationChange = function(new_url){
if(new_url.indexOf(endurl) > -1){
if(inappBrowserWindow){
inappBrowserWindow.close();
}
var qmap = queryMap(new_url);
if(qmap) {
if(qmap['result'] && qmap['result'] === 'success'){
var sucRes = {'sessionToken': qmap['fh_auth_session'], 'authResponse' : JSON.parse(decodeURIComponent(decodeURIComponent(qmap['authResponse'])))};
onComplete(sucRes);
} else {
if(fail){
fail("auth_failed", {'message':qmap['message']});
}
}
} else {
if(fail){
fail("auth_failed", {'message':qmap['message']});
}
}
}
};
if(window.PhoneGap || window.cordova){
if(window.plugins && window.plugins.childBrowser){
        //found the childbrowser plugin, add the event listener and load it
        //we need to know when the OAuth process is finished by checking for the presence of endurl. If the endurl is found, the authentication finished and we check whether it succeeded.
if(typeof window.plugins.childBrowser.showWebPage === "function"){
window.plugins.childBrowser.onLocationChange = locationChange;
window.plugins.childBrowser.showWebPage(res.url);
inappBrowserWindow = window.plugins.childBrowser;
}
} else {
try {
inappBrowserWindow = window.open(res.url, "_blank", 'location=yes');
inappBrowserWindow.addEventListener("loadstart", function(ev){
locationChange(ev.url);
});
} catch(e){
logger.info("InAppBrowser plugin is not intalled.");
onComplete(res);
}
}
} else {
document.location.href = res.url;
}
} else {
onComplete(res);
}
} else {
if(fail){
fail("auth_failed", res);
}
}
};
//This is mainly for using $fh.auth inside browsers. If the authentication method is OAuth, at the end of the process the user will be redirected to
//a url that we specified for checking whether the auth was successful. So we always check the url to see if we are on the redirected page.
if (window.addEventListener) {
window.addEventListener('load', function(){
checkAuth(window.location.href);
}, false); //W3C
} else if (window.attachEvent) {
window.attachEvent('onload', function(){
checkAuth(window.location.href);
}); //IE
}
module.exports = {
"handleAuthResponse": handleAuthResponse
};
},{"./data":33,"./fhparams":36,"./logger":42,"./queryMap":44}],31:[function(_dereq_,module,exports){
module.exports = {
"boxprefix": "/box/srv/1.1/",
"sdk_version": "2.18.6",
"config_js": "fhconfig.json",
"INIT_EVENT": "fhinit",
"INTERNAL_CONFIG_LOADED_EVENT": "internalfhconfigloaded",
"CONFIG_LOADED_EVENT": "fhconfigloaded",
"SESSION_TOKEN_STORAGE_NAME": "fh_session_token",
"SESSION_TOKEN_KEY_NAME":"sessionToken"
};
},{}],32:[function(_dereq_,module,exports){
module.exports = {
readCookieValue : function (cookie_name) {
var name_str = cookie_name + "=";
var cookies = document.cookie.split(";");
for (var i = 0; i < cookies.length; i++) {
var c = cookies[i];
while (c.charAt(0) === ' ') {
c = c.substring(1, c.length);
}
if (c.indexOf(name_str) === 0) {
return c.substring(name_str.length, c.length);
}
}
return null;
},
createCookie : function (cookie_name, cookie_value) {
var date = new Date();
date.setTime(date.getTime() + 36500 * 24 * 60 * 60 * 1000); //100 years
var expires = "; expires=" + date.toGMTString();
document.cookie = cookie_name + "=" + cookie_value + expires + "; path = /";
}
};
},{}],33:[function(_dereq_,module,exports){
var Lawnchair = _dereq_('../../libs/generated/lawnchair');
var lawnchairext = _dereq_('./lawnchair-ext');
var logger = _dereq_('./logger');
var constants = _dereq_("./constants");
var data = {
  //the dom adapter doesn't work on windows phone, so don't specify the adapter if the dom one failed
  //we specify the order of lawnchair adapters to use and lawnchair will pick the right one. To keep backward compatibility, keep the order
  //as dom, webkit-sqlite, window-name, titanium (matching DEFAULT_ADAPTERS below)
DEFAULT_ADAPTERS : ["dom", "webkit-sqlite", "window-name", "titanium"],
getStorage: function(name, adapters, fail){
var adpts = data.DEFAULT_ADAPTERS;
var errorHandler = fail || function(){};
if(adapters && adapters.length > 0){
adpts = (typeof adapters === 'string'?[adapters]: adapters);
}
var conf = {
name: name,
adapter: adpts,
fail: function(msg, err){
var error_message = 'read/save from/to local storage failed msg:' + msg + ' err:' + err;
logger.error(error_message, err);
errorHandler(error_message, {});
}
};
var store = Lawnchair(conf, function(){});
return store;
},
addFileStorageAdapter: function(appProps, hashFunc){
Lawnchair.adapter('localFileStorage', lawnchairext.fileStorageAdapter(appProps, hashFunc));
},
sessionManager: {
read: function(cb){
data.getStorage(constants.SESSION_TOKEN_STORAGE_NAME).get(constants.SESSION_TOKEN_KEY_NAME, function(session){
if(cb){
return cb(null, session);
}
});
},
exists: function(cb){
data.getStorage(constants.SESSION_TOKEN_STORAGE_NAME).exists(constants.SESSION_TOKEN_KEY_NAME, function(exist){
if(cb){
return cb(null, exist);
}
});
},
remove: function(cb){
data.getStorage(constants.SESSION_TOKEN_STORAGE_NAME).remove(constants.SESSION_TOKEN_KEY_NAME, function(){
if(cb){
return cb();
}
});
},
save: function(sessionToken, cb){
data.getStorage(constants.SESSION_TOKEN_STORAGE_NAME).save({key: constants.SESSION_TOKEN_KEY_NAME, sessionToken: sessionToken}, function(obj){
if(cb){
return cb();
}
});
}
}
};
module.exports = data;
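//Illustrative usage (comment only):
//  data.sessionManager.save("someSessionToken", function(){ /* stored */ });
//  data.sessionManager.read(function(err, session){ /* session is the stored record, if any */ });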
},{"../../libs/generated/lawnchair":2,"./constants":31,"./lawnchair-ext":40,"./logger":42}],34:[function(_dereq_,module,exports){
var cookies = _dereq_("./cookies");
var uuidModule = _dereq_("./uuid");
var logger = _dereq_("./logger");
module.exports = {
//try to get the unique device identifier
"getDeviceId": function(){
//check for cordova/phonegap first
if(typeof window.fhdevice !== "undefined" && typeof window.fhdevice.uuid !== "undefined"){
return window.fhdevice.uuid;
} else if(typeof window.device !== "undefined" && typeof window.device.uuid !== "undefined"){
return window.device.uuid;
} else if(typeof navigator.device !== "undefined" && typeof navigator.device.uuid !== "undefined"){
return navigator.device.uuid;
} else {
var _mock_uuid_cookie_name = "mock_uuid";
var uuid = cookies.readCookieValue(_mock_uuid_cookie_name);
if(!uuid){
uuid = uuidModule.createUUID();
cookies.createCookie(_mock_uuid_cookie_name, uuid);
}
return uuid;
}
},
  //this is for fixing analytics issues when upgrading from ios6 to ios7. Probably can be deprecated now
"getCuidMap": function(){
if(typeof window.fhdevice !== "undefined" && typeof window.fhdevice.cuidMap !== "undefined"){
return window.fhdevice.cuidMap;
} else if(typeof window.device !== "undefined" && typeof window.device.cuidMap !== "undefined"){
return window.device.cuidMap;
} else if(typeof navigator.device !== "undefined" && typeof navigator.device.cuidMap !== "undefined"){
return navigator.device.cuidMap;
}
return null;
},
"getDestination": function(){
var destination = null;
var platformsToTest = _dereq_("./platformsMap");
var userAgent = navigator.userAgent;
var dest_override = document.location.search.split("fh_destination_code=");
if (dest_override.length > 1) {
destination = dest_override[1];
} else if (typeof window.fh_destination_code !== 'undefined') {
destination = window.fh_destination_code;
} else {
platformsToTest.forEach(function(testDestination){
testDestination.test.forEach(function(destinationTest){
if(userAgent.indexOf(destinationTest) > -1){
destination = testDestination.destination;
}
});
});
}
    if(destination == null){ //no user agent matched, set to the default "web"
destination = "web";
}
logger.debug("destination = " + destination);
return destination;
}
};
},{"./cookies":32,"./logger":42,"./platformsMap":43,"./uuid":51}],35:[function(_dereq_,module,exports){
var EventEmitter = _dereq_('events').EventEmitter;
var emitter = new EventEmitter();
emitter.setMaxListeners(0);
module.exports = emitter;
},{"events":7}],36:[function(_dereq_,module,exports){
var device = _dereq_("./device");
var sdkversion = _dereq_("./sdkversion");
var appProps = _dereq_("./appProps");
var logger = _dereq_("./logger");
var defaultParams = null;
var authSessionToken = null;
//TODO: review these options; we probably only need all of them for init calls, and we shouldn't need all of them for act calls
var buildFHParams = function(){
if(defaultParams){
return defaultParams;
}
var fhparams = {};
fhparams.cuid = device.getDeviceId();
fhparams.cuidMap = device.getCuidMap();
fhparams.destination = device.getDestination();
if(window.device || navigator.device){
fhparams.device = window.device || navigator.device;
}
//backward compatible
if (typeof window.fh_app_version !== 'undefined'){
fhparams.app_version = fh_app_version;
}
if (typeof window.fh_project_version !== 'undefined'){
fhparams.project_version = fh_project_version;
}
if (typeof window.fh_project_app_version !== 'undefined'){
fhparams.project_app_version = fh_project_app_version;
}
fhparams.sdk_version = sdkversion();
if(authSessionToken){
fhparams.sessionToken = authSessionToken;
}
var app_props = appProps.getAppProps();
if(app_props){
fhparams.appid = app_props.appid;
fhparams.appkey = app_props.appkey;
fhparams.projectid = app_props.projectid;
fhparams.analyticsTag = app_props.analyticsTag;
fhparams.connectiontag = app_props.connectiontag;
if(app_props.init){
fhparams.init = typeof(app_props.init) === "string" ? JSON.parse(app_props.init) : app_props.init;
}
}
defaultParams = fhparams;
logger.debug("fhparams = ", defaultParams);
return fhparams;
};
//TODO: deprecate this. Move to use headers instead
var addFHParams = function(params){
var p = params || {};
p.__fh = buildFHParams();
return p;
};
var getFHHeaders = function(){
var headers = {};
var params = buildFHParams();
for(var name in params){
if(params.hasOwnProperty(name)){
headers['X-FH-' + name] = params[name];
}
}
return headers;
};
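//e.g. params of {cuid: "abc", appid: "000000000000000000000000"} become headers
//{"X-FH-cuid": "abc", "X-FH-appid": "000000000000000000000000"} (illustrative values)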
var setAuthSessionToken = function(sessionToken){
authSessionToken = sessionToken;
defaultParams = null;
};
module.exports = {
"buildFHParams": buildFHParams,
"addFHParams": addFHParams,
"setAuthSessionToken":setAuthSessionToken,
"getFHHeaders": getFHHeaders
};
},{"./appProps":29,"./device":34,"./logger":42,"./sdkversion":45}],37:[function(_dereq_,module,exports){
module.exports = function(fail, req, resStatus, error){
var errraw;
var statusCode = 0;
if(req){
try{
statusCode = req.status;
var res = JSON.parse(req.responseText);
errraw = res.error || res.msg || res;
if (errraw instanceof Array) {
errraw = errraw.join('\n');
}
} catch(e){
errraw = req.responseText;
}
}
if(fail){
fail(errraw, {
status: statusCode,
message: resStatus,
error: error
});
}
};
},{}],38:[function(_dereq_,module,exports){
var constants = _dereq_("./constants");
var appProps = _dereq_("./appProps");
function removeEndSlash(input){
var ret = input;
if(ret.charAt(ret.length - 1) === "/"){
ret = ret.substring(0, ret.length-1);
}
return ret;
}
function removeStartSlash(input){
var ret = input;
if(ret.length > 1 && ret.charAt(0) === "/"){
ret = ret.substring(1, ret.length);
}
return ret;
}
function CloudHost(cloud_props){
this.cloud_props = cloud_props;
this.cloud_host = undefined;
this.app_env = null;
this.isLegacy = false;
}
CloudHost.prototype.getHost = function(appType){
if(this.cloud_host){
return this.cloud_host;
} else {
var url;
var app_type;
if(this.cloud_props && this.cloud_props.hosts){
url = this.cloud_props.hosts.url;
if (typeof url === 'undefined') {
// resolve url the old way i.e. depending on
// -burnt in app mode
// -returned dev or live url
// -returned dev or live type (node or fh(rhino or proxying))
var cloud_host = this.cloud_props.hosts.releaseCloudUrl;
app_type = this.cloud_props.hosts.releaseCloudType;
if(typeof appType !== "undefined" && appType.indexOf("dev") > -1){
cloud_host = this.cloud_props.hosts.debugCloudUrl;
app_type = this.cloud_props.hosts.debugCloudType;
}
url = cloud_host;
}
}
url = removeEndSlash(url);
this.cloud_host = url;
if(app_type === "fh"){
this.isLegacy = true;
}
return url;
}
};
CloudHost.prototype.getActUrl = function(act){
var app_props = appProps.getAppProps() || {};
if(typeof this.cloud_host === "undefined"){
this.getHost(app_props.mode);
}
if(this.isLegacy){
return this.cloud_host + constants.boxprefix + "act/" + this.cloud_props.domain + "/" + app_props.appid + "/" + act + "/" + app_props.appid;
} else {
return this.cloud_host + "/cloud/" + act;
}
};
CloudHost.prototype.getMBAASUrl = function(service){
var app_props = appProps.getAppProps() || {};
if(typeof this.cloud_host === "undefined"){
this.getHost(app_props.mode);
}
return this.cloud_host + "/mbaas/" + service;
};
CloudHost.prototype.getCloudUrl = function(path){
var app_props = appProps.getAppProps() || {};
if(typeof this.cloud_host === "undefined"){
this.getHost(app_props.mode);
}
return this.cloud_host + "/" + removeStartSlash(path);
};
CloudHost.prototype.getEnv = function(){
if(this.app_env){
return this.app_env;
} else {
if(this.cloud_props && this.cloud_props.hosts){
this.app_env = this.cloud_props.hosts.environment;
}
}
return this.app_env;
};
module.exports = CloudHost;
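//e.g. with a resolved cloud_host of "https://myapp.example.com" (illustrative, non-legacy):
//  getActUrl("getTasks") -> "https://myapp.example.com/cloud/getTasks"
//  getMBAASUrl("auth")   -> "https://myapp.example.com/mbaas/auth"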
},{"./appProps":29,"./constants":31}],39:[function(_dereq_,module,exports){
var loadScript = _dereq_("./loadScript");
var consts = _dereq_("./constants");
var fhparams = _dereq_("./fhparams");
var ajax = _dereq_("./ajax");
var handleError = _dereq_("./handleError");
var logger = _dereq_("./logger");
var hashFunc = _dereq_("./security/hash");
var appProps = _dereq_("./appProps");
var constants = _dereq_("./constants");
var events = _dereq_("./events");
var data = _dereq_('./data');
var init = function(cb) {
appProps.load(function(err, data) {
if (err) {
return cb(err);
}
// Emit internal config loaded event - SDK will now set appprops
events.emit(constants.INTERNAL_CONFIG_LOADED_EVENT, null, data);
return loadCloudProps(data, cb);
});
};
var loadCloudProps = function(app_props, callback) {
if (app_props.loglevel) {
logger.setLevel(app_props.loglevel);
}
// If local - shortcircuit the init - just return the host
if (app_props.local) {
var res = {
"domain": "local",
"firstTime": false,
"hosts": {
"debugCloudType": "node",
"debugCloudUrl": app_props.host,
"releaseCloudType": "node",
"releaseCloudUrl": app_props.host,
"type": "cloud_nodejs",
"url": app_props.host
},
"init": {
"trackId": "000000000000000000000000"
},
"status": "ok"
};
return callback(null, {
cloud: res
});
}
//now we have app props, add the fileStorageAdapter
data.addFileStorageAdapter(app_props, hashFunc);
var doInit = function(path, appProps, savedHost, storage) {
var data = fhparams.buildFHParams();
ajax({
"url": path,
"type": "POST",
"tryJSONP": true,
"dataType": "json",
"contentType": "application/json",
"data": JSON.stringify(data),
"timeout": appProps.timeout,
"success": function(initRes) {
if (storage) {
storage.save({
key: "fh_init",
value: initRes
}, function() {});
}
if (callback) {
callback(null, {
cloud: initRes
});
}
},
"error": function(req, statusText, error) {
var errormsg = "unknown";
if (req) {
errormsg = req.status + " - " + req.responseText;
}
logger.error("App init returned error : " + errormsg);
//use the cached host if we have a copy
if (savedHost) {
logger.info("Using cached host: " + JSON.stringify(savedHost));
if (callback) {
callback(null, {
cloud: savedHost
});
}
} else {
logger.error("No cached host found. Init failed.");
handleError(function(msg, err) {
if (callback) {
callback({
error: err,
message: msg
});
}
}, req, statusText, error);
}
}
});
};
var storage = null;
var path = app_props.host + consts.boxprefix + "app/init";
try {
storage = data.getStorage("fh_init_storage", typeof Titanium !== "undefined"?['titanium']:null);
storage.get('fh_init', function(storage_res) {
var savedHost = null;
if (storage_res && storage_res.value !== null && typeof(storage_res.value) !== "undefined" && storage_res !== "") {
storage_res = typeof(storage_res) === "string" ? JSON.parse(storage_res) : storage_res;
storage_res.value = typeof(storage_res.value) === "string" ? JSON.parse(storage_res.value) : storage_res.value;
if (storage_res.value.init) {
app_props.init = storage_res.value.init;
} else {
//keep it backward compatible.
app_props.init = typeof(storage_res.value) === "string" ? JSON.parse(storage_res.value) : storage_res.value;
}
if (storage_res.value.hosts) {
savedHost = storage_res.value;
}
}
doInit(path, app_props, savedHost, storage);
});
} catch (e) {
    //for whatever reason (e.g. localStorage is disabled) Lawnchair failed to init, just do the init
doInit(path, app_props, null, null);
}
};
module.exports = {
"init": init,
"loadCloudProps": loadCloudProps
};
},{"./ajax":21,"./appProps":29,"./constants":31,"./data":33,"./events":35,"./fhparams":36,"./handleError":37,"./loadScript":41,"./logger":42,"./security/hash":48}],40:[function(_dereq_,module,exports){
var fileStorageAdapter = function (app_props, hashFunc) {
// private methods
function doLog(mess){
if(console){
console.log(mess);
}
}
var fail = function (e, i) {
if(console) {
console.log('error in file system adapter !', e, i);
} else {
throw e;
}
};
function filenameForKey(key, cb) {
key = app_props.appid + key;
hashFunc({
algorithm: "MD5",
text: key
}, function(result) {
var filename = result.hashvalue + '.txt';
if (typeof navigator.externalstorage !== "undefined") {
navigator.externalstorage.enable(function handleSuccess(res){
var path = filename;
if(res.path ) {
path = res.path;
if(!path.match(/\/$/)) {
path += '/';
}
path += filename;
}
filename = path;
return cb(filename);
},function handleError(err){
return cb(filename);
});
} else {
doLog('filenameForKey key=' + key+ ' , Filename: ' + filename);
return cb(filename);
}
});
}
return {
valid: function () { return !!(window.requestFileSystem); },
init : function (options, callback){
//calls the parent function fn and applies this scope
if(options && 'function' === typeof options.fail ) {
fail = options.fail;
}
if (callback) {
this.fn(this.name, callback).call(this, this);
}
},
keys: function (callback){
throw "Currently not supported";
},
save : function (obj, callback){
var key = obj.key;
var value = obj.val||obj.value;
filenameForKey(key, function(hash) {
window.requestFileSystem(LocalFileSystem.PERSISTENT, 0, function gotFS(fileSystem) {
fileSystem.root.getFile(hash, {
create: true
}, function gotFileEntry(fileEntry) {
fileEntry.createWriter(function gotFileWriter(writer) {
writer.onwrite = function() {
return callback({
key: key,
val: value
});
};
writer.write(value);
}, function() {
fail('[save] Failed to create file writer');
});
}, function() {
fail('[save] Failed to getFile');
});
}, function() {
fail('[save] Failed to requestFileSystem');
});
});
},
batch : function (records, callback){
throw "Currently not supported";
},
get : function (key, callback){
filenameForKey(key, function(hash) {
window.requestFileSystem(LocalFileSystem.PERSISTENT, 0, function gotFS(fileSystem) {
fileSystem.root.getFile(hash, {}, function gotFileEntry(fileEntry) {
fileEntry.file(function gotFile(file) {
var reader = new FileReader();
reader.onloadend = function (evt) {
var text = evt.target.result;
// Check for URLencoded
// PG 2.2 bug in readAsText()
try {
text = decodeURIComponent(text);
} catch (e) {
// Swallow exception if not URLencoded
// Just use the result
}
return callback({
key: key,
val: text
});
};
reader.readAsText(file);
}, function() {
fail('[load] Failed to getFile');
});
}, function() {
// Success callback on key load failure
callback({
key: key,
val: null
});
});
}, function() {
fail('[load] Failed to get fileSystem');
});
});
},
exists : function (key, callback){
filenameForKey(key,function (hash){
window.requestFileSystem(LocalFileSystem.PERSISTENT, 0, function gotFS(fileSystem) {
fileSystem.root.getFile(hash, {},
function gotFileEntry(fileEntry) {
return callback(true);
}, function (err){
return callback(false);
});
});
});
},
all : function (callback){
throw "Currently not supported";
},
remove : function (key, callback){
filenameForKey(key, function(hash) {
window.requestFileSystem(LocalFileSystem.PERSISTENT, 0, function gotFS(fileSystem) {
fileSystem.root.getFile(hash, {}, function gotFileEntry(fileEntry) {
fileEntry.remove(function() {
return callback({
key: key,
val: null
});
}, function() {
fail('[remove] Failed to remove file');
});
}, function() {
fail('[remove] Failed to getFile');
});
}, function() {
fail('[remove] Failed to get fileSystem');
});
});
},
nuke : function (callback){
throw "Currently not supported";
}
};
};
module.exports = {
fileStorageAdapter: fileStorageAdapter
};
},{}],41:[function(_dereq_,module,exports){
module.exports = function (url, callback) {
var script;
var head = document.head || document.getElementsByTagName("head")[0] || document.documentElement;
script = document.createElement("script");
script.async = "async";
script.src = url;
script.type = "text/javascript";
script.onload = script.onreadystatechange = function () {
if (!script.readyState || /loaded|complete/.test(script.readyState)) {
script.onload = script.onreadystatechange = null;
if (head && script.parentNode) {
head.removeChild(script);
}
script = undefined;
if (callback && typeof callback === "function") {
callback();
}
}
};
head.insertBefore(script, head.firstChild);
};
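// Usage sketch (the URL is hypothetical) - a JSONP-style helper that appends
// a <script> tag to <head> and removes it once it has loaded:
//   loadScript("https://apphost/box/srv/app/init?callback=handleInit", function() {
//     // script executed and removed from <head>
//   });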
},{}],42:[function(_dereq_,module,exports){
var console = _dereq_('console');
var log = _dereq_('loglevel');
log.setLevel('info');
/**
* APIs:
* see https://github.com/pimterry/loglevel.
* In short, you can use:
* log.setLevel(loglevel) - default to info
* log.enableAll() - enable all log messages
* log.disableAll() - disable all log messages
*
* log.trace(msg)
* log.debug(msg)
* log.info(msg)
* log.warn(msg)
* log.error(msg)
*
* Available levels: { "TRACE": 0, "DEBUG": 1, "INFO": 2, "WARN": 3, "ERROR": 4, "SILENT": 5}
* Use either string or integer value
*/
module.exports = log;
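// Example (a sketch; `payload` is a placeholder variable):
//   var logger = _dereq_("./logger");
//   logger.setLevel("debug");
//   logger.debug("init request payload: " + JSON.stringify(payload));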
},{"console":6,"loglevel":8}],43:[function(_dereq_,module,exports){
module.exports = [
{
"destination" :"ipad",
"test": ["iPad"]
},
{
"destination" :"iphone",
"test": ["iPhone"]
},
{
"destination" :"android",
"test": ["Android"]
},
{
"destination" :"blackberry",
"test": ["BlackBerry", "BB10", "RIM Tablet OS"]//Blackberry 10 does not contain "Blackberry"
},
{
"destination" :"windowsphone",
"test": ["Windows Phone 8"]
},
{
"destination" :"windowsphone7",
"test": ["Windows Phone OS 7"]
}
];
},{}],44:[function(_dereq_,module,exports){
module.exports = function(url) {
var qmap = {};
var i = url.split("?");
if (i.length === 2) {
var queryString = i[1];
var pairs = queryString.split("&");
qmap = {};
for (var p = 0; p < pairs.length; p++) {
var q = pairs[p];
var qp = q.split("=");
qmap[qp[0]] = qp[1];
}
}
return qmap;
};
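// Example: given "http://example.com/app?env=dev&debug=true" this function
// returns {env: "dev", debug: "true"}; a URL without exactly one "?" yields {}.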
},{}],45:[function(_dereq_,module,exports){
var constants = _dereq_("./constants");
module.exports = function() {
var type = "FH_JS_SDK";
if (typeof window.fh_destination_code !== 'undefined') {
type = "FH_HYBRID_SDK";
} else if(window.PhoneGap || window.cordova) {
type = "FH_PHONEGAP_SDK";
}
return type + "/" + constants.sdk_version;
};
},{"./constants":31}],46:[function(_dereq_,module,exports){
var rsa = _dereq_("../../../libs/rsa");
var SecureRandom = rsa.SecureRandom;
var byte2Hex = rsa.byte2Hex;
var generateRandomKey = function(keysize){
var r = new SecureRandom();
var key = new Array(keysize);
r.nextBytes(key);
var result = "";
for(var i=0;i<key.length;i++){
result += byte2Hex(key[i]);
}
return result;
};
var aes_keygen = function(p, s, f){
if (!p.params.keysize) {
f('no_params_keysize', {}, p);
return;
}
if (p.params.algorithm.toLowerCase() !== "aes") {
f('keygen_bad_algorithm', {}, p);
return;
}
var keysize = parseInt(p.params.keysize, 10);
//keysize is in bits, so it needs to be converted to bytes before generating the random key,
//but the legacy code had a bug and skipped the conversion, so for backward compatibility a keysize of less than 100 is not converted
if(keysize > 100){
keysize = keysize/8;
}
if(typeof SecureRandom === "undefined"){
return f("security library is not loaded.");
}
return s({
'algorithm': 'AES',
'secretkey': generateRandomKey(keysize),
'iv': generateRandomKey(keysize)
});
};
module.exports = aes_keygen;
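// Usage sketch (parameter and callback shapes inferred from the code above):
//   aes_keygen({params: {algorithm: "aes", keysize: "256"}}, function(res) {
//     // res.secretkey and res.iv are 64-char hex strings (32 random bytes each)
//   }, function(err) { console.log(err); });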
},{"../../../libs/rsa":3}],47:[function(_dereq_,module,exports){
var CryptoJS = _dereq_("../../../libs/generated/crypto");
var encrypt = function(p, s, f){
var fields = ['key', 'plaintext', 'iv'];
if(p.params.algorithm.toLowerCase() !== "aes"){
return f('encrypt_bad_algorithm', {}, p);
}
for (var i = 0; i < fields.length; i++) {
var field = fields[i];
if (!p.params[field]) {
return f('no_params_' + field, {}, p);
}
}
var encrypted = CryptoJS.AES.encrypt(p.params.plaintext, CryptoJS.enc.Hex.parse(p.params.key), {iv: CryptoJS.enc.Hex.parse(p.params.iv)});
var cipher_text = CryptoJS.enc.Hex.stringify(encrypted.ciphertext);
return s({ciphertext: cipher_text});
};
var decrypt = function(p, s, f){
var fields = ['key', 'ciphertext', 'iv'];
if(p.params.algorithm.toLowerCase() !== "aes"){
return f('decrypt_bad_algorithm', {}, p);
}
for (var i = 0; i < fields.length; i++) {
var field = fields[i];
if (!p.params[field]) {
return f('no_params_' + field, {}, p);
}
}
var data = CryptoJS.enc.Hex.parse(p.params.ciphertext);
var encodeData = CryptoJS.enc.Base64.stringify(data);
var decrypted = CryptoJS.AES.decrypt(encodeData, CryptoJS.enc.Hex.parse(p.params.key), {iv: CryptoJS.enc.Hex.parse(p.params.iv)});
try {
return s({plaintext:decrypted.toString(CryptoJS.enc.Utf8)});
} catch (e) {
return f(e);
}
};
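// Round-trip sketch (k/iv are hex strings, e.g. from aes_keygen; onErr is a
// placeholder error handler):
//   encrypt({params: {algorithm: "aes", key: k, iv: iv, plaintext: "hi"}},
//     function(enc) {
//       decrypt({params: {algorithm: "aes", key: k, iv: iv, ciphertext: enc.ciphertext}},
//         function(dec) { /* dec.plaintext === "hi" */ }, onErr);
//     }, onErr);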
module.exports = {
encrypt: encrypt,
decrypt: decrypt
};
},{"../../../libs/generated/crypto":1}],48:[function(_dereq_,module,exports){
var CryptoJS = _dereq_("../../../libs/generated/crypto");
var hash = function(p, s, f){
if (!p.params.text) {
f('hash_no_text', {}, p);
return;
}
var hashValue;
if (p.params.algorithm.toLowerCase() === "md5") {
hashValue = CryptoJS.MD5(p.params.text).toString(CryptoJS.enc.Hex);
} else if(p.params.algorithm.toLowerCase() === "sha1"){
hashValue = CryptoJS.SHA1(p.params.text).toString(CryptoJS.enc.Hex);
} else if(p.params.algorithm.toLowerCase() === "sha256"){
hashValue = CryptoJS.SHA256(p.params.text).toString(CryptoJS.enc.Hex);
} else if(p.params.algorithm.toLowerCase() === "sha512"){
hashValue = CryptoJS.SHA512(p.params.text).toString(CryptoJS.enc.Hex);
} else {
return f("hash_unsupported_algorithm: " + p.params.algorithm);
}
return s({"hashvalue": hashValue});
};
module.exports = hash;
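// Example: hash({params: {algorithm: "SHA256", text: "abc"}}, onOk, onErr)
// calls onOk({hashvalue: "<64-char hex digest>"}); algorithm matching is
// case-insensitive and unsupported algorithms fall through to onErr.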
},{"../../../libs/generated/crypto":1}],49:[function(_dereq_,module,exports){
var rsa = _dereq_("../../../libs/rsa");
var RSAKey = rsa.RSAKey;
var encrypt = function(p, s, f){
var fields = ['modulu', 'plaintext'];
if(p.params.algorithm.toLowerCase() !== "rsa"){
return f('encrypt_bad_algorithm', {}, p);
}
for (var i = 0; i < fields.length; i++) {
var field = fields[i];
if (!p.params[field]) {
return f('no_params_' + field, {}, p);
}
}
var key = new RSAKey();
key.setPublic(p.params.modulu, "10001");
var ori_text = p.params.plaintext;
var cipher_text = key.encrypt(ori_text);
return s({ciphertext:cipher_text});
};
module.exports = {
encrypt: encrypt
};
},{"../../../libs/rsa":3}],50:[function(_dereq_,module,exports){
var actAPI = _dereq_("./api_act");
var cloudAPI = _dereq_("./api_cloud");
var CryptoJS = _dereq_("../../libs/generated/crypto");
var Lawnchair = _dereq_('../../libs/generated/lawnchair');
var self = {
// CONFIG
defaults: {
"sync_frequency": 10,
// How often to synchronise data with the cloud in seconds.
"auto_sync_local_updates": true,
// Should local changes be synced to the cloud immediately, or should they wait for the next sync interval
"notify_client_storage_failed": true,
// Should a notification event be triggered when loading/saving to client storage fails
"notify_sync_started": true,
// Should a notification event be triggered when a sync cycle with the server has been started
"notify_sync_complete": true,
// Should a notification event be triggered when a sync cycle with the server has been completed
"notify_offline_update": true,
// Should a notification event be triggered when an attempt was made to update a record while offline
"notify_collision_detected": true,
// Should a notification event be triggered when an update failed due to data collision
"notify_remote_update_failed": true,
// Should a notification event be triggered when an update failed for a reason other than data collision
"notify_local_update_applied": true,
// Should a notification event be triggered when an update was applied to the local data store
"notify_remote_update_applied": true,
// Should a notification event be triggered when an update was applied to the remote data store
"notify_delta_received": true,
// Should a notification event be triggered when a delta was received from the remote data store for the dataset
"notify_record_delta_received": true,
// Should a notification event be triggered when a delta was received from the remote data store for a record
"notify_sync_failed": true,
// Should a notification event be triggered when the sync loop failed to complete
"do_console_log": false,
// Should log statements be written to console.log
"crashed_count_wait" : 10,
// How many syncs should we check for updates on crashed in flight updates before we give up searching
"resend_crashed_updates" : true,
// If we have reached the crashed_count_wait limit, should we re-try sending the crashed in flight pending record
"sync_active" : true,
// Is the background sync with the cloud currently active
"storage_strategy" : "html5-filesystem",
// Storage strategy to use for Lawnchair - supported strategies are 'html5-filesystem' and 'dom'
"file_system_quota" : 50 * 1024 * 1204,
// Amount of space to request from the HTML5 filesystem API when running in browser
"has_custom_sync" : null,
//If the app has a custom cloud sync function, it should be set to true. If set to false, the default mbaas sync implementation will be used. When set to null or undefined,
//a check will be performed to determine which implementation to use
"icloud_backup" : false //ios only. If set to true, the file will be backed by icloud
},
notifications: {
"CLIENT_STORAGE_FAILED": "client_storage_failed",
// loading/saving to client storage failed
"SYNC_STARTED": "sync_started",
// A sync cycle with the server has been started
"SYNC_COMPLETE": "sync_complete",
// A sync cycle with the server has been completed
"OFFLINE_UPDATE": "offline_update",
// An attempt was made to update a record while offline
"COLLISION_DETECTED": "collision_detected",
//Update Failed due to data collision
"REMOTE_UPDATE_FAILED": "remote_update_failed",
// Update Failed for a reason other than data collision
"REMOTE_UPDATE_APPLIED": "remote_update_applied",
// An update was applied to the remote data store
"LOCAL_UPDATE_APPLIED": "local_update_applied",
// An update was applied to the local data store
"DELTA_RECEIVED": "delta_received",
// A delta was received from the remote data store for the dataset
"RECORD_DELTA_RECEIVED": "record_delta_received",
// A delta was received from the remote data store for the record
"SYNC_FAILED": "sync_failed"
// Sync loop failed to complete
},
datasets: {},
// Initialise config to default values;
config: undefined,
//TODO: deprecate this
notify_callback: undefined,
notify_callback_map : {},
init_is_called: false,
//this is used to map the temp data uid (created on client) to the real uid (created in the cloud)
uid_map: {},
// PUBLIC FUNCTION IMPLEMENTATIONS
init: function(options) {
self.consoleLog('sync - init called');
self.config = JSON.parse(JSON.stringify(self.defaults));
for (var i in options) {
self.config[i] = options[i];
}
//prevent multiple monitors from being created if init is called multiple times
if(!self.init_is_called){
self.init_is_called = true;
self.datasetMonitor();
}
},
notify: function(datasetId, callback) {
if(arguments.length === 1 && typeof datasetId === 'function'){
self.notify_callback = datasetId;
} else {
self.notify_callback_map[datasetId] = callback;
}
},
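// Typical wiring, a sketch using the option names from `defaults` above
// (`sync` stands for this module's exports; "myShoppingList" is hypothetical):
//   sync.init({sync_frequency: 30, do_console_log: true});
//   sync.notify("myShoppingList", function(n) {
//     if (n.code === "sync_complete") { /* re-render from doList() */ }
//   });
//   sync.manage("myShoppingList", {}, {}, {}, function() {});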
manage: function(dataset_id, opts, query_params, meta_data, cb) {
self.consoleLog('manage - START');
// Currently we do not enforce the rule that the init() function should be called before manage().
// We need this check to guard against self.config undefined
if (!self.config){
self.config = JSON.parse(JSON.stringify(self.defaults));
}
var options = opts || {};
var doManage = function(dataset) {
self.consoleLog('doManage dataset :: initialised = ' + dataset.initialised + " :: " + dataset_id + ' :: ' + JSON.stringify(options));
var currentDatasetCfg = (dataset.config) ? dataset.config : self.config;
var datasetConfig = self.setOptions(currentDatasetCfg, options);
dataset.query_params = query_params || dataset.query_params || {};
dataset.meta_data = meta_data || dataset.meta_data || {};
dataset.config = datasetConfig;
dataset.syncRunning = false;
dataset.syncPending = true;
dataset.initialised = true;
if(typeof dataset.meta === "undefined"){
dataset.meta = {};
}
self.saveDataSet(dataset_id, function() {
if( cb ) {
cb();
}
});
};
// Check if the dataset is already loaded
self.getDataSet(dataset_id, function(dataset) {
self.consoleLog('manage - dataset already loaded');
doManage(dataset);
}, function(err) {
self.consoleLog('manage - dataset not loaded... trying to load');
// Not already loaded, try to load from local storage
self.loadDataSet(dataset_id, function(dataset) {
self.consoleLog('manage - dataset loaded from local storage');
// Loading from local storage worked
// Fire the local update event to indicate that dataset was loaded from local storage
self.doNotify(dataset_id, null, self.notifications.LOCAL_UPDATE_APPLIED, "load");
// Put the dataset under the management of the sync service
doManage(dataset);
},
function(err) {
// No dataset in memory or local storage - create a new one and put it in memory
self.consoleLog('manage - Creating new dataset for id ' + dataset_id);
var dataset = {};
dataset.data = {};
dataset.pending = {};
dataset.meta = {};
self.datasets[dataset_id] = dataset;
doManage(dataset);
});
});
},
/**
* Sets options for passed in config, if !config then options will be applied to default config.
* @param {Object} config - config to which options will be applied
* @param {Object} options - options to be applied to the config
*/
setOptions: function(config, options) {
// Make sure config is initialised
if( ! config ) {
config = JSON.parse(JSON.stringify(self.defaults));
}
var datasetConfig = JSON.parse(JSON.stringify(config));
var optionsIn = JSON.parse(JSON.stringify(options));
for (var k in optionsIn) {
datasetConfig[k] = optionsIn[k];
}
return datasetConfig;
},
list: function(dataset_id, success, failure) {
self.getDataSet(dataset_id, function(dataset) {
if (dataset && dataset.data) {
// Return a copy of the dataset so updates will not automatically make it back into the dataset
var res = JSON.parse(JSON.stringify(dataset.data));
success(res);
} else {
if(failure) {
failure('no_data');
}
}
}, function(code, msg) {
if(failure) {
failure(code, msg);
}
});
},
getUID: function(oldOrNewUid){
var uid = self.uid_map[oldOrNewUid];
if(uid || uid === 0){
return uid;
} else {
return oldOrNewUid;
}
},
create: function(dataset_id, data, success, failure) {
if(data == null){
if(failure){
return failure("null_data");
}
}
self.addPendingObj(dataset_id, null, data, "create", success, failure);
},
read: function(dataset_id, uid, success, failure) {
self.getDataSet(dataset_id, function(dataset) {
uid = self.getUID(uid);
var rec = dataset.data[uid];
if (!rec) {
failure("unknown_uid");
} else {
// Return a copy of the record so updates will not automatically make it back into the dataset
var res = JSON.parse(JSON.stringify(rec));
success(res);
}
}, function(code, msg) {
if(failure) {
failure(code, msg);
}
});
},
update: function(dataset_id, uid, data, success, failure) {
uid = self.getUID(uid);
self.addPendingObj(dataset_id, uid, data, "update", success, failure);
},
'delete': function(dataset_id, uid, success, failure) {
uid = self.getUID(uid);
self.addPendingObj(dataset_id, uid, null, "delete", success, failure);
},
getPending: function(dataset_id, cb) {
self.getDataSet(dataset_id, function(dataset) {
var res;
if( dataset ) {
res = dataset.pending;
}
cb(res);
}, function(err, dataset_id) {
self.consoleLog(err);
});
},
clearPending: function(dataset_id, cb) {
self.getDataSet(dataset_id, function(dataset) {
dataset.pending = {};
self.saveDataSet(dataset_id, cb);
});
},
listCollisions : function(dataset_id, success, failure){
self.getDataSet(dataset_id, function(dataset) {
self.doCloudCall({
"dataset_id": dataset_id,
"req": {
"fn": "listCollisions",
"meta_data" : dataset.meta_data
}
}, success, failure);
}, failure);
},
removeCollision: function(dataset_id, collisionHash, success, failure) {
self.getDataSet(dataset_id, function(dataset) {
self.doCloudCall({
"dataset_id" : dataset_id,
"req": {
"fn": "removeCollision",
"hash": colissionHash,
meta_data: dataset.meta_data
}
}, success, failure);
});
},
// PRIVATE FUNCTIONS
isOnline: function(callback) {
var online = true;
// first, check if navigator.onLine is available
if(typeof navigator.onLine !== "undefined"){
online = navigator.onLine;
}
// second, check if Phonegap is available and has online info
if(online){
//use phonegap to determine if the network is available
if(typeof navigator.network !== "undefined" && typeof navigator.network.connection !== "undefined"){
var networkType = navigator.network.connection.type;
if(networkType === "none" || networkType === null) {
online = false;
}
}
}
return callback(online);
},
doNotify: function(dataset_id, uid, code, message) {
if( self.notify_callback || self.notify_callback_map[dataset_id]) {
var notifyFunc = self.notify_callback_map[dataset_id] || self.notify_callback;
if ( self.config['notify_' + code] ) {
var notification = {
"dataset_id" : dataset_id,
"uid" : uid,
"code" : code,
"message" : message
};
// defer so the user's callback can't block us
setTimeout(function () {
notifyFunc(notification);
}, 0);
}
}
},
getDataSet: function(dataset_id, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
success(dataset);
} else {
if(failure){
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
getQueryParams: function(dataset_id, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
success(dataset.query_params);
} else {
if(failure){
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
setQueryParams: function(dataset_id, queryParams, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
dataset.query_params = queryParams;
self.saveDataSet(dataset_id);
if( success ) {
success(dataset.query_params);
}
} else {
if ( failure ) {
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
getMetaData: function(dataset_id, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
success(dataset.meta_data);
} else {
if(failure){
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
setMetaData: function(dataset_id, metaData, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
dataset.meta_data = metaData;
self.saveDataSet(dataset_id);
if( success ) {
success(dataset.meta_data);
}
} else {
if( failure ) {
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
getConfig: function(dataset_id, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
success(dataset.config);
} else {
if(failure){
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
setConfig: function(dataset_id, config, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
var fullConfig = self.setOptions(dataset.config, config);
dataset.config = fullConfig;
self.saveDataSet(dataset_id);
if( success ) {
success(dataset.config);
}
} else {
if( failure ) {
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
stopSync: function(dataset_id, success, failure) {
self.setConfig(dataset_id, {"sync_active" : false}, function() {
if( success ) {
success();
}
}, failure);
},
startSync: function(dataset_id, success, failure) {
self.setConfig(dataset_id, {"sync_active" : true}, function() {
if( success ) {
success();
}
}, failure);
},
doSync: function(dataset_id, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
dataset.syncPending = true;
self.saveDataSet(dataset_id);
if( success ) {
success();
}
} else {
if( failure ) {
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
forceSync: function(dataset_id, success, failure) {
var dataset = self.datasets[dataset_id];
if (dataset) {
dataset.syncForced = true;
self.saveDataSet(dataset_id);
if( success ) {
success();
}
} else {
if( failure ) {
failure('unknown_dataset ' + dataset_id, dataset_id);
}
}
},
sortObject : function(object) {
if (typeof object !== "object" || object === null) {
return object;
}
var result = [];
Object.keys(object).sort().forEach(function(key) {
result.push({
key: key,
value: self.sortObject(object[key])
});
});
return result;
},
sortedStringify : function(obj) {
var str = '';
try {
str = JSON.stringify(self.sortObject(obj));
} catch (e) {
console.error('Error stringifying sorted object:' + e);
}
return str;
},
generateHash: function(object) {
var hash = CryptoJS.SHA1(self.sortedStringify(object));
return hash.toString();
},
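// Because sortObject recursively orders keys first, generateHash({a: 1, b: 2})
// and generateHash({b: 2, a: 1}) produce the same SHA-1, so record hashes are
// stable regardless of key insertion order.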
addPendingObj: function(dataset_id, uid, data, action, success, failure) {
self.isOnline(function (online) {
if (!online) {
self.doNotify(dataset_id, uid, self.notifications.OFFLINE_UPDATE, action);
}
});
function storePendingObject(obj) {
obj.hash = obj.hash || self.generateHash(obj);
self.getDataSet(dataset_id, function(dataset) {
dataset.pending[obj.hash] = obj;
self.updateDatasetFromLocal(dataset, obj);
if(self.config.auto_sync_local_updates) {
dataset.syncPending = true;
}
self.saveDataSet(dataset_id);
self.doNotify(dataset_id, uid, self.notifications.LOCAL_UPDATE_APPLIED, action);
success(obj);
}, function(code, msg) {
if(failure) {
failure(code, msg);
}
});
}
var pendingObj = {};
pendingObj.inFlight = false;
pendingObj.action = action;
pendingObj.post = JSON.parse(JSON.stringify(data));
pendingObj.postHash = self.generateHash(pendingObj.post);
pendingObj.timestamp = new Date().getTime();
if( "create" === action ) {
//this hash value will be returned later on when the cloud returns updates. We can then link the old uid
//with the new uid
pendingObj.hash = self.generateHash(pendingObj);
pendingObj.uid = pendingObj.hash;
storePendingObject(pendingObj);
} else {
self.read(dataset_id, uid, function(rec) {
pendingObj.uid = uid;
pendingObj.pre = rec.data;
pendingObj.preHash = self.generateHash(rec.data);
storePendingObject(pendingObj);
}, function(code, msg) {
if(failure){
failure(code, msg);
}
});
}
},
syncLoop: function(dataset_id) {
self.getDataSet(dataset_id, function(dataSet) {
// The sync loop is currently active
dataSet.syncPending = false;
dataSet.syncRunning = true;
dataSet.syncLoopStart = new Date().getTime();
self.doNotify(dataset_id, null, self.notifications.SYNC_STARTED, null);
self.isOnline(function(online) {
if (!online) {
self.syncComplete(dataset_id, "offline", self.notifications.SYNC_FAILED);
} else {
self.checkHasCustomSync(dataset_id, function() {
var syncLoopParams = {};
syncLoopParams.fn = 'sync';
syncLoopParams.dataset_id = dataset_id;
syncLoopParams.query_params = dataSet.query_params;
syncLoopParams.config = dataSet.config;
syncLoopParams.meta_data = dataSet.meta_data;
//var datasetHash = self.generateLocalDatasetHash(dataSet);
syncLoopParams.dataset_hash = dataSet.hash;
syncLoopParams.acknowledgements = dataSet.acknowledgements || [];
var pending = dataSet.pending;
var pendingArray = [];
for(var i in pending ) {
// Mark the pending records we are about to submit as inflight and add them to the array for submission
// Don't re-add previously inFlight pending records that have crashed - i.e. whose current state is unknown
// Don't add delayed records
if( !pending[i].inFlight && !pending[i].crashed && !pending[i].delayed) {
pending[i].inFlight = true;
pending[i].inFlightDate = new Date().getTime();
pendingArray.push(pending[i]);
}
}
syncLoopParams.pending = pendingArray;
if( pendingArray.length > 0 ) {
self.consoleLog('Starting sync loop - global hash = ' + dataSet.hash + ' :: params = ' + JSON.stringify(syncLoopParams, null, 2));
}
self.doCloudCall({
'dataset_id': dataset_id,
'req': syncLoopParams
}, function(res) {
var rec;
function processUpdates(updates, notification, acknowledgements) {
if( updates ) {
for (var up in updates) {
rec = updates[up];
acknowledgements.push(rec);
if( dataSet.pending[up] && dataSet.pending[up].inFlight) {
delete dataSet.pending[up];
self.doNotify(dataset_id, rec.uid, notification, rec);
}
}
}
}
// Check to see if any previously crashed inflight records can now be resolved
self.updateCrashedInFlightFromNewData(dataset_id, dataSet, res);
//Check to see if any delayed pending records can now be set to ready
self.updateDelayedFromNewData(dataset_id, dataSet, res);
//Check meta data as well to make sure it contains the correct info
self.updateMetaFromNewData(dataset_id, dataSet, res);
if (res.updates) {
var acknowledgements = [];
self.checkUidChanges(dataSet, res.updates.applied);
processUpdates(res.updates.applied, self.notifications.REMOTE_UPDATE_APPLIED, acknowledgements);
processUpdates(res.updates.failed, self.notifications.REMOTE_UPDATE_FAILED, acknowledgements);
processUpdates(res.updates.collisions, self.notifications.COLLISION_DETECTED, acknowledgements);
dataSet.acknowledgements = acknowledgements;
}
if (res.hash && res.hash !== dataSet.hash) {
self.consoleLog("Local dataset stale - syncing records :: local hash= " + dataSet.hash + " - remoteHash=" + res.hash);
// Different hash value returned - Sync individual records
self.syncRecords(dataset_id);
} else {
self.consoleLog("Local dataset up to date");
self.syncComplete(dataset_id, "online", self.notifications.SYNC_COMPLETE);
}
}, function(msg, err) {
// The AJAX call failed to complete successfully, so the state of the current pending updates is unknown
// Mark them as "crashed". The next time a syncLoop completes successfully, we will review the crashed
// records to see if we can determine their current state.
self.markInFlightAsCrashed(dataSet);
self.consoleLog("syncLoop failed : msg=" + msg + " :: err = " + err);
self.syncComplete(dataset_id, msg, self.notifications.SYNC_FAILED);
});
});
}
});
});
},
syncRecords: function(dataset_id) {
self.getDataSet(dataset_id, function(dataSet) {
var localDataSet = dataSet.data || {};
var clientRecs = {};
for (var i in localDataSet) {
var uid = i;
var hash = localDataSet[i].hash;
clientRecs[uid] = hash;
}
var syncRecParams = {};
syncRecParams.fn = 'syncRecords';
syncRecParams.dataset_id = dataset_id;
syncRecParams.query_params = dataSet.query_params;
syncRecParams.clientRecs = clientRecs;
self.consoleLog("syncRecParams :: " + JSON.stringify(syncRecParams));
self.doCloudCall({
'dataset_id': dataset_id,
'req': syncRecParams
}, function(res) {
self.consoleLog('syncRecords Res before applying pending changes :: ' + JSON.stringify(res));
self.applyPendingChangesToRecords(dataSet, res);
self.consoleLog('syncRecords Res after apply pending changes :: ' + JSON.stringify(res));
var i;
if (res.create) {
for (i in res.create) {
localDataSet[i] = {"hash" : res.create[i].hash, "data" : res.create[i].data};
self.doNotify(dataset_id, i, self.notifications.RECORD_DELTA_RECEIVED, "create");
}
}
if (res.update) {
for (i in res.update) {
localDataSet[i].hash = res.update[i].hash;
localDataSet[i].data = res.update[i].data;
self.doNotify(dataset_id, i, self.notifications.RECORD_DELTA_RECEIVED, "update");
}
}
if (res['delete']) {
for (i in res['delete']) {
delete localDataSet[i];
self.doNotify(dataset_id, i, self.notifications.RECORD_DELTA_RECEIVED, "delete");
}
}
self.doNotify(dataset_id, res.hash, self.notifications.DELTA_RECEIVED, 'partial dataset');
dataSet.data = localDataSet;
if(res.hash) {
dataSet.hash = res.hash;
}
self.syncComplete(dataset_id, "online", self.notifications.SYNC_COMPLETE);
}, function(msg, err) {
self.consoleLog("syncRecords failed : msg=" + msg + " :: err=" + err);
self.syncComplete(dataset_id, msg, self.notifications.SYNC_FAILED);
});
});
},
syncComplete: function(dataset_id, status, notification) {
self.getDataSet(dataset_id, function(dataset) {
dataset.syncRunning = false;
dataset.syncLoopEnd = new Date().getTime();
self.saveDataSet(dataset_id);
self.doNotify(dataset_id, dataset.hash, notification, status);
});
},
applyPendingChangesToRecords: function(dataset, records){
var pendings = dataset.pending;
for(var pendingUid in pendings){
if(pendings.hasOwnProperty(pendingUid)){
var pendingObj = pendings[pendingUid];
var uid = pendingObj.uid;
//if the records contain anything about the data records that are currently in pendings,
//it means there are local changes that haven't been applied to the cloud yet,
//so update the pre value of each pending record to reflect the latest status from the cloud
//and remove them from the response
if(records.create){
var creates = records.create;
if(creates && creates[uid]){
delete creates[uid];
}
}
if(records.update){
var updates = records.update;
if(updates && updates[uid]){
delete updates[uid];
}
}
if(records['delete']){
var deletes = records['delete'];
if(deletes && deletes[uid]){
delete deletes[uid];
}
}
}
}
},
checkUidChanges: function(dataset, appliedUpdates){
if(appliedUpdates){
var new_uids = {};
var changeUidsCount = 0;
for(var update in appliedUpdates){
if(appliedUpdates.hasOwnProperty(update)){
var applied_update = appliedUpdates[update];
var action = applied_update.action;
if(action && action === 'create'){
//we are receiving the results of creations; at this point, we have the old uid (the hash) and the real uid generated by the cloud
var newUid = applied_update.uid;
var oldUid = applied_update.hash;
changeUidsCount++;
//remember the mapping
self.uid_map[oldUid] = newUid;
new_uids[oldUid] = newUid;
//update the data uid in the dataset
var record = dataset.data[oldUid];
if(record){
dataset.data[newUid] = record;
delete dataset.data[oldUid];
}
//update the old uid in meta data
var metaData = dataset.meta[oldUid];
if(metaData) {
dataset.meta[newUid] = metaData;
delete dataset.meta[oldUid];
}
}
}
}
if(changeUidsCount > 0){
//we need to check all existing pendingRecords and update their UIDs if they are still the old values
for(var pending in dataset.pending){
if(dataset.pending.hasOwnProperty(pending)){
var pendingObj = dataset.pending[pending];
var pendingRecordUid = pendingObj.uid;
if(new_uids[pendingRecordUid]){
pendingObj.uid = new_uids[pendingRecordUid];
}
}
}
}
}
},
checkDatasets: function() {
for( var dataset_id in self.datasets ) {
if( self.datasets.hasOwnProperty(dataset_id) ) {
var dataset = self.datasets[dataset_id];
if(dataset && !dataset.syncRunning && (dataset.config.sync_active || dataset.syncForced)) {
// Check to see if it is time for the sync loop to run again
var lastSyncStart = dataset.syncLoopStart;
var lastSyncCmp = dataset.syncLoopEnd;
if(dataset.syncForced){
dataset.syncPending = true;
} else if( lastSyncStart == null ) {
self.consoleLog(dataset_id +' - Performing initial sync');
// Dataset has never been synced before - do initial sync
dataset.syncPending = true;
} else if (lastSyncCmp != null) {
var timeSinceLastSync = new Date().getTime() - lastSyncCmp;
var syncFrequency = dataset.config.sync_frequency * 1000;
if( timeSinceLastSync > syncFrequency ) {
// Time between sync loops has passed - do another sync
dataset.syncPending = true;
}
}
if( dataset.syncPending ) {
// Reset syncForced in case it was what caused the sync cycle to run.
dataset.syncForced = false;
// If the dataset requires syncing, run the sync loop. This may be because the sync interval has passed
// or because the sync_frequency has been changed or because a change was made to the dataset and the
// immediate_sync flag set to true
self.syncLoop(dataset_id);
}
}
}
}
},
checkHasCustomSync : function(dataset_id, cb) {
var dataset = self.datasets[dataset_id];
if(dataset && dataset.config){
self.consoleLog("dataset.config.has_custom_sync = " + dataset.config.has_custom_sync);
if(dataset.config.has_custom_sync != null) {
return cb();
}
self.consoleLog('starting check has custom sync');
actAPI({
'act' : dataset_id,
'req': {
'fn': 'sync'
}
}, function(res) {
//if the custom sync is defined in the cloud, this call should succeed.
//if it fails, we assume the custom sync is not defined
self.consoleLog('check has_custom_sync - success - ', res);
dataset.config.has_custom_sync = true;
return cb();
}, function(msg,err) {
self.consoleLog('check has_custom_sync - failure - ', err);
if(err.status && err.status === 500){
//if we receive 500, it could be that an error occurred due to missing parameters or similar,
//but the endpoint is defined.
self.consoleLog('check has_custom_sync - failed with 500, endpoint does exist');
dataset.config.has_custom_sync = true;
} else {
dataset.config.has_custom_sync = false;
}
return cb();
});
} else {
return cb();
}
},
doCloudCall: function(params, success, failure) {
var callbackCalled = false;
try {
var hasCustomSync = false;
var dataset = self.datasets[params.dataset_id];
if(dataset && dataset.config){
hasCustomSync = dataset.config.has_custom_sync;
}
if( hasCustomSync === true ) {
actAPI({
'act' : params.dataset_id,
'req' : params.req
}, function(res) {
callbackCalled = true;
success(res);
}, function(msg, err) {
callbackCalled = true;
failure(msg, err);
});
} else {
cloudAPI({
'path' : '/mbaas/sync/' + params.dataset_id,
'method' : 'post',
'data' : params.req
}, function(res) {
callbackCalled = true;
success(res);
}, function(msg, err) {
callbackCalled = true;
failure(msg, err);
});
}
}
catch (e) {
var msg = 'Exception in doCloudCall - ' + e;
self.consoleLog(msg);
// only call the failure callback if success/failure hasn't been called already
// This will prevent exceptions thrown in the success/failure callback resulting in that fn being called again
// i.e. only let the caller know about exceptions up to the point of the ajax call being made.
if (!callbackCalled) {
failure(msg, e);
}
}
},
datasetMonitor: function() {
self.checkDatasets();
// Re-execute datasetMonitor every 500ms so we keep invoking checkDatasets();
setTimeout(function() {
self.datasetMonitor();
}, 500);
},
getStorageAdapter: function(dataset_id, isSave, cb){
var onFail = function(msg, err){
var errMsg = (isSave?'save to': 'load from' ) + ' local storage failed msg: ' + msg + ' err: ' + err;
self.doNotify(dataset_id, null, self.notifications.CLIENT_STORAGE_FAILED, errMsg);
self.consoleLog(errMsg);
};
Lawnchair({fail:onFail, adapter: self.config.storage_strategy, size:self.config.file_system_quota, backup: self.config.icloud_backup}, function(){
return cb(null, this);
});
},
saveDataSet: function (dataset_id, cb) {
self.getDataSet(dataset_id, function(dataset) {
self.getStorageAdapter(dataset_id, true, function(err, storage){
storage.save({key:"dataset_" + dataset_id, val:dataset}, function(){
//save success
if(cb) {
return cb();
}
});
});
});
},
loadDataSet: function (dataset_id, success, failure) {
self.getStorageAdapter(dataset_id, false, function(err, storage){
storage.get( "dataset_" + dataset_id, function (data){
if (data && data.val) {
var dataset = data.val;
if(typeof dataset === "string"){
dataset = JSON.parse(dataset);
}
// Datasets should not be auto initialised when loaded - the manage function should be called for each dataset
// the user wants to sync
dataset.initialised = false;
self.datasets[dataset_id] = dataset; // TODO: do we need to handle binary data?
self.consoleLog('load from local storage success for dataset_id :' + dataset_id);
if(success) {
return success(dataset);
}
} else {
// no data yet, probably first time. failure callback should handle this
if(failure) {
return failure();
}
}
});
});
},
clearCache: function(dataset_id, cb){
delete self.datasets[dataset_id];
self.notify_callback_map[dataset_id] = null;
self.getStorageAdapter(dataset_id, true, function(err, storage){
storage.remove("dataset_" + dataset_id, function(){
self.consoleLog('local cache is cleared for dataset : ' + dataset_id);
if(cb){
return cb();
}
});
});
},
updateDatasetFromLocal: function(dataset, pendingRec) {
var pending = dataset.pending;
var previousPendingUid;
var previousPending;
var uid = pendingRec.uid;
self.consoleLog('updating local dataset for uid ' + uid + ' - action = ' + pendingRec.action);
dataset.meta[uid] = dataset.meta[uid] || {};
// Creating a new record
if( pendingRec.action === "create" ) {
if( dataset.data[uid] ) {
self.consoleLog('dataset already exists for uid in create :: ' + JSON.stringify(dataset.data[uid]));
// We are trying to do a create using a uid which already exists
if (dataset.meta[uid].fromPending) {
// We are trying to create on top of an existing pending record
// Remove the previous pending record and use this one instead
previousPendingUid = dataset.meta[uid].pendingUid;
delete pending[previousPendingUid];
}
}
dataset.data[uid] = {};
}
if( pendingRec.action === "update" ) {
if( dataset.data[uid] ) {
if (dataset.meta[uid].fromPending) {
self.consoleLog('updating an existing pending record for dataset :: ' + JSON.stringify(dataset.data[uid]));
// We are trying to update an existing pending record
previousPendingUid = dataset.meta[uid].pendingUid;
previousPending = pending[previousPendingUid];
if(previousPending) {
if(!previousPending.inFlight){
self.consoleLog('existing pre-flight pending record = ' + JSON.stringify(previousPending));
// We are trying to perform an update on an existing pending record
// modify the original record to have the latest value and delete the pending update
previousPending.post = pendingRec.post;
previousPending.postHash = pendingRec.postHash;
delete pending[pendingRec.hash];
// Update the pending record to have the hash of the previous record as this is what is now being
// maintained in the pending array & is what we want in the meta record
pendingRec.hash = previousPendingUid;
} else {
//we are performing changes to a pending record which is inFlight. Until the status of this pending record is resolved,
//we should not submit this pending record to the cloud. Mark it as delayed.
self.consoleLog('existing in-flight pending record = ' + JSON.stringify(previousPending));
pendingRec.delayed = true;
pendingRec.waiting = previousPending.hash;
}
}
}
}
}
if( pendingRec.action === "delete" ) {
if( dataset.data[uid] ) {
if (dataset.meta[uid].fromPending) {
self.consoleLog('Deleting an existing pending record for dataset :: ' + JSON.stringify(dataset.data[uid]));
// We are trying to delete an existing pending record
previousPendingUid = dataset.meta[uid].pendingUid;
previousPending = pending[previousPendingUid];
if( previousPending ) {
if(!previousPending.inFlight){
self.consoleLog('existing pending record = ' + JSON.stringify(previousPending));
if( previousPending.action === "create" ) {
// We are trying to perform a delete on an existing pending create
// These cancel each other out so remove them both
delete pending[pendingRec.hash];
delete pending[previousPendingUid];
}
if( previousPending.action === "update" ) {
// We are trying to perform a delete on an existing pending update
// Use the pre value from the pending update for the delete and
// get rid of the pending update
pendingRec.pre = previousPending.pre;
pendingRec.preHash = previousPending.preHash;
pendingRec.inFlight = false;
delete pending[previousPendingUid];
}
} else {
self.consoleLog('existing in-flight pending record = ' + JSON.stringify(previousPending));
pendingRec.delayed = true;
pendingRec.waiting = previousPending.hash;
}
}
}
delete dataset.data[uid];
}
}
if( dataset.data[uid] ) {
dataset.data[uid].data = pendingRec.post;
dataset.data[uid].hash = pendingRec.postHash;
dataset.meta[uid].fromPending = true;
dataset.meta[uid].pendingUid = pendingRec.hash;
}
},
updateCrashedInFlightFromNewData: function(dataset_id, dataset, newData) {
var updateNotifications = {
applied: self.notifications.REMOTE_UPDATE_APPLIED,
failed: self.notifications.REMOTE_UPDATE_FAILED,
collisions: self.notifications.COLLISION_DETECTED
};
var pending = dataset.pending;
var pendingHash;
var pendingRec;
if( pending ) {
for( pendingHash in pending ) {
if( pending.hasOwnProperty(pendingHash) ) {
pendingRec = pending[pendingHash];
if( pendingRec.inFlight && pendingRec.crashed) {
self.consoleLog('updateCrashedInFlightFromNewData - Found crashed inFlight pending record uid=' + pendingRec.uid + ' :: hash=' + pendingRec.hash );
if( newData && newData.updates && newData.updates.hashes) {
// Check if the updates received contain any info about the crashed in flight update
var crashedUpdate = newData.updates.hashes[pendingHash];
if( !crashedUpdate ) {
//TODO: review this - why do we need to wait?
// No word on our crashed update - increment a counter to reflect another sync that did not give us
// any update on our crashed record.
if( pendingRec.crashedCount ) {
pendingRec.crashedCount++;
}
else {
pendingRec.crashedCount = 1;
}
}
}
else {
// No word on our crashed update - increment a counter to reflect another sync that did not give us
// any update on our crashed record.
if( pendingRec.crashedCount ) {
pendingRec.crashedCount++;
}
else {
pendingRec.crashedCount = 1;
}
}
}
}
}
for( pendingHash in pending ) {
if( pending.hasOwnProperty(pendingHash) ) {
pendingRec = pending[pendingHash];
if( pendingRec.inFlight && pendingRec.crashed) {
if( pendingRec.crashedCount > dataset.config.crashed_count_wait ) {
self.consoleLog('updateCrashedInFlightFromNewData - Crashed inflight pending record has reached crashed_count_wait limit : ' + JSON.stringify(pendingRec));
self.consoleLog('updateCrashedInFlightFromNewData - Retrying crashed inflight pending record');
pendingRec.crashed = false;
pendingRec.inFlight = false;
}
}
}
}
}
},
updateDelayedFromNewData: function(dataset_id, dataset, newData){
var pending = dataset.pending;
var pendingHash;
var pendingRec;
if(pending){
for( pendingHash in pending ){
if( pending.hasOwnProperty(pendingHash) ){
pendingRec = pending[pendingHash];
if( pendingRec.delayed && pendingRec.waiting ){
self.consoleLog('updateDelayedFromNewData - Found delayed pending record uid=' + pendingRec.uid + ' :: hash=' + pendingRec.hash + ' :: waiting=' + pendingRec.waiting);
if( newData && newData.updates && newData.updates.hashes ){
var waitingRec = newData.updates.hashes[pendingRec.waiting];
if(waitingRec){
self.consoleLog('updateDelayedFromNewData - Waiting pending record is resolved rec=' + JSON.stringify(waitingRec));
pendingRec.delayed = false;
pendingRec.waiting = undefined;
}
}
}
}
}
}
},
updateMetaFromNewData: function(dataset_id, dataset, newData){
var meta = dataset.meta;
if(meta && newData && newData.updates && newData.updates.hashes){
for(var uid in meta){
if(meta.hasOwnProperty(uid)){
var metadata = meta[uid];
var pendingHash = metadata.pendingUid;
self.consoleLog("updateMetaFromNewData - Found metadata with uid = " + uid + " :: pendingHash = " + pendingHash);
var pendingResolved = true;
if(pendingHash){
//we have current pending in meta data, see if it's resolved
pendingResolved = false;
var hashresolved = newData.updates.hashes[pendingHash];
if(hashresolved){
self.consoleLog("updateMetaFromNewData - Found pendingUid in meta data resolved - resolved = " + JSON.stringify(hashresolved));
//the current pending is resolved in the cloud
metadata.pendingUid = undefined;
pendingResolved = true;
}
}
if(pendingResolved){
self.consoleLog("updateMetaFromNewData - both previous and current pendings are resolved for meta data with uid " + uid + ". Delete it.");
//all pendings are resolved, the entry can be removed from meta data
delete meta[uid];
}
}
}
}
},
markInFlightAsCrashed : function(dataset) {
var pending = dataset.pending;
var pendingHash;
var pendingRec;
if( pending ) {
for( pendingHash in pending ) {
if( pending.hasOwnProperty(pendingHash) ) {
pendingRec = pending[pendingHash];
if( pendingRec.inFlight ) {
self.consoleLog('Marking in flight pending record as crashed : ' + pendingHash);
pendingRec.crashed = true;
}
}
}
}
},
consoleLog: function(msg) {
if( self.config.do_console_log ) {
console.log(msg);
}
}
};
(function() {
self.config = self.defaults;
//Initialise the sync service with default config
//self.init({});
})();
module.exports = {
init: self.init,
manage: self.manage,
notify: self.notify,
doList: self.list,
getUID: self.getUID,
doCreate: self.create,
doRead: self.read,
doUpdate: self.update,
doDelete: self['delete'],
listCollisions: self.listCollisions,
removeCollision: self.removeCollision,
getPending : self.getPending,
clearPending : self.clearPending,
getDataset : self.getDataSet,
getQueryParams: self.getQueryParams,
setQueryParams: self.setQueryParams,
getMetaData: self.getMetaData,
setMetaData: self.setMetaData,
getConfig: self.getConfig,
setConfig: self.setConfig,
startSync: self.startSync,
stopSync: self.stopSync,
doSync: self.doSync,
forceSync: self.forceSync,
generateHash: self.generateHash,
loadDataSet: self.loadDataSet,
checkHasCustomSync: self.checkHasCustomSync,
clearCache: self.clearCache,
doCloudCall: self.doCloudCall
};
},{"../../libs/generated/crypto":1,"../../libs/generated/lawnchair":2,"./api_act":22,"./api_cloud":24}],51:[function(_dereq_,module,exports){
module.exports = {
createUUID : function () {
//from http://stackoverflow.com/questions/105034/how-to-create-a-guid-uuid-in-javascript
//based on RFC 4122, section 4.4 (Algorithms for creating a UUID from truly random or pseudo-random numbers)
var s = [];
var hexDigitals = "0123456789ABCDEF";
for (var i = 0; i < 32; i++) {
s[i] = hexDigitals.substr(Math.floor(Math.random() * 0x10), 1);
}
s[12] = "4";
s[16] = hexDigitals.substr((parseInt(s[16], 16) & 0x3) | 0x8, 1); // variant nibble: 8, 9, A or B
var uuid = s.join("");
return uuid;
}
};
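// Sketch: createUUID() returns a 32-character uppercase hex string (no dashes)
// with the version nibble forced to "4", e.g. "3F2504E04F8941D39A0C0305E82C3301"
// (an example value, not a real output).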
},{}],52:[function(_dereq_,module,exports){
var initializer = _dereq_("./initializer");
var events = _dereq_("./events");
var CloudHost = _dereq_("./hosts");
var constants = _dereq_("./constants");
var logger = _dereq_("./logger");
var data = _dereq_('./data');
var fhparams = _dereq_('./fhparams');
//the cloud configurations
var cloud_host;
var is_initialising = false;
var is_cloud_ready = false;
var init_error = null;
var ready = function(cb){
if(is_cloud_ready){
return cb(null, {host: getCloudHostUrl()});
} else {
events.once(constants.INIT_EVENT, function(err, host){
return cb(err, host);
});
if(!is_initialising){
is_initialising = true;
var fhinit = function(){
data.sessionManager.read(function(err, session){
//load the persisted sessionToken and set it for the session
if(session && session.sessionToken){
fhparams.setAuthSessionToken(session.sessionToken);
}
initializer.init(function(err, initRes){
is_initialising = false;
if(err){
init_error = err;
return events.emit(constants.INIT_EVENT, err);
} else {
init_error = null;
is_cloud_ready = true;
cloud_host = new CloudHost(initRes.cloud);
return events.emit(constants.INIT_EVENT, null, {host: getCloudHostUrl()});
}
});
});
};
if(typeof window.cordova !== "undefined" || typeof window.phonegap !== "undefined"){
//if we are running inside cordova/phonegap, only init after device is ready to ensure the device id is the right one
document.addEventListener("deviceready", fhinit, false);
} else {
fhinit();
}
}
}
};
var getCloudHost = function(){
return cloud_host;
};
var getCloudHostUrl = function(){
if(typeof cloud_host !== "undefined"){
var appProps = _dereq_("./appProps").getAppProps();
return cloud_host.getHost(appProps.mode);
} else {
return undefined;
}
};
var isReady = function(){
return is_cloud_ready;
};
var getInitError = function(){
return init_error;
};
//for test
var reset = function(){
is_cloud_ready = false;
is_initialising = false;
cloud_host = undefined;
init_error = undefined;
ready(function(){
});
};
ready(function(error, host){
if(error){
if(error.message !== "app_config_missing"){
logger.error("Failed to initialise fh.");
} else {
logger.info("No fh config file");
}
} else {
logger.info("fh cloud is ready");
}
});
module.exports = {
ready: ready,
isReady: isReady,
getCloudHost: getCloudHost,
getCloudHostUrl: getCloudHostUrl,
getInitError: getInitError,
reset: reset
};
},{"./appProps":29,"./constants":31,"./data":33,"./events":35,"./fhparams":36,"./hosts":38,"./initializer":39,"./logger":42}]},{},[19])
(19)
});
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}],2:[function(require,module,exports){
var $fh = require('fh-js-sdk');
},{"fh-js-sdk":1}]},{},[2]);
| bnpCopyTo |
mod.rs | use crate::{
binding_model, command, conv,
device::life::WaitIdleError,
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Hub, Input, InvalidId, Storage, Token},
id,
init_tracker::{
BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
TextureInitTracker, TextureInitTrackerAction,
},
instance, pipeline, present, resource,
track::{BindGroupStates, TextureSelector, Tracker},
validation::{self, check_buffer_usage, check_texture_usage},
FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored,
SubmissionIndex, DOWNLEVEL_ERROR_MESSAGE,
};
use arrayvec::ArrayVec;
use copyless::VecHelper as _;
use hal::{CommandEncoder as _, Device as _};
use parking_lot::{Mutex, MutexGuard};
use smallvec::SmallVec;
use thiserror::Error;
use wgt::{BufferAddress, TextureFormat, TextureViewDimension};
use std::{borrow::Cow, iter, mem, num::NonZeroU32, ops::Range, ptr};
mod life;
pub mod queue;
#[cfg(any(feature = "trace", feature = "replay"))]
pub mod trace;
pub const SHADER_STAGE_COUNT: usize = 3;
// Should be large enough for the largest possible texture row. This value is enough for a 16k texture with float4 format.
pub(crate) const ZERO_BUFFER_SIZE: BufferAddress = 512 << 10;
const CLEANUP_WAIT_MS: u32 = 5000;
const IMPLICIT_FAILURE: &str = "failed implicit";
const EP_FAILURE: &str = "EP is invalid";
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum HostMap {
Read,
Write,
}
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct AttachmentData<T> {
pub colors: ArrayVec<T, { hal::MAX_COLOR_TARGETS }>,
pub resolves: ArrayVec<T, { hal::MAX_COLOR_TARGETS }>,
pub depth_stencil: Option<T>,
}
impl<T: PartialEq> Eq for AttachmentData<T> {}
impl<T> AttachmentData<T> {
pub(crate) fn map<U, F: Fn(&T) -> U>(&self, fun: F) -> AttachmentData<U> {
AttachmentData {
colors: self.colors.iter().map(&fun).collect(),
resolves: self.resolves.iter().map(&fun).collect(),
depth_stencil: self.depth_stencil.as_ref().map(&fun),
}
}
}
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct RenderPassContext {
pub attachments: AttachmentData<TextureFormat>,
pub sample_count: u32,
pub multiview: Option<NonZeroU32>,
}
#[derive(Clone, Debug, Error)]
pub enum RenderPassCompatibilityError {
#[error("Incompatible color attachment: the renderpass expected {0:?} but was given {1:?}")]
IncompatibleColorAttachment(
ArrayVec<TextureFormat, { hal::MAX_COLOR_TARGETS }>,
ArrayVec<TextureFormat, { hal::MAX_COLOR_TARGETS }>,
),
#[error(
"Incompatible depth-stencil attachment: the renderpass expected {0:?} but was given {1:?}"
)]
IncompatibleDepthStencilAttachment(Option<TextureFormat>, Option<TextureFormat>),
#[error("Incompatible sample count: the renderpass expected {0:?} but was given {1:?}")]
IncompatibleSampleCount(u32, u32),
#[error("Incompatible multiview: the renderpass expected {0:?} but was given {1:?}")]
IncompatibleMultiview(Option<NonZeroU32>, Option<NonZeroU32>),
}
impl RenderPassContext {
// Assumes the renderpass only contains one subpass
pub(crate) fn check_compatible(
&self,
other: &Self,
) -> Result<(), RenderPassCompatibilityError> {
if self.attachments.colors != other.attachments.colors {
return Err(RenderPassCompatibilityError::IncompatibleColorAttachment(
self.attachments.colors.clone(),
other.attachments.colors.clone(),
));
}
if self.attachments.depth_stencil != other.attachments.depth_stencil {
return Err(
RenderPassCompatibilityError::IncompatibleDepthStencilAttachment(
self.attachments.depth_stencil,
other.attachments.depth_stencil,
),
);
}
if self.sample_count != other.sample_count {
return Err(RenderPassCompatibilityError::IncompatibleSampleCount(
self.sample_count,
other.sample_count,
));
}
if self.multiview != other.multiview {
return Err(RenderPassCompatibilityError::IncompatibleMultiview(
self.multiview,
other.multiview,
));
}
Ok(())
}
}
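// Sketch of the intended use: code that replays work recorded against one pass
// context inside another (e.g. executing a render bundle in a render pass)
// calls `check_compatible` first and surfaces the error instead of recording
// an invalid pass:
//
//     bundle_context.check_compatible(&pass_context)?;
//
// (`bundle_context`/`pass_context` are illustrative names, not items from this
// module.)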
pub type BufferMapPendingClosure = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
#[derive(Default)]
pub struct UserClosures {
pub mappings: Vec<BufferMapPendingClosure>,
pub submissions: SmallVec<[queue::SubmittedWorkDoneClosure; 1]>,
}
impl UserClosures {
fn extend(&mut self, other: Self) {
self.mappings.extend(other.mappings);
self.submissions.extend(other.submissions);
}
fn fire(self) {
// Note: this logic is specifically moved out of `handle_mapping()` in order to
// have nothing locked by the time we execute the user's callback code.
for (operation, status) in self.mappings {
operation.callback.call(status);
}
for closure in self.submissions {
closure.call();
}
}
}
fn map_buffer<A: hal::Api>(
raw: &A::Device,
buffer: &mut resource::Buffer<A>,
offset: BufferAddress,
size: BufferAddress,
kind: HostMap,
) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
let mapping = unsafe {
raw.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size)
.map_err(DeviceError::from)?
};
buffer.sync_mapped_writes = match kind {
HostMap::Read if !mapping.is_coherent => unsafe {
raw.invalidate_mapped_ranges(
buffer.raw.as_ref().unwrap(),
iter::once(offset..offset + size),
);
None
},
HostMap::Write if !mapping.is_coherent => Some(offset..offset + size),
_ => None,
};
assert_eq!(offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
assert_eq!(size % wgt::COPY_BUFFER_ALIGNMENT, 0);
// Zero out uninitialized parts of the mapping. (Spec dictates all resources behave as if they were initialized with zero)
//
// If this is a read mapping, ideally we would use a `clear_buffer` command before reading the data from the GPU (i.e. `invalidate_range`).
// However, this would require us to kick off and wait for a command buffer, or piggyback on an existing one (the latter is likely the only worthwhile option).
// As reading uninitialized memory isn't a particularly important path to support,
// we instead just initialize the memory here and make sure it is GPU visible, so this happens at most once for every buffer region.
//
// If this is a write mapping, zeroing out the memory here is the only reasonable way, as all data is pushed to the GPU anyway.
let zero_init_needs_flush_now = mapping.is_coherent && buffer.sync_mapped_writes.is_none(); // No need to flush if it is flushed later anyways.
for uninitialized_range in buffer.initialization_status.drain(offset..(size + offset)) {
let num_bytes = uninitialized_range.end - uninitialized_range.start;
unsafe {
ptr::write_bytes(
mapping
.ptr
.as_ptr()
.offset(uninitialized_range.start as isize),
0,
num_bytes as usize,
)
};
if zero_init_needs_flush_now {
unsafe {
raw.flush_mapped_ranges(
buffer.raw.as_ref().unwrap(),
iter::once(uninitialized_range.start..uninitialized_range.start + num_bytes),
)
};
}
}
Ok(mapping.ptr)
}
struct CommandAllocator<A: hal::Api> {
free_encoders: Vec<A::CommandEncoder>,
}
impl<A: hal::Api> CommandAllocator<A> {
fn acquire_encoder(
&mut self,
device: &A::Device,
queue: &A::Queue,
) -> Result<A::CommandEncoder, hal::DeviceError> {
match self.free_encoders.pop() {
Some(encoder) => Ok(encoder),
None => unsafe {
let hal_desc = hal::CommandEncoderDescriptor { label: None, queue };
device.create_command_encoder(&hal_desc)
},
}
}
fn release_encoder(&mut self, encoder: A::CommandEncoder) {
    self.free_encoders.push(encoder);
}
fn dispose(self, device: &A::Device) {
log::info!("Destroying {} command encoders", self.free_encoders.len());
for cmd_encoder in self.free_encoders {
unsafe {
device.destroy_command_encoder(cmd_encoder);
}
}
}
}
/// Structure describing a logical device. Some members are internally mutable,
/// stored behind mutexes.
/// TODO: establish clear order of locking for these:
/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`,
/// `render_passes`, `pending_writes`, `trace`.
///
/// Currently, the rules are:
/// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system
/// 1. `self.trackers` is locked last (unenforced)
/// 1. `self.trace` is locked last (unenforced)
pub struct Device<A: HalApi> {
pub(crate) raw: A::Device,
pub(crate) adapter_id: Stored<id::AdapterId>,
pub(crate) queue: A::Queue,
pub(crate) zero_buffer: A::Buffer,
//pub(crate) cmd_allocator: command::CommandAllocator<A>,
//mem_allocator: Mutex<alloc::MemoryAllocator<A>>,
//desc_allocator: Mutex<descriptor::DescriptorAllocator<A>>,
//Note: The submission index here corresponds to the last submission that is done.
pub(crate) life_guard: LifeGuard,
/// A clone of `life_guard.ref_count`.
///
/// Holding a separate clone of the `RefCount` here lets us tell whether the
/// device is referenced by other resources, even if `life_guard.ref_count`
/// was set to `None` by a call to `device_drop`.
ref_count: RefCount,
command_allocator: Mutex<CommandAllocator<A>>,
pub(crate) active_submission_index: SubmissionIndex,
fence: A::Fence,
/// All live resources allocated with this [`Device`].
///
/// Has to be locked temporarily only (locked last)
pub(crate) trackers: Mutex<Tracker<A>>,
// Life tracker should be locked right after the device and before anything else.
life_tracker: Mutex<life::LifetimeTracker<A>>,
/// Temporary storage for resource management functions. Cleared at the end
/// of every call (unless an error occurs).
temp_suspected: life::SuspectedResources,
pub(crate) alignments: hal::Alignments,
pub(crate) limits: wgt::Limits,
pub(crate) features: wgt::Features,
pub(crate) downlevel: wgt::DownlevelCapabilities,
//TODO: move this behind another mutex. This would allow several methods to switch
// to borrow Device immutably, such as `write_buffer`, `write_texture`, and `buffer_unmap`.
pending_writes: queue::PendingWrites<A>,
#[cfg(feature = "trace")]
pub(crate) trace: Option<Mutex<trace::Trace>>,
}
#[derive(Clone, Debug, Error)]
pub enum CreateDeviceError {
#[error("not enough memory left")]
OutOfMemory,
#[error("failed to create internal buffer for initializing textures")]
FailedToCreateZeroBuffer(#[from] DeviceError),
}
impl<A: HalApi> Device<A> {
pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
if self.features.contains(feature) {
Ok(())
} else {
Err(MissingFeatures(feature))
}
}
pub(crate) fn require_downlevel_flags(
&self,
flags: wgt::DownlevelFlags,
) -> Result<(), MissingDownlevelFlags> {
if self.downlevel.flags.contains(flags) {
Ok(())
} else {
Err(MissingDownlevelFlags(flags))
}
}
}
impl<A: HalApi> Device<A> {
pub(crate) fn new(
open: hal::OpenDevice<A>,
adapter_id: Stored<id::AdapterId>,
alignments: hal::Alignments,
downlevel: wgt::DownlevelCapabilities,
desc: &DeviceDescriptor,
trace_path: Option<&std::path::Path>,
) -> Result<Self, CreateDeviceError> {
#[cfg(not(feature = "trace"))]
if trace_path.is_some() {
log::error!("Feature 'trace' is not enabled");
}
let fence =
unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?;
let mut com_alloc = CommandAllocator {
free_encoders: Vec::new(),
};
let pending_encoder = com_alloc
.acquire_encoder(&open.device, &open.queue)
.map_err(|_| CreateDeviceError::OutOfMemory)?;
let mut pending_writes = queue::PendingWrites::<A>::new(pending_encoder);
// Create zeroed buffer used for texture clears.
let zero_buffer = unsafe {
open.device
.create_buffer(&hal::BufferDescriptor {
label: Some("(wgpu internal) zero init buffer"),
size: ZERO_BUFFER_SIZE,
usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST,
memory_flags: hal::MemoryFlags::empty(),
})
.map_err(DeviceError::from)?
};
pending_writes.activate();
unsafe {
pending_writes
.command_encoder
.transition_buffers(iter::once(hal::BufferBarrier {
buffer: &zero_buffer,
usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
}));
pending_writes
.command_encoder
.clear_buffer(&zero_buffer, 0..ZERO_BUFFER_SIZE);
pending_writes
.command_encoder
.transition_buffers(iter::once(hal::BufferBarrier {
buffer: &zero_buffer,
usage: hal::BufferUses::COPY_DST..hal::BufferUses::COPY_SRC,
}));
}
let life_guard = LifeGuard::new("<device>");
let ref_count = life_guard.add_ref();
Ok(Self {
raw: open.device,
adapter_id,
queue: open.queue,
zero_buffer,
life_guard,
ref_count,
command_allocator: Mutex::new(com_alloc),
active_submission_index: 0,
fence,
trackers: Mutex::new(Tracker::new()),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: life::SuspectedResources::default(),
#[cfg(feature = "trace")]
trace: trace_path.and_then(|path| match trace::Trace::new(path) {
Ok(mut trace) => {
trace.add(trace::Action::Init {
desc: desc.clone(),
backend: A::VARIANT,
});
Some(Mutex::new(trace))
}
Err(e) => {
log::error!("Unable to start a trace in '{:?}': {:?}", path, e);
None
}
}),
alignments,
limits: desc.limits.clone(),
features: desc.features,
downlevel,
pending_writes,
})
}
fn lock_life<'this, 'token: 'this>(
&'this self,
//TODO: fix this - the token has to be borrowed for the lock
_token: &mut Token<'token, Self>,
) -> MutexGuard<'this, life::LifetimeTracker<A>> {
self.life_tracker.lock()
}
/// Check this device for completed commands.
///
/// The `maintain` argument tells how the maintenance function should behave, either
/// blocking or just polling the current state of the GPU.
///
/// Return a pair `(closures, queue_empty)`, where:
///
/// - `closures` is a list of actions to take: mapping buffers, notifying the user
///
/// - `queue_empty` is a boolean indicating whether there are no more queue
/// submissions still in flight. (We have to take the locks needed to
/// produce this information for other reasons, so we might as well just
/// return it to our callers.)
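///
/// A sketch of the calling pattern (assumed shape, not a doctest):
///
/// ```ignore
/// let (closures, queue_empty) =
///     device.maintain(hub, wgt::Maintain::Poll, &mut token)?;
/// // Drop all hub locks/tokens before firing, so user callbacks
/// // never run with internal locks held.
/// closures.fire();
/// ```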
fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this self,
hub: &Hub<A, G>,
maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
token: &mut Token<'token, Self>,
) -> Result<(UserClosures, bool), WaitIdleError> {
profiling::scope!("maintain", "Device");
let mut life_tracker = self.lock_life(token);
// Normally, `temp_suspected` exists only to save heap
// allocations: it's cleared at the start of the function
// call, and cleared by the end. But `Global::queue_submit` is
// fallible; if it exits early, it may leave some resources in
// `temp_suspected`.
life_tracker
.suspected_resources
.extend(&self.temp_suspected);
life_tracker.triage_suspected(
hub,
&self.trackers,
#[cfg(feature = "trace")]
self.trace.as_ref(),
token,
);
life_tracker.triage_mapped(hub, token);
let last_done_index = if maintain.is_wait() {
let index_to_wait_for = match maintain {
wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
// We don't need to check to see if the queue id matches
// as we already checked this from inside the poll call.
submission_index.index
}
_ => self.active_submission_index,
};
unsafe {
self.raw
.wait(&self.fence, index_to_wait_for, CLEANUP_WAIT_MS)
.map_err(DeviceError::from)?
};
index_to_wait_for
} else {
unsafe {
self.raw
.get_fence_value(&self.fence)
.map_err(DeviceError::from)?
}
};
let submission_closures =
life_tracker.triage_submissions(last_done_index, &self.command_allocator);
let mapping_closures = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
life_tracker.cleanup(&self.raw);
let closures = UserClosures {
mappings: mapping_closures,
submissions: submission_closures,
};
Ok((closures, life_tracker.queue_empty()))
}
fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this mut self,
hub: &Hub<A, G>,
trackers: &Tracker<A>,
token: &mut Token<'token, Self>,
) {
self.temp_suspected.clear();
// As the tracker is cleared/dropped, we need to consider all the resources
// that it references for destruction in the next GC pass.
{
let (bind_group_guard, mut token) = hub.bind_groups.read(token);
let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token);
for id in trackers.buffers.used() {
if buffer_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.buffers.push(id);
}
}
for id in trackers.textures.used() {
if texture_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.textures.push(id);
}
}
for id in trackers.views.used() {
if texture_view_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.texture_views.push(id);
}
}
for id in trackers.bind_groups.used() {
if bind_group_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.bind_groups.push(id);
}
}
for id in trackers.samplers.used() {
if sampler_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.samplers.push(id);
}
}
for id in trackers.compute_pipelines.used() {
if compute_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.compute_pipelines.push(id);
}
}
for id in trackers.render_pipelines.used() {
if render_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.render_pipelines.push(id);
}
}
for id in trackers.query_sets.used() {
if query_set_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.query_sets.push(id);
}
}
}
self.lock_life(token)
.suspected_resources
.extend(&self.temp_suspected);
self.temp_suspected.clear();
}
fn create_buffer(
&self,
self_id: id::DeviceId,
desc: &resource::BufferDescriptor,
transient: bool,
) -> Result<resource::Buffer<A>, resource::CreateBufferError> {
debug_assert_eq!(self_id.backend(), A::VARIANT);
let mut usage = conv::map_buffer_usage(desc.usage);
if desc.usage.is_empty() {
return Err(resource::CreateBufferError::EmptyUsage);
}
if desc.mapped_at_creation {
if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(resource::CreateBufferError::UnalignedSize);
}
if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
// we are going to be copying into it, internally
usage |= hal::BufferUses::COPY_DST;
}
} else {
// We are required to zero out (initialize) all memory.
// This is done on demand using clear_buffer which requires write transfer usage!
usage |= hal::BufferUses::COPY_DST;
}
let actual_size = if desc.size == 0 {
wgt::COPY_BUFFER_ALIGNMENT
} else if desc.usage.contains(wgt::BufferUsages::VERTEX) {
// Bumping the size by 1 so that we can bind an empty range at the end of the buffer.
desc.size + 1
} else {
desc.size
};
let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT;
let aligned_size = if clear_remainder != 0 {
actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder
} else {
actual_size
};
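// Worked example (assuming wgt::COPY_BUFFER_ALIGNMENT == 4): a vertex buffer
// requested with size 10 gets actual_size 11 from the +1 padding above, and a
// clear_remainder of 3 rounds aligned_size up to 12.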
let mut memory_flags = hal::MemoryFlags::empty();
memory_flags.set(hal::MemoryFlags::TRANSIENT, transient);
let hal_desc = hal::BufferDescriptor {
label: desc.label.borrow_option(),
size: aligned_size,
usage,
memory_flags,
};
let buffer = unsafe { self.raw.create_buffer(&hal_desc) }.map_err(DeviceError::from)?;
Ok(resource::Buffer {
raw: Some(buffer),
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
usage: desc.usage,
size: desc.size,
initialization_status: BufferInitTracker::new(desc.size),
sync_mapped_writes: None,
map_state: resource::BufferMapState::Idle,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
})
}
fn create_texture_from_hal(
&self,
hal_texture: A::Texture,
hal_usage: hal::TextureUses,
self_id: id::DeviceId,
desc: &resource::TextureDescriptor,
format_features: wgt::TextureFormatFeatures,
clear_mode: resource::TextureClearMode<A>,
) -> resource::Texture<A> {
debug_assert_eq!(self_id.backend(), A::VARIANT);
resource::Texture {
inner: resource::TextureInner::Native {
raw: Some(hal_texture),
},
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
desc: desc.map_label(|_| ()),
hal_usage,
format_features,
initialization_status: TextureInitTracker::new(
desc.mip_level_count,
desc.array_layer_count(),
),
full_range: TextureSelector {
mips: 0..desc.mip_level_count,
layers: 0..desc.array_layer_count(),
},
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
clear_mode,
}
}
fn create_texture(
&self,
self_id: id::DeviceId,
adapter: &crate::instance::Adapter<A>,
desc: &resource::TextureDescriptor,
) -> Result<resource::Texture<A>, resource::CreateTextureError> {
use resource::{CreateTextureError, TextureDimensionError};
if desc.usage.is_empty() {
return Err(CreateTextureError::EmptyUsage);
}
conv::check_texture_dimension_size(
desc.dimension,
desc.size,
desc.sample_count,
&self.limits,
)?;
let format_desc = desc.format.describe();
if desc.dimension != wgt::TextureDimension::D2 {
// Depth textures can only be 2D
if format_desc.sample_type == wgt::TextureSampleType::Depth {
return Err(CreateTextureError::InvalidDepthDimension(
desc.dimension,
desc.format,
));
}
// Renderable textures can only be 2D
if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::InvalidDimensionUsages(
wgt::TextureUsages::RENDER_ATTACHMENT,
desc.dimension,
));
}
// Compressed textures can only be 2D
if format_desc.is_compressed() {
return Err(CreateTextureError::InvalidCompressedDimension(
desc.dimension,
desc.format,
));
}
}
if format_desc.is_compressed() {
let block_width = format_desc.block_dimensions.0 as u32;
let block_height = format_desc.block_dimensions.1 as u32;
if desc.size.width % block_width != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockWidth {
width: desc.size.width,
block_width,
format: desc.format,
},
));
}
if desc.size.height % block_height != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockHeight {
height: desc.size.height,
block_height,
format: desc.format,
},
));
}
}
if desc.sample_count > 1 {
if desc.mip_level_count != 1 {
return Err(CreateTextureError::InvalidMipLevelCount {
requested: desc.mip_level_count,
maximum: 1,
});
}
if desc.size.depth_or_array_layers != 1 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::MultisampledDepthOrArrayLayer(
desc.size.depth_or_array_layers,
),
));
}
if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) {
return Err(CreateTextureError::InvalidMultisampledStorageBinding);
}
if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::MultisampledNotRenderAttachment);
}
if !format_desc
.guaranteed_format_features
.flags
.contains(wgt::TextureFormatFeatureFlags::MULTISAMPLE)
{
return Err(CreateTextureError::InvalidMultisampledFormat(desc.format));
}
}
let mips = desc.mip_level_count;
let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS);
if mips == 0 || mips > max_levels_allowed {
return Err(CreateTextureError::InvalidMipLevelCount {
requested: mips,
maximum: max_levels_allowed,
});
}
let format_features = self
.describe_format_features(adapter, desc.format)
.map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
let missing_allowed_usages = desc.usage - format_features.allowed_usages;
if !missing_allowed_usages.is_empty() {
return Err(CreateTextureError::InvalidFormatUsages(
missing_allowed_usages,
desc.format,
));
}
// TODO: validate missing TextureDescriptor::view_formats.
// Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET, otherwise we wouldn't be able to initialize the texture.
let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into())
| if format_desc.sample_type == wgt::TextureSampleType::Depth {
hal::TextureUses::DEPTH_STENCIL_WRITE
} else if desc.usage.contains(wgt::TextureUsages::COPY_DST) {
hal::TextureUses::COPY_DST // (set already)
} else {
// Use COPY_DST only if we can't use COLOR_TARGET
if format_features
.allowed_usages
.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
&& desc.dimension != wgt::TextureDimension::D3
// Render targets into 3D textures are not supported
{
hal::TextureUses::COLOR_TARGET
} else {
hal::TextureUses::COPY_DST
}
};
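// For example (hypothetical descriptor): an RGBA8 texture created with only
// TEXTURE_BINDING still gets COLOR_TARGET added here so that zero
// initialization can clear it via a render pass (see the clear views created
// below); a depth format gets DEPTH_STENCIL_WRITE instead.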
let hal_desc = hal::TextureDescriptor {
label: desc.label.borrow_option(),
size: desc.size,
mip_level_count: desc.mip_level_count,
sample_count: desc.sample_count,
dimension: desc.dimension,
format: desc.format,
usage: hal_usage,
memory_flags: hal::MemoryFlags::empty(),
};
let raw_texture = unsafe {
self.raw
.create_texture(&hal_desc)
.map_err(DeviceError::from)?
};
let clear_mode = if hal_usage
.intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET)
{
let (is_color, usage) =
if desc.format.describe().sample_type == wgt::TextureSampleType::Depth {
(false, hal::TextureUses::DEPTH_STENCIL_WRITE)
} else {
(true, hal::TextureUses::COLOR_TARGET)
};
let dimension = match desc.dimension {
wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1,
wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2,
wgt::TextureDimension::D3 => unreachable!(),
};
let mut clear_views = SmallVec::new();
for mip_level in 0..desc.mip_level_count {
for array_layer in 0..desc.size.depth_or_array_layers {
let desc = hal::TextureViewDescriptor {
label: Some("(wgpu internal) clear texture view"),
format: desc.format,
dimension,
usage,
range: wgt::ImageSubresourceRange {
aspect: wgt::TextureAspect::All,
base_mip_level: mip_level,
mip_level_count: NonZeroU32::new(1),
base_array_layer: array_layer,
array_layer_count: NonZeroU32::new(1),
},
};
clear_views.push(
unsafe { self.raw.create_texture_view(&raw_texture, &desc) }
.map_err(DeviceError::from)?,
);
}
}
resource::TextureClearMode::RenderPass {
clear_views,
is_color,
}
} else {
resource::TextureClearMode::BufferCopy
};
let mut texture = self.create_texture_from_hal(
raw_texture,
hal_usage,
self_id,
desc,
format_features,
clear_mode,
);
texture.hal_usage = hal_usage;
Ok(texture)
}
fn create_texture_view(
&self,
texture: &resource::Texture<A>,
texture_id: id::TextureId,
desc: &resource::TextureViewDescriptor,
) -> Result<resource::TextureView<A>, resource::CreateTextureViewError> {
let texture_raw = texture
.inner
.as_raw()
.ok_or(resource::CreateTextureViewError::InvalidTexture)?;
let view_dim = match desc.dimension {
Some(dim) => {
// check if the dimension is compatible with the texture
if texture.desc.dimension != dim.compatible_texture_dimension() {
return Err(
resource::CreateTextureViewError::InvalidTextureViewDimension {
view: dim,
texture: texture.desc.dimension,
},
);
}
// check if multisampled texture is seen as anything but 2D
match dim {
wgt::TextureViewDimension::D2 | wgt::TextureViewDimension::D2Array => {}
_ if texture.desc.sample_count > 1 => {
return Err(resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension(dim));
}
_ => {}
}
dim
}
None => match texture.desc.dimension {
wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1,
wgt::TextureDimension::D2 if texture.desc.size.depth_or_array_layers > 1 => {
wgt::TextureViewDimension::D2Array
}
wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2,
wgt::TextureDimension::D3 => wgt::TextureViewDimension::D3,
},
};
let required_level_count =
desc.range.base_mip_level + desc.range.mip_level_count.map_or(1, |count| count.get());
let required_layer_count = match desc.range.array_layer_count {
Some(count) => desc.range.base_array_layer + count.get(),
None => match view_dim {
wgt::TextureViewDimension::D1
| wgt::TextureViewDimension::D2
| wgt::TextureViewDimension::D3 => 1,
wgt::TextureViewDimension::Cube => 6,
_ => texture.desc.array_layer_count(),
},
};
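// Example (hypothetical view): base_mip_level 2 with mip_level_count Some(3)
// requires levels up to 5, while a Cube view with no explicit
// array_layer_count always requires 6 layers.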
let level_end = texture.full_range.mips.end;
let layer_end = texture.full_range.layers.end;
if required_level_count > level_end {
return Err(resource::CreateTextureViewError::TooManyMipLevels {
requested: required_level_count,
total: level_end,
});
}
if required_layer_count > layer_end {
return Err(resource::CreateTextureViewError::TooManyArrayLayers {
requested: required_layer_count,
total: layer_end,
});
};
match view_dim {
TextureViewDimension::Cube if required_layer_count != 6 => {
return Err(
resource::CreateTextureViewError::InvalidCubemapTextureDepth {
depth: required_layer_count,
},
)
}
TextureViewDimension::CubeArray if required_layer_count % 6 != 0 => {
return Err(
resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
depth: required_layer_count,
},
)
}
_ => {}
}
let full_aspect = hal::FormatAspects::from(texture.desc.format);
let select_aspect = hal::FormatAspects::from(desc.range.aspect);
if (full_aspect & select_aspect).is_empty() {
return Err(resource::CreateTextureViewError::InvalidAspect {
texture_format: texture.desc.format,
requested_aspect: desc.range.aspect,
});
}
let end_level = desc
.range
.mip_level_count
.map_or(level_end, |_| required_level_count);
let end_layer = desc
.range
.array_layer_count
.map_or(layer_end, |_| required_layer_count);
let selector = TextureSelector {
mips: desc.range.base_mip_level..end_level,
layers: desc.range.base_array_layer..end_layer,
};
let view_layer_count = selector.layers.end - selector.layers.start;
let layer_check_ok = match view_dim {
wgt::TextureViewDimension::D1
| wgt::TextureViewDimension::D2
| wgt::TextureViewDimension::D3 => view_layer_count == 1,
wgt::TextureViewDimension::D2Array => true,
wgt::TextureViewDimension::Cube => view_layer_count == 6,
wgt::TextureViewDimension::CubeArray => view_layer_count % 6 == 0,
};
if !layer_check_ok {
return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
requested: view_layer_count,
dim: view_dim,
});
}
let mut extent = texture
.desc
.mip_level_size(desc.range.base_mip_level)
.unwrap();
if view_dim != wgt::TextureViewDimension::D3 {
extent.depth_or_array_layers = view_layer_count;
}
let format = desc.format.unwrap_or(texture.desc.format);
if format != texture.desc.format {
return Err(resource::CreateTextureViewError::FormatReinterpretation {
texture: texture.desc.format,
view: format,
});
}
// filter the usages based on the other criteria
let usage = {
let mask_copy = !(hal::TextureUses::COPY_SRC | hal::TextureUses::COPY_DST);
let mask_dimension = match view_dim {
wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => {
hal::TextureUses::RESOURCE
}
wgt::TextureViewDimension::D3 => {
hal::TextureUses::RESOURCE
| hal::TextureUses::STORAGE_READ
| hal::TextureUses::STORAGE_READ_WRITE
}
_ => hal::TextureUses::all(),
};
let mask_mip_level = if selector.mips.end - selector.mips.start != 1 {
hal::TextureUses::RESOURCE
} else {
hal::TextureUses::all()
};
texture.hal_usage & mask_copy & mask_dimension & mask_mip_level
};
log::debug!(
"Create view for texture {:?} filters usages to {:?}",
texture_id,
usage
);
let hal_desc = hal::TextureViewDescriptor {
label: desc.label.borrow_option(),
format,
dimension: view_dim,
usage,
range: desc.range.clone(),
};
let raw = unsafe {
self.raw
.create_texture_view(texture_raw, &hal_desc)
.map_err(|_| resource::CreateTextureViewError::OutOfMemory)?
};
Ok(resource::TextureView {
raw,
parent_id: Stored {
value: id::Valid(texture_id),
ref_count: texture.life_guard.add_ref(),
},
device_id: texture.device_id.clone(),
desc: resource::HalTextureViewDescriptor {
format: hal_desc.format,
dimension: hal_desc.dimension,
range: hal_desc.range,
},
format_features: texture.format_features,
extent,
samples: texture.desc.sample_count,
selector,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
})
}
fn create_sampler(
&self,
self_id: id::DeviceId,
desc: &resource::SamplerDescriptor,
) -> Result<resource::Sampler<A>, resource::CreateSamplerError> {
if desc
.address_modes
.iter()
.any(|am| am == &wgt::AddressMode::ClampToBorder)
{
self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?;
}
if desc.border_color == Some(wgt::SamplerBorderColor::Zero) {
self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?;
}
let lod_clamp = if desc.lod_min_clamp > 0.0 || desc.lod_max_clamp < 32.0 {
Some(desc.lod_min_clamp..desc.lod_max_clamp)
} else {
None
};
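// The default 0.0..32.0 LOD range collapses to `None` here; only a narrowed
// range is forwarded to the backend as an explicit clamp.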
let anisotropy_clamp = if let Some(clamp) = desc.anisotropy_clamp {
let clamp = clamp.get();
let valid_clamp =
clamp <= hal::MAX_ANISOTROPY && conv::is_power_of_two_u32(clamp as u32);
if !valid_clamp {
return Err(resource::CreateSamplerError::InvalidClamp(clamp));
}
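// Assuming hal::MAX_ANISOTROPY is 16, the clamps accepted here are the powers
// of two 1, 2, 4, 8 and 16.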
if self
.downlevel
.flags
.contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING)
{
std::num::NonZeroU8::new(clamp)
} else {
None
}
} else {
None
};
//TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS
let hal_desc = hal::SamplerDescriptor {
label: desc.label.borrow_option(),
address_modes: desc.address_modes,
mag_filter: desc.mag_filter,
min_filter: desc.min_filter,
mipmap_filter: desc.mipmap_filter,
lod_clamp,
compare: desc.compare,
anisotropy_clamp,
border_color: desc.border_color,
};
let raw = unsafe {
self.raw
.create_sampler(&hal_desc)
.map_err(DeviceError::from)?
};
Ok(resource::Sampler {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear,
})
}
fn create_shader_module<'a>(
&self,
self_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor<'a>,
source: pipeline::ShaderModuleSource<'a>,
) -> Result<pipeline::ShaderModule<A>, pipeline::CreateShaderModuleError> {
let (module, source) = match source {
pipeline::ShaderModuleSource::Wgsl(code) => {
profiling::scope!("naga::wgsl::parse_str");
let module = naga::front::wgsl::parse_str(&code).map_err(|inner| {
pipeline::CreateShaderModuleError::Parsing(pipeline::ShaderError {
source: code.to_string(),
label: desc.label.as_ref().map(|l| l.to_string()),
inner,
})
})?;
(module, code.into_owned())
}
pipeline::ShaderModuleSource::Naga(module) => (module, String::new()),
};
use naga::valid::Capabilities as Caps;
profiling::scope!("naga::validate");
let mut caps = Caps::empty();
caps.set(
Caps::PUSH_CONSTANT,
self.features.contains(wgt::Features::PUSH_CONSTANTS),
);
caps.set(
Caps::FLOAT64,
self.features.contains(wgt::Features::SHADER_FLOAT64),
);
caps.set(
Caps::PRIMITIVE_INDEX,
self.features
.contains(wgt::Features::SHADER_PRIMITIVE_INDEX),
);
caps.set(
Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
),
);
caps.set(
Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
),
);
// TODO: This needs a proper wgpu feature
caps.set(
Caps::SAMPLER_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
),
);
let info = naga::valid::Validator::new(naga::valid::ValidationFlags::all(), caps)
.validate(&module)
.map_err(|inner| {
pipeline::CreateShaderModuleError::Validation(pipeline::ShaderError {
source,
label: desc.label.as_ref().map(|l| l.to_string()),
inner,
})
})?;
let interface =
validation::Interface::new(&module, &info, self.features, self.limits.clone());
let hal_shader = hal::ShaderInput::Naga(hal::NagaShader { module, info });
let hal_desc = hal::ShaderModuleDescriptor {
label: desc.label.borrow_option(),
runtime_checks: desc.shader_bound_checks.runtime_checks(),
};
let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } {
Ok(raw) => raw,
Err(error) => {
return Err(match error {
hal::ShaderError::Device(error) => {
pipeline::CreateShaderModuleError::Device(error.into())
}
hal::ShaderError::Compilation(ref msg) => {
log::error!("Shader error: {}", msg);
pipeline::CreateShaderModuleError::Generation
}
})
}
};
Ok(pipeline::ShaderModule {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
interface: Some(interface),
#[cfg(debug_assertions)]
label: desc.label.borrow_or_default().to_string(),
})
}
#[allow(unused_unsafe)]
unsafe fn create_shader_module_spirv<'a>(
&self,
self_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor<'a>,
source: &'a [u32],
) -> Result<pipeline::ShaderModule<A>, pipeline::CreateShaderModuleError> {
self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?;
let hal_desc = hal::ShaderModuleDescriptor {
label: desc.label.borrow_option(),
runtime_checks: desc.shader_bound_checks.runtime_checks(),
};
let hal_shader = hal::ShaderInput::SpirV(source);
let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } {
Ok(raw) => raw,
Err(error) => {
return Err(match error {
hal::ShaderError::Device(error) => {
pipeline::CreateShaderModuleError::Device(error.into())
}
hal::ShaderError::Compilation(ref msg) => {
log::error!("Shader error: {}", msg);
pipeline::CreateShaderModuleError::Generation
}
})
}
};
Ok(pipeline::ShaderModule {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
interface: None,
#[cfg(debug_assertions)]
label: desc.label.borrow_or_default().to_string(),
})
}
fn deduplicate_bind_group_layout(
self_id: id::DeviceId,
entry_map: &binding_model::BindEntryMap,
guard: &Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> Option<id::BindGroupLayoutId> {
guard
.iter(self_id.backend())
.find(|&(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map)
.map(|(id, value)| {
value.multi_ref_count.inc();
id
})
}
fn get_introspection_bind_group_layouts<'a>(
pipeline_layout: &binding_model::PipelineLayout<A>,
bgl_guard: &'a Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> {
pipeline_layout
.bind_group_layout_ids
.iter()
.map(|&id| &bgl_guard[id].entries)
.collect()
}
/// Generate information about late-validated buffer bindings for pipelines.
//TODO: should this be combined with `get_introspection_bind_group_layouts` in some way?
fn make_late_sized_buffer_groups<'a>(
shader_binding_sizes: &FastHashMap<naga::ResourceBinding, wgt::BufferSize>,
layout: &binding_model::PipelineLayout<A>,
bgl_guard: &'a Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> ArrayVec<pipeline::LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }> {
// Given the shader-required binding sizes and the pipeline layout,
// return the filtered list of them in layout order, keeping only entries
// that leave `min_binding_size` unspecified (sized entries are already
// validated when the bind group is created).
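// For example, a BGL entry in group 1 at binding 2 with `min_binding_size:
// None` contributes the shader-reflected size for (group 1, binding 2), or 0
// when the shader does not use that binding.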
layout
.bind_group_layout_ids
.iter()
.enumerate()
.map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup {
shader_sizes: bgl_guard[bgl_id]
.entries
.values()
.filter_map(|entry| match entry.ty {
wgt::BindingType::Buffer {
min_binding_size: None,
..
} => {
let rb = naga::ResourceBinding {
group: group_index as u32,
binding: entry.binding,
};
let shader_size =
shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get());
Some(shader_size)
}
_ => None,
})
.collect(),
})
.collect()
}
fn create_bind_group_layout(
&self,
self_id: id::DeviceId,
label: Option<&str>,
entry_map: binding_model::BindEntryMap,
) -> Result<binding_model::BindGroupLayout<A>, binding_model::CreateBindGroupLayoutError> {
#[derive(PartialEq)]
enum WritableStorage {
Yes,
No,
}
for entry in entry_map.values() {
use wgt::BindingType as Bt;
let mut required_features = wgt::Features::empty();
let mut required_downlevel_flags = wgt::DownlevelFlags::empty();
let (array_feature, writable_storage) = match entry.ty {
Bt::Buffer {
ty: wgt::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: _,
} => (
Some(wgt::Features::BUFFER_BINDING_ARRAY),
WritableStorage::No,
),
Bt::Buffer {
ty: wgt::BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: _,
} => (
Some(wgt::Features::BUFFER_BINDING_ARRAY),
WritableStorage::No,
),
Bt::Buffer {
ty: wgt::BufferBindingType::Storage { read_only },
..
} => (
Some(
wgt::Features::BUFFER_BINDING_ARRAY
| wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
),
match read_only {
true => WritableStorage::No,
false => WritableStorage::Yes,
},
),
Bt::Sampler { .. } => (
Some(wgt::Features::TEXTURE_BINDING_ARRAY),
WritableStorage::No,
),
Bt::Texture { .. } => (
Some(wgt::Features::TEXTURE_BINDING_ARRAY),
WritableStorage::No,
),
Bt::StorageTexture {
access,
view_dimension,
format: _,
} => {
match view_dimension {
wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => {
return Err(binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error: binding_model::BindGroupLayoutEntryError::StorageTextureCube,
})
}
_ => (),
}
match access {
wgt::StorageTextureAccess::ReadOnly
| wgt::StorageTextureAccess::ReadWrite
if !self.features.contains(
wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
) =>
{
return Err(binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite,
});
}
_ => (),
}
(
Some(
wgt::Features::TEXTURE_BINDING_ARRAY
| wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
),
match access {
wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes,
wgt::StorageTextureAccess::ReadOnly => {
required_features |=
wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
WritableStorage::No
}
wgt::StorageTextureAccess::ReadWrite => {
required_features |=
wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
WritableStorage::Yes
}
},
)
}
};
// Validate the count parameter
if entry.count.is_some() {
required_features |= array_feature
.ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported)
.map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error,
})?;
}
if entry.visibility.contains(wgt::ShaderStages::VERTEX) {
if writable_storage == WritableStorage::Yes {
required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE;
}
if let Bt::Buffer {
ty: wgt::BufferBindingType::Storage { .. },
..
} = entry.ty
{
required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE;
}
}
if writable_storage == WritableStorage::Yes
&& entry.visibility.contains(wgt::ShaderStages::FRAGMENT)
{
required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE;
}
self.require_features(required_features)
.map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures)
.map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error,
})?;
self.require_downlevel_flags(required_downlevel_flags)
.map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags)
.map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error,
})?;
}
let bgl_flags = conv::bind_group_layout_flags(self.features);
let mut hal_bindings = entry_map.values().cloned().collect::<Vec<_>>();
hal_bindings.sort_by_key(|b| b.binding);
let hal_desc = hal::BindGroupLayoutDescriptor {
label,
flags: bgl_flags,
entries: &hal_bindings,
};
let raw = unsafe {
self.raw
.create_bind_group_layout(&hal_desc)
.map_err(DeviceError::from)?
};
let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
for entry in entry_map.values() {
count_validator.add_binding(entry);
}
// If a single bind group layout violates limits, the pipeline layout is definitely
// going to violate limits too; let's catch it now.
count_validator
.validate(&self.limits)
.map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;
Ok(binding_model::BindGroupLayout {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
multi_ref_count: MultiRefCount::new(),
dynamic_count: entry_map
.values()
.filter(|b| b.ty.has_dynamic_offset())
.count(),
count_validator,
entries: entry_map,
#[cfg(debug_assertions)]
label: label.unwrap_or("").to_string(),
})
}
fn create_buffer_binding<'a>(
bb: &binding_model::BufferBinding,
binding: u32,
decl: &wgt::BindGroupLayoutEntry,
used_buffer_ranges: &mut Vec<BufferInitTrackerAction>,
dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>,
late_buffer_binding_sizes: &mut FastHashMap<u32, wgt::BufferSize>,
used: &mut BindGroupStates<A>,
storage: &'a Storage<resource::Buffer<A>, id::BufferId>,
limits: &wgt::Limits,
) -> Result<hal::BufferBinding<'a, A>, binding_model::CreateBindGroupError> {
use crate::binding_model::CreateBindGroupError as Error;
let (binding_ty, dynamic, min_size) = match decl.ty {
wgt::BindingType::Buffer {
ty,
has_dynamic_offset,
min_binding_size,
} => (ty, has_dynamic_offset, min_binding_size),
_ => {
return Err(Error::WrongBindingType {
binding,
actual: decl.ty,
expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
})
}
};
let (pub_usage, internal_use, range_limit) = match binding_ty {
wgt::BufferBindingType::Uniform => (
wgt::BufferUsages::UNIFORM,
hal::BufferUses::UNIFORM,
limits.max_uniform_buffer_binding_size,
),
wgt::BufferBindingType::Storage { read_only } => (
wgt::BufferUsages::STORAGE,
if read_only {
hal::BufferUses::STORAGE_READ
} else {
hal::BufferUses::STORAGE_READ_WRITE
},
limits.max_storage_buffer_binding_size,
),
};
let (align, align_limit_name) =
binding_model::buffer_binding_type_alignment(limits, binding_ty);
if bb.offset % align as u64 != 0 {
return Err(Error::UnalignedBufferOffset(
bb.offset,
align_limit_name,
align,
));
}
let buffer = used
.buffers
.add_single(storage, bb.buffer_id, internal_use)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
check_buffer_usage(buffer.usage, pub_usage)?;
let raw_buffer = buffer
.raw
.as_ref()
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
let (bind_size, bind_end) = match bb.size {
Some(size) => {
let end = bb.offset + size.get();
if end > buffer.size {
return Err(Error::BindingRangeTooLarge {
buffer: bb.buffer_id,
range: bb.offset..end,
size: buffer.size,
});
}
(size.get(), end)
}
None => (buffer.size - bb.offset, buffer.size),
};
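// e.g. (hypothetical) a 256-byte buffer bound at offset 64 with size None
// yields bind_size 192 and bind_end 256; with size Some(128) it yields
// bind_size 128 and bind_end 192.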
if bind_size > range_limit as u64 {
return Err(Error::BufferRangeTooLarge {
binding,
given: bind_size as u32,
limit: range_limit,
});
}
// Record binding info for validating dynamic offsets
if dynamic {
dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
maximum_dynamic_offset: buffer.size - bind_end,
binding_type: binding_ty,
});
}
if let Some(non_zero) = min_size {
let min_size = non_zero.get();
if min_size > bind_size {
return Err(Error::BindingSizeTooSmall {
buffer: bb.buffer_id,
actual: bind_size,
min: min_size,
});
}
} else {
let late_size =
wgt::BufferSize::new(bind_size).ok_or(Error::BindingZeroSize(bb.buffer_id))?;
late_buffer_binding_sizes.insert(binding, late_size);
}
assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
used_buffer_ranges.extend(buffer.initialization_status.create_action(
bb.buffer_id,
bb.offset..bb.offset + bind_size,
MemoryInitKind::NeedsInitializedMemory,
));
Ok(hal::BufferBinding {
buffer: raw_buffer,
offset: bb.offset,
size: bb.size,
})
}
fn create_texture_binding(
view: &resource::TextureView<A>,
texture_guard: &Storage<resource::Texture<A>, id::TextureId>,
internal_use: hal::TextureUses,
pub_usage: wgt::TextureUsages,
used: &mut BindGroupStates<A>,
used_texture_ranges: &mut Vec<TextureInitTrackerAction>,
) -> Result<(), binding_model::CreateBindGroupError> {
// Careful here: the texture may no longer have its own ref count,
// if it was deleted by the user.
let texture = used
.textures
.add_single(
texture_guard,
view.parent_id.value.0,
view.parent_id.ref_count.clone(),
Some(view.selector.clone()),
internal_use,
)
.ok_or(binding_model::CreateBindGroupError::InvalidTexture(
view.parent_id.value.0,
))?;
check_texture_usage(texture.desc.usage, pub_usage)?;
used_texture_ranges.push(TextureInitTrackerAction {
id: view.parent_id.value.0,
range: TextureInitRange {
mip_range: view.desc.range.mip_range(&texture.desc),
layer_range: view.desc.range.layer_range(&texture.desc),
},
kind: MemoryInitKind::NeedsInitializedMemory,
});
Ok(())
}
fn create_bind_group<G: GlobalIdentityHandlerFactory>(
&self,
self_id: id::DeviceId,
layout: &binding_model::BindGroupLayout<A>,
desc: &binding_model::BindGroupDescriptor,
hub: &Hub<A, G>,
token: &mut Token<binding_model::BindGroupLayout<A>>,
) -> Result<binding_model::BindGroup<A>, binding_model::CreateBindGroupError> {
use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error};
{
// Check that the number of entries in the descriptor matches
// the number of entries in the layout.
let actual = desc.entries.len();
let expected = layout.entries.len();
if actual != expected {
return Err(Error::BindingsNumMismatch { expected, actual });
}
}
// TODO: arrayvec/smallvec, or re-use allocations
// Record binding info for dynamic offset validation
let mut dynamic_binding_info = Vec::new();
// Map of binding -> shader reflected size
//Note: we can't collect into a vector right away because
// it needs to be in BGL iteration order, not BG entry order.
let mut late_buffer_binding_sizes = FastHashMap::default();
// fill out the descriptors
let mut used = BindGroupStates::new();
let (buffer_guard, mut token) = hub.buffers.read(token);
let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token
let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token);
let mut used_buffer_ranges = Vec::new();
let mut used_texture_ranges = Vec::new();
let mut hal_entries = Vec::with_capacity(desc.entries.len());
let mut hal_buffers = Vec::new();
let mut hal_samplers = Vec::new();
let mut hal_textures = Vec::new();
for entry in desc.entries.iter() {
let binding = entry.binding;
// Find the corresponding declaration in the layout
let decl = layout
.entries
.get(&binding)
.ok_or(Error::MissingBindingDeclaration(binding))?;
let (res_index, count) = match entry.resource {
Br::Buffer(ref bb) => {
let bb = Self::create_buffer_binding(
bb,
binding,
decl,
&mut used_buffer_ranges,
&mut dynamic_binding_info,
&mut late_buffer_binding_sizes,
&mut used,
&*buffer_guard,
&self.limits,
)?;
let res_index = hal_buffers.len();
hal_buffers.push(bb);
(res_index, 1)
}
Br::BufferArray(ref bindings_array) => {
let num_bindings = bindings_array.len();
Self::check_array_binding(self.features, decl.count, num_bindings)?;
let res_index = hal_buffers.len();
for bb in bindings_array.iter() {
let bb = Self::create_buffer_binding(
bb,
binding,
decl,
&mut used_buffer_ranges,
&mut dynamic_binding_info,
&mut late_buffer_binding_sizes,
&mut used,
&*buffer_guard,
&self.limits,
)?;
hal_buffers.push(bb);
}
(res_index, num_bindings)
}
Br::Sampler(id) => {
match decl.ty {
wgt::BindingType::Sampler(ty) => {
let sampler = used
.samplers
.add_single(&*sampler_guard, id)
.ok_or(Error::InvalidSampler(id))?;
// Allowed sampler values for filtering and comparison
let (allowed_filtering, allowed_comparison) = match ty {
wgt::SamplerBindingType::Filtering => (None, false),
wgt::SamplerBindingType::NonFiltering => (Some(false), false),
wgt::SamplerBindingType::Comparison => (None, true),
};
if let Some(allowed_filtering) = allowed_filtering {
if allowed_filtering != sampler.filtering {
return Err(Error::WrongSamplerFiltering {
binding,
layout_flt: allowed_filtering,
sampler_flt: sampler.filtering,
});
}
}
if allowed_comparison != sampler.comparison {
return Err(Error::WrongSamplerComparison {
binding,
layout_cmp: allowed_comparison,
sampler_cmp: sampler.comparison,
});
}
let res_index = hal_samplers.len();
hal_samplers.push(&sampler.raw);
(res_index, 1)
}
_ => {
return Err(Error::WrongBindingType {
binding,
actual: decl.ty,
expected: "Sampler",
})
}
}
}
Br::SamplerArray(ref bindings_array) => {
let num_bindings = bindings_array.len();
Self::check_array_binding(self.features, decl.count, num_bindings)?;
let res_index = hal_samplers.len();
for &id in bindings_array.iter() {
let sampler = used
.samplers
.add_single(&*sampler_guard, id)
.ok_or(Error::InvalidSampler(id))?;
hal_samplers.push(&sampler.raw);
}
(res_index, num_bindings)
}
Br::TextureView(id) => {
let view = used
.views
.add_single(&*texture_view_guard, id)
.ok_or(Error::InvalidTextureView(id))?;
let (pub_usage, internal_use) = Self::texture_use_parameters(
binding,
decl,
view,
"SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
)?;
Self::create_texture_binding(
view,
&texture_guard,
internal_use,
pub_usage,
&mut used,
&mut used_texture_ranges,
)?;
let res_index = hal_textures.len();
hal_textures.push(hal::TextureBinding {
view: &view.raw,
usage: internal_use,
});
(res_index, 1)
}
Br::TextureViewArray(ref bindings_array) => {
let num_bindings = bindings_array.len();
Self::check_array_binding(self.features, decl.count, num_bindings)?;
let res_index = hal_textures.len();
for &id in bindings_array.iter() {
let view = used
.views
.add_single(&*texture_view_guard, id)
.ok_or(Error::InvalidTextureView(id))?;
let (pub_usage, internal_use) =
Self::texture_use_parameters(binding, decl, view,
"SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?;
Self::create_texture_binding(
view,
&texture_guard,
internal_use,
pub_usage,
&mut used,
&mut used_texture_ranges,
)?;
hal_textures.push(hal::TextureBinding {
view: &view.raw,
usage: internal_use,
});
}
(res_index, num_bindings)
}
};
hal_entries.push(hal::BindGroupEntry {
binding,
resource_index: res_index as u32,
count: count as u32,
});
}
used.optimize();
hal_entries.sort_by_key(|entry| entry.binding);
for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) {
if a.binding == b.binding {
return Err(Error::DuplicateBinding(a.binding));
}
}
let hal_desc = hal::BindGroupDescriptor {
label: desc.label.borrow_option(),
layout: &layout.raw,
entries: &hal_entries,
buffers: &hal_buffers,
samplers: &hal_samplers,
textures: &hal_textures,
};
let raw = unsafe {
self.raw
.create_bind_group(&hal_desc)
.map_err(DeviceError::from)?
};
// manually add a dependency on BGL
layout.multi_ref_count.inc();
Ok(binding_model::BindGroup {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
layout_id: id::Valid(desc.layout),
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
used,
used_buffer_ranges,
used_texture_ranges,
dynamic_binding_info,
// collect in the order of BGL iteration
late_buffer_binding_sizes: layout
.entries
.keys()
.flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned())
.collect(),
})
}
fn check_array_binding(
features: wgt::Features,
count: Option<NonZeroU32>,
num_bindings: usize,
) -> Result<(), super::binding_model::CreateBindGroupError> {
use super::binding_model::CreateBindGroupError as Error;
if let Some(count) = count {
let count = count.get() as usize;
if count < num_bindings {
return Err(Error::BindingArrayPartialLengthMismatch {
actual: num_bindings,
expected: count,
});
}
if count != num_bindings
&& !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY)
{
return Err(Error::BindingArrayLengthMismatch {
actual: num_bindings,
expected: count,
});
}
if num_bindings == 0 {
return Err(Error::BindingArrayZeroLength);
}
} else {
return Err(Error::SingleBindingExpected);
};
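// Example: count Some(4) with 3 supplied resources passes only when
// PARTIALLY_BOUND_BINDING_ARRAY is enabled; 5 supplied resources always fails
// the first check above.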
Ok(())
}
fn texture_use_parameters(
binding: u32,
decl: &wgt::BindGroupLayoutEntry,
view: &crate::resource::TextureView<A>,
expected: &'static str,
) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> {
use crate::binding_model::CreateBindGroupError as Error;
if view
.desc
.aspects()
.contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL)
{
return Err(Error::DepthStencilAspect);
}
let format_info = view.desc.format.describe();
match decl.ty {
wgt::BindingType::Texture {
sample_type,
view_dimension,
multisampled,
} => {
use wgt::TextureSampleType as Tst;
if multisampled != (view.samples != 1) {
return Err(Error::InvalidTextureMultisample {
binding,
layout_multisampled: multisampled,
view_samples: view.samples,
});
}
match (sample_type, format_info.sample_type) {
(Tst::Uint, Tst::Uint) |
(Tst::Sint, Tst::Sint) |
(Tst::Depth, Tst::Depth) |
// if we expect non-filterable, accept anything float
(Tst::Float { filterable: false }, Tst::Float { .. }) |
// if we expect filterable, require it
(Tst::Float { filterable: true }, Tst::Float { filterable: true }) |
// if we expect float, also accept depth
(Tst::Float { .. }, Tst::Depth) => {}
// if we expect filterable, also accept Float that is defined as unfilterable if filterable feature is explicitly enabled
// (only hit if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is enabled)
(Tst::Float { filterable: true }, Tst::Float { .. }) if view.format_features.flags.contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {}
_ => {
return Err(Error::InvalidTextureSampleType {
binding,
layout_sample_type: sample_type,
view_format: view.desc.format,
})
}
}
if view_dimension != view.desc.dimension {
return Err(Error::InvalidTextureDimension {
binding,
layout_dimension: view_dimension,
view_dimension: view.desc.dimension,
});
}
Ok((
wgt::TextureUsages::TEXTURE_BINDING,
hal::TextureUses::RESOURCE,
))
}
wgt::BindingType::StorageTexture {
access,
format,
view_dimension,
} => {
if format != view.desc.format {
return Err(Error::InvalidStorageTextureFormat {
binding,
layout_format: format,
view_format: view.desc.format,
});
}
if view_dimension != view.desc.dimension {
return Err(Error::InvalidTextureDimension {
binding,
layout_dimension: view_dimension,
view_dimension: view.desc.dimension,
});
}
let mip_level_count = view.selector.mips.end - view.selector.mips.start;
if mip_level_count != 1 {
return Err(Error::InvalidStorageTextureMipLevelCount {
binding,
mip_level_count,
});
}
let internal_use = match access {
wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_READ_WRITE,
wgt::StorageTextureAccess::ReadOnly => {
if !view
.format_features
.flags
.contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
{
return Err(Error::StorageReadNotSupported(view.desc.format));
}
hal::TextureUses::STORAGE_READ
}
wgt::StorageTextureAccess::ReadWrite => {
if !view
.format_features
.flags
.contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
{
return Err(Error::StorageReadNotSupported(view.desc.format));
}
hal::TextureUses::STORAGE_READ_WRITE
}
};
Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use))
}
_ => Err(Error::WrongBindingType {
binding,
actual: decl.ty,
expected,
}),
}
}
fn create_pipeline_layout(
&self,
self_id: id::DeviceId,
desc: &binding_model::PipelineLayoutDescriptor,
bgl_guard: &Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> Result<binding_model::PipelineLayout<A>, binding_model::CreatePipelineLayoutError> {
use crate::binding_model::CreatePipelineLayoutError as Error;
let bind_group_layouts_count = desc.bind_group_layouts.len();
let device_max_bind_groups = self.limits.max_bind_groups as usize;
if bind_group_layouts_count > device_max_bind_groups {
return Err(Error::TooManyGroups {
actual: bind_group_layouts_count,
max: device_max_bind_groups,
});
}
if !desc.push_constant_ranges.is_empty() {
self.require_features(wgt::Features::PUSH_CONSTANTS)?;
}
let mut used_stages = wgt::ShaderStages::empty();
for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
if pc.stages.intersects(used_stages) {
return Err(Error::MoreThanOnePushConstantRangePerStage {
index,
provided: pc.stages,
intersected: pc.stages & used_stages,
});
}
used_stages |= pc.stages;
let device_max_pc_size = self.limits.max_push_constant_size;
if device_max_pc_size < pc.range.end {
return Err(Error::PushConstantRangeTooLarge {
index,
range: pc.range.clone(),
max: device_max_pc_size,
});
}
if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
return Err(Error::MisalignedPushConstantRange {
index,
bound: pc.range.start,
});
}
if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
return Err(Error::MisalignedPushConstantRange {
index,
bound: pc.range.end,
});
}
}
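// Example (hypothetical ranges, assuming wgt::PUSH_CONSTANT_ALIGNMENT == 4): a
// VERTEX range 0..16 followed by a FRAGMENT range 16..32 passes; a second
// VERTEX range, or a misaligned range such as 2..18, is rejected above.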
let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
// validate total resource counts
for &id in desc.bind_group_layouts.iter() {
let bind_group_layout = bgl_guard
.get(id)
.map_err(|_| Error::InvalidBindGroupLayout(id))?;
count_validator.merge(&bind_group_layout.count_validator);
}
count_validator
.validate(&self.limits)
.map_err(Error::TooManyBindings)?;
let bgl_vec = desc
.bind_group_layouts
.iter()
.map(|&id| &bgl_guard.get(id).unwrap().raw)
.collect::<Vec<_>>();
let hal_desc = hal::PipelineLayoutDescriptor {
label: desc.label.borrow_option(),
flags: hal::PipelineLayoutFlags::BASE_VERTEX_INSTANCE,
bind_group_layouts: &bgl_vec,
push_constant_ranges: desc.push_constant_ranges.as_ref(),
};
let raw = unsafe {
self.raw
.create_pipeline_layout(&hal_desc)
.map_err(DeviceError::from)?
};
Ok(binding_model::PipelineLayout {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
bind_group_layout_ids: desc
.bind_group_layouts
.iter()
.map(|&id| {
// manually add a dependency to BGL
bgl_guard.get(id).unwrap().multi_ref_count.inc();
id::Valid(id)
})
.collect(),
push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
})
}
//TODO: refactor this. It's the only method of `Device` that registers new objects
// (the pipeline layout).
fn derive_pipeline_layout(
&self,
self_id: id::DeviceId,
implicit_context: Option<ImplicitPipelineContext>,
mut derived_group_layouts: ArrayVec<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>,
bgl_guard: &mut Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<A>, id::PipelineLayoutId>,
) -> Result<id::PipelineLayoutId, pipeline::ImplicitLayoutError> {
while derived_group_layouts
.last()
.map_or(false, |map| map.is_empty())
{
derived_group_layouts.pop();
}
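// e.g. derived maps [A, B, {}, {}] shrink to [A, B]; only trailing empty
// groups are dropped, so [A, {}, C] keeps all three.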
let mut ids = implicit_context.ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?;
let group_count = derived_group_layouts.len();
if ids.group_ids.len() < group_count {
log::error!(
"Not enough bind group IDs ({}) specified for the implicit layout ({})",
ids.group_ids.len(),
derived_group_layouts.len()
);
return Err(pipeline::ImplicitLayoutError::MissingIds(group_count as _));
}
for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) {
match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard) {
Some(dedup_id) => {
*bgl_id = dedup_id;
}
None => {
let bgl = self.create_bind_group_layout(self_id, None, map)?;
bgl_guard.force_replace(*bgl_id, bgl);
}
};
}
let layout_desc = binding_model::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]),
push_constant_ranges: Cow::Borrowed(&[]), //TODO?
};
let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?;
pipeline_layout_guard.force_replace(ids.root_id, layout);
Ok(ids.root_id)
}
fn create_compute_pipeline<G: GlobalIdentityHandlerFactory>(
&self,
self_id: id::DeviceId,
desc: &pipeline::ComputePipelineDescriptor,
implicit_context: Option<ImplicitPipelineContext>,
hub: &Hub<A, G>,
token: &mut Token<Self>,
) -> Result<pipeline::ComputePipeline<A>, pipeline::CreateComputePipelineError> {
//TODO: only lock mutable if the layout is derived
let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
// This has to be done first; otherwise the IDs may point to entries
// that are not even in the storage.
if let Some(ref ids) = implicit_context {
pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
for &bgl_id in ids.group_ids.iter() {
bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
}
}
self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?;
let mut derived_group_layouts =
ArrayVec::<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>::new();
let mut shader_binding_sizes = FastHashMap::default();
let io = validation::StageIo::default();
let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
let shader_module = shader_module_guard
.get(desc.stage.module)
.map_err(|_| validation::StageError::InvalidModule)?;
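        // Validate the compute entry point against the provided layout, deriving
        // group layouts by reflection when no layout was given.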
{
let flag = wgt::ShaderStages::COMPUTE;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
&*bgl_guard,
)),
None => {
for _ in 0..self.limits.max_bind_groups {
derived_group_layouts.push(binding_model::BindEntryMap::default());
}
None
}
};
if let Some(ref interface) = shader_module.interface {
let _ = interface.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&mut shader_binding_sizes,
&desc.stage.entry_point,
flag,
io,
)?;
}
}
let pipeline_layout_id = match desc.layout {
Some(id) => id,
None => self.derive_pipeline_layout(
self_id,
implicit_context,
derived_group_layouts,
&mut *bgl_guard,
&mut *pipeline_layout_guard,
)?,
};
let layout = pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
let late_sized_buffer_groups =
Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
let pipeline_desc = hal::ComputePipelineDescriptor {
label: desc.label.borrow_option(),
layout: &layout.raw,
stage: hal::ProgrammableStage {
entry_point: desc.stage.entry_point.as_ref(),
module: &shader_module.raw,
},
};
let raw =
unsafe { self.raw.create_compute_pipeline(&pipeline_desc) }.map_err(
|err| match err {
hal::PipelineError::Device(error) => {
pipeline::CreateComputePipelineError::Device(error.into())
}
hal::PipelineError::Linkage(_stages, msg) => {
pipeline::CreateComputePipelineError::Internal(msg)
}
hal::PipelineError::EntryPoint(_stage) => {
pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string())
}
},
)?;
let pipeline = pipeline::ComputePipeline {
raw,
layout_id: Stored {
value: id::Valid(pipeline_layout_id),
ref_count: layout.life_guard.add_ref(),
},
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
late_sized_buffer_groups,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
};
Ok(pipeline)
}
fn create_render_pipeline<G: GlobalIdentityHandlerFactory>(
&self,
self_id: id::DeviceId,
adapter: &crate::instance::Adapter<A>,
desc: &pipeline::RenderPipelineDescriptor,
implicit_context: Option<ImplicitPipelineContext>,
hub: &Hub<A, G>,
token: &mut Token<Self>,
) -> Result<pipeline::RenderPipeline<A>, pipeline::CreateRenderPipelineError> {
use wgt::TextureFormatFeatureFlags as Tfff;
        //TODO: only lock mutably if the layout is derived
let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
        // This has to be done first; otherwise the IDs may point to entries
        // that are not even in the storage.
if let Some(ref ids) = implicit_context {
pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
for &bgl_id in ids.group_ids.iter() {
bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
}
}
let mut derived_group_layouts =
ArrayVec::<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>::new();
let mut shader_binding_sizes = FastHashMap::default();
let color_targets = desc
.fragment
.as_ref()
.map_or(&[][..], |fragment| &fragment.targets);
let depth_stencil_state = desc.depth_stencil.as_ref();
if !color_targets.is_empty() && {
let first = &color_targets[0];
color_targets[1..]
.iter()
.any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend)
} {
log::info!("Color targets: {:?}", color_targets);
self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?;
}
let mut io = validation::StageIo::default();
let mut validated_stages = wgt::ShaderStages::empty();
let mut vertex_strides = Vec::with_capacity(desc.vertex.buffers.len());
let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len());
let mut total_attributes = 0;
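        // Validate each vertex buffer layout and collect its attributes as stage
        // inputs for interface checking.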
for (i, vb_state) in desc.vertex.buffers.iter().enumerate() {
vertex_strides
.alloc()
.init((vb_state.array_stride, vb_state.step_mode));
if vb_state.attributes.is_empty() {
continue;
}
if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 {
return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge {
index: i as u32,
given: vb_state.array_stride as u32,
limit: self.limits.max_vertex_buffer_array_stride,
});
}
if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
index: i as u32,
stride: vb_state.array_stride,
});
}
vertex_buffers.alloc().init(hal::VertexBufferLayout {
array_stride: vb_state.array_stride,
step_mode: vb_state.step_mode,
attributes: vb_state.attributes.as_ref(),
});
for attribute in vb_state.attributes.iter() {
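                // Reject implausibly large attribute offsets (>= 2^28 bytes).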
if attribute.offset >= 0x10000000 {
return Err(
pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
location: attribute.shader_location,
offset: attribute.offset,
},
);
}
if let wgt::VertexFormat::Float64
| wgt::VertexFormat::Float64x2
| wgt::VertexFormat::Float64x3
| wgt::VertexFormat::Float64x4 = attribute.format
{
self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?;
}
io.insert(
attribute.shader_location,
validation::InterfaceVar::vertex_attribute(attribute.format),
);
}
total_attributes += vb_state.attributes.len();
}
if vertex_buffers.len() > self.limits.max_vertex_buffers as usize {
return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers {
given: vertex_buffers.len() as u32,
limit: self.limits.max_vertex_buffers,
});
}
if total_attributes > self.limits.max_vertex_attributes as usize {
return Err(
pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
given: total_attributes as u32,
limit: self.limits.max_vertex_attributes,
},
);
}
if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() {
return Err(
pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology {
strip_index_format: desc.primitive.strip_index_format,
topology: desc.primitive.topology,
},
);
}
if desc.primitive.unclipped_depth {
self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?;
}
if desc.primitive.polygon_mode == wgt::PolygonMode::Line {
self.require_features(wgt::Features::POLYGON_MODE_LINE)?;
}
if desc.primitive.polygon_mode == wgt::PolygonMode::Point {
self.require_features(wgt::Features::POLYGON_MODE_POINT)?;
}
if desc.primitive.conservative {
self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?;
}
if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill {
return Err(
pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode,
);
}
for (i, cs) in color_targets.iter().enumerate() {
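            // `loop` is used as a block here: the first failing check breaks out
            // with the error, and falling through breaks with `None`.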
let error = loop {
let format_features = self.describe_format_features(adapter, cs.format)?;
if !format_features
.allowed_usages
.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
{
break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format));
}
if cs.blend.is_some() && !format_features.flags.contains(Tfff::FILTERABLE) {
break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format));
}
if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) {
break Some(pipeline::ColorStateError::FormatNotColor(cs.format));
}
if desc.multisample.count > 1 && !format_features.flags.contains(Tfff::MULTISAMPLE)
{
break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format));
}
break None;
};
if let Some(e) = error {
return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e));
}
}
if let Some(ds) = depth_stencil_state {
let error = loop {
let format_features = self.describe_format_features(adapter, ds.format)?;
if !format_features
.allowed_usages
.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
{
break Some(pipeline::DepthStencilStateError::FormatNotRenderable(
ds.format,
));
}
let aspect = hal::FormatAspects::from(ds.format);
if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) {
break Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format));
}
if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) {
break Some(pipeline::DepthStencilStateError::FormatNotStencil(
ds.format,
));
}
if desc.multisample.count > 1 && !format_features.flags.contains(Tfff::MULTISAMPLE)
{
break Some(pipeline::DepthStencilStateError::FormatNotMultisampled(
ds.format,
));
}
break None;
};
if let Some(e) = error {
return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e));
}
}
if desc.layout.is_none() {
for _ in 0..self.limits.max_bind_groups {
derived_group_layouts.push(binding_model::BindEntryMap::default());
}
}
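        // The sample count must be a power of two between 1 and 32.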
let samples = {
let sc = desc.multisample.count;
if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) {
return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
}
sc
};
let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
let vertex_stage = {
let stage = &desc.vertex.stage;
let flag = wgt::ShaderStages::VERTEX;
let shader_module = shader_module_guard.get(stage.module).map_err(|_| {
pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error: validation::StageError::InvalidModule,
}
})?;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => {
let pipeline_layout = pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
Some(Device::get_introspection_bind_group_layouts(
pipeline_layout,
&*bgl_guard,
))
}
None => None,
};
if let Some(ref interface) = shader_module.interface {
io = interface
.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&mut shader_binding_sizes,
&stage.entry_point,
flag,
io,
)
.map_err(|error| pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error,
})?;
validated_stages |= flag;
}
hal::ProgrammableStage {
module: &shader_module.raw,
entry_point: stage.entry_point.as_ref(),
}
};
let fragment_stage = match desc.fragment {
Some(ref fragment) => {
let flag = wgt::ShaderStages::FRAGMENT;
let shader_module =
shader_module_guard
.get(fragment.stage.module)
.map_err(|_| pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error: validation::StageError::InvalidModule,
})?;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
&*bgl_guard,
)),
None => None,
};
if validated_stages == wgt::ShaderStages::VERTEX {
if let Some(ref interface) = shader_module.interface {
io = interface
.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&mut shader_binding_sizes,
&fragment.stage.entry_point,
flag,
io,
)
.map_err(|error| pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error,
})?;
validated_stages |= flag;
}
}
Some(hal::ProgrammableStage {
module: &shader_module.raw,
entry_point: fragment.stage.entry_point.as_ref(),
})
}
None => None,
};
if validated_stages.contains(wgt::ShaderStages::FRAGMENT) {
for (i, state) in color_targets.iter().enumerate() {
match io.get(&(i as wgt::ShaderLocation)) {
Some(output) => {
validation::check_texture_format(state.format, &output.ty).map_err(
|pipeline| {
pipeline::CreateRenderPipelineError::ColorState(
i as u8,
pipeline::ColorStateError::IncompatibleFormat {
pipeline,
shader: output.ty,
},
)
},
)?;
}
None if state.write_mask.is_empty() => {}
None => {
                    log::warn!("Missing fragment output[{}], expected {:?}", i, state);
return Err(pipeline::CreateRenderPipelineError::ColorState(
i as u8,
pipeline::ColorStateError::Missing,
));
}
}
}
}
let last_stage = match desc.fragment {
Some(_) => wgt::ShaderStages::FRAGMENT,
None => wgt::ShaderStages::VERTEX,
};
if desc.layout.is_none() && !validated_stages.contains(last_stage) {
return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
}
let pipeline_layout_id = match desc.layout {
Some(id) => id,
None => self.derive_pipeline_layout(
self_id,
implicit_context,
derived_group_layouts,
&mut *bgl_guard,
&mut *pipeline_layout_guard,
)?,
};
let layout = pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
// Multiview is only supported if the feature is enabled
if desc.multiview.is_some() {
self.require_features(wgt::Features::MULTIVIEW)?;
}
let late_sized_buffer_groups =
Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
let pipeline_desc = hal::RenderPipelineDescriptor {
label: desc.label.borrow_option(),
layout: &layout.raw,
vertex_buffers: &vertex_buffers,
vertex_stage,
primitive: desc.primitive,
depth_stencil: desc.depth_stencil.clone(),
multisample: desc.multisample,
fragment_stage,
color_targets,
multiview: desc.multiview,
};
let raw =
unsafe { self.raw.create_render_pipeline(&pipeline_desc) }.map_err(
|err| match err {
hal::PipelineError::Device(error) => {
pipeline::CreateRenderPipelineError::Device(error.into())
}
hal::PipelineError::Linkage(stage, msg) => {
pipeline::CreateRenderPipelineError::Internal { stage, error: msg }
}
hal::PipelineError::EntryPoint(stage) => {
pipeline::CreateRenderPipelineError::Internal {
stage: hal::auxil::map_naga_stage(stage),
error: EP_FAILURE.to_string(),
}
}
},
)?;
let pass_context = RenderPassContext {
attachments: AttachmentData {
colors: color_targets.iter().map(|state| state.format).collect(),
resolves: ArrayVec::new(),
depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
},
sample_count: samples,
multiview: desc.multiview,
};
let mut flags = pipeline::PipelineFlags::empty();
for state in color_targets.iter() {
if let Some(ref bs) = state.blend {
if bs.color.uses_constant() | bs.alpha.uses_constant() {
flags |= pipeline::PipelineFlags::BLEND_CONSTANT;
}
}
}
if let Some(ds) = depth_stencil_state.as_ref() {
if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
}
if !ds.is_depth_read_only() {
flags |= pipeline::PipelineFlags::WRITES_DEPTH;
}
if !ds.is_stencil_read_only() {
flags |= pipeline::PipelineFlags::WRITES_STENCIL;
}
}
let pipeline = pipeline::RenderPipeline {
raw,
layout_id: Stored {
value: id::Valid(pipeline_layout_id),
ref_count: layout.life_guard.add_ref(),
},
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
pass_context,
flags,
strip_index_format: desc.primitive.strip_index_format,
vertex_strides,
late_sized_buffer_groups,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
};
Ok(pipeline)
}
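    // Reports the features usable with `format` on this device, consulting the
    // adapter directly when adapter-specific format features are enabled or
    // when the backend is downlevel.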
fn describe_format_features(
&self,
adapter: &crate::instance::Adapter<A>,
format: TextureFormat,
) -> Result<wgt::TextureFormatFeatures, MissingFeatures> {
let format_desc = format.describe();
self.require_features(format_desc.required_features)?;
let using_device_features = self
.features
.contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
        // If we're running downlevel, we need to manually ask the backend what
        // we can use, as we can't trust the WebGPU guarantees.
let downlevel = !self.downlevel.is_webgpu_compliant();
if using_device_features || downlevel {
Ok(adapter.get_texture_format_features(format))
} else {
Ok(format_desc.guaranteed_format_features)
}
}
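    // Blocks until the submission with the given index has completed on the
    // GPU, then triages any newly retired resources.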
fn wait_for_submit(
&self,
submission_index: SubmissionIndex,
token: &mut Token<Self>,
) -> Result<(), WaitIdleError> {
let last_done_index = unsafe {
self.raw
.get_fence_value(&self.fence)
.map_err(DeviceError::from)?
};
if last_done_index < submission_index {
log::info!("Waiting for submission {:?}", submission_index);
unsafe {
self.raw
.wait(&self.fence, submission_index, !0)
.map_err(DeviceError::from)?
};
let closures = self
.lock_life(token)
.triage_submissions(submission_index, &self.command_allocator);
assert!(
closures.is_empty(),
"wait_for_submit is not expected to work with closures"
);
}
Ok(())
}
fn create_query_set(
&self,
self_id: id::DeviceId,
desc: &resource::QuerySetDescriptor,
) -> Result<resource::QuerySet<A>, resource::CreateQuerySetError> {
use resource::CreateQuerySetError as Error;
match desc.ty {
wgt::QueryType::Occlusion => {}
wgt::QueryType::Timestamp => {
self.require_features(wgt::Features::TIMESTAMP_QUERY)?;
}
wgt::QueryType::PipelineStatistics(..) => {
self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?;
}
}
if desc.count == 0 {
return Err(Error::ZeroCount);
}
if desc.count > wgt::QUERY_SET_MAX_QUERIES {
return Err(Error::TooManyQueries {
count: desc.count,
maximum: wgt::QUERY_SET_MAX_QUERIES,
});
}
let hal_desc = desc.map_label(super::LabelHelpers::borrow_option);
Ok(resource::QuerySet {
raw: unsafe { self.raw.create_query_set(&hal_desc).unwrap() },
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
life_guard: LifeGuard::new(""),
desc: desc.map_label(|_| ()),
})
}
}
impl<A: HalApi> Device<A> {
pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<A>) {
if let Some(raw) = buffer.raw {
unsafe {
self.raw.destroy_buffer(raw);
}
}
}
pub(crate) fn destroy_command_buffer(&self, cmd_buf: command::CommandBuffer<A>) {
let mut baked = cmd_buf.into_baked();
unsafe {
baked.encoder.reset_all(baked.list.into_iter());
}
unsafe {
self.raw.destroy_command_encoder(baked.encoder);
}
}
/// Wait for idle and remove resources that we can, before we die.
pub(crate) fn prepare_to_die(&mut self) {
self.pending_writes.deactivate();
let mut life_tracker = self.life_tracker.lock();
let current_index = self.active_submission_index;
if let Err(error) = unsafe { self.raw.wait(&self.fence, current_index, CLEANUP_WAIT_MS) } {
log::error!("failed to wait for the device: {:?}", error);
}
let _ = life_tracker.triage_submissions(current_index, &self.command_allocator);
life_tracker.cleanup(&self.raw);
#[cfg(feature = "trace")]
{
self.trace = None;
}
}
pub(crate) fn dispose(self) {
self.pending_writes.dispose(&self.raw);
self.command_allocator.into_inner().dispose(&self.raw);
unsafe {
self.raw.destroy_buffer(self.zero_buffer);
self.raw.destroy_fence(self.fence);
self.raw.exit(self.queue);
}
}
}
impl<A: HalApi> crate::hub::Resource for Device<A> {
const TYPE: &'static str = "Device";
fn life_guard(&self) -> &LifeGuard {
&self.life_guard
}
}
#[derive(Clone, Debug, Error)]
#[error("device is invalid")]
pub struct InvalidDevice;
#[derive(Clone, Debug, Error)]
pub enum DeviceError {
#[error("parent device is invalid")]
Invalid,
#[error("parent device is lost")]
Lost,
#[error("not enough memory left")]
OutOfMemory,
}
impl From<hal::DeviceError> for DeviceError {
fn from(error: hal::DeviceError) -> Self {
match error {
hal::DeviceError::Lost => DeviceError::Lost,
hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory,
}
}
}
#[derive(Clone, Debug, Error)]
#[error("Features {0:?} are required but not enabled on the device")]
pub struct MissingFeatures(pub wgt::Features);
#[derive(Clone, Debug, Error)]
#[error(
"Downlevel flags {0:?} are required but not supported on the device.\n{}",
DOWNLEVEL_ERROR_MESSAGE
)]
pub struct MissingDownlevelFlags(pub wgt::DownlevelFlags);
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ImplicitPipelineContext {
pub root_id: id::PipelineLayoutId,
pub group_ids: ArrayVec<id::BindGroupLayoutId, { hal::MAX_BIND_GROUPS }>,
}
pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> {
pub root_id: Input<G, id::PipelineLayoutId>,
pub group_ids: &'a [Input<G, id::BindGroupLayoutId>],
}
impl<G: GlobalIdentityHandlerFactory> ImplicitPipelineIds<'_, G> {
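    // Reserves the provided identity inputs in the hub so the implicit IDs
    // exist before pipeline creation refers to them.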
fn prepare<A: HalApi>(self, hub: &Hub<A, G>) -> ImplicitPipelineContext {
ImplicitPipelineContext {
root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(),
group_ids: self
.group_ids
.iter()
.map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id())
.collect(),
}
}
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn adapter_is_surface_supported<A: HalApi>(
&self,
adapter_id: id::AdapterId,
surface_id: id::SurfaceId,
) -> Result<bool, instance::IsSurfaceSupportedError> {
let hub = A::hub(self);
let mut token = Token::root();
let (surface_guard, mut token) = self.surfaces.read(&mut token);
let (adapter_guard, mut _token) = hub.adapters.read(&mut token);
let adapter = adapter_guard
.get(adapter_id)
.map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?;
let surface = surface_guard
.get(surface_id)
.map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?;
Ok(adapter.is_surface_supported(surface))
}
pub fn surface_get_preferred_format<A: HalApi>(
&self,
surface_id: id::SurfaceId,
adapter_id: id::AdapterId,
) -> Result<TextureFormat, instance::GetSurfacePreferredFormatError> {
profiling::scope!("surface_get_preferred_format");
let hub = A::hub(self);
let mut token = Token::root();
let (surface_guard, mut token) = self.surfaces.read(&mut token);
let (adapter_guard, mut _token) = hub.adapters.read(&mut token);
let adapter = adapter_guard
.get(adapter_id)
.map_err(|_| instance::GetSurfacePreferredFormatError::InvalidAdapter)?;
let surface = surface_guard
.get(surface_id)
.map_err(|_| instance::GetSurfacePreferredFormatError::InvalidSurface)?;
surface.get_preferred_format(adapter)
}
pub fn device_features<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<wgt::Features, InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
Ok(device.features)
}
pub fn device_limits<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<wgt::Limits, InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
Ok(device.limits.clone())
}
pub fn device_downlevel_properties<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<wgt::DownlevelCapabilities, InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
Ok(device.downlevel.clone())
}
pub fn device_create_buffer<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::BufferDescriptor,
id_in: Input<G, id::BufferId>,
) -> (id::BufferId, Option<resource::CreateBufferError>) {
profiling::scope!("create_buffer", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.buffers.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut desc = desc.clone();
let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false);
if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
desc.usage |= wgt::BufferUsages::COPY_DST;
}
trace
.lock()
.add(trace::Action::CreateBuffer(fid.id(), desc));
}
let mut buffer = match device.create_buffer(device_id, desc, false) {
Ok(buffer) => buffer,
Err(e) => break e,
};
let ref_count = buffer.life_guard.add_ref();
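            // Decide the buffer's initial usage state: empty when not mapped at
            // creation, mapped for writing when MAP_WRITE is available, or staged
            // through a temporary upload buffer otherwise.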
let buffer_use = if !desc.mapped_at_creation {
hal::BufferUses::empty()
} else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
                // The buffer is mappable, so just map it at creation.
let map_size = buffer.size;
let ptr = match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) {
Ok(ptr) => ptr,
Err(e) => {
let raw = buffer.raw.unwrap();
device
.lock_life(&mut token)
.schedule_resource_destruction(queue::TempResource::Buffer(raw), !0);
break e.into();
}
};
buffer.map_state = resource::BufferMapState::Active {
ptr,
range: 0..map_size,
host: HostMap::Write,
};
hal::BufferUses::MAP_WRITE
} else {
                // The buffer needs a staging area for its initial contents only.
let stage_desc = wgt::BufferDescriptor {
label: Some(Cow::Borrowed(
"(wgpu internal) initializing unmappable buffer",
)),
size: desc.size,
usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC,
mapped_at_creation: false,
};
let mut stage = match device.create_buffer(device_id, &stage_desc, true) {
Ok(stage) => stage,
Err(e) => {
let raw = buffer.raw.unwrap();
device
.lock_life(&mut token)
.schedule_resource_destruction(queue::TempResource::Buffer(raw), !0);
break e;
}
};
let stage_buffer = stage.raw.unwrap();
let mapping = match unsafe { device.raw.map_buffer(&stage_buffer, 0..stage.size) } {
Ok(mapping) => mapping,
Err(e) => {
let raw = buffer.raw.unwrap();
let mut life_lock = device.lock_life(&mut token);
life_lock
.schedule_resource_destruction(queue::TempResource::Buffer(raw), !0);
life_lock.schedule_resource_destruction(
queue::TempResource::Buffer(stage_buffer),
!0,
);
break DeviceError::from(e).into();
}
};
assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
// Zero initialize memory and then mark both staging and buffer as initialized
// (it's guaranteed that this is the case by the time the buffer is usable)
unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) };
buffer.initialization_status.drain(0..buffer.size);
stage.initialization_status.drain(0..buffer.size);
buffer.map_state = resource::BufferMapState::Init {
ptr: mapping.ptr,
needs_flush: !mapping.is_coherent,
stage_buffer,
};
hal::BufferUses::COPY_DST
};
let id = fid.assign(buffer, &mut token);
log::info!("Created buffer {:?} with {:?}", id, desc);
device
.trackers
.lock()
.buffers
.insert_single(id, ref_count, buffer_use);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
/// Assign `id_in` an error with the given `label`.
///
/// Ensure that future attempts to use `id_in` as a buffer ID will propagate
/// the error, following the WebGPU ["contagious invalidity"] style.
///
/// Firefox uses this function to comply strictly with the WebGPU spec,
    /// which requires [`GPUBufferDescriptor`] validation to be generated on the
    /// Device timeline and to leave the newly created [`GPUBuffer`] invalid.
///
/// Ideally, we would simply let [`device_create_buffer`] take care of all
/// of this, but some errors must be detected before we can even construct a
/// [`wgpu_types::BufferDescriptor`] to give it. For example, the WebGPU API
/// allows a `GPUBufferDescriptor`'s [`usage`] property to be any WebIDL
/// `unsigned long` value, but we can't construct a
/// [`wgpu_types::BufferUsages`] value from values with unassigned bits
/// set. This means we must validate `usage` before we can call
/// `device_create_buffer`.
///
/// When that validation fails, we must arrange for the buffer id to be
/// considered invalid. This method provides the means to do so.
///
/// ["contagious invalidity"]: https://www.w3.org/TR/webgpu/#invalidity
/// [`GPUBufferDescriptor`]: https://www.w3.org/TR/webgpu/#dictdef-gpubufferdescriptor
/// [`GPUBuffer`]: https://www.w3.org/TR/webgpu/#gpubuffer
/// [`wgpu_types::BufferDescriptor`]: wgt::BufferDescriptor
/// [`device_create_buffer`]: Global::device_create_buffer
    /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gpubufferdescriptor-usage
/// [`wgpu_types::BufferUsages`]: wgt::BufferUsages
pub fn create_buffer_error<A: HalApi>(&self, id_in: Input<G, id::BufferId>, label: Label) {
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.buffers.prepare(id_in);
let (_, mut token) = hub.devices.read(&mut token);
fid.assign_error(label.borrow_or_default(), &mut token);
}
#[cfg(feature = "replay")]
pub fn device_wait_for_buffer<A: HalApi>(
&self,
device_id: id::DeviceId,
buffer_id: id::BufferId,
) -> Result<(), WaitIdleError> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let last_submission = {
let (buffer_guard, _) = hub.buffers.write(&mut token);
match buffer_guard.get(buffer_id) {
Ok(buffer) => buffer.life_guard.life_count(),
Err(_) => return Ok(()),
}
};
device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?
.wait_for_submit(last_submission, &mut token)
}
#[doc(hidden)]
pub fn device_set_buffer_sub_data<A: HalApi>(
&self,
device_id: id::DeviceId,
buffer_id: id::BufferId,
offset: BufferAddress,
data: &[u8],
) -> Result<(), resource::BufferAccessError> {
profiling::scope!("set_buffer_sub_data", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let device = device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
        //TODO: assert that the buffer isn't currently in use by the GPU.
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(trace::Action::WriteBuffer {
id: buffer_id,
data: data_path,
range: offset..offset + data.len() as BufferAddress,
queued: false,
});
}
let raw_buf = buffer.raw.as_ref().unwrap();
unsafe {
let mapping = device
.raw
.map_buffer(raw_buf, offset..offset + data.len() as u64)
.map_err(DeviceError::from)?;
ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len());
if !mapping.is_coherent {
device
.raw
.flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64));
}
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
}
Ok(())
}
#[doc(hidden)]
pub fn device_get_buffer_sub_data<A: HalApi>(
&self,
device_id: id::DeviceId,
buffer_id: id::BufferId,
offset: BufferAddress,
data: &mut [u8],
) -> Result<(), resource::BufferAccessError> {
profiling::scope!("get_buffer_sub_data", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let device = device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?;
        //TODO: assert that the buffer isn't currently in use by the GPU.
let raw_buf = buffer.raw.as_ref().unwrap();
unsafe {
let mapping = device
.raw
.map_buffer(raw_buf, offset..offset + data.len() as u64)
.map_err(DeviceError::from)?;
if !mapping.is_coherent {
device.raw.invalidate_mapped_ranges(
raw_buf,
iter::once(offset..offset + data.len() as u64),
);
}
ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len());
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
}
Ok(())
}
pub fn buffer_label<A: HalApi>(&self, id: id::BufferId) -> String {
A::hub(self).buffers.label_for_resource(id)
}
pub fn buffer_destroy<A: HalApi>(
&self,
buffer_id: id::BufferId,
) -> Result<(), resource::DestroyError> {
profiling::scope!("destroy", "Buffer");
let hub = A::hub(self);
let mut token = Token::root();
//TODO: lock pending writes separately, keep the device read-only
let (mut device_guard, mut token) = hub.devices.write(&mut token);
log::info!("Buffer {:?} is destroyed", buffer_id);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeBuffer(buffer_id));
}
let raw = buffer
.raw
.take()
.ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Buffer(raw);
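        // Keep the raw buffer alive while pending queue writes still target it;
        // otherwise schedule its destruction after the last submission completes.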
if device.pending_writes.dst_buffers.contains(&buffer_id) {
device.pending_writes.temp_resources.push(temp);
} else {
let last_submit_index = buffer.life_guard.life_count();
drop(buffer_guard);
device
.lock_life(&mut token)
.schedule_resource_destruction(temp, last_submit_index);
}
Ok(())
}
pub fn buffer_drop<A: HalApi>(&self, buffer_id: id::BufferId, wait: bool) {
profiling::scope!("drop", "Buffer");
log::debug!("buffer {:?} is dropped", buffer_id);
let hub = A::hub(self);
let mut token = Token::root();
let (ref_count, last_submit_index, device_id) = {
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
match buffer_guard.get_mut(buffer_id) {
Ok(buffer) => {
let ref_count = buffer.life_guard.ref_count.take().unwrap();
let last_submit_index = buffer.life_guard.life_count();
(ref_count, last_submit_index, buffer.device_id.value)
}
Err(InvalidId) => {
hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
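        // Pending queue writes keep the buffer in the "future suspected" list;
        // otherwise it is suspected for cleanup immediately.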
{
let mut life_lock = device.lock_life(&mut token);
if device.pending_writes.dst_buffers.contains(&buffer_id) {
life_lock.future_suspected_buffers.push(Stored {
value: id::Valid(buffer_id),
ref_count,
});
} else {
drop(ref_count);
life_lock
.suspected_resources
.buffers
.push(id::Valid(buffer_id));
}
}
if wait {
match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (),
Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e),
}
}
}
pub fn device_create_texture<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::TextureDescriptor,
id_in: Input<G, id::TextureId>,
) -> (id::TextureId, Option<resource::CreateTextureError>) {
profiling::scope!("create_texture", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.textures.prepare(id_in);
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateTexture(fid.id(), desc.clone()));
}
let adapter = &adapter_guard[device.adapter_id.value];
let texture = match device.create_texture(device_id, adapter, desc) {
Ok(texture) => texture,
Err(error) => break error,
};
let ref_count = texture.life_guard.add_ref();
let id = fid.assign(texture, &mut token);
log::info!("Created texture {:?} with {:?}", id, desc);
device.trackers.lock().textures.insert_single(
id.0,
ref_count,
hal::TextureUses::UNINITIALIZED,
);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
/// # Safety
///
/// - `hal_texture` must be created from `device_id` corresponding raw handle.
/// - `hal_texture` must be created respecting `desc`
/// - `hal_texture` must be initialized
pub unsafe fn create_texture_from_hal<A: HalApi>(
&self,
hal_texture: A::Texture,
device_id: id::DeviceId,
desc: &resource::TextureDescriptor,
id_in: Input<G, id::TextureId>,
) -> (id::TextureId, Option<resource::CreateTextureError>) {
profiling::scope!("create_texture", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.textures.prepare(id_in);
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
// NB: Any change done through the raw texture handle will not be recorded in the replay
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateTexture(fid.id(), desc.clone()));
}
let adapter = &adapter_guard[device.adapter_id.value];
let format_features = match device
.describe_format_features(adapter, desc.format)
.map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))
{
Ok(features) => features,
Err(error) => break error,
};
let mut texture = device.create_texture_from_hal(
hal_texture,
conv::map_texture_usage(desc.usage, desc.format.into()),
device_id,
desc,
format_features,
resource::TextureClearMode::None,
);
if desc.usage.contains(wgt::TextureUsages::COPY_DST) {
texture.hal_usage |= hal::TextureUses::COPY_DST;
}
texture.initialization_status = TextureInitTracker::new(desc.mip_level_count, 0);
let ref_count = texture.life_guard.add_ref();
let id = fid.assign(texture, &mut token);
log::info!("Created texture {:?} with {:?}", id, desc);
device.trackers.lock().textures.insert_single(
id.0,
ref_count,
hal::TextureUses::UNINITIALIZED,
);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn texture_label<A: HalApi>(&self, id: id::TextureId) -> String {
A::hub(self).textures.label_for_resource(id)
}
pub fn texture_destroy<A: HalApi>(
&self,
texture_id: id::TextureId,
) -> Result<(), resource::DestroyError> {
profiling::scope!("destroy", "Texture");
let hub = A::hub(self);
let mut token = Token::root();
//TODO: lock pending writes separately, keep the device read-only
let (mut device_guard, mut token) = hub.devices.write(&mut token);
log::info!("Buffer {:?} is destroyed", texture_id);
let (mut texture_guard, _) = hub.textures.write(&mut token);
let texture = texture_guard
.get_mut(texture_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let device = &mut device_guard[texture.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeTexture(texture_id));
}
let last_submit_index = texture.life_guard.life_count();
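        // Take ownership of any clear views so they are destroyed along with the
        // texture.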
let clear_views =
match std::mem::replace(&mut texture.clear_mode, resource::TextureClearMode::None) {
resource::TextureClearMode::BufferCopy => SmallVec::new(),
resource::TextureClearMode::RenderPass { clear_views, .. } => clear_views,
resource::TextureClearMode::None => SmallVec::new(),
};
match texture.inner {
resource::TextureInner::Native { ref mut raw } => {
let raw = raw.take().ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Texture(raw, clear_views);
if device.pending_writes.dst_textures.contains(&texture_id) {
device.pending_writes.temp_resources.push(temp);
} else {
drop(texture_guard);
device
.lock_life(&mut token)
.schedule_resource_destruction(temp, last_submit_index);
}
}
resource::TextureInner::Surface { .. } => {
for clear_view in clear_views {
unsafe {
device.raw.destroy_texture_view(clear_view);
}
}
// TODO?
}
}
Ok(())
}
pub fn texture_drop<A: HalApi>(&self, texture_id: id::TextureId, wait: bool) {
profiling::scope!("drop", "Texture");
log::debug!("texture {:?} is dropped", texture_id);
let hub = A::hub(self);
let mut token = Token::root();
let (ref_count, last_submit_index, device_id) = {
let (mut texture_guard, _) = hub.textures.write(&mut token);
match texture_guard.get_mut(texture_id) {
Ok(texture) => {
let ref_count = texture.life_guard.ref_count.take().unwrap();
let last_submit_index = texture.life_guard.life_count();
(ref_count, last_submit_index, texture.device_id.value)
}
Err(InvalidId) => {
hub.textures
.unregister_locked(texture_id, &mut *texture_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
{
let mut life_lock = device.lock_life(&mut token);
if device.pending_writes.dst_textures.contains(&texture_id) {
life_lock.future_suspected_textures.push(Stored {
value: id::Valid(texture_id),
ref_count,
});
} else {
drop(ref_count);
life_lock
.suspected_resources
.textures
.push(id::Valid(texture_id));
}
}
if wait {
match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (),
Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e),
}
}
}
pub fn texture_create_view<A: HalApi>(
&self,
texture_id: id::TextureId,
desc: &resource::TextureViewDescriptor,
id_in: Input<G, id::TextureViewId>,
) -> (id::TextureViewId, Option<resource::CreateTextureViewError>) {
profiling::scope!("create_view", "Texture");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.texture_views.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token);
let error = loop {
let texture = match texture_guard.get(texture_id) {
Ok(texture) => texture,
Err(_) => break resource::CreateTextureViewError::InvalidTexture,
};
let device = &device_guard[texture.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateTextureView {
id: fid.id(),
parent_id: texture_id,
desc: desc.clone(),
});
}
let view = match device.create_texture_view(texture, texture_id, desc) {
Ok(view) => view,
Err(e) => break e,
};
let ref_count = view.life_guard.add_ref();
let id = fid.assign(view, &mut token);
device.trackers.lock().views.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn texture_view_label<A: HalApi>(&self, id: id::TextureViewId) -> String {
A::hub(self).texture_views.label_for_resource(id)
}
pub fn texture_view_drop<A: HalApi>(
&self,
texture_view_id: id::TextureViewId,
wait: bool,
) -> Result<(), resource::TextureViewDestroyError> {
profiling::scope!("drop", "TextureView");
log::debug!("texture view {:?} is dropped", texture_view_id);
let hub = A::hub(self);
let mut token = Token::root();
let (last_submit_index, device_id) = {
let (mut texture_view_guard, _) = hub.texture_views.write(&mut token);
match texture_view_guard.get_mut(texture_view_id) {
Ok(view) => {
let _ref_count = view.life_guard.ref_count.take();
let last_submit_index = view.life_guard.life_count();
(last_submit_index, view.device_id.value)
}
Err(InvalidId) => {
hub.texture_views
.unregister_locked(texture_view_id, &mut *texture_view_guard);
return Ok(());
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
device
.lock_life(&mut token)
.suspected_resources
.texture_views
.push(id::Valid(texture_view_id));
if wait {
match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (),
Err(e) => log::error!(
"Failed to wait for texture view {:?}: {:?}",
texture_view_id,
e
),
}
}
Ok(())
}
pub fn device_create_sampler<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::SamplerDescriptor,
id_in: Input<G, id::SamplerId>,
) -> (id::SamplerId, Option<resource::CreateSamplerError>) {
profiling::scope!("create_sampler", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.samplers.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateSampler(fid.id(), desc.clone()));
}
let sampler = match device.create_sampler(device_id, desc) {
Ok(sampler) => sampler,
Err(e) => break e,
};
let ref_count = sampler.life_guard.add_ref();
let id = fid.assign(sampler, &mut token);
device.trackers.lock().samplers.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn sampler_label<A: HalApi>(&self, id: id::SamplerId) -> String {
A::hub(self).samplers.label_for_resource(id)
}
pub fn sampler_drop<A: HalApi>(&self, sampler_id: id::SamplerId) {
profiling::scope!("drop", "Sampler");
log::debug!("sampler {:?} is dropped", sampler_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut sampler_guard, _) = hub.samplers.write(&mut token);
match sampler_guard.get_mut(sampler_id) {
Ok(sampler) => {
sampler.life_guard.ref_count.take();
sampler.device_id.value
}
Err(InvalidId) => {
hub.samplers
.unregister_locked(sampler_id, &mut *sampler_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.samplers
.push(id::Valid(sampler_id));
}
pub fn device_create_bind_group_layout<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &binding_model::BindGroupLayoutDescriptor,
id_in: Input<G, id::BindGroupLayoutId>,
) -> (
id::BindGroupLayoutId,
Option<binding_model::CreateBindGroupLayoutError>,
) {
profiling::scope!("create_bind_group_layout", "Device");
let mut token = Token::root();
let hub = A::hub(self);
let fid = hub.bind_group_layouts.prepare(id_in);
let error = 'outer: loop {
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone()));
}
let mut entry_map = FastHashMap::default();
for entry in desc.entries.iter() {
if entry_map.insert(entry.binding, *entry).is_some() {
break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding(
entry.binding,
);
}
}
// If there is an equivalent BGL, just bump the refcount and return it.
// This is only applicable for identity filters that are generating new IDs,
// so their inputs are `PhantomData` of size 0.
if mem::size_of::<Input<G, id::BindGroupLayoutId>>() == 0 {
let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
if let Some(id) =
Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard)
{
return (id, None);
}
}
let layout = match device.create_bind_group_layout(
device_id,
desc.label.borrow_option(),
entry_map,
) {
Ok(layout) => layout,
Err(e) => break e,
};
let id = fid.assign(layout, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn bind_group_layout_label<A: HalApi>(&self, id: id::BindGroupLayoutId) -> String {
A::hub(self).bind_group_layouts.label_for_resource(id)
}
pub fn bind_group_layout_drop<A: HalApi>(&self, bind_group_layout_id: id::BindGroupLayoutId) {
profiling::scope!("drop", "BindGroupLayout");
log::debug!("bind group layout {:?} is dropped", bind_group_layout_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token);
match bind_group_layout_guard.get_mut(bind_group_layout_id) {
Ok(layout) => layout.device_id.value,
Err(InvalidId) => {
hub.bind_group_layouts
.unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.bind_group_layouts
.push(id::Valid(bind_group_layout_id));
}
pub fn device_create_pipeline_layout<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &binding_model::PipelineLayoutDescriptor,
id_in: Input<G, id::PipelineLayoutId>,
) -> (
id::PipelineLayoutId,
Option<binding_model::CreatePipelineLayoutError>,
) {
profiling::scope!("create_pipeline_layout", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.pipeline_layouts.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone()));
}
let layout = {
let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
match device.create_pipeline_layout(device_id, desc, &*bgl_guard) {
Ok(layout) => layout,
Err(e) => break e,
}
};
let id = fid.assign(layout, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn pipeline_layout_label<A: HalApi>(&self, id: id::PipelineLayoutId) -> String {
A::hub(self).pipeline_layouts.label_for_resource(id)
}
pub fn pipeline_layout_drop<A: HalApi>(&self, pipeline_layout_id: id::PipelineLayoutId) {
profiling::scope!("drop", "PipelineLayout");
log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_id, ref_count) = {
let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token);
match pipeline_layout_guard.get_mut(pipeline_layout_id) {
Ok(layout) => (
layout.device_id.value,
layout.life_guard.ref_count.take().unwrap(),
),
Err(InvalidId) => {
hub.pipeline_layouts
.unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.pipeline_layouts
.push(Stored {
value: id::Valid(pipeline_layout_id),
ref_count,
});
}
pub fn device_create_bind_group<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &binding_model::BindGroupDescriptor,
id_in: Input<G, id::BindGroupId>,
) -> (id::BindGroupId, Option<binding_model::CreateBindGroupError>) {
profiling::scope!("create_bind_group", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.bind_groups.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateBindGroup(fid.id(), desc.clone()));
}
let bind_group_layout = match bind_group_layout_guard.get(desc.layout) {
Ok(layout) => layout,
Err(_) => break binding_model::CreateBindGroupError::InvalidLayout,
};
let bind_group =
match device.create_bind_group(device_id, bind_group_layout, desc, hub, &mut token)
{
Ok(bind_group) => bind_group,
Err(e) => break e,
};
let ref_count = bind_group.life_guard.add_ref();
let id = fid.assign(bind_group, &mut token);
log::debug!("Bind group {:?}", id,);
device
.trackers
.lock()
.bind_groups
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn bind_group_label<A: HalApi>(&self, id: id::BindGroupId) -> String {
A::hub(self).bind_groups.label_for_resource(id)
}
pub fn bind_group_drop<A: HalApi>(&self, bind_group_id: id::BindGroupId) {
profiling::scope!("drop", "BindGroup");
log::debug!("bind group {:?} is dropped", bind_group_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token);
match bind_group_guard.get_mut(bind_group_id) {
Ok(bind_group) => {
bind_group.life_guard.ref_count.take();
bind_group.device_id.value
}
Err(InvalidId) => {
hub.bind_groups
.unregister_locked(bind_group_id, &mut *bind_group_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.bind_groups
.push(id::Valid(bind_group_id));
}
pub fn device_create_shader_module<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor,
source: pipeline::ShaderModuleSource,
id_in: Input<G, id::ShaderModuleId>,
) -> (
id::ShaderModuleId,
Option<pipeline::CreateShaderModuleError>,
) {
profiling::scope!("create_shader_module", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.shader_modules.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data = match source {
pipeline::ShaderModuleSource::Wgsl(ref code) => {
trace.make_binary("wgsl", code.as_bytes())
}
pipeline::ShaderModuleSource::Naga(ref module) => {
let string =
ron::ser::to_string_pretty(module, ron::ser::PrettyConfig::default())
.unwrap();
trace.make_binary("ron", string.as_bytes())
}
};
trace.add(trace::Action::CreateShaderModule {
id: fid.id(),
desc: desc.clone(),
data,
});
};
let shader = match device.create_shader_module(device_id, desc, source) {
Ok(shader) => shader,
Err(e) => break e,
};
let id = fid.assign(shader, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
    #[allow(unused_unsafe)] // The unsafety of the internal calls is unrelated to the unsafety of this function.
/// # Safety
///
/// This function passes SPIR-V binary to the backend as-is and can potentially result in a
/// driver crash.
pub unsafe fn device_create_shader_module_spirv<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor,
source: Cow<[u32]>,
id_in: Input<G, id::ShaderModuleId>,
) -> (
id::ShaderModuleId,
Option<pipeline::CreateShaderModuleError>,
) {
profiling::scope!("create_shader_module", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.shader_modules.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data = trace.make_binary("spv", unsafe {
std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4)
});
trace.add(trace::Action::CreateShaderModule {
id: fid.id(),
desc: desc.clone(),
data,
});
};
let shader = match device.create_shader_module_spirv(device_id, desc, &source) {
Ok(shader) => shader,
Err(e) => break e,
};
let id = fid.assign(shader, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn shader_module_label<A: HalApi>(&self, id: id::ShaderModuleId) -> String {
A::hub(self).shader_modules.label_for_resource(id)
}
pub fn shader_module_drop<A: HalApi>(&self, shader_module_id: id::ShaderModuleId) {
profiling::scope!("drop", "ShaderModule");
log::debug!("shader module {:?} is dropped", shader_module_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token);
if let Some(module) = module {
let device = &device_guard[module.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::DestroyShaderModule(shader_module_id));
}
unsafe {
device.raw.destroy_shader_module(module.raw);
}
}
}
pub fn device_create_command_encoder<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &wgt::CommandEncoderDescriptor<Label>,
id_in: Input<G, id::CommandEncoderId>,
) -> (id::CommandEncoderId, Option<DeviceError>) {
profiling::scope!("create_command_encoder", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.command_buffers.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid,
};
let dev_stored = Stored {
value: id::Valid(device_id),
ref_count: device.life_guard.add_ref(),
};
let encoder = match device
.command_allocator
.lock()
.acquire_encoder(&device.raw, &device.queue)
{
Ok(raw) => raw,
Err(_) => break DeviceError::OutOfMemory,
};
let command_buffer = command::CommandBuffer::new(
encoder,
dev_stored,
device.limits.clone(),
device.downlevel.clone(),
device.features,
#[cfg(feature = "trace")]
device.trace.is_some(),
&desc.label,
);
let id = fid.assign(command_buffer, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn command_buffer_label<A: HalApi>(&self, id: id::CommandBufferId) -> String {
A::hub(self).command_buffers.label_for_resource(id)
}
pub fn command_encoder_drop<A: HalApi>(&self, command_encoder_id: id::CommandEncoderId) {
profiling::scope!("drop", "CommandEncoder");
log::debug!("command encoder {:?} is dropped", command_encoder_id);
let hub = A::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (cmdbuf, _) = hub
.command_buffers
.unregister(command_encoder_id, &mut token);
if let Some(cmdbuf) = cmdbuf {
let device = &mut device_guard[cmdbuf.device_id.value];
device.untrack::<G>(hub, &cmdbuf.trackers, &mut token);
}
}
pub fn command_buffer_drop<A: HalApi>(&self, command_buffer_id: id::CommandBufferId) {
profiling::scope!("drop", "CommandBuffer");
log::debug!("command buffer {:?} is dropped", command_buffer_id);
self.command_encoder_drop::<A>(command_buffer_id)
}
pub fn device_create_render_bundle_encoder(
&self,
device_id: id::DeviceId,
desc: &command::RenderBundleEncoderDescriptor,
) -> (
id::RenderBundleEncoderId,
Option<command::CreateRenderBundleError>,
) {
profiling::scope!("create_render_bundle_encoder", "Device");
let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) {
Ok(encoder) => (encoder, None),
Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)),
};
(Box::into_raw(Box::new(encoder)), error)
}
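// `device_create_render_bundle_encoder` above hands ownership to the caller as
// a raw pointer (`Box::into_raw`); the pointer is expected to be turned back
// into a box and consumed, e.g. when the encoder is finished (below, which
// takes the encoder by value) or dropped.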
pub fn render_bundle_encoder_finish<A: HalApi>(
&self,
bundle_encoder: command::RenderBundleEncoder,
desc: &command::RenderBundleDescriptor,
id_in: Input<G, id::RenderBundleId>,
) -> (id::RenderBundleId, Option<command::RenderBundleError>) {
profiling::scope!("finish", "RenderBundleEncoder");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.render_bundles.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(bundle_encoder.parent()) {
Ok(device) => device,
Err(_) => break command::RenderBundleError::INVALID_DEVICE,
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateRenderBundle {
id: fid.id(),
desc: trace::new_render_bundle_encoder_descriptor(
desc.label.clone(),
&bundle_encoder.context,
bundle_encoder.is_depth_read_only,
bundle_encoder.is_stencil_read_only,
),
base: bundle_encoder.to_base_pass(),
});
}
let render_bundle = match bundle_encoder.finish(desc, device, hub, &mut token) {
Ok(bundle) => bundle,
Err(e) => break e,
};
log::debug!("Render bundle");
let ref_count = render_bundle.life_guard.add_ref();
let id = fid.assign(render_bundle, &mut token);
device.trackers.lock().bundles.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn render_bundle_label<A: HalApi>(&self, id: id::RenderBundleId) -> String {
A::hub(self).render_bundles.label_for_resource(id)
}
pub fn render_bundle_drop<A: HalApi>(&self, render_bundle_id: id::RenderBundleId) {
profiling::scope!("drop", "RenderBundle");
log::debug!("render bundle {:?} is dropped", render_bundle_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let device_id = {
let (mut bundle_guard, _) = hub.render_bundles.write(&mut token);
match bundle_guard.get_mut(render_bundle_id) {
Ok(bundle) => {
bundle.life_guard.ref_count.take();
bundle.device_id.value
}
Err(InvalidId) => {
hub.render_bundles
.unregister_locked(render_bundle_id, &mut *bundle_guard);
return;
}
}
};
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.render_bundles
.push(id::Valid(render_bundle_id));
}
pub fn device_create_query_set<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::QuerySetDescriptor,
id_in: Input<G, id::QuerySetId>,
) -> (id::QuerySetId, Option<resource::CreateQuerySetError>) {
profiling::scope!("create_query_set", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.query_sets.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateQuerySet {
id: fid.id(),
desc: desc.clone(),
});
}
let query_set = match device.create_query_set(device_id, desc) {
Ok(query_set) => query_set,
Err(err) => break err,
};
let ref_count = query_set.life_guard.add_ref();
let id = fid.assign(query_set, &mut token);
device
.trackers
.lock()
.query_sets
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error("", &mut token);
(id, Some(error))
}
pub fn query_set_drop<A: HalApi>(&self, query_set_id: id::QuerySetId) {
profiling::scope!("drop", "QuerySet");
log::debug!("query set {:?} is dropped", query_set_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut query_set_guard, _) = hub.query_sets.write(&mut token);
let query_set = query_set_guard.get_mut(query_set_id).unwrap();
query_set.life_guard.ref_count.take();
query_set.device_id.value
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::DestroyQuerySet(query_set_id));
}
device
.lock_life(&mut token)
.suspected_resources
.query_sets
.push(id::Valid(query_set_id));
}
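// `implicit_pipeline_ids` covers pipelines created without an explicit layout:
// in that case the pipeline layout and bind group layouts are derived
// automatically, and these pre-registered IDs are what the derived objects get
// assigned to.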
pub fn device_create_render_pipeline<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::RenderPipelineDescriptor,
id_in: Input<G, id::RenderPipelineId>,
implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
) -> (
id::RenderPipelineId,
Option<pipeline::CreateRenderPipelineError>,
) {
profiling::scope!("create_render_pipeline", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.render_pipelines.prepare(id_in);
let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
let adapter = &adapter_guard[device.adapter_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateRenderPipeline {
id: fid.id(),
desc: desc.clone(),
implicit_context: implicit_context.clone(),
});
}
let pipeline = match device.create_render_pipeline(
device_id,
adapter,
desc,
implicit_context,
hub,
&mut token,
) {
Ok(pair) => pair,
Err(e) => break e,
};
let ref_count = pipeline.life_guard.add_ref();
let id = fid.assign(pipeline, &mut token);
log::info!("Created render pipeline {:?} with {:?}", id, desc);
device
.trackers
.lock()
.render_pipelines
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
/// Get an ID of one of the bind group layouts. The ID adds a refcount,
/// which needs to be released by calling `bind_group_layout_drop`.
pub fn render_pipeline_get_bind_group_layout<A: HalApi>(
&self,
pipeline_id: id::RenderPipelineId,
index: u32,
id_in: Input<G, id::BindGroupLayoutId>,
) -> (
id::BindGroupLayoutId,
Option<binding_model::GetBindGroupLayoutError>,
) {
let hub = A::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let error = loop {
let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
let (_, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, _) = hub.render_pipelines.read(&mut token);
let pipeline = match pipeline_guard.get(pipeline_id) {
Ok(pipeline) => pipeline,
Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
};
let id = match pipeline_layout_guard[pipeline.layout_id.value]
.bind_group_layout_ids
.get(index as usize)
{
Some(id) => id,
None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
};
bgl_guard[*id].multi_ref_count.inc();
return (id.0, None);
};
let id = hub
.bind_group_layouts
.prepare(id_in)
.assign_error("<derived>", &mut token);
(id, Some(error))
}
pub fn render_pipeline_label<A: HalApi>(&self, id: id::RenderPipelineId) -> String {
A::hub(self).render_pipelines.label_for_resource(id)
}
pub fn render_pipeline_drop<A: HalApi>(&self, render_pipeline_id: id::RenderPipelineId) {
profiling::scope!("drop", "RenderPipeline");
log::debug!("render pipeline {:?} is dropped", render_pipeline_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (device_id, layout_id) = {
let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token);
match pipeline_guard.get_mut(render_pipeline_id) {
Ok(pipeline) => {
pipeline.life_guard.ref_count.take();
(pipeline.device_id.value, pipeline.layout_id.clone())
}
Err(InvalidId) => {
hub.render_pipelines
.unregister_locked(render_pipeline_id, &mut *pipeline_guard);
return;
}
}
};
let mut life_lock = device_guard[device_id].lock_life(&mut token);
life_lock
.suspected_resources
.render_pipelines
.push(id::Valid(render_pipeline_id));
life_lock
.suspected_resources
.pipeline_layouts
.push(layout_id);
}
pub fn device_create_compute_pipeline<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::ComputePipelineDescriptor,
id_in: Input<G, id::ComputePipelineId>,
implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
) -> (
id::ComputePipelineId,
Option<pipeline::CreateComputePipelineError>,
) {
profiling::scope!("create_compute_pipeline", "Device");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.compute_pipelines.prepare(id_in);
let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateComputePipeline {
id: fid.id(),
desc: desc.clone(),
implicit_context: implicit_context.clone(),
});
}
let pipeline = match device.create_compute_pipeline(
device_id,
desc,
implicit_context,
hub,
&mut token,
) {
Ok(pair) => pair,
Err(e) => break e,
};
let ref_count = pipeline.life_guard.add_ref();
let id = fid.assign(pipeline, &mut token);
log::info!("Created compute pipeline {:?} with {:?}", id, desc);
device
.trackers
.lock()
.compute_pipelines
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
/// Get an ID of one of the bind group layouts. The ID adds a refcount,
/// which needs to be released by calling `bind_group_layout_drop`.
pub fn compute_pipeline_get_bind_group_layout<A: HalApi>(
&self,
pipeline_id: id::ComputePipelineId,
index: u32,
id_in: Input<G, id::BindGroupLayoutId>,
) -> (
id::BindGroupLayoutId,
Option<binding_model::GetBindGroupLayoutError>,
) {
let hub = A::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let error = loop {
let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
let (_, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
let pipeline = match pipeline_guard.get(pipeline_id) {
Ok(pipeline) => pipeline,
Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
};
let id = match pipeline_layout_guard[pipeline.layout_id.value]
.bind_group_layout_ids
.get(index as usize)
{
Some(id) => id,
None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
};
bgl_guard[*id].multi_ref_count.inc();
return (id.0, None);
};
let id = hub
.bind_group_layouts
.prepare(id_in)
.assign_error("<derived>", &mut token);
(id, Some(error))
}
pub fn compute_pipeline_label<A: HalApi>(&self, id: id::ComputePipelineId) -> String {
A::hub(self).compute_pipelines.label_for_resource(id)
}
pub fn compute_pipeline_drop<A: HalApi>(&self, compute_pipeline_id: id::ComputePipelineId) {
profiling::scope!("drop", "ComputePipeline");
log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (device_id, layout_id) = {
let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token);
match pipeline_guard.get_mut(compute_pipeline_id) {
Ok(pipeline) => {
pipeline.life_guard.ref_count.take();
(pipeline.device_id.value, pipeline.layout_id.clone())
}
Err(InvalidId) => {
hub.compute_pipelines
.unregister_locked(compute_pipeline_id, &mut *pipeline_guard);
return;
}
}
};
let mut life_lock = device_guard[device_id].lock_life(&mut token);
life_lock
.suspected_resources
.compute_pipelines
.push(id::Valid(compute_pipeline_id));
life_lock
.suspected_resources
.pipeline_layouts
.push(layout_id);
}
pub fn surface_configure<A: HalApi>(
&self,
surface_id: id::SurfaceId,
device_id: id::DeviceId,
config: &wgt::SurfaceConfiguration,
) -> Option<present::ConfigureSurfaceError> {
use hal::{Adapter as _, Surface as _};
use present::ConfigureSurfaceError as E;
profiling::scope!("surface_configure");
fn validate_surface_configuration(
config: &mut hal::SurfaceConfiguration,
caps: &hal::SurfaceCapabilities,
) -> Result<(), E> {
let width = config.extent.width;
let height = config.extent.height;
if width < caps.extents.start().width
|| width > caps.extents.end().width
|| height < caps.extents.start().height
|| height > caps.extents.end().height
{
log::warn!(
"Requested size {}x{} is outside of the supported range: {:?}",
width,
height,
caps.extents
);
}
if !caps.present_modes.contains(&config.present_mode) {
log::warn!(
"Surface does not support present mode: {:?}, falling back to FIFO",
config.present_mode,
);
config.present_mode = wgt::PresentMode::Fifo;
}
if !caps.formats.contains(&config.format) {
return Err(E::UnsupportedFormat {
requested: config.format,
available: caps.formats.clone(),
});
}
if !caps.usage.contains(config.usage) {
return Err(E::UnsupportedUsage);
}
if width == 0 || height == 0 {
return Err(E::ZeroArea);
}
Ok(())
}
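// The flow below: fetch the device and surface, query the surface capabilities
// from the adapter, clamp the frame count into the supported swap-chain range,
// validate the requested configuration against those capabilities, and only
// then hand the resulting `hal::SurfaceConfiguration` to the backend.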
log::info!("configuring surface with {:?}", config);
let hub = A::hub(self);
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, _token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::ConfigureSurface(surface_id, config.clone()));
}
let surface = match surface_guard.get_mut(surface_id) {
Ok(surface) => surface,
Err(_) => break E::InvalidSurface,
};
let caps = unsafe {
let suf = A::get_surface(surface);
let adapter = &adapter_guard[device.adapter_id.value];
match adapter.raw.adapter.surface_capabilities(&suf.raw) {
Some(caps) => caps,
None => break E::UnsupportedQueueFamily,
}
};
let num_frames = present::DESIRED_NUM_FRAMES
.max(*caps.swap_chain_sizes.start())
.min(*caps.swap_chain_sizes.end());
let mut hal_config = hal::SurfaceConfiguration {
swap_chain_size: num_frames,
present_mode: config.present_mode,
composite_alpha_mode: hal::CompositeAlphaMode::Opaque,
format: config.format,
extent: wgt::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
usage: conv::map_texture_usage(config.usage, hal::FormatAspects::COLOR),
};
if let Err(error) = validate_surface_configuration(&mut hal_config, &caps) {
break error;
}
match unsafe {
A::get_surface_mut(surface)
.raw
.configure(&device.raw, &hal_config)
} {
Ok(()) => (),
Err(error) => {
break match error {
hal::SurfaceError::Outdated | hal::SurfaceError::Lost => E::InvalidSurface,
hal::SurfaceError::Device(error) => E::Device(error.into()),
hal::SurfaceError::Other(message) => {
log::error!("surface configuration failed: {}", message);
E::InvalidSurface
}
}
}
}
if let Some(present) = surface.presentation.take() {
if present.acquired_texture.is_some() {
break E::PreviousOutputExists;
}
}
surface.presentation = Some(present::Presentation {
device_id: Stored {
value: id::Valid(device_id),
ref_count: device.life_guard.add_ref(),
},
config: config.clone(),
num_frames,
acquired_texture: None,
});
return None;
};
Some(error)
}
#[cfg(feature = "replay")]
/// Only triage suspected resource IDs. This helps us avoid ID collisions
/// upon creating new resources when replaying a trace.
pub fn device_maintain_ids<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<(), InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
device.lock_life(&mut token).triage_suspected(
hub,
&device.trackers,
#[cfg(feature = "trace")]
None,
&mut token,
);
Ok(())
}
/// Check `device_id` for freeable resources and completed buffer mappings.
///
/// Returns `queue_empty`, which is `true` when no queue submissions are still in flight.
pub fn device_poll<A: HalApi>(
&self,
device_id: id::DeviceId,
maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
) -> Result<bool, WaitIdleError> {
let (closures, queue_empty) = {
if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
if submission_index.queue_id != device_id {
return Err(WaitIdleError::WrongSubmissionIndex(
submission_index.queue_id,
device_id,
));
}
}
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?
.maintain(hub, maintain, &mut token)?
};
closures.fire();
Ok(queue_empty)
}
/// Poll all devices belonging to the backend `A`.
///
/// If `force_wait` is true, block until all buffer mappings are done.
///
/// Returns `all_queue_empty`, which is `true` when no queue submissions are still in flight.
fn poll_devices<A: HalApi>(
&self,
force_wait: bool,
closures: &mut UserClosures,
) -> Result<bool, WaitIdleError> {
profiling::scope!("poll_devices");
let hub = A::hub(self);
let mut devices_to_drop = vec![];
let mut all_queue_empty = true;
{
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
for (id, device) in device_guard.iter(A::VARIANT) {
let maintain = if force_wait {
wgt::Maintain::Wait
} else {
wgt::Maintain::Poll
};
let (cbs, queue_empty) = device.maintain(hub, maintain, &mut token)?;
all_queue_empty = all_queue_empty && queue_empty;
// If the device's own `RefCount` clone is the only one left, and
// its submission queue is empty, then it can be freed.
if queue_empty && device.ref_count.load() == 1 {
devices_to_drop.push(id);
}
closures.extend(cbs);
}
}
for device_id in devices_to_drop {
self.exit_device::<A>(device_id);
}
Ok(all_queue_empty)
}
/// Poll all devices on all backends.
///
/// This is the implementation of `wgpu::Instance::poll_all`.
///
/// Returns `all_queue_empty`, which is `true` when no queue submissions are still in flight.
pub fn poll_all_devices(&self, force_wait: bool) -> Result<bool, WaitIdleError> {
let mut closures = UserClosures::default();
let mut all_queue_empty = true;
#[cfg(vulkan)]
{
all_queue_empty = self.poll_devices::<hal::api::Vulkan>(force_wait, &mut closures)?
&& all_queue_empty;
}
#[cfg(metal)]
{
all_queue_empty =
self.poll_devices::<hal::api::Metal>(force_wait, &mut closures)? && all_queue_empty;
}
#[cfg(dx12)]
{
all_queue_empty =
self.poll_devices::<hal::api::Dx12>(force_wait, &mut closures)? && all_queue_empty;
}
#[cfg(dx11)]
{
all_queue_empty =
self.poll_devices::<hal::api::Dx11>(force_wait, &mut closures)? && all_queue_empty;
}
#[cfg(gl)]
{
all_queue_empty =
self.poll_devices::<hal::api::Gles>(force_wait, &mut closures)? && all_queue_empty;
}
closures.fire();
Ok(all_queue_empty)
}
pub fn device_label<A: HalApi>(&self, id: id::DeviceId) -> String {
A::hub(self).devices.label_for_resource(id)
}
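// start/stop capture delimit a capture region for an attached GPU debugging
// tool (e.g. RenderDoc or Metal's frame capture); they are typically no-ops
// when no such tool is attached.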
pub fn device_start_capture<A: HalApi>(&self, id: id::DeviceId) {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
if let Ok(device) = device_guard.get(id) {
unsafe { device.raw.start_capture() };
}
}
pub fn device_stop_capture<A: HalApi>(&self, id: id::DeviceId) {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
if let Ok(device) = device_guard.get(id) {
unsafe { device.raw.stop_capture() };
}
}
pub fn device_drop<A: HalApi>(&self, device_id: id::DeviceId) {
profiling::scope!("drop", "Device");
log::debug!("device {:?} is dropped", device_id);
let hub = A::hub(self);
let mut token = Token::root();
// For now, just drop the `RefCount` in `device.life_guard`, which
// stands for the user's reference to the device. We'll take care of
// cleaning up the device when we're polled, once its queue submissions
// have completed and it is no longer needed by other resources.
let (mut device_guard, _) = hub.devices.write(&mut token);
if let Ok(device) = device_guard.get_mut(device_id) {
device.life_guard.ref_count.take().unwrap();
}
}
/// Exit the unreferenced, inactive device `device_id`.
fn exit_device<A: HalApi>(&self, device_id: id::DeviceId) {
let hub = A::hub(self);
let mut token = Token::root();
let mut free_adapter_id = None;
{
let (device, mut _token) = hub.devices.unregister(device_id, &mut token);
if let Some(mut device) = device {
// The things `Device::prepare_to_die` takes care of are mostly
// unnecessary here. We know our queue is empty, so we don't
// need to wait for submissions or triage them. We know we were
// just polled, so `life_tracker.free_resources` is empty.
debug_assert!(device.lock_life(&mut _token).queue_empty());
device.pending_writes.deactivate();
// The adapter is only referenced by the device and itself.
// This isn't a robust way to destroy them; we should find a better one.
if device.adapter_id.ref_count.load() == 1 {
free_adapter_id = Some(device.adapter_id.value.0);
}
device.dispose();
}
}
// Free the adapter now that we've dropped the `Device` token.
if let Some(free_adapter_id) = free_adapter_id {
let _ = hub.adapters.unregister(free_adapter_id, &mut token);
}
}
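// Mapping is asynchronous: `buffer_map_async` only validates the request and
// moves the buffer's map state from `Idle` to `Waiting`; the map callback is
// fired later, once the device is polled and the GPU is known to be done with
// the buffer.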
pub fn buffer_map_async<A: HalApi>(
&self,
buffer_id: id::BufferId,
range: Range<BufferAddress>,
op: resource::BufferMapOperation,
) -> Result<(), resource::BufferAccessError> {
profiling::scope!("map_async", "Buffer");
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (pub_usage, internal_use) = match op.host {
HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ),
HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
};
if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(resource::BufferAccessError::UnalignedRange);
}
let (device_id, ref_count) = {
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, pub_usage)?;
buffer.map_state = match buffer.map_state {
resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
return Err(resource::BufferAccessError::AlreadyMapped);
}
resource::BufferMapState::Waiting(_) => {
op.callback.call_error();
return Ok(());
}
resource::BufferMapState::Idle => {
resource::BufferMapState::Waiting(resource::BufferPendingMapping {
range,
op,
_parent_ref_count: buffer.life_guard.add_ref(),
})
}
};
log::debug!("Buffer {:?} map state -> Waiting", buffer_id);
let device = &device_guard[buffer.device_id.value];
let ret = (buffer.device_id.value, buffer.life_guard.add_ref());
let mut trackers = device.trackers.lock();
trackers
.buffers
.set_single(&*buffer_guard, buffer_id, internal_use);
trackers.buffers.drain();
ret
};
let device = &device_guard[device_id];
device
.lock_life(&mut token)
.map(id::Valid(buffer_id), ref_count);
Ok(())
}
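// Returns a raw pointer/length pair into the currently mapped region. The
// offset must be `MAP_ALIGNMENT`-aligned, the size must be
// `COPY_BUFFER_ALIGNMENT`-aligned, and the pointer is only valid until the
// buffer is unmapped.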
pub fn buffer_get_mapped_range<A: HalApi>(
&self,
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferAddress>,
) -> Result<(*mut u8, u64), resource::BufferAccessError> {
profiling::scope!("get_mapped_range", "Buffer");
let hub = A::hub(self);
let mut token = Token::root();
let (buffer_guard, _) = hub.buffers.read(&mut token);
let buffer = buffer_guard
.get(buffer_id)
.map_err(|_| resource::BufferAccessError::Invalid)?;
let range_size = if let Some(size) = size {
size
} else if offset > buffer.size {
0
} else {
buffer.size - offset
};
if offset % wgt::MAP_ALIGNMENT != 0 {
return Err(resource::BufferAccessError::UnalignedOffset { offset });
}
if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(resource::BufferAccessError::UnalignedRangeSize { range_size });
}
match buffer.map_state {
resource::BufferMapState::Init { ptr, .. } => {
// offset (u64) cannot be negative, so only the upper bound needs validation
if offset + range_size > buffer.size {
return Err(resource::BufferAccessError::OutOfBoundsOverrun {
index: offset + range_size - 1,
max: buffer.size,
});
}
unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
}
resource::BufferMapState::Active { ptr, ref range, .. } => {
if offset < range.start {
return Err(resource::BufferAccessError::OutOfBoundsUnderrun {
index: offset,
min: range.start,
});
}
if offset + range_size > range.end {
return Err(resource::BufferAccessError::OutOfBoundsOverrun {
index: offset + range_size - 1,
max: range.end,
});
}
unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
}
resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
Err(resource::BufferAccessError::NotMapped)
}
}
}
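// Unmapping handles each map state differently: `Init` flushes the staging
// buffer and schedules a copy into the real buffer via the pending-writes
// encoder, `Waiting` aborts the pending map callback, and `Active` optionally
// traces the written range before unmapping the raw buffer.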
fn buffer_unmap_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
) -> Result<Option<BufferMapPendingClosure>, resource::BufferAccessError> {
profiling::scope!("unmap", "Buffer");
let hub = A::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value];
log::debug!("Buffer {:?} map state -> Idle", buffer_id);
match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
resource::BufferMapState::Init {
ptr,
stage_buffer,
needs_flush,
} => {
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data = trace.make_binary("bin", unsafe {
std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize)
});
trace.add(trace::Action::WriteBuffer {
id: buffer_id,
data,
range: 0..buffer.size,
queued: true,
});
}
let _ = ptr;
if needs_flush {
unsafe {
device
.raw
.flush_mapped_ranges(&stage_buffer, iter::once(0..buffer.size));
}
}
let raw_buf = buffer
.raw
.as_ref()
.ok_or(resource::BufferAccessError::Destroyed)?;
buffer.life_guard.use_at(device.active_submission_index + 1);
let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy {
src_offset: 0,
dst_offset: 0,
size,
});
let transition_src = hal::BufferBarrier {
buffer: &stage_buffer,
usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
};
let transition_dst = hal::BufferBarrier {
buffer: raw_buf,
usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
};
let encoder = device.pending_writes.activate();
unsafe {
encoder.transition_buffers(
iter::once(transition_src).chain(iter::once(transition_dst)),
);
if buffer.size > 0 {
encoder.copy_buffer_to_buffer(&stage_buffer, raw_buf, region.into_iter());
}
}
device
.pending_writes
.consume_temp(queue::TempResource::Buffer(stage_buffer));
device.pending_writes.dst_buffers.insert(buffer_id);
}
resource::BufferMapState::Idle => {
return Err(resource::BufferAccessError::NotMapped);
}
resource::BufferMapState::Waiting(pending) => {
return Ok(Some((pending.op, resource::BufferMapAsyncStatus::Aborted)));
}
resource::BufferMapState::Active { ptr, range, host } => {
if host == HostMap::Write {
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let size = range.end - range.start;
let data = trace.make_binary("bin", unsafe {
std::slice::from_raw_parts(ptr.as_ptr(), size as usize)
});
trace.add(trace::Action::WriteBuffer {
id: buffer_id,
data,
range: range.clone(),
queued: false,
});
}
let _ = (ptr, range);
}
unsafe {
device
.raw
.unmap_buffer(buffer.raw.as_ref().unwrap())
.map_err(DeviceError::from)?
};
}
}
Ok(None)
}
pub fn buffer_unmap<A: HalApi>(
&self,
buffer_id: id::BufferId,
) -> Result<(), resource::BufferAccessError> {
// Note: the unmap work lives in an inner function so that no locks are held
// when the user's callback is invoked.
let closure = self.buffer_unmap_inner::<A>(buffer_id)?;
if let Some((operation, status)) = closure {
operation.callback.call(status);
}
Ok(())
}
}
| {
self.free_encoders.push(encoder);
} |
status.ts | import { createLogger } from '@unly/utils-simple-logger';
import {
NextApiRequest,
NextApiResponse,
} from 'next';
| const logger = createLogger({
label: fileLabel,
});
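// Health/status endpoint: reports build and runtime metadata for monitoring.
// Failures are logged and captured in Sentry, and the error message is
// withheld from production responses.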
export const status = async (req: NextApiRequest, res: NextApiResponse): Promise<void> => {
try {
configureReq(req);
res.json({
appStage: process.env.NEXT_PUBLIC_APP_STAGE,
appName: process.env.NEXT_PUBLIC_APP_NAME,
appVersion: process.env.NEXT_PUBLIC_APP_VERSION,
appRelease: process.env.NEXT_PUBLIC_APP_VERSION_RELEASE,
appBuildTime: process.env.NEXT_PUBLIC_APP_BUILD_TIME,
appBuildTimestamp: process.env.NEXT_PUBLIC_APP_BUILD_TIMESTAMP,
appBuildId: process.env.NEXT_PUBLIC_APP_BUILD_ID,
nodejs: process.version,
nodejsAWS: process.env.AWS_EXECUTION_ENV,
regionNOW: process.env.NOW_REGION,
regionAWS: process.env.AWS_REGION,
timezone: process.env.TZ,
memory: process.env.AWS_LAMBDA_FUNCTION_MEMORY_SIZE,
environment: process.env.NODE_ENV,
preset: process.env.NEXT_PUBLIC_NRN_PRESET,
customerRef: process.env.NEXT_PUBLIC_CUSTOMER_REF,
});
} catch (e) {
logger.error(e.message);
Sentry.withScope((scope): void => {
scope.setTag('fileLabel', fileLabel);
Sentry.captureException(e);
});
res.json({
error: true,
message: process.env.NEXT_PUBLIC_APP_STAGE === 'production' ? undefined : e.message,
});
}
};
export default status; | import Sentry, { configureReq } from '../../utils/monitoring/sentry';
const fileLabel = 'api/status'; |
general_test.go | // Copyright 2019 The Go Language Server Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protocol
import (
"fmt"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.lsp.dev/uri"
)
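// The test helpers below take marshal/unmarshal functions as parameters so
// that each table can be run against more than one JSON codec with identical
// cases.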
func testWorkspaceFolders(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const want = `[{"uri":"file:///Users/zchee/go/src/go.lsp.dev/protocol","name":"protocol"},{"uri":"file:///Users/zchee/go/src/go.lsp.dev/jsonrpc2","name":"jsonrpc2"}]`
wantType := WorkspaceFolders{
{
URI: string(uri.File("/Users/zchee/go/src/go.lsp.dev/protocol")),
Name: "protocol",
},
{
URI: string(uri.File("/Users/zchee/go/src/go.lsp.dev/jsonrpc2")),
Name: "jsonrpc2",
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field WorkspaceFolders
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want WorkspaceFolders
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got WorkspaceFolders
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testClientInfo(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"name":"testClient","version":"v0.0.0"}`
wantNilAll = `{"name":"testClient"}`
)
wantType := ClientInfo{
Name: "testClient",
Version: "v0.0.0",
}
wantTypeNilAll := ClientInfo{
Name: "testClient",
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field ClientInfo
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNilAll,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want ClientInfo
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNilAll,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got ClientInfo
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
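// WorkDoneProgressParams wraps an interface-backed progress token that cmp
// cannot diff structurally, so the tests below ignore it in the struct
// comparison and check the token separately via its string form.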
func testInitializeParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const wantWorkDoneToken = "156edea9-9d8d-422f-b7ee-81a84594afbb"
const (
want = `{"workDoneToken":"` + wantWorkDoneToken + `","processId":25556,"clientInfo":{"name":"testClient","version":"v0.0.0"},"rootPath":"~/go/src/go.lsp.dev/protocol","rootUri":"file:///Users/zchee/go/src/go.lsp.dev/protocol","initializationOptions":"testdata","capabilities":{},"trace":"on","workspaceFolders":[{"uri":"file:///Users/zchee/go/src/go.lsp.dev/protocol","name":"protocol"},{"uri":"file:///Users/zchee/go/src/go.lsp.dev/jsonrpc2","name":"jsonrpc2"}]}`
wantNil = `{"processId":25556,"rootUri":"file:///Users/zchee/go/src/go.lsp.dev/protocol","capabilities":{}}`
)
wantType := InitializeParams{
WorkDoneProgressParams: WorkDoneProgressParams{
WorkDoneToken: NewProgressToken(wantWorkDoneToken),
},
ProcessID: 25556,
ClientInfo: &ClientInfo{
Name: "testClient",
Version: "v0.0.0",
},
RootPath: "~/go/src/go.lsp.dev/protocol",
RootURI: uri.File("/Users/zchee/go/src/go.lsp.dev/protocol"),
InitializationOptions: "testdata",
Capabilities: ClientCapabilities{},
Trace: "on",
WorkspaceFolders: []WorkspaceFolder{
{
Name: filepath.Base("/Users/zchee/go/src/go.lsp.dev/protocol"),
URI: string(uri.File("/Users/zchee/go/src/go.lsp.dev/protocol")),
},
{
Name: filepath.Base("/Users/zchee/go/src/go.lsp.dev/jsonrpc2"),
URI: string(uri.File("/Users/zchee/go/src/go.lsp.dev/jsonrpc2")),
},
},
}
wantTypeNilAll := InitializeParams{
ProcessID: 25556,
RootURI: uri.File("//Users/zchee/go/src/go.lsp.dev/protocol"),
Capabilities: ClientCapabilities{},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field InitializeParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want InitializeParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got InitializeParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want, cmpopts.IgnoreTypes(WorkDoneProgressParams{})); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if token := got.WorkDoneToken; token != nil {
if diff := cmp.Diff(fmt.Sprint(token), wantWorkDoneToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
})
}
})
}
func testWorkspaceClientCapabilities(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const want = `{"applyEdit":true,"workspaceEdit":{"documentChanges":true,"failureHandling":"FailureHandling","resourceOperations":["ResourceOperations"]},"didChangeConfiguration":{"dynamicRegistration":true},"didChangeWatchedFiles":{"dynamicRegistration":true},"symbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6]}},"executeCommand":{"dynamicRegistration":true},"workspaceFolders":true,"configuration":true}`
wantType := WorkspaceClientCapabilities{
ApplyEdit: true,
WorkspaceEdit: &WorkspaceClientCapabilitiesWorkspaceEdit{
DocumentChanges: true,
FailureHandling: "FailureHandling",
ResourceOperations: []string{"ResourceOperations"},
},
DidChangeConfiguration: &WorkspaceClientCapabilitiesDidChangeConfiguration{
DynamicRegistration: true,
},
DidChangeWatchedFiles: &WorkspaceClientCapabilitiesDidChangeWatchedFiles{
DynamicRegistration: true,
},
Symbol: &WorkspaceClientCapabilitiesSymbol{
DynamicRegistration: true,
SymbolKind: &WorkspaceClientCapabilitiesSymbolKind{
ValueSet: []SymbolKind{
FileSymbol,
ModuleSymbol,
NamespaceSymbol,
PackageSymbol,
ClassSymbol,
MethodSymbol,
},
},
},
ExecuteCommand: &WorkspaceClientCapabilitiesExecuteCommand{
DynamicRegistration: true,
},
WorkspaceFolders: true,
Configuration: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field WorkspaceClientCapabilities
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want WorkspaceClientCapabilities
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got WorkspaceClientCapabilities
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesSynchronization(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"didSave":true,"dynamicRegistration":true,"willSave":true,"willSaveWaitUntil":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesSynchronization{
DidSave: true,
DynamicRegistration: true,
WillSave: true,
WillSaveWaitUntil: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesSynchronization
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesSynchronization{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesSynchronization
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesSynchronization{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesSynchronization
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesCompletion(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"completionItem":{"snippetSupport":true,"commitCharactersSupport":true,"documentationFormat":["plaintext","markdown"],"deprecatedSupport":true,"preselectSupport":true,"tagSupport":{"valueSet":[1]}},"completionItemKind":{"valueSet":[1]},"contextSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesCompletion{
DynamicRegistration: true,
CompletionItem: &TextDocumentClientCapabilitiesCompletionItem{
SnippetSupport: true,
CommitCharactersSupport: true,
DocumentationFormat: []MarkupKind{
PlainText,
Markdown,
},
DeprecatedSupport: true,
PreselectSupport: true,
TagSupport: &TextDocumentClientCapabilitiesCompletionItemTagSupport{
ValueSet: []CompletionItemTag{
CompletionItemTagDeprecated,
},
},
},
CompletionItemKind: &TextDocumentClientCapabilitiesCompletionItemKind{
ValueSet: []CompletionItemKind{TextCompletion},
},
ContextSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesCompletion
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesCompletion{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesCompletion
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesCompletion{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesCompletion
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesHover(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"contentFormat":["plaintext","markdown"]}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesHover{
DynamicRegistration: true,
ContentFormat: []MarkupKind{
PlainText,
Markdown,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesHover
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesHover{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesHover
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesHover{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesHover
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesSignatureHelp(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"signatureInformation":{"documentationFormat":["plaintext","markdown"]},"contextSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesSignatureHelp{
DynamicRegistration: true,
SignatureInformation: &TextDocumentClientCapabilitiesSignatureInformation{
DocumentationFormat: []MarkupKind{
PlainText,
Markdown,
},
},
ContextSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesSignatureHelp
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesSignatureHelp{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesSignatureHelp
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesSignatureHelp{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesSignatureHelp
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testReferencesParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
wantWorkDoneToken = "156edea9-9d8d-422f-b7ee-81a84594afbb"
wantPartialResultToken = "dd134d84-c134-4d7a-a2a3-f8af3ef4a568"
)
const (
want = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1},"workDoneToken":"` + wantWorkDoneToken + `","partialResultToken":"` + wantPartialResultToken + `","context":{"includeDeclaration":true}}`
wantNilAll = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1},"context":{"includeDeclaration":true}}`
wantInvalid = `{"textDocument":{"uri":"file:///path/to/basic_gen.go"},"position":{"line":2,"character":1},"workDoneToken":"` + wantPartialResultToken + `","partialResultToken":"` + wantWorkDoneToken + `","context":{"includeDeclaration":false}}`
)
wantType := ReferencesParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
WorkDoneProgressParams: WorkDoneProgressParams{
WorkDoneToken: NewProgressToken(wantWorkDoneToken),
},
PartialResultParams: PartialResultParams{
PartialResultToken: NewProgressToken(wantPartialResultToken),
},
Context: ReferenceContext{
IncludeDeclaration: true,
},
}
wantTypeNilAll := ReferencesParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
Context: ReferenceContext{
IncludeDeclaration: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field ReferencesParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNilAll,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want ReferencesParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNilAll,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got ReferencesParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want, cmpopts.IgnoreTypes(WorkDoneProgressParams{}, PartialResultParams{})); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if workDoneToken := got.WorkDoneToken; workDoneToken != nil {
if diff := cmp.Diff(fmt.Sprint(workDoneToken), wantWorkDoneToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
if partialResultToken := got.PartialResultToken; partialResultToken != nil {
if diff := cmp.Diff(fmt.Sprint(partialResultToken), wantPartialResultToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
})
}
})
}
func testTextDocumentClientCapabilitiesReferences(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesReferences{
DynamicRegistration: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesReferences
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesReferences{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesReferences
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesReferences{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesReferences
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testDocumentHighlightOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantNil = `{}`
wantInvalid = `{"workDoneProgress":false}`
)
wantType := DocumentHighlightOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentHighlightOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: DocumentHighlightOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentHighlightOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: DocumentHighlightOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentHighlightOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testDocumentHighlightParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
wantWorkDoneToken = "156edea9-9d8d-422f-b7ee-81a84594afbb"
wantPartialResultToken = "dd134d84-c134-4d7a-a2a3-f8af3ef4a568"
)
const (
want = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1},"workDoneToken":"` + wantWorkDoneToken + `","partialResultToken":"` + wantPartialResultToken + `"}`
wantNilAll = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1}}`
wantInvalid = `{"textDocument":{"uri":"file:///path/to/basic_gen.go"},"position":{"line":2,"character":1},"workDoneToken":"` + wantPartialResultToken + `","partialResultToken":"` + wantWorkDoneToken + `"}`
)
wantType := DocumentHighlightParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
WorkDoneProgressParams: WorkDoneProgressParams{
WorkDoneToken: NewProgressToken(wantWorkDoneToken),
},
PartialResultParams: PartialResultParams{
PartialResultToken: NewProgressToken(wantPartialResultToken),
},
}
wantTypeNilAll := DocumentHighlightParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentHighlightParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNilAll,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentHighlightParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNilAll,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentHighlightParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want, cmpopts.IgnoreTypes(WorkDoneProgressParams{}, PartialResultParams{})); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if workDoneToken := got.WorkDoneToken; workDoneToken != nil {
if diff := cmp.Diff(fmt.Sprint(workDoneToken), wantWorkDoneToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
if partialResultToken := got.PartialResultToken; partialResultToken != nil {
if diff := cmp.Diff(fmt.Sprint(partialResultToken), wantPartialResultToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
})
}
})
}
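// testTextDocumentClientCapabilitiesDocumentHighlight exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesDocumentHighlight.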
func testTextDocumentClientCapabilitiesDocumentHighlight(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesDocumentHighlight{
DynamicRegistration: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesDocumentHighlight
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesDocumentHighlight{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesDocumentHighlight
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesDocumentHighlight{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesDocumentHighlight
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
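// testDocumentSymbolOptions exercises marshal/unmarshal round-trips for DocumentSymbolOptions.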
func testDocumentSymbolOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantInvalid = `{"workDoneProgress":false}`
wantNil = `{}`
)
wantType := DocumentSymbolOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentSymbolOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: DocumentSymbolOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentSymbolOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: DocumentSymbolOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentSymbolOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
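// testTextDocumentClientCapabilitiesDocumentSymbol exercises marshal/unmarshal round-trips for
// TextDocumentClientCapabilitiesDocumentSymbol, including the symbolKind value set.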
func testTextDocumentClientCapabilitiesDocumentSymbol(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6]},"hierarchicalDocumentSymbolSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesDocumentSymbol{
DynamicRegistration: true,
SymbolKind: &WorkspaceClientCapabilitiesSymbolKind{
ValueSet: []SymbolKind{
FileSymbol,
ModuleSymbol,
NamespaceSymbol,
PackageSymbol,
ClassSymbol,
MethodSymbol,
},
},
HierarchicalDocumentSymbolSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesDocumentSymbol
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesDocumentSymbol{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesDocumentSymbol
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesDocumentSymbol{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesDocumentSymbol
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
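// testWorkspaceSymbolOptions exercises marshal/unmarshal round-trips for WorkspaceSymbolOptions.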
func testWorkspaceSymbolOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantInvalid = `{"workDoneProgress":false}`
wantNil = `{}`
)
wantType := WorkspaceSymbolOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field WorkspaceSymbolOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: WorkspaceSymbolOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want WorkspaceSymbolOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: WorkspaceSymbolOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got WorkspaceSymbolOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
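// testDocumentFormattingOptions exercises marshal/unmarshal round-trips for DocumentFormattingOptions.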
func testDocumentFormattingOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantInvalid = `{"workDoneProgress":false}`
wantNil = `{}`
)
wantType := DocumentFormattingOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentFormattingOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: DocumentFormattingOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentFormattingOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: DocumentFormattingOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentFormattingOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
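// testTextDocumentClientCapabilitiesFormatting exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesFormatting.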
func testTextDocumentClientCapabilitiesFormatting(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesFormatting{
DynamicRegistration: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesFormatting
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesFormatting{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesFormatting
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesFormatting{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesFormatting
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
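// testDocumentRangeFormattingOptions exercises marshal/unmarshal round-trips for DocumentRangeFormattingOptions.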
func testDocumentRangeFormattingOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantNil = `{}`
wantInvalid = `{"workDoneProgress":false}`
)
wantType := DocumentRangeFormattingOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentRangeFormattingOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: DocumentRangeFormattingOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentRangeFormattingOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: DocumentRangeFormattingOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentRangeFormattingOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
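// testTextDocumentClientCapabilitiesRangeFormatting exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesRangeFormatting.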
func testTextDocumentClientCapabilitiesRangeFormatting(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesRangeFormatting{
DynamicRegistration: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesRangeFormatting
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesRangeFormatting{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesRangeFormatting
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesRangeFormatting{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesRangeFormatting
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
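// testTextDocumentClientCapabilitiesOnTypeFormatting exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesOnTypeFormatting.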
func testTextDocumentClientCapabilitiesOnTypeFormatting(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesOnTypeFormatting{
DynamicRegistration: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesOnTypeFormatting
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesOnTypeFormatting{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesOnTypeFormatting
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesOnTypeFormatting{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesOnTypeFormatting
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
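// testDeclarationOptions exercises marshal/unmarshal round-trips for DeclarationOptions.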
func testDeclarationOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantNil = `{}`
wantInvalid = `{"workDoneProgress":false}`
)
wantType := DeclarationOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DeclarationOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: DeclarationOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DeclarationOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: DeclarationOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DeclarationOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
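// testDeclarationRegistrationOptions exercises marshal/unmarshal round-trips for
// DeclarationRegistrationOptions, covering the embedded document selector and static registration ID.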
func testDeclarationRegistrationOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true,"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}],"id":"1"}`
wantNil = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}]}`
wantInvalid = `{"workDoneProgress":false,"documentSelector":[{"language":"typescript","scheme":"file","pattern":"*.{ts,js}"}],"id":"0"}`
)
wantType := DeclarationRegistrationOptions{
DeclarationOptions: DeclarationOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
StaticRegistrationOptions: StaticRegistrationOptions{
ID: "1",
},
}
wantTypeNil := DeclarationRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DeclarationRegistrationOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNil,
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DeclarationRegistrationOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: wantTypeNil,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DeclarationRegistrationOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
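// testDeclarationParams exercises marshal/unmarshal round-trips for DeclarationParams;
// the progress tokens are verified separately from the cmp.Diff comparison.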
func testDeclarationParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
wantWorkDoneToken = "156edea9-9d8d-422f-b7ee-81a84594afbb"
wantPartialResultToken = "dd134d84-c134-4d7a-a2a3-f8af3ef4a568"
)
const (
want = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1},"workDoneToken":"` + wantWorkDoneToken + `","partialResultToken":"` + wantPartialResultToken + `"}`
wantNilAll = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1}}`
wantInvalid = `{"textDocument":{"uri":"file:///path/to/basic_gen.go"},"position":{"line":2,"character":1},"workDoneToken":"` + wantPartialResultToken + `","partialResultToken":"` + wantWorkDoneToken + `"}`
)
wantType := DeclarationParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
WorkDoneProgressParams: WorkDoneProgressParams{
WorkDoneToken: NewProgressToken(wantWorkDoneToken),
},
PartialResultParams: PartialResultParams{
PartialResultToken: NewProgressToken(wantPartialResultToken),
},
}
wantTypeNilAll := DeclarationParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DeclarationParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNilAll,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DeclarationParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNilAll,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DeclarationParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want, cmpopts.IgnoreTypes(WorkDoneProgressParams{}, PartialResultParams{})); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if workDoneToken := got.WorkDoneToken; workDoneToken != nil {
if diff := cmp.Diff(fmt.Sprint(workDoneToken), wantWorkDoneToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
if partialResultToken := got.PartialResultToken; partialResultToken != nil {
if diff := cmp.Diff(fmt.Sprint(partialResultToken), wantPartialResultToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
})
}
})
}
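// testTextDocumentClientCapabilitiesDeclaration exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesDeclaration.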
func testTextDocumentClientCapabilitiesDeclaration(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"linkSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesDeclaration{
DynamicRegistration: true,
LinkSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesDeclaration
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesDeclaration{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesDeclaration
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesDeclaration{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesDeclaration
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
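// testDefinitionOptions exercises marshal/unmarshal round-trips for DefinitionOptions.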
func testDefinitionOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantNil = `{}`
wantInvalid = `{"workDoneProgress":false}`
)
wantType := DefinitionOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DefinitionOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: DefinitionOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DefinitionOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: DefinitionOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DefinitionOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
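// testDefinitionParams exercises marshal/unmarshal round-trips for DefinitionParams;
// the progress tokens are verified separately from the cmp.Diff comparison.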
func testDefinitionParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
wantWorkDoneToken = "156edea9-9d8d-422f-b7ee-81a84594afbb"
wantPartialResultToken = "dd134d84-c134-4d7a-a2a3-f8af3ef4a568"
)
const (
want = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1},"workDoneToken":"` + wantWorkDoneToken + `","partialResultToken":"` + wantPartialResultToken + `"}`
wantNilAll = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1}}`
wantInvalid = `{"textDocument":{"uri":"file:///path/to/basic_gen.go"},"position":{"line":2,"character":1},"workDoneToken":"` + wantPartialResultToken + `","partialResultToken":"` + wantWorkDoneToken + `"}`
)
wantType := DefinitionParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
WorkDoneProgressParams: WorkDoneProgressParams{
WorkDoneToken: NewProgressToken(wantWorkDoneToken),
},
PartialResultParams: PartialResultParams{
PartialResultToken: NewProgressToken(wantPartialResultToken),
},
}
wantTypeNilAll := DefinitionParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DefinitionParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNilAll,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DefinitionParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNilAll,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DefinitionParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want, cmpopts.IgnoreTypes(WorkDoneProgressParams{}, PartialResultParams{})); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if workDoneToken := got.WorkDoneToken; workDoneToken != nil {
if diff := cmp.Diff(fmt.Sprint(workDoneToken), wantWorkDoneToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
if partialResultToken := got.PartialResultToken; partialResultToken != nil {
if diff := cmp.Diff(fmt.Sprint(partialResultToken), wantPartialResultToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
})
}
})
}
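// testTextDocumentClientCapabilitiesDefinition exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesDefinition.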
func testTextDocumentClientCapabilitiesDefinition(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"linkSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesDefinition{
DynamicRegistration: true,
LinkSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesDefinition
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesDefinition{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesDefinition
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesDefinition{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesDefinition
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
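// testTypeDefinitionOptions exercises marshal/unmarshal round-trips for TypeDefinitionOptions.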
func testTypeDefinitionOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantNil = `{}`
wantInvalid = `{"workDoneProgress":false}`
)
wantType := TypeDefinitionOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TypeDefinitionOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TypeDefinitionOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TypeDefinitionOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TypeDefinitionOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TypeDefinitionOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
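// testTypeDefinitionRegistrationOptions exercises marshal/unmarshal round-trips for
// TypeDefinitionRegistrationOptions, covering the embedded document selector and static registration ID.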
func testTypeDefinitionRegistrationOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}],"workDoneProgress":true,"id":"1"}`
wantNil = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}]}`
wantInvalid = `{"documentSelector":[{"language":"typescript","scheme":"file","pattern":"*.{ts,js}"}],"workDoneProgress":false,"id":"0"}`
)
wantType := TypeDefinitionRegistrationOptions{
TypeDefinitionOptions: TypeDefinitionOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
StaticRegistrationOptions: StaticRegistrationOptions{
ID: "1",
},
}
wantTypeNil := TypeDefinitionRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TypeDefinitionRegistrationOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNil,
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TypeDefinitionRegistrationOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: wantTypeNil,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
		}
		for _, tt := range tests {
			tt := tt
			t.Run(tt.name, func(t *testing.T) {
				t.Parallel()
				var got TypeDefinitionRegistrationOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
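// testTypeDefinitionParams exercises marshal/unmarshal round-trips for TypeDefinitionParams;
// the progress tokens are verified separately from the cmp.Diff comparison.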
func testTypeDefinitionParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
wantWorkDoneToken = "156edea9-9d8d-422f-b7ee-81a84594afbb"
wantPartialResultToken = "dd134d84-c134-4d7a-a2a3-f8af3ef4a568"
)
const (
want = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1},"workDoneToken":"` + wantWorkDoneToken + `","partialResultToken":"` + wantPartialResultToken + `"}`
wantNilAll = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1}}`
wantInvalid = `{"textDocument":{"uri":"file:///path/to/basic_gen.go"},"position":{"line":2,"character":1},"workDoneToken":"` + wantPartialResultToken + `","partialResultToken":"` + wantWorkDoneToken + `"}`
)
wantType := TypeDefinitionParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
WorkDoneProgressParams: WorkDoneProgressParams{
WorkDoneToken: NewProgressToken(wantWorkDoneToken),
},
PartialResultParams: PartialResultParams{
PartialResultToken: NewProgressToken(wantPartialResultToken),
},
}
wantTypeNilAll := TypeDefinitionParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TypeDefinitionParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNilAll,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TypeDefinitionParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNilAll,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TypeDefinitionParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want, cmpopts.IgnoreTypes(WorkDoneProgressParams{}, PartialResultParams{})); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if workDoneToken := got.WorkDoneToken; workDoneToken != nil {
if diff := cmp.Diff(fmt.Sprint(workDoneToken), wantWorkDoneToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
if partialResultToken := got.PartialResultToken; partialResultToken != nil {
if diff := cmp.Diff(fmt.Sprint(partialResultToken), wantPartialResultToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
})
}
})
}
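// testTextDocumentClientCapabilitiesTypeDefinition exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesTypeDefinition.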
func testTextDocumentClientCapabilitiesTypeDefinition(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"linkSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesTypeDefinition{
DynamicRegistration: true,
LinkSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesTypeDefinition
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesTypeDefinition{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesTypeDefinition
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesTypeDefinition{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesTypeDefinition
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
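// testImplementationParams exercises marshal/unmarshal round-trips for ImplementationParams;
// the progress tokens are verified separately from the cmp.Diff comparison.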
func testImplementationParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
wantWorkDoneToken = "156edea9-9d8d-422f-b7ee-81a84594afbb"
wantPartialResultToken = "dd134d84-c134-4d7a-a2a3-f8af3ef4a568"
)
const (
want = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1},"workDoneToken":"` + wantWorkDoneToken + `","partialResultToken":"` + wantPartialResultToken + `"}`
wantNilAll = `{"textDocument":{"uri":"file:///path/to/basic.go"},"position":{"line":25,"character":1}}`
wantInvalid = `{"textDocument":{"uri":"file:///path/to/basic_gen.go"},"position":{"line":2,"character":1},"workDoneToken":"` + wantPartialResultToken + `","partialResultToken":"` + wantWorkDoneToken + `"}`
)
wantType := ImplementationParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
WorkDoneProgressParams: WorkDoneProgressParams{
WorkDoneToken: NewProgressToken(wantWorkDoneToken),
},
PartialResultParams: PartialResultParams{
PartialResultToken: NewProgressToken(wantPartialResultToken),
},
}
wantTypeNilAll := ImplementationParams{
TextDocumentPositionParams: TextDocumentPositionParams{
TextDocument: TextDocumentIdentifier{
URI: uri.File("/path/to/basic.go"),
},
Position: Position{
Line: 25,
Character: 1,
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field ImplementationParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNilAll,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want ImplementationParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNilAll,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got ImplementationParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want, cmpopts.IgnoreTypes(WorkDoneProgressParams{}, PartialResultParams{})); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if workDoneToken := got.WorkDoneToken; workDoneToken != nil {
if diff := cmp.Diff(fmt.Sprint(workDoneToken), wantWorkDoneToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
if partialResultToken := got.PartialResultToken; partialResultToken != nil {
if diff := cmp.Diff(fmt.Sprint(partialResultToken), wantPartialResultToken); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
}
})
}
})
}
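// testTextDocumentClientCapabilitiesImplementation exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesImplementation.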
func testTextDocumentClientCapabilitiesImplementation(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"linkSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesImplementation{
DynamicRegistration: true,
LinkSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesImplementation
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesImplementation{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesImplementation
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesImplementation{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesImplementation
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
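// testTextDocumentClientCapabilitiesCodeAction exercises marshal/unmarshal round-trips for
// TextDocumentClientCapabilitiesCodeAction, including the codeActionKind value set.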
func testTextDocumentClientCapabilitiesCodeAction(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"codeActionLiteralSupport":{"codeActionKind":{"valueSet":["quickfix","refactor","refactor.extract","refactor.rewrite","source","source.organizeImports"]}},"isPreferredSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesCodeAction{
DynamicRegistration: true,
CodeActionLiteralSupport: &TextDocumentClientCapabilitiesCodeActionLiteralSupport{
CodeActionKind: &TextDocumentClientCapabilitiesCodeActionKind{
ValueSet: []CodeActionKind{
QuickFix,
Refactor,
RefactorExtract,
RefactorRewrite,
Source,
SourceOrganizeImports,
},
},
},
IsPreferredSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesCodeAction
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesCodeAction{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesCodeAction
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesCodeAction{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesCodeAction
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
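// testTextDocumentClientCapabilitiesCodeLens exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesCodeLens.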
func testTextDocumentClientCapabilitiesCodeLens(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"tooltipSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesCodeLens{
DynamicRegistration: true,
TooltipSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesCodeLens
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesCodeLens{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesCodeLens
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesCodeLens{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesCodeLens
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
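// testTextDocumentClientCapabilitiesDocumentLink exercises marshal/unmarshal round-trips for TextDocumentClientCapabilitiesDocumentLink.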
func testTextDocumentClientCapabilitiesDocumentLink(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"tooltipSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesDocumentLink{
DynamicRegistration: true,
TooltipSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesDocumentLink
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesDocumentLink{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesDocumentLink
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesDocumentLink{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesDocumentLink
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testDocumentColorOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantNil = `{}`
wantInvalid = `{"workDoneProgress":false}`
)
wantType := DocumentColorOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentColorOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: DocumentColorOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentColorOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: DocumentColorOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentColorOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testDocumentColorRegistrationOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}],"id":"1","workDoneProgress":true}`
wantNil = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}]}`
wantInvalid = `{"documentSelector":[{"language":"typescript","scheme":"file","pattern":"*.{ts,js}"}],"id":"0","workDoneProgress":false}`
)
wantType := DocumentColorRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
StaticRegistrationOptions: StaticRegistrationOptions{
ID: "1",
},
DocumentColorOptions: DocumentColorOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
}
wantTypeNil := DocumentColorRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentColorRegistrationOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNil,
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentColorRegistrationOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: wantTypeNil,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentColorRegistrationOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesColorProvider(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesColorProvider{
DynamicRegistration: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesColorProvider
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesColorProvider{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesColorProvider
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesColorProvider{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesColorProvider
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesRename(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"prepareSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesRename{
DynamicRegistration: true,
PrepareSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesRename
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesRename{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesRename
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesRename{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesRename
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesPublishDiagnostics(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"relatedInformation":true,"tagSupport":{"valueSet":[2,1]},"versionSupport":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesPublishDiagnostics{
RelatedInformation: true,
TagSupport: &TextDocumentClientCapabilitiesPublishDiagnosticsTagSupport{
ValueSet: []DiagnosticTag{
DiagnosticDeprecated,
DiagnosticUnnecessary,
},
},
VersionSupport: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesPublishDiagnostics
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesPublishDiagnostics{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesPublishDiagnostics
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesPublishDiagnostics{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesPublishDiagnostics
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testFoldingRangeOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workDoneProgress":true}`
wantNil = `{}`
wantInvalid = `{"workDoneProgress":false}`
)
wantType := FoldingRangeOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field FoldingRangeOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: FoldingRangeOptions{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want FoldingRangeOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: FoldingRangeOptions{},
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got FoldingRangeOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testFoldingRangeRegistrationOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}],"workDoneProgress":true,"id":"1"}`
wantNil = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}]}`
wantInvalid = `{"documentSelector":[{"language":"typescript","scheme":"file","pattern":"*.{ts,js}"}],"workDoneProgress":false,"id":"0"}`
)
wantType := FoldingRangeRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
FoldingRangeOptions: FoldingRangeOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: true,
},
},
StaticRegistrationOptions: StaticRegistrationOptions{
ID: "1",
},
}
wantTypeNil := FoldingRangeRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field FoldingRangeRegistrationOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNil,
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantType,
want: wantInvalid,
wantMarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want FoldingRangeRegistrationOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: wantTypeNil,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "Invalid",
field: wantInvalid,
want: wantType,
wantUnmarshalErr: false,
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got FoldingRangeRegistrationOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testTextDocumentClientCapabilitiesFoldingRange(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"dynamicRegistration":true,"rangeLimit":0.5,"lineFoldingOnly":true}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilitiesFoldingRange{
DynamicRegistration: true,
RangeLimit: float64(0.5),
LineFoldingOnly: true,
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilitiesFoldingRange
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilitiesFoldingRange{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilitiesFoldingRange
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilitiesFoldingRange{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilitiesFoldingRange
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
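// testTextDocumentClientCapabilities round-trips the aggregate
// TextDocumentClientCapabilities type, covering every per-feature capability
// struct in a single fully populated fixture plus the all-zero encoding.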
func testTextDocumentClientCapabilities(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"synchronization":{"didSave":true,"dynamicRegistration":true,"willSave":true,"willSaveWaitUntil":true},"completion":{"dynamicRegistration":true,"completionItem":{"snippetSupport":true,"commitCharactersSupport":true,"documentationFormat":["plaintext","markdown"],"deprecatedSupport":true,"preselectSupport":true},"completionItemKind":{"valueSet":[1]},"contextSupport":true},"hover":{"dynamicRegistration":true,"contentFormat":["plaintext","markdown"]},"signatureHelp":{"dynamicRegistration":true,"signatureInformation":{"documentationFormat":["plaintext","markdown"]}},"references":{"dynamicRegistration":true},"documentHighlight":{"dynamicRegistration":true},"documentSymbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6]},"hierarchicalDocumentSymbolSupport":true},"formatting":{"dynamicRegistration":true},"rangeFormatting":{"dynamicRegistration":true},"onTypeFormatting":{"dynamicRegistration":true},"declaration":{"dynamicRegistration":true,"linkSupport":true},"definition":{"dynamicRegistration":true,"linkSupport":true},"typeDefinition":{"dynamicRegistration":true,"linkSupport":true},"implementation":{"dynamicRegistration":true,"linkSupport":true},"codeAction":{"dynamicRegistration":true,"codeActionLiteralSupport":{"codeActionKind":{"valueSet":["quickfix","refactor","refactor.extract","refactor.rewrite","source","source.organizeImports"]}}},"codeLens":{"dynamicRegistration":true},"documentLink":{"dynamicRegistration":true},"colorProvider":{"dynamicRegistration":true},"rename":{"dynamicRegistration":true,"prepareSupport":true},"publishDiagnostics":{"relatedInformation":true},"foldingRange":{"dynamicRegistration":true,"rangeLimit":0.5,"lineFoldingOnly":true},"selectionRange":{"dynamicRegistration":true}}`
wantNil = `{}`
)
wantType := TextDocumentClientCapabilities{
Synchronization: &TextDocumentClientCapabilitiesSynchronization{
DidSave: true,
DynamicRegistration: true,
WillSave: true,
WillSaveWaitUntil: true,
},
Completion: &TextDocumentClientCapabilitiesCompletion{
DynamicRegistration: true,
CompletionItem: &TextDocumentClientCapabilitiesCompletionItem{
SnippetSupport: true,
CommitCharactersSupport: true,
DocumentationFormat: []MarkupKind{
PlainText,
Markdown,
},
DeprecatedSupport: true,
PreselectSupport: true,
},
CompletionItemKind: &TextDocumentClientCapabilitiesCompletionItemKind{
ValueSet: []CompletionItemKind{TextCompletion},
},
ContextSupport: true,
},
Hover: &TextDocumentClientCapabilitiesHover{
DynamicRegistration: true,
ContentFormat: []MarkupKind{
PlainText,
Markdown,
},
},
SignatureHelp: &TextDocumentClientCapabilitiesSignatureHelp{
DynamicRegistration: true,
SignatureInformation: &TextDocumentClientCapabilitiesSignatureInformation{
DocumentationFormat: []MarkupKind{
PlainText,
Markdown,
},
},
},
References: &TextDocumentClientCapabilitiesReferences{
DynamicRegistration: true,
},
DocumentHighlight: &TextDocumentClientCapabilitiesDocumentHighlight{
DynamicRegistration: true,
},
DocumentSymbol: &TextDocumentClientCapabilitiesDocumentSymbol{
DynamicRegistration: true,
SymbolKind: &WorkspaceClientCapabilitiesSymbolKind{
ValueSet: []SymbolKind{
FileSymbol,
ModuleSymbol,
NamespaceSymbol,
PackageSymbol,
ClassSymbol,
MethodSymbol,
},
},
HierarchicalDocumentSymbolSupport: true,
},
Formatting: &TextDocumentClientCapabilitiesFormatting{
DynamicRegistration: true,
},
RangeFormatting: &TextDocumentClientCapabilitiesRangeFormatting{
DynamicRegistration: true,
},
OnTypeFormatting: &TextDocumentClientCapabilitiesOnTypeFormatting{
DynamicRegistration: true,
},
Declaration: &TextDocumentClientCapabilitiesDeclaration{
DynamicRegistration: true,
LinkSupport: true,
},
Definition: &TextDocumentClientCapabilitiesDefinition{
DynamicRegistration: true,
LinkSupport: true,
},
TypeDefinition: &TextDocumentClientCapabilitiesTypeDefinition{
DynamicRegistration: true,
LinkSupport: true,
},
Implementation: &TextDocumentClientCapabilitiesImplementation{
DynamicRegistration: true,
LinkSupport: true,
},
CodeAction: &TextDocumentClientCapabilitiesCodeAction{
DynamicRegistration: true,
CodeActionLiteralSupport: &TextDocumentClientCapabilitiesCodeActionLiteralSupport{
CodeActionKind: &TextDocumentClientCapabilitiesCodeActionKind{
ValueSet: []CodeActionKind{
QuickFix,
Refactor,
RefactorExtract,
RefactorRewrite,
Source,
SourceOrganizeImports,
},
},
},
},
CodeLens: &TextDocumentClientCapabilitiesCodeLens{
DynamicRegistration: true,
},
DocumentLink: &TextDocumentClientCapabilitiesDocumentLink{
DynamicRegistration: true,
},
ColorProvider: &TextDocumentClientCapabilitiesColorProvider{
DynamicRegistration: true,
},
Rename: &TextDocumentClientCapabilitiesRename{
DynamicRegistration: true,
PrepareSupport: true,
},
PublishDiagnostics: &TextDocumentClientCapabilitiesPublishDiagnostics{
RelatedInformation: true,
},
FoldingRange: &TextDocumentClientCapabilitiesFoldingRange{
DynamicRegistration: true,
RangeLimit: float64(0.5),
LineFoldingOnly: true,
},
SelectionRange: &TextDocumentClientCapabilitiesSelectionRange{
DynamicRegistration: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field TextDocumentClientCapabilities
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: TextDocumentClientCapabilities{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want TextDocumentClientCapabilities
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: TextDocumentClientCapabilities{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got TextDocumentClientCapabilities
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testClientCapabilities(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"workspace":{"applyEdit":true,"workspaceEdit":{"documentChanges":true,"failureHandling":"FailureHandling","resourceOperations":["ResourceOperations"]},"didChangeConfiguration":{"dynamicRegistration":true},"didChangeWatchedFiles":{"dynamicRegistration":true},"symbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6]}},"executeCommand":{"dynamicRegistration":true},"workspaceFolders":true,"configuration":true},"textDocument":{"synchronization":{"didSave":true,"dynamicRegistration":true,"willSave":true,"willSaveWaitUntil":true},"completion":{"dynamicRegistration":true,"completionItem":{"snippetSupport":true,"commitCharactersSupport":true,"documentationFormat":["plaintext","markdown"],"deprecatedSupport":true,"preselectSupport":true},"completionItemKind":{"valueSet":[1]},"contextSupport":true},"hover":{"dynamicRegistration":true,"contentFormat":["plaintext","markdown"]},"signatureHelp":{"dynamicRegistration":true,"signatureInformation":{"documentationFormat":["plaintext","markdown"]}},"references":{"dynamicRegistration":true},"documentHighlight":{"dynamicRegistration":true},"documentSymbol":{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6]},"hierarchicalDocumentSymbolSupport":true},"formatting":{"dynamicRegistration":true},"rangeFormatting":{"dynamicRegistration":true},"onTypeFormatting":{"dynamicRegistration":true},"declaration":{"dynamicRegistration":true,"linkSupport":true},"definition":{"dynamicRegistration":true,"linkSupport":true},"typeDefinition":{"dynamicRegistration":true,"linkSupport":true},"implementation":{"dynamicRegistration":true,"linkSupport":true},"codeAction":{"dynamicRegistration":true,"codeActionLiteralSupport":{"codeActionKind":{"valueSet":["quickfix","refactor","refactor.extract","refactor.rewrite","source","source.organizeImports"]}}},"codeLens":{"dynamicRegistration":true},"documentLink":{"dynamicRegistration":true},"colorProvider":{"dynamicRegistration":true},"rename":{"dynamicRegistration":true,"prepareSupport":true},"publishDiagnostics":{"relatedInformation":true},"foldingRange":{"dynamicRegistration":true,"rangeLimit":0.5,"lineFoldingOnly":true},"selectionRange":{"dynamicRegistration":true}},"window":{"workDoneProgress":true}}`
wantNil = `{}`
)
wantType := ClientCapabilities{
Workspace: &WorkspaceClientCapabilities{
ApplyEdit: true,
WorkspaceEdit: &WorkspaceClientCapabilitiesWorkspaceEdit{
DocumentChanges: true,
FailureHandling: "FailureHandling",
ResourceOperations: []string{"ResourceOperations"},
},
DidChangeConfiguration: &WorkspaceClientCapabilitiesDidChangeConfiguration{
DynamicRegistration: true,
},
DidChangeWatchedFiles: &WorkspaceClientCapabilitiesDidChangeWatchedFiles{
DynamicRegistration: true,
},
Symbol: &WorkspaceClientCapabilitiesSymbol{
DynamicRegistration: true,
SymbolKind: &WorkspaceClientCapabilitiesSymbolKind{
ValueSet: []SymbolKind{
FileSymbol,
ModuleSymbol,
NamespaceSymbol,
PackageSymbol,
ClassSymbol,
MethodSymbol,
},
},
},
ExecuteCommand: &WorkspaceClientCapabilitiesExecuteCommand{
DynamicRegistration: true,
},
WorkspaceFolders: true,
Configuration: true,
},
TextDocument: &TextDocumentClientCapabilities{
Synchronization: &TextDocumentClientCapabilitiesSynchronization{
DidSave: true,
DynamicRegistration: true,
WillSave: true,
WillSaveWaitUntil: true,
},
Completion: &TextDocumentClientCapabilitiesCompletion{
DynamicRegistration: true,
CompletionItem: &TextDocumentClientCapabilitiesCompletionItem{
SnippetSupport: true,
CommitCharactersSupport: true,
DocumentationFormat: []MarkupKind{
PlainText,
Markdown,
},
DeprecatedSupport: true,
PreselectSupport: true,
},
CompletionItemKind: &TextDocumentClientCapabilitiesCompletionItemKind{
ValueSet: []CompletionItemKind{TextCompletion},
},
ContextSupport: true,
},
Hover: &TextDocumentClientCapabilitiesHover{
DynamicRegistration: true,
ContentFormat: []MarkupKind{
PlainText,
Markdown,
},
},
SignatureHelp: &TextDocumentClientCapabilitiesSignatureHelp{
DynamicRegistration: true,
SignatureInformation: &TextDocumentClientCapabilitiesSignatureInformation{
DocumentationFormat: []MarkupKind{
PlainText,
Markdown,
},
},
},
References: &TextDocumentClientCapabilitiesReferences{
DynamicRegistration: true,
},
DocumentHighlight: &TextDocumentClientCapabilitiesDocumentHighlight{
DynamicRegistration: true,
},
DocumentSymbol: &TextDocumentClientCapabilitiesDocumentSymbol{
DynamicRegistration: true,
SymbolKind: &WorkspaceClientCapabilitiesSymbolKind{
ValueSet: []SymbolKind{
FileSymbol,
ModuleSymbol,
NamespaceSymbol,
PackageSymbol,
ClassSymbol,
MethodSymbol,
},
},
HierarchicalDocumentSymbolSupport: true,
},
Formatting: &TextDocumentClientCapabilitiesFormatting{
DynamicRegistration: true,
},
RangeFormatting: &TextDocumentClientCapabilitiesRangeFormatting{
DynamicRegistration: true,
},
OnTypeFormatting: &TextDocumentClientCapabilitiesOnTypeFormatting{
DynamicRegistration: true,
},
Declaration: &TextDocumentClientCapabilitiesDeclaration{
DynamicRegistration: true,
LinkSupport: true,
},
Definition: &TextDocumentClientCapabilitiesDefinition{
DynamicRegistration: true,
LinkSupport: true,
},
TypeDefinition: &TextDocumentClientCapabilitiesTypeDefinition{
DynamicRegistration: true,
LinkSupport: true,
},
Implementation: &TextDocumentClientCapabilitiesImplementation{
DynamicRegistration: true,
LinkSupport: true,
},
CodeAction: &TextDocumentClientCapabilitiesCodeAction{
DynamicRegistration: true,
CodeActionLiteralSupport: &TextDocumentClientCapabilitiesCodeActionLiteralSupport{
CodeActionKind: &TextDocumentClientCapabilitiesCodeActionKind{
ValueSet: []CodeActionKind{
QuickFix,
Refactor,
RefactorExtract,
RefactorRewrite,
Source,
SourceOrganizeImports,
},
},
},
},
CodeLens: &TextDocumentClientCapabilitiesCodeLens{
DynamicRegistration: true,
},
DocumentLink: &TextDocumentClientCapabilitiesDocumentLink{
DynamicRegistration: true,
},
ColorProvider: &TextDocumentClientCapabilitiesColorProvider{
DynamicRegistration: true,
},
Rename: &TextDocumentClientCapabilitiesRename{
DynamicRegistration: true,
PrepareSupport: true,
},
PublishDiagnostics: &TextDocumentClientCapabilitiesPublishDiagnostics{
RelatedInformation: true,
},
FoldingRange: &TextDocumentClientCapabilitiesFoldingRange{
DynamicRegistration: true,
RangeLimit: float64(0.5),
LineFoldingOnly: true,
},
SelectionRange: &TextDocumentClientCapabilitiesSelectionRange{
DynamicRegistration: true,
},
},
Window: &WindowClientCapabilities{
WorkDoneProgress: true,
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field ClientCapabilities
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: ClientCapabilities{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want ClientCapabilities
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: ClientCapabilities{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got ClientCapabilities
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testInitializeResult(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"capabilities":{"textDocumentSync":3,"hoverProvider":true,"completionProvider":{"resolveProvider":true,"triggerCharacters":["Tab"]},"signatureHelpProvider":{"triggerCharacters":["C-K"]},"declarationProvider":true,"definitionProvider":true,"typeDefinitionProvider":true,"implementationProvider":true,"referencesProvider":true,"documentHighlightProvider":true,"documentSymbolProvider":true,"workspaceSymbolProvider":true,"codeActionProvider":true,"codeLensProvider":{"resolveProvider":true},"documentFormattingProvider":true,"documentRangeFormattingProvider":true,"documentOnTypeFormattingProvider":{"firstTriggerCharacter":".","moreTriggerCharacter":["f"]},"renameProvider":true,"documentLinkProvider":{"resolveProvider":true},"colorProvider":true,"foldingRangeProvider":true,"selectionRangeProvider":true,"executeCommandProvider":{"commands":["test","command"]},"workspace":{"workspaceFolders":{"supported":true}},"experimental":"Awesome Experimentals"}}`
wantNil = `{"capabilities":{}}`
)
enableSelectionRange := EnableSelectionRange(true)
wantType := InitializeResult{
Capabilities: ServerCapabilities{
TextDocumentSync: float64(3),
HoverProvider: true,
CompletionProvider: &CompletionOptions{
ResolveProvider: true,
TriggerCharacters: []string{"Tab"},
},
SignatureHelpProvider: &SignatureHelpOptions{
TriggerCharacters: []string{"C-K"},
},
DeclarationProvider: true,
DefinitionProvider: true,
TypeDefinitionProvider: true,
ImplementationProvider: true,
ReferencesProvider: true,
DocumentHighlightProvider: true,
DocumentSymbolProvider: true,
WorkspaceSymbolProvider: true,
CodeActionProvider: true,
CodeLensProvider: &CodeLensOptions{
ResolveProvider: true,
},
DocumentFormattingProvider: true,
DocumentRangeFormattingProvider: true,
DocumentOnTypeFormattingProvider: &DocumentOnTypeFormattingOptions{
FirstTriggerCharacter: ".",
MoreTriggerCharacter: []string{"f"},
},
RenameProvider: true,
DocumentLinkProvider: &DocumentLinkOptions{
ResolveProvider: true,
},
ColorProvider: true,
FoldingRangeProvider: true,
ExecuteCommandProvider: &ExecuteCommandOptions{
Commands: []string{"test", "command"},
},
Workspace: &ServerCapabilitiesWorkspace{
WorkspaceFolders: &ServerCapabilitiesWorkspaceFolders{
Supported: true,
ChangeNotifications: nil,
},
},
SelectionRangeProvider: &enableSelectionRange,
Experimental: "Awesome Experimentals",
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field InitializeResult
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: InitializeResult{},
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Logf("got: %s", string(got))
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want InitializeResult
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: InitializeResult{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got InitializeResult
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
cmpOpts := cmpopts.IgnoreFields(ServerCapabilities{}, "SelectionRangeProvider") // ignore SelectionRangeProvider field but assert below
if diff := cmp.Diff(got, tt.want, cmpOpts); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
if srp := got.Capabilities.SelectionRangeProvider; srp != nil {
switch srp := srp.(type) {
case bool: // EnableSelectionRange
if diff := cmp.Diff(EnableSelectionRange(srp), enableSelectionRange); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
default:
t.Fatalf("srp type is %[1]T, not bool: %#[1]v\n", srp)
}
}
})
}
})
}
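// TestTextDocumentSyncKind_String checks the String method for each known
// TextDocumentSyncKind and the numeric fallback used for unknown values.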
func TestTextDocumentSyncKind_String(t *testing.T) {
tests := []struct {
name string
k TextDocumentSyncKind
want string
}{
{
name: "NoneKind",
k: None,
want: "None",
},
{
name: "FullKind",
k: Full,
want: "Full",
},
{
name: "IncrementalKind",
k: Incremental,
want: "Incremental",
},
{
name: "UnknownKind",
k: TextDocumentSyncKind(99),
want: "99",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if got := tt.k.String(); got != tt.want {
t.Errorf("TextDocumentSyncKind.String() = %v, want %v", got, tt.want)
}
})
}
}
func testDocumentLinkRegistrationOptions(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const (
want = `{"documentSelector":[{"language":"go","scheme":"file","pattern":"*"}],"resolveProvider":true}`
wantNil = `{"documentSelector":[]}`
)
wantType := DocumentLinkRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{
{
Language: "go",
Scheme: "file",
Pattern: `*`,
},
},
},
ResolveProvider: true,
}
wantTypeNilAll := DocumentLinkRegistrationOptions{
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: DocumentSelector{},
},
}
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field DocumentLinkRegistrationOptions
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: wantType,
want: want,
wantMarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantTypeNilAll,
want: wantNil,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want DocumentLinkRegistrationOptions
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: wantType,
wantUnmarshalErr: false,
wantErr: false,
},
{
name: "ValidNilAll",
field: wantNil,
want: wantTypeNilAll,
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got DocumentLinkRegistrationOptions
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
}
func testInitializedParams(t *testing.T, marshal marshalFunc, unmarshal unmarshalFunc) {
const want = `{}`
t.Run("Marshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field InitializedParams
want string
wantMarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: InitializedParams{},
want: want,
wantMarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got, err := marshal(&tt.field)
if (err != nil) != tt.wantMarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(string(got), tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
t.Run("Unmarshal", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
field string
want InitializedParams
wantUnmarshalErr bool
wantErr bool
}{
{
name: "Valid",
field: want,
want: InitializedParams{},
wantUnmarshalErr: false,
wantErr: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var got InitializedParams
if err := unmarshal([]byte(tt.field), &got); (err != nil) != tt.wantUnmarshalErr {
t.Fatal(err)
}
if diff := cmp.Diff(got, tt.want); (diff != "") != tt.wantErr {
t.Errorf("%s: wantErr: %t\n(-got, +want)\n%s", tt.name, tt.wantErr, diff)
}
})
}
})
} |
project.rs

use crate::options::{self, BuildOptions, Sanitizer};
use crate::utils::default_target;
use anyhow::{anyhow, bail, Context, Result};
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::{
env, ffi, fs,
process::{Command, Stdio},
time,
};
pub struct FuzzProject {
/// Path to the root cargo project
///
/// Not the project with fuzz targets, but the project being fuzzed
root_project: PathBuf,
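    /// Fuzz target names collected from the fuzz crate's manifest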
targets: Vec<String>,
}
impl FuzzProject {
/// Find an existing `cargo fuzz` project by starting at the current
/// directory and walking up the filesystem.
pub fn find_existing() -> Result<Self> {
let mut project = FuzzProject {
root_project: find_package()?,
targets: Vec::new(),
};
let manifest = project.manifest()?;
if !is_fuzz_manifest(&manifest) {
bail!(
"manifest `{}` does not look like a cargo-fuzz manifest. \
             Add the following lines to override:\n\
[package.metadata]\n\
cargo-fuzz = true",
project.manifest_path().display()
);
}
project.targets = collect_targets(&manifest);
Ok(project)
}
/// Create the fuzz project structure
///
/// This will not clone libfuzzer-sys
pub fn init(init: &options::Init) -> Result<Self> {
let project = FuzzProject {
root_project: find_package()?,
targets: Vec::new(),
};
let fuzz_project = project.path();
let root_project_name = project.root_project_name()?;
// TODO: check if the project is already initialized
fs::create_dir(&fuzz_project)
.with_context(|| format!("failed to create directory {}", fuzz_project.display()))?;
let fuzz_targets_dir = fuzz_project.join(crate::FUZZ_TARGETS_DIR);
fs::create_dir(&fuzz_targets_dir).with_context(|| {
format!("failed to create directory {}", fuzz_targets_dir.display())
})?;
let cargo_toml = fuzz_project.join("Cargo.toml");
let mut cargo = fs::File::create(&cargo_toml)
.with_context(|| format!("failed to create {}", cargo_toml.display()))?;
cargo
.write_fmt(toml_template!(root_project_name))
.with_context(|| format!("failed to write to {}", cargo_toml.display()))?;
let gitignore = fuzz_project.join(".gitignore");
let mut ignore = fs::File::create(&gitignore)
.with_context(|| format!("failed to create {}", gitignore.display()))?;
ignore
.write_fmt(gitignore_template!())
.with_context(|| format!("failed to write to {}", gitignore.display()))?;
project
.create_target_template(&init.target)
.with_context(|| {
format!(
"could not create template file for target {:?}",
init.target
)
})?;
Ok(project)
}
pub fn list_targets(&self) -> Result<()> {
for bin in &self.targets {
println!("{}", bin);
}
Ok(())
}
/// Create a new fuzz target.
pub fn add_target(&self, add: &options::Add) -> Result<()> {
// Create corpus and artifact directories for the newly added target
self.corpus_for(&add.target)?;
self.artifacts_for(&add.target)?;
self.create_target_template(&add.target)
.with_context(|| format!("could not add target {:?}", add.target))
}
/// Add a new fuzz target script with a given name
fn create_target_template(&self, target: &str) -> Result<()> {
let target_path = self.target_path(target);
// If the user manually created a fuzz project, but hasn't created any
// targets yet, the `fuzz_targets` directory might not exist yet,
// despite a `fuzz/Cargo.toml` manifest with the `metadata.cargo-fuzz`
// key present. Make sure it does exist.
fs::create_dir_all(self.fuzz_targets_dir())
.context("ensuring that `fuzz_targets` directory exists failed")?;
let mut script = fs::OpenOptions::new()
.write(true)
.create_new(true)
.open(&target_path)
.with_context(|| format!("could not create target script file at {:?}", target_path))?;
script.write_fmt(target_template!())?;
let mut cargo = fs::OpenOptions::new()
.append(true)
.open(self.manifest_path())?;
Ok(cargo.write_fmt(toml_bin_template!(target))?)
}
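    /// Builds the base `cargo <subcommand>` invocation: manifest path, target
    /// triple, profile and feature flags, plus the RUSTFLAGS and sanitizer
    /// environment needed for libFuzzer-compatible instrumentation.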
fn cargo(&self, subcommand: &str, build: &BuildOptions) -> Result<Command> {
let mut cmd = Command::new("cargo");
cmd.arg(subcommand)
.arg("--manifest-path")
.arg(self.manifest_path())
// --target=<TARGET> won't pass rustflags to build scripts
.arg("--target")
.arg(&build.triple);
// we default to release mode unless debug mode is explicitly requested
if !build.dev {
cmd.arg("--release");
}
if build.verbose {
cmd.arg("--verbose");
}
if build.no_default_features {
cmd.arg("--no-default-features");
}
if build.all_features {
cmd.arg("--all-features");
}
if let Some(ref features) = build.features {
cmd.arg("--features").arg(features);
}
for flag in &build.unstable_flags {
cmd.arg("-Z").arg(flag);
}
if let Sanitizer::Memory = build.sanitizer {
cmd.arg("-Z").arg("build-std");
}
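        // Base instrumentation flags: mark the build as fuzzing, run LLVM's
        // SanitizerCoverage pass with comparison tracing, inline 8-bit
        // counters and PC tables, and keep dead code so every edge stays
        // visible to libFuzzer.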
let mut rustflags: String = "--cfg fuzzing \
-Cpasses=sancov \
-Cllvm-args=-sanitizer-coverage-level=4 \
-Cllvm-args=-sanitizer-coverage-trace-compares \
-Cllvm-args=-sanitizer-coverage-inline-8bit-counters \
-Cllvm-args=-sanitizer-coverage-pc-table \
-Clink-dead-code"
.to_owned();
if build.coverage {
rustflags.push_str(" -Zinstrument-coverage");
}
match build.sanitizer {
Sanitizer::None => {}
Sanitizer::Memory => {
// Memory sanitizer requires more flags to function than others:
// https://doc.rust-lang.org/unstable-book/compiler-flags/sanitizer.html#memorysanitizer
rustflags.push_str(" -Zsanitizer=memory -Zsanitizer-memory-track-origins")
}
_ => rustflags.push_str(&format!(
" -Zsanitizer={sanitizer}",
sanitizer = build.sanitizer
)),
}
if build.triple.contains("-linux-") {
rustflags.push_str(" -Cllvm-args=-sanitizer-coverage-stack-depth");
}
if !build.release || build.debug_assertions {
rustflags.push_str(" -Cdebug-assertions");
}
// If release mode is enabled then we force 1 CGU to be used in rustc.
// This will result in slower compilations but it looks like the sancov
// passes otherwise add `notEligibleToImport` annotations to functions
// in LLVM IR, meaning that *nothing* can get imported with ThinLTO.
// This means that in release mode, where ThinLTO is critical for
// performance, we're taking a huge hit relative to actual release mode.
        // Local tests once showed this to be a ~3x faster runtime where
// otherwise functions like `Vec::as_ptr` aren't inlined.
if !build.dev {
rustflags.push_str(" -C codegen-units=1");
}
if let Ok(other_flags) = env::var("RUSTFLAGS") {
rustflags.push_str(" ");
rustflags.push_str(&other_flags);
}
cmd.env("RUSTFLAGS", rustflags);
// For asan and tsan we have default options. Merge them to the given
// options, so users can still provide their own options to e.g. disable
// the leak sanitizer. Options are colon-separated.
match build.sanitizer {
Sanitizer::Address => {
let mut asan_opts = env::var("ASAN_OPTIONS").unwrap_or_default();
if !asan_opts.is_empty() {
asan_opts.push(':');
}
asan_opts.push_str("detect_odr_violation=0");
cmd.env("ASAN_OPTIONS", asan_opts);
}
Sanitizer::Thread => {
let mut tsan_opts = env::var("TSAN_OPTIONS").unwrap_or_default();
if !tsan_opts.is_empty() {
tsan_opts.push(':');
}
tsan_opts.push_str("report_signal_unsafe=0");
cmd.env("TSAN_OPTIONS", tsan_opts);
}
_ => {}
}
Ok(cmd)
}
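    /// Builds a `cargo run` command for one fuzz target, pointing libFuzzer's
    /// `-artifact_prefix` at that target's artifacts directory.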
fn cargo_run(&self, build: &options::BuildOptions, fuzz_target: &str) -> Result<Command> {
let mut cmd = self.cargo("run", build)?;
cmd.arg("--bin").arg(fuzz_target);
if let Some(target_dir) = &build.target_dir {
cmd.arg("--target-dir").arg(target_dir);
}
let mut artifact_arg = ffi::OsString::from("-artifact_prefix=");
artifact_arg.push(self.artifacts_for(&fuzz_target)?);
cmd.arg("--").arg(artifact_arg);
Ok(cmd)
}
pub fn exec_build(
&self,
build: &options::BuildOptions,
fuzz_target: Option<&str>,
) -> Result<()> {
let mut cmd = self.cargo("build", build)?;
if let Some(fuzz_target) = fuzz_target {
cmd.arg("--bin").arg(fuzz_target);
} else {
cmd.arg("--bins");
}
if let Some(target_dir) = &build.target_dir {
cmd.arg("--target-dir").arg(target_dir);
} else if build.coverage {
// To ensure that fuzzing and coverage-output generation can run in parallel, we
// produce a separate binary for the coverage command.
let target_dir = env::current_dir()?
.join("target")
.join(default_target())
.join("coverage");
cmd.arg("--target-dir").arg(target_dir);
}
let status = cmd
.status()
.with_context(|| format!("failed to execute: {:?}", cmd))?;
if !status.success() {
bail!("failed to build fuzz script: {:?}", cmd);
}
Ok(())
}
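    /// Collects the artifact files for `target` modified strictly after
    /// `since`, so crashes found by the current run can be told apart from
    /// older ones.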
fn get_artifacts_since(
&self,
target: &str,
since: &time::SystemTime,
) -> Result<HashSet<PathBuf>> {
let mut artifacts = HashSet::new();
let artifacts_dir = self.artifacts_for(target)?;
for entry in fs::read_dir(&artifacts_dir).with_context(|| {
format!(
"failed to read directory entries of {}",
artifacts_dir.display()
)
})? {
let entry = entry.with_context(|| {
format!(
"failed to read directory entry inside {}",
artifacts_dir.display()
)
})?;
let metadata = entry
.metadata()
.context("failed to read artifact metadata")?;
let modified = metadata
.modified()
.context("failed to get artifact modification time")?;
if !metadata.is_file() || modified <= *since {
continue;
}
artifacts.insert(entry.path());
}
Ok(artifacts)
}
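    /// Re-runs the fuzz target on a single artifact with
    /// `RUST_LIBFUZZER_DEBUG_PATH` set, so the target writes the input's
    /// `Debug` representation to a temp file; returns that file's contents.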
fn run_fuzz_target_debug_formatter(
&self,
build: &BuildOptions,
target: &str,
artifact: &Path,
) -> Result<String> {
let debug_output = tempfile::NamedTempFile::new().context("failed to create temp file")?;
let mut cmd = self.cargo_run(&build, &target)?;
cmd.stdin(Stdio::null());
cmd.env("RUST_LIBFUZZER_DEBUG_PATH", &debug_output.path());
cmd.arg(&artifact);
let output = cmd
.output()
.with_context(|| format!("failed to run command: {:?}", cmd))?;
if !output.status.success() {
bail!(
"Fuzz target '{target}' exited with failure when attemping to \
debug formatting an interesting input that we discovered!\n\n\
Artifact: {artifact}\n\n\
Command: {cmd:?}\n\n\
Status: {status}\n\n\
=== stdout ===\n\
{stdout}\n\n\
=== stderr ===\n\
{stderr}",
target = target,
status = output.status,
cmd = cmd,
artifact = artifact.display(),
stdout = String::from_utf8_lossy(&output.stdout),
stderr = String::from_utf8_lossy(&output.stderr),
);
}
let debug = fs::read_to_string(&debug_output).context("failed to read temp file")?;
Ok(debug)
}
/// Prints the debug output of an input test case
pub fn debug_fmt_input(&self, debugfmt: &options::Fmt) -> Result<()> {
if !debugfmt.input.exists() {
bail!(
"Input test case does not exist: {}",
debugfmt.input.display()
);
}
let debug = self
.run_fuzz_target_debug_formatter(&debugfmt.build, &debugfmt.target, &debugfmt.input)
.with_context(|| {
format!(
"failed to run `cargo fuzz fmt` on input: {}",
debugfmt.input.display()
)
})?;
eprintln!("\nOutput of `std::fmt::Debug`:\n");
for l in debug.lines() {
eprintln!("{}", l);
}
Ok(())
}
/// Fuzz a given fuzz target
pub fn exec_fuzz(&self, run: &options::Run) -> Result<()> {
self.exec_build(&run.build, Some(&run.target))?;
let mut cmd = self.cargo_run(&run.build, &run.target)?;
for arg in &run.args {
cmd.arg(arg);
}
if !run.corpus.is_empty() {
for corpus in &run.corpus {
cmd.arg(corpus);
}
} else {
cmd.arg(self.corpus_for(&run.target)?);
}
if run.jobs != 1 {
cmd.arg(format!("-fork={}", run.jobs));
}
// When libfuzzer finds failing inputs, those inputs will end up in the
// artifacts directory. To easily filter old artifacts from new ones,
// get the current time, and then later we only consider files modified
// after now.
let before_fuzzing = time::SystemTime::now();
let mut child = cmd
.spawn()
.with_context(|| format!("failed to spawn command: {:?}", cmd))?;
let status = child
.wait()
.with_context(|| format!("failed to wait on child process for command: {:?}", cmd))?;
if status.success() {
return Ok(());
}
// Get and print the `Debug` formatting of any new artifacts, along with
// tips about how to reproduce failures and/or minimize test cases.
let new_artifacts = self.get_artifacts_since(&run.target, &before_fuzzing)?;
for artifact in new_artifacts {
// To make the artifact a little easier to read, strip the current
// directory prefix when possible.
let artifact = strip_current_dir_prefix(&artifact);
eprintln!("\n{:โ<80}", "");
eprintln!("\nFailing input:\n\n\t{}\n", artifact.display());
// Note: ignore errors when running the debug formatter. This most
// likely just means that we're dealing with a fuzz target that uses
// an older version of the libfuzzer crate, and doesn't support
// `RUST_LIBFUZZER_DEBUG_PATH`.
if let Ok(debug) =
self.run_fuzz_target_debug_formatter(&run.build, &run.target, artifact)
{
eprintln!("Output of `std::fmt::Debug`:\n");
for l in debug.lines() {
eprintln!("\t{}", l);
}
eprintln!();
}
eprintln!(
"Reproduce with:\n\n\tcargo fuzz run{options} {target} {artifact}\n",
options = &run.build,
target = &run.target,
artifact = artifact.display()
);
eprintln!(
"Minimize test case with:\n\n\tcargo fuzz tmin{options} {target} {artifact}\n",
options = &run.build,
target = &run.target,
artifact = artifact.display()
);
}
eprintln!("{:โ<80}\n", "");
bail!("Fuzz target exited with {}", status)
}
pub fn exec_tmin(&self, tmin: &options::Tmin) -> Result<()> {
self.exec_build(&tmin.build, Some(&tmin.target))?;
let mut cmd = self.cargo_run(&tmin.build, &tmin.target)?;
cmd.arg("-minimize_crash=1")
.arg(format!("-runs={}", tmin.runs))
.arg(&tmin.test_case);
for arg in &tmin.args {
cmd.arg(arg);
}
let before_tmin = time::SystemTime::now();
let mut child = cmd
.spawn()
.with_context(|| format!("failed to spawn command: {:?}", cmd))?;
let status = child
.wait()
.with_context(|| format!("failed to wait on child process for command: {:?}", cmd))?;
if !status.success() {
            eprintln!("\n{:─<80}\n", "");
return Err(anyhow!("Command `{:?}` exited with {}", cmd, status)).with_context(|| {
"Test case minimization failed.\n\
\n\
Usually this isn't a hard error, and just means that libfuzzer\n\
doesn't know how to minimize the test case any further while\n\
still reproducing the original crash.\n\
\n\
See the logs above for details."
});
}
// Find and display the most recently modified artifact, which is
        // presumably the result of minimization. Yeah, this is a little hacky,
// but it seems to work. I don't want to parse libfuzzer's stderr output
// and hope it never changes.
let minimized_artifact = self
.get_artifacts_since(&tmin.target, &before_tmin)?
.into_iter()
.max_by_key(|a| {
a.metadata()
.and_then(|m| m.modified())
.unwrap_or(time::SystemTime::UNIX_EPOCH)
});
if let Some(artifact) = minimized_artifact {
let artifact = strip_current_dir_prefix(&artifact);
            eprintln!("\n{:─<80}\n", "");
eprintln!("Minimized artifact:\n\n\t{}\n", artifact.display());
// Note: ignore errors when running the debug formatter. This most
// likely just means that we're dealing with a fuzz target that uses
// an older version of the libfuzzer crate, and doesn't support
// `RUST_LIBFUZZER_DEBUG_PATH`.
if let Ok(debug) =
self.run_fuzz_target_debug_formatter(&tmin.build, &tmin.target, artifact)
{
eprintln!("Output of `std::fmt::Debug`:\n");
for l in debug.lines() {
eprintln!("\t{}", l);
}
eprintln!();
}
eprintln!(
"Reproduce with:\n\n\tcargo fuzz run {target} {artifact}\n",
target = &tmin.target,
artifact = artifact.display()
);
}
Ok(())
}
pub fn exec_cmin(&self, cmin: &options::Cmin) -> Result<()> {
self.exec_build(&cmin.build, Some(&cmin.target))?;
let mut cmd = self.cargo_run(&cmin.build, &cmin.target)?;
for arg in &cmin.args {
cmd.arg(arg);
}
let corpus = if let Some(corpus) = cmin.corpus.clone() {
corpus
} else {
self.corpus_for(&cmin.target)?
};
let corpus = corpus
.to_str()
.ok_or_else(|| anyhow!("corpus must be valid unicode"))?
.to_owned();
let tmp = tempfile::TempDir::new_in(self.path())?;
let tmp_corpus = tmp.path().join("corpus");
fs::create_dir(&tmp_corpus)?;
cmd.arg("-merge=1").arg(&tmp_corpus).arg(&corpus);
// Spawn cmd in child process instead of exec-ing it
let status = cmd
.status()
.with_context(|| format!("could not execute command: {:?}", cmd))?;
if status.success() {
// move corpus directory into tmp to auto delete it
fs::rename(&corpus, tmp.path().join("old"))?;
fs::rename(tmp.path().join("corpus"), corpus)?;
} else {
println!("Failed to minimize corpus: {}", status);
}
Ok(())
}
/// Produce coverage information for a given corpus
pub fn exec_coverage(self, coverage: &options::Coverage) -> Result<()> {
// Build project with source-based coverage generation enabled.
self.exec_build(&coverage.build, Some(&coverage.target))?;
// Retrieve corpus directories.
let corpora = if coverage.corpus.is_empty() {
vec![self.corpus_for(&coverage.target)?]
} else {
coverage
.corpus
.iter()
.map(|name| Path::new(name).to_path_buf())
.collect()
};
// Collect the (non-directory) readable input files from the corpora.
let files_and_dirs = corpora.iter().flat_map(fs::read_dir).flatten().flatten();
let mut readable_input_files = files_and_dirs
.filter(|file| match file.file_type() {
Ok(ft) => ft.is_file(),
_ => false,
})
.peekable();
if readable_input_files.peek().is_none() {
bail!(
"The corpus does not contain program-input files. \
Coverage information requires existing input files. \
Try running the fuzzer first (`cargo fuzz run ...`) to generate a corpus, \
or provide a nonempty corpus directory."
)
}
let (coverage_out_raw_dir, coverage_out_file) = self.coverage_for(&coverage.target)?;
// Generating individual coverage data for all files in corpora.
for input_file in readable_input_files {
let (mut cmd, file_name) =
self.create_coverage_cmd(coverage, &coverage_out_raw_dir, &input_file.path())?;
eprintln!("Generating coverage data for {:?}", file_name);
let status = cmd
.status()
.with_context(|| format!("Failed to run command: {:?}", cmd))?;
if !status.success() {
Err(anyhow!(
"Command exited with failure status {}: {:?}",
status,
cmd
))
.context("Failed to generage coverage data")?;
}
}
self.merge_coverage(&coverage_out_raw_dir, &coverage_out_file)?;
Ok(())
}
fn create_coverage_cmd(
&self,
coverage: &options::Coverage,
coverage_dir: &Path,
input_file: &Path,
) -> Result<(Command, String)> {
let mut cmd = self.cargo_run(&coverage.build, &coverage.target)?;
// Raw coverage data will be saved in `coverage/<target>` directory.
let input_file_name = input_file
.file_name()
.and_then(|x| x.to_str())
.with_context(|| format!("Corpus contains file with invalid name {:?}", input_file))?;
cmd.env(
"LLVM_PROFILE_FILE",
coverage_dir.join(format!("default-{}.profraw", input_file_name)),
);
cmd.arg(input_file);
for arg in &coverage.args {
cmd.arg(arg);
}
Ok((cmd, input_file_name.to_string()))
}
fn merge_coverage(&self, profdata_raw_path: &Path, profdata_out_path: &Path) -> Result<()> {
let mut merge_cmd = Command::new(cargo_binutils::Tool::Profdata.path()?);
merge_cmd.arg("merge").arg("-sparse");
for raw_file in fs::read_dir(profdata_raw_path).with_context(|| {
format!(
"failed to read directory entries of {}",
profdata_raw_path.display()
)
})? {
merge_cmd.arg(raw_file?.path());
}
merge_cmd.arg("-o").arg(profdata_out_path);
eprintln!("Merging raw coverage data...");
let status = merge_cmd
.status()
.with_context(|| format!("Failed to run command: {:?}", merge_cmd))
.with_context(|| "Merging raw coverage files failed.\n\
\n\
Do you have LLVM coverage tools installed?\n\
https://doc.rust-lang.org/beta/unstable-book/compiler-flags/source-based-code-coverage.html#installing-llvm-coverage-tools")?;
if !status.success() {
Err(anyhow!(
"Command exited with failure status {}: {:?}",
status,
merge_cmd
))
.context("Merging raw coverage files failed")?;
}
if profdata_out_path.exists() {
eprintln!("Coverage data merged and saved in {:?}.", profdata_out_path);
Ok(())
} else {
bail!("Coverage data could not be merged.")
}
}
fn path(&self) -> PathBuf {
self.root_project.join("fuzz")
}
fn manifest | -> PathBuf {
self.path().join("Cargo.toml")
}
/// Returns paths to the `coverage/<target>/raw` directory and `coverage/<target>/coverage.profdata` file.
fn coverage_for(&self, target: &str) -> Result<(PathBuf, PathBuf)> {
let mut coverage_data = self.path();
coverage_data.push("coverage");
coverage_data.push(target);
let mut coverage_raw = coverage_data.clone();
coverage_data.push("coverage.profdata");
coverage_raw.push("raw");
fs::create_dir_all(&coverage_raw).with_context(|| {
format!("could not make a coverage directory at {:?}", coverage_raw)
})?;
Ok((coverage_raw, coverage_data))
}
fn corpus_for(&self, target: &str) -> Result<PathBuf> {
let mut p = self.path();
p.push("corpus");
p.push(target);
fs::create_dir_all(&p)
.with_context(|| format!("could not make a corpus directory at {:?}", p))?;
Ok(p)
}
fn artifacts_for(&self, target: &str) -> Result<PathBuf> {
let mut p = self.path();
p.push("artifacts");
p.push(target);
// This adds a trailing slash, which is necessary for libFuzzer, because
// it does simple string concatenation when joining paths.
p.push("");
fs::create_dir_all(&p)
.with_context(|| format!("could not make a artifact directory at {:?}", p))?;
Ok(p)
}
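// A standalone illustration of the trailing-slash behavior relied on above
// (sketch, not part of this crate's API):
//
//     let mut p = std::path::PathBuf::from("fuzz/artifacts/target");
//     p.push("");
//     assert_eq!(p.to_str(), Some("fuzz/artifacts/target/"));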
fn fuzz_targets_dir(&self) -> PathBuf {
let mut root = self.path();
if root.join(crate::FUZZ_TARGETS_DIR_OLD).exists() {
println!(
"warning: The `fuzz/fuzzers/` directory has renamed to `fuzz/fuzz_targets/`. \
Please rename the directory as such. This will become a hard error in the \
future."
);
root.push(crate::FUZZ_TARGETS_DIR_OLD);
} else {
root.push(crate::FUZZ_TARGETS_DIR);
}
root
}
fn target_path(&self, target: &str) -> PathBuf {
let mut root = self.fuzz_targets_dir();
root.push(target);
root.set_extension("rs");
root
}
fn manifest(&self) -> Result<toml::Value> {
let filename = self.manifest_path();
let mut file = fs::File::open(&filename)
.with_context(|| format!("could not read the manifest file: {}", filename.display()))?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
toml::from_slice(&data).with_context(|| {
format!(
"could not decode the manifest file at {}",
filename.display()
)
})
}
fn root_project_name(&self) -> Result<String> {
let filename = self.root_project.join("Cargo.toml");
let mut file = fs::File::open(&filename)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
let value: toml::Value = toml::from_slice(&data)?;
let name = value
.as_table()
.and_then(|v| v.get("package"))
.and_then(toml::Value::as_table)
.and_then(|v| v.get("name"))
.and_then(toml::Value::as_str);
if let Some(name) = name {
Ok(String::from(name))
} else {
bail!("{} (package.name) is malformed", filename.display());
}
}
}
fn collect_targets(value: &toml::Value) -> Vec<String> {
let bins = value
.as_table()
.and_then(|v| v.get("bin"))
.and_then(toml::Value::as_array);
let mut bins = if let Some(bins) = bins {
bins.iter()
.map(|bin| {
bin.as_table()
.and_then(|v| v.get("name"))
.and_then(toml::Value::as_str)
})
.filter_map(|name| name.map(String::from))
.collect()
} else {
Vec::new()
};
// Always sort them, so that we have deterministic output.
bins.sort();
bins
}
fn is_fuzz_manifest(value: &toml::Value) -> bool {
let is_fuzz = value
.as_table()
.and_then(|v| v.get("package"))
.and_then(toml::Value::as_table)
.and_then(|v| v.get("metadata"))
.and_then(toml::Value::as_table)
.and_then(|v| v.get("cargo-fuzz"))
.and_then(toml::Value::as_bool);
is_fuzz == Some(true)
}
/// Returns the path for the first found non-fuzz Cargo package
fn find_package() -> Result<PathBuf> {
let mut dir = env::current_dir()?;
let mut data = Vec::new();
loop {
let manifest_path = dir.join("Cargo.toml");
match fs::File::open(&manifest_path) {
Err(_) => {}
Ok(mut f) => {
data.clear();
f.read_to_end(&mut data)
.with_context(|| format!("failed to read {}", manifest_path.display()))?;
let value: toml::Value = toml::from_slice(&data).with_context(|| {
format!(
"could not decode the manifest file at {}",
manifest_path.display()
)
})?;
if !is_fuzz_manifest(&value) {
// Not a cargo-fuzz project => must be a proper cargo project :)
return Ok(dir);
}
}
}
if !dir.pop() {
break;
}
}
bail!("could not find a cargo project")
}
fn strip_current_dir_prefix(path: &Path) -> &Path {
env::current_dir()
.ok()
.and_then(|curdir| path.strip_prefix(curdir).ok())
.unwrap_or(&path)
}
| _path(&self) |
FinalModelUUID.go | // Automatically generated by the Fast Binary Encoding compiler, do not modify!
// https://github.com/chronoxor/FastBinaryEncoding
// Source: FBE
// Version: 1.4.0.0
package fbe
import "errors"
// Fast Binary Encoding UUID final model
type FinalModelUUID struct {
// Final model buffer
buffer *Buffer
// Final model buffer offset
offset int
}
// Create a new UUID final model
func NewFinalModelUUID(buffer *Buffer, offset int) *FinalModelUUID {
return &FinalModelUUID{buffer: buffer, offset: offset}
}
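// Usage sketch (illustrative; allocating a Buffer of at least 16 bytes is
// handled elsewhere in this package):
//
//	fm := NewFinalModelUUID(buffer, 0)
//	if _, err := fm.Set(value); err == nil {
//		roundTripped, _, _ := fm.Get() // reads the 16 bytes back as a UUID
//		_ = roundTripped
//	}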
// Get the allocation size
func (fm *FinalModelUUID) FBEAllocationSize(value UUID) int { return fm.FBESize() }
// Get the final size
func (fm *FinalModelUUID) FBESize() int { return 16 }
// Get the final offset
func (fm *FinalModelUUID) FBEOffset() int { return fm.offset }
// Set the final offset
func (fm *FinalModelUUID) SetFBEOffset(value int) { fm.offset = value }
// Shift the current final offset
func (fm *FinalModelUUID) FBEShift(size int) { fm.offset += size }
// Unshift the current final offset
func (fm *FinalModelUUID) FBEUnshift(size int) { fm.offset -= size }
// Check if the UUID value is valid
func (fm *FinalModelUUID) Verify() int {
if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {
return MaxInt
}
|
// Get the UUID value
func (fm *FinalModelUUID) Get() (UUID, int, error) {
if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {
return UUIDNil(), 0, errors.New("model is broken")
}
return ReadUUID(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset()), fm.FBESize(), nil
}
// Set the UUID value
func (fm *FinalModelUUID) Set(value UUID) (int, error) {
if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {
return 0, errors.New("model is broken")
}
WriteUUID(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset(), value)
return fm.FBESize(), nil
} | return fm.FBESize()
} |
listeners.rs | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Manage listening on multiple multiaddresses at once.
use crate::{Multiaddr, Transport, transport::{TransportError, ListenerEvent}};
use futures::{prelude::*, task::Context, task::Poll};
use log::debug;
use smallvec::SmallVec;
use std::{collections::VecDeque, fmt, pin::Pin};
/// Implementation of `futures::Stream` that allows listening on multiaddresses.
///
/// To start using a `ListenersStream`, create one with `new` by passing an implementation of
/// `Transport`. This `Transport` will be used to start listening, therefore you want to pass
/// a `Transport` that supports the protocols you wish to listen on.
///
/// Then, call `ListenersStream::listen_on` for all addresses you want to start listening on.
///
/// The `ListenersStream` never ends and never produces errors. If a listener errors or closes,
/// an event is generated on the stream and the listener is then dropped, but the `ListenersStream`
/// itself continues.
///
/// # Example
///
/// ```no_run
/// # fn main() {
/// use futures::prelude::*;
/// use libp2p_core::nodes::listeners::{ListenersEvent, ListenersStream};
///
/// let mut listeners = ListenersStream::new(libp2p_tcp::TcpConfig::new());
///
/// // Ask the `listeners` to start listening on the given multiaddress.
/// listeners.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()).unwrap();
///
/// // The `listeners` will now generate events when polled.
/// futures::executor::block_on(async move {
/// while let Some(event) = listeners.next().await {
/// match event {
/// ListenersEvent::NewAddress { listener_id, listen_addr } => {
/// println!("Listener {:?} is listening at address {}", listener_id, listen_addr);
/// },
/// ListenersEvent::AddressExpired { listener_id, listen_addr } => {
/// println!("Listener {:?} is no longer listening at address {}", listener_id, listen_addr);
/// },
/// ListenersEvent::Closed { listener_id, .. } => {
/// println!("Listener {:?} has been closed", listener_id);
/// },
/// ListenersEvent::Error { listener_id, error } => {
/// println!("Listener {:?} has experienced an error: {}", listener_id, error);
/// },
/// ListenersEvent::Incoming { listener_id, upgrade, local_addr, .. } => {
/// println!("Listener {:?} has a new connection on {}", listener_id, local_addr);
/// // We don't do anything with the newly-opened connection, but in a real-life
/// // program you probably want to use it!
/// drop(upgrade);
/// },
/// }
/// }
/// })
/// # }
/// ```
pub struct ListenersStream<TTrans>
where
TTrans: Transport,
{
/// Transport used to spawn listeners.
transport: TTrans,
/// All the active listeners.
/// The `Listener` struct contains a stream that we want to be pinned. Since the `VecDeque`
/// can be resized, the only way is to use a `Pin<Box<>>`.
listeners: VecDeque<Pin<Box<Listener<TTrans>>>>,
/// The next listener ID to assign.
next_id: ListenerId
}
/// The ID of a single listener.
///
/// It is part of most [`ListenersEvent`]s and can be used to remove
/// individual listeners from the [`ListenersStream`].
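///
/// A minimal removal sketch (mirroring the module-level example; `libp2p_tcp`
/// is assumed as the transport):
///
/// ```no_run
/// use libp2p_core::nodes::listeners::ListenersStream;
///
/// let mut listeners = ListenersStream::new(libp2p_tcp::TcpConfig::new());
/// let id = listeners.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()).unwrap();
/// // Later, stop that specific listener by its ID:
/// listeners.remove_listener(id).unwrap();
/// ```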
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct ListenerId(u64);
/// A single active listener.
#[pin_project::pin_project]
#[derive(Debug)]
struct Listener<TTrans>
where
TTrans: Transport,
{
/// The ID of this listener.
id: ListenerId,
/// The object that actually listens.
#[pin]
listener: TTrans::Listener,
/// Addresses it is listening on.
addresses: SmallVec<[Multiaddr; 4]>
}
/// Event that can happen on the `ListenersStream`.
pub enum ListenersEvent<TTrans>
where
TTrans: Transport,
{
/// A new address is being listened on.
NewAddress {
/// The listener that is listening on the new address.
listener_id: ListenerId,
/// The new address that is being listened on.
listen_addr: Multiaddr
},
/// An address is no longer being listened on.
AddressExpired {
/// The listener that is no longer listening on the address.
listener_id: ListenerId,
/// The address that is no longer being listened on.
listen_addr: Multiaddr
},
/// A connection is incoming on one of the listeners.
Incoming {
/// The listener that produced the upgrade.
listener_id: ListenerId,
/// The produced upgrade.
upgrade: TTrans::ListenerUpgrade,
/// Local connection address.
local_addr: Multiaddr,
/// Address used to send back data to the incoming client.
send_back_addr: Multiaddr,
},
/// A listener closed.
Closed {
/// The ID of the listener that closed.
listener_id: ListenerId,
/// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err`
/// if the stream produced an error.
reason: Result<(), TTrans::Error>,
},
/// A listener errored.
///
/// The listener will continue to be polled for new events and the event
/// is for informational purposes only.
Error {
/// The ID of the listener that errored.
listener_id: ListenerId,
/// The error value.
error: TTrans::Error,
}
}
impl<TTrans> ListenersStream<TTrans>
where
TTrans: Transport,
{
/// Starts a new stream of listeners.
pub fn new(transport: TTrans) -> Self {
ListenersStream {
transport,
listeners: VecDeque::new(),
next_id: ListenerId(1)
}
}
/// Same as `new`, but pre-allocates enough memory for the given number of
/// simultaneous listeners.
pub fn with_capacity(transport: TTrans, capacity: usize) -> Self {
ListenersStream {
transport,
listeners: VecDeque::with_capacity(capacity),
next_id: ListenerId(1)
}
}
/// Start listening on a multiaddress.
///
/// Returns an error if the transport doesn't support the given multiaddress.
pub fn listen_on(&mut self, addr: Multiaddr) -> Result<ListenerId, TransportError<TTrans::Error>>
where
TTrans: Clone,
{
let listener = self.transport.clone().listen_on(addr)?;
self.listeners.push_back(Box::pin(Listener {
id: self.next_id,
listener,
addresses: SmallVec::new()
}));
let id = self.next_id;
self.next_id = ListenerId(self.next_id.0 + 1);
Ok(id)
}
/// Remove the listener matching the given `ListenerId`.
///
/// Return `Ok(())` if a listener with this ID was in the list.
pub fn remove_listener(&mut self, id: ListenerId) -> Result<(), ()> {
if let Some(i) = self.listeners.iter().position(|l| l.id == id) {
self.listeners.remove(i);
Ok(())
} else {
Err(())
}
}
/// Returns the transport passed when building this object.
pub fn transport(&self) -> &TTrans {
&self.transport
}
/// Returns an iterator that produces the list of addresses we're listening on.
pub fn listen_addrs(&self) -> impl Iterator<Item = &Multiaddr> {
self.listeners.iter().flat_map(|l| l.addresses.iter())
}
/// Provides an API similar to `Stream`, except that it cannot end.
pub fn | (mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<ListenersEvent<TTrans>> {
// We remove each element from `listeners` one by one and add them back.
let mut remaining = self.listeners.len();
while let Some(mut listener) = self.listeners.pop_back() {
let mut listener_project = listener.as_mut().project();
match TryStream::try_poll_next(listener_project.listener.as_mut(), cx) {
Poll::Pending => {
self.listeners.push_front(listener);
remaining -= 1;
if remaining == 0 { break }
}
Poll::Ready(Some(Ok(ListenerEvent::Upgrade { upgrade, local_addr, remote_addr }))) => {
let id = *listener_project.id;
self.listeners.push_front(listener);
return Poll::Ready(ListenersEvent::Incoming {
listener_id: id,
upgrade,
local_addr,
send_back_addr: remote_addr
})
}
Poll::Ready(Some(Ok(ListenerEvent::NewAddress(a)))) => {
if listener_project.addresses.contains(&a) {
debug!("Transport has reported address {} multiple times", a)
}
if !listener_project.addresses.contains(&a) {
listener_project.addresses.push(a.clone());
}
let id = *listener_project.id;
self.listeners.push_front(listener);
return Poll::Ready(ListenersEvent::NewAddress {
listener_id: id,
listen_addr: a
})
}
Poll::Ready(Some(Ok(ListenerEvent::AddressExpired(a)))) => {
listener_project.addresses.retain(|x| x != &a);
let id = *listener_project.id;
self.listeners.push_front(listener);
return Poll::Ready(ListenersEvent::AddressExpired {
listener_id: id,
listen_addr: a
})
}
Poll::Ready(Some(Ok(ListenerEvent::Error(error)))) => {
let id = *listener_project.id;
self.listeners.push_front(listener);
return Poll::Ready(ListenersEvent::Error {
listener_id: id,
error,
})
}
Poll::Ready(None) => {
return Poll::Ready(ListenersEvent::Closed {
listener_id: *listener_project.id,
reason: Ok(()),
})
}
Poll::Ready(Some(Err(err))) => {
return Poll::Ready(ListenersEvent::Closed {
listener_id: *listener_project.id,
reason: Err(err),
})
}
}
}
// We register the current task to be woken up if a new listener is added.
Poll::Pending
}
}
impl<TTrans> Stream for ListenersStream<TTrans>
where
TTrans: Transport,
{
type Item = ListenersEvent<TTrans>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
ListenersStream::poll(self, cx).map(Option::Some)
}
}
impl<TTrans> Unpin for ListenersStream<TTrans>
where
TTrans: Transport,
{
}
impl<TTrans> fmt::Debug for ListenersStream<TTrans>
where
TTrans: Transport + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("ListenersStream")
.field("transport", &self.transport)
.field("listen_addrs", &self.listen_addrs().collect::<Vec<_>>())
.finish()
}
}
impl<TTrans> fmt::Debug for ListenersEvent<TTrans>
where
TTrans: Transport,
TTrans::Error: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
ListenersEvent::NewAddress { listener_id, listen_addr } => f
.debug_struct("ListenersEvent::NewAddress")
.field("listener_id", listener_id)
.field("listen_addr", listen_addr)
.finish(),
ListenersEvent::AddressExpired { listener_id, listen_addr } => f
.debug_struct("ListenersEvent::AddressExpired")
.field("listener_id", listener_id)
.field("listen_addr", listen_addr)
.finish(),
ListenersEvent::Incoming { listener_id, local_addr, .. } => f
.debug_struct("ListenersEvent::Incoming")
.field("listener_id", listener_id)
.field("local_addr", local_addr)
.finish(),
ListenersEvent::Closed { listener_id, reason } => f
.debug_struct("ListenersEvent::Closed")
.field("listener_id", listener_id)
.field("reason", reason)
.finish(),
ListenersEvent::Error { listener_id, error } => f
.debug_struct("ListenersEvent::Error")
.field("listener_id", listener_id)
.field("error", error)
.finish()
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::transport;
use futures::prelude::*;
#[test]
fn incoming_event() {
async_std::task::block_on(async move {
let mem_transport = transport::MemoryTransport::default();
let mut listeners = ListenersStream::new(mem_transport);
listeners.listen_on("/memory/0".parse().unwrap()).unwrap();
let address = {
let event = listeners.next().await.unwrap();
if let ListenersEvent::NewAddress { listen_addr, .. } = event {
listen_addr
} else {
panic!("Was expecting the listen address to be reported")
}
};
let address2 = address.clone();
async_std::task::spawn(async move {
mem_transport.dial(address2).unwrap().await.unwrap();
});
match listeners.next().await.unwrap() {
ListenersEvent::Incoming { local_addr, send_back_addr, .. } => {
assert_eq!(local_addr, address);
assert_eq!(send_back_addr, address);
},
_ => panic!()
}
});
}
#[test]
fn listener_event_error_isnt_fatal() {
// Tests that a listener continues to be polled even after producing
// a `ListenerEvent::Error`.
#[derive(Clone)]
struct DummyTrans;
impl transport::Transport for DummyTrans {
type Output = ();
type Error = std::io::Error;
type Listener = Pin<Box<dyn Stream<Item = Result<ListenerEvent<Self::ListenerUpgrade, std::io::Error>, std::io::Error>>>>;
type ListenerUpgrade = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>>>>;
type Dial = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>>>>;
fn listen_on(self, _: Multiaddr) -> Result<Self::Listener, transport::TransportError<Self::Error>> {
Ok(Box::pin(stream::unfold((), |()| async move {
Some((Ok(ListenerEvent::Error(std::io::Error::from(std::io::ErrorKind::Other))), ()))
})))
}
fn dial(self, _: Multiaddr) -> Result<Self::Dial, transport::TransportError<Self::Error>> {
panic!()
}
}
async_std::task::block_on(async move {
let transport = DummyTrans;
let mut listeners = ListenersStream::new(transport);
listeners.listen_on("/memory/0".parse().unwrap()).unwrap();
for _ in 0..10 {
match listeners.next().await.unwrap() {
ListenersEvent::Error { .. } => {},
_ => panic!()
}
}
});
}
#[test]
fn listener_error_is_fatal() {
// Tests that a listener stops after producing an error on the stream itself.
#[derive(Clone)]
struct DummyTrans;
impl transport::Transport for DummyTrans {
type Output = ();
type Error = std::io::Error;
type Listener = Pin<Box<dyn Stream<Item = Result<ListenerEvent<Self::ListenerUpgrade, std::io::Error>, std::io::Error>>>>;
type ListenerUpgrade = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>>>>;
type Dial = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>>>>;
fn listen_on(self, _: Multiaddr) -> Result<Self::Listener, transport::TransportError<Self::Error>> {
Ok(Box::pin(stream::unfold((), |()| async move {
Some((Err(std::io::Error::from(std::io::ErrorKind::Other)), ()))
})))
}
fn dial(self, _: Multiaddr) -> Result<Self::Dial, transport::TransportError<Self::Error>> {
panic!()
}
}
async_std::task::block_on(async move {
let transport = DummyTrans;
let mut listeners = ListenersStream::new(transport);
listeners.listen_on("/memory/0".parse().unwrap()).unwrap();
match listeners.next().await.unwrap() {
ListenersEvent::Closed { .. } => {},
_ => panic!()
}
});
}
}
| poll |
auth.ts | import { PLuginFunction, HttpError, Sact } from '@sact/core';
import { SessionReq, SessionRes, MemoryStore } from '@sact/session';
import { RedisStore } from '@sact/session/src';
import * as jwt from 'jsonwebtoken';
export interface Options {
/**
* Secret used to sign and verify JWT tokens
*/
secret?: string;
tokenName?: string;
}
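// Example options (sketch; the secret value is only a placeholder). When
// `tokenName` is omitted it falls back to 'token' below.
//
//   const options: Options = { secret: process.env.JWT_SECRET as string, tokenName: 'sid' };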
export interface AuthReq {
user?: string;
/**
* Log user out
*/
logout: () => Promise<void>;
/**
* Log in the user by id
*
* ```typescript
* // login the user by id and some optional meta data
* await req.login('some-user-id', {})
* ```
*/
login: <T>(id: string, meta?: T) => Promise<void>;
/**
* Authenticate the user
*
* ```typescript
* await req.authenticate() // All you need to do; the user will then be available on req.user
* ``` | /**
* Authenticate the user or throw a 401 status if failed
*
* ```typescript
* await req.authenticateOrFail() // All you need to do; the user will then be available on req.user
* ```
*/
authenticateOrFail: () => Promise<void>;
}
const verify = (
token: string,
secret: string
): Promise<{ id: string | number }> =>
new Promise((resolve, reject) => {
jwt.verify(token, secret, (err, decoded: any) => {
if (err) {
return reject(err);
} else {
return resolve(decoded);
}
});
});
const sign = (data: { id: string | number }, secret: string): Promise<string> =>
new Promise((resolve, reject) => {
jwt.sign(data, secret, { expiresIn: '1h' }, (err, encode) => {
if (err) {
reject(err);
} else {
resolve(encode as string);
}
});
});
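// Round-trip sketch using the two helpers above (tokens expire after 1h):
//
//   const token = await sign({ id: 'user-1' }, secret);
//   const { id } = await verify(token, secret); // id === 'user-1'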
const auth: PLuginFunction<Options> = (
sact: Sact<AuthReq & SessionReq<MemoryStore | RedisStore>, SessionRes>,
options
) => {
if (!options?.secret) {
throw new Error('[sact-auth] secret is required');
}
const secret = options.secret;
const tokenName = options.tokenName || 'token';
sact.use((req, res) => {
req.login = async (id: string, meta?: any) => {
const token = await sign({ id }, secret);
await req.session.set(id, meta);
res.setCookie(tokenName, token);
};
req.logout = async () => {
await req.session.delete();
res.setCookie(tokenName, '');
};
req.authenticateOrFail = async () => {
const token = req.cookies[tokenName];
if (token) {
let id: string | undefined;
try {
const jwt = await verify(token, secret);
id = jwt.id as string;
} catch (error: any) {
if (error.name === 'TokenExpiredError') {
const session = await req.session.get();
if (session) {
const token = await sign({ id: session.id }, secret);
res.setCookie(tokenName, token);
id = session.id;
}
}
}
if (!id) {
throw new HttpError('Unauthorized', 401);
}
req.user = id;
} else {
throw new HttpError('Unauthorized', 401);
}
};
req.authenticate = async () => {
const token = req.cookies[tokenName];
if (token) {
let id: string | undefined;
try {
const jwt = await verify(token, secret);
id = jwt.id as string;
} catch (error: any) {
if (error.name === 'TokenExpiredError') {
const session = await req.session.get();
if (session && session.id) {
const token = await sign({ id: session.id }, secret);
res.setCookie(tokenName, token);
id = session.id;
}
}
}
req.user = id;
}
};
});
};
export { auth }; | */
authenticate: () => Promise<void>; |
get_asa_full_config.py | #!/usr/bin/python3
# BY NOMO
from netmiko import Netmiko
from getpass import getpass
from datetime import datetime
from pprint import pprint
import re
import os
import sys
import socket
# Vars
config_dir = "/home/reponeg/logs/asa_configs"
# Function for DNS resolution
def | (hostname):
try:
socket.gethostbyname(hostname)
return 1 # If lookup works
except socket.error:
return 0 # If lookup fails
def getShowRun(connection_handle, context, dirname):
output = connection_handle.send_command("changeto context " + context)
sh_run = connection_handle.send_command("show run")
hostname_simple = hostname_arg.split(".")[0]  # works for bare hostnames too, unlike the regex lookup
file_path = dirname + "/" + "sh_run_" + hostname_simple +"_"+ context + ".txt"
with open(file_path, "w") as file_handle:
file_handle.write(sh_run)
return 1
# Check arguments (hostname, username, password) and hostname DNS resolution
if len(sys.argv) < 4:
    print("\nMissing parameters. Please provide the hostname (or IP address), username and password:")
    print("\nUsage:", sys.argv[0], "<hostname> <username> <password>\n\n")
    exit()
elif len(sys.argv) > 4:
    print("Too many parameters.")
    print("Usage:", sys.argv[0], "<hostname> <username> <password>")
    exit()
hostname_arg = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
dns_lookup_result = hostnameLookup(hostname_arg)
if dns_lookup_result == 0:
print("Hostname lookup for %s failed. Please check name and retry." %(hostname_arg) )
exit()
# Device
asa = {
'host': hostname_arg,
'username': username,
'password': password,
'device_type': 'cisco_asa'
}
auth_pending = True
while auth_pending:
    try:
        conn1 = Netmiko(**asa)
        auth_pending = False
    except Exception:
        # Re-prompt for credentials; the loop retries the connection itself,
        # so no second Netmiko() call is needed here.
        print("Authentication failed. This is host " + hostname_arg)
        asa['username'] = input("\nEnter your username (for this host): ")
        asa['password'] = getpass()
# Move to context sys to grab the list of all contexts
command = "changeto context sys"
output = conn1.send_command(command)
command = "show run | i context"
output = conn1.send_command(command).splitlines()
# Get the list
context_list = []
for line in output:
if line.startswith("context "):
context_name = line.replace("context ", "")
context_list.append(context_name)
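# Illustrative output of "show run | i context" that this loop reduces
# (context names are hypothetical):
#   context CTX-A
#   context CTX-B
# -> context_list == ["CTX-A", "CTX-B"]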
# Start hopping contexts and retrieving running configs
for context in context_list:
getShowRun(conn1, context, config_dir)
print("Retrieved config for contexts:")
print(context_list)
print("\n")
| hostnameLookup |
forward.rs | use crate::{Error, Result};
use futures::FutureExt;
use std::num::NonZeroUsize;
use tokio::{
io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt},
net::TcpStream,
};
async fn forward_reader_to_writer<R, W>(mut reader: R, mut writer: W) -> Result<()>
where
R: AsyncRead + Unpin,
W: AsyncWrite + Unpin,
|
pub async fn forward_socket(client: TcpStream, server: TcpStream) -> Result<()> {
let (client_reader, client_writer) = tokio::io::split(client);
let (server_reader, server_writer) = tokio::io::split(server);
let client_to_server = forward_reader_to_writer(client_reader, server_writer).fuse();
let server_to_client = forward_reader_to_writer(server_reader, client_writer).fuse();
log::info!("Forwarding started...");
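    // Race both directions: when either side completes (EOF or error), the
    // select! finishes and both halves of each stream are dropped.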
tokio::select! {
_ = client_to_server => {},
_ = server_to_client => {},
}
Ok(())
}
| {
    let mut buf = vec![0u8; 4096];
    // Read chunks until EOF (a zero-length read); re-read inside the loop so
    // `bytes_read` actually advances instead of looping forever on one chunk.
    let mut bytes_read = reader.read(&mut buf).await.map_err(Error::from_io_error)?;
    while let Some(count) = NonZeroUsize::new(bytes_read) {
        writer
            .write_all(&buf[0..count.get()])
            .await
            .map_err(Error::from_io_error)?;
        bytes_read = reader.read(&mut buf).await.map_err(Error::from_io_error)?;
    }
    Ok(())
} |
test_tracker_stores.py | import pytest
from rasa.core.channels.channel import UserMessage
from rasa.core.domain import Domain
from rasa.core.events import SlotSet, ActionExecuted, Restarted
from rasa.core.tracker_store import (
TrackerStore,
InMemoryTrackerStore,
RedisTrackerStore,
SQLTrackerStore,
)
from rasa.utils.endpoints import EndpointConfig, read_endpoint_config
from tests.core.conftest import DEFAULT_ENDPOINTS_FILE
domain = Domain.load("data/test_domains/default.yml")
def test_get_or_create():
slot_key = "location"
slot_val = "Easter Island"
store = InMemoryTrackerStore(domain)
tracker = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
ev = SlotSet(slot_key, slot_val)
tracker.update(ev)
assert tracker.get_slot(slot_key) == slot_val
store.save(tracker)
again = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
assert again.get_slot(slot_key) == slot_val
def test_restart_after_retrieval_from_tracker_store(default_domain):
store = InMemoryTrackerStore(default_domain)
tr = store.get_or_create_tracker("myuser")
synth = [ActionExecuted("action_listen") for _ in range(4)]
for e in synth:
tr.update(e)
tr.update(Restarted())
latest_restart = tr.idx_after_latest_restart()
store.save(tr)
tr2 = store.retrieve("myuser")
latest_restart_after_loading = tr2.idx_after_latest_restart()
assert latest_restart == latest_restart_after_loading
def test_tracker_store_remembers_max_history(default_domain):
store = InMemoryTrackerStore(default_domain)
tr = store.get_or_create_tracker("myuser", max_event_history=42)
tr.update(Restarted())
store.save(tr)
tr2 = store.retrieve("myuser")
assert tr._max_event_history == tr2._max_event_history == 42
def test_tracker_store_endpoint_config_loading():
cfg = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
assert cfg == EndpointConfig.from_dict(
{
"type": "redis",
"url": "localhost",
"port": 6379,
"db": 0,
"password": "password",
"timeout": 30000,
}
)
def test_find_tracker_store(default_domain):
store = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
tracker_store = RedisTrackerStore(
domain=default_domain,
host="localhost",
port=6379,
db=0,
password="password",
record_exp=3000,
)
assert isinstance(
tracker_store, type(TrackerStore.find_tracker_store(default_domain, store))
)
class ExampleTrackerStore(RedisTrackerStore):
def __init__(self, domain, url, port, db, password, record_exp):
super(ExampleTrackerStore, self).__init__(
domain, host=url, port=port, db=db, password=password, record_exp=record_exp
)
def test_tracker_store_from_string(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, ExampleTrackerStore)
def test_tracker_store_from_invalid_module(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "a.module.which.cannot.be.found"
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, InMemoryTrackerStore)
def test_tracker_store_from_invalid_string(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "any string"
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, InMemoryTrackerStore)
@pytest.mark.parametrize(
"full_url",
[
"postgresql://localhost",
"postgresql://localhost:5432",
"postgresql://user:secret@localhost",
],
)
def test_get_db_url_with_fully_specified_url(full_url):
assert SQLTrackerStore._get_db_url(host=full_url) == full_url
def test_get_db_url_with_port_in_host():
host = "localhost:1234"
dialect = "postgresql"
db = "mydb"
expected = "{}://{}/{}".format(dialect, host, db)
assert (
str(SQLTrackerStore._get_db_url(dialect="postgresql", host=host, db=db))
== expected
)
def test_get_db_url_with_correct_host():
| expected = "postgresql://localhost:5005/mydb"
assert (
str(
SQLTrackerStore._get_db_url(
dialect="postgresql", host="localhost", port=5005, db="mydb"
)
)
== expected
) |
|
pagination.utils.ts | import { PaginationComponentOptions } from './pagination-component-options.model';
import { FindListOptions } from '../../core/data/find-list-options.model';
/**
* Transform a PaginationComponentOptions object into a FindListOptions object
* @param pagination The PaginationComponentOptions to transform
* @param original An original FindListOptions object to start from
*/
export function | (pagination: PaginationComponentOptions, original?: FindListOptions): FindListOptions {
return Object.assign(new FindListOptions(), original, {
currentPage: pagination.currentPage,
elementsPerPage: pagination.pageSize
});
}
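// Usage sketch: a pagination state of page 3 with 20 items per page becomes
// FindListOptions { currentPage: 3, elementsPerPage: 20 }.
//
//   const pagination = Object.assign(new PaginationComponentOptions(), { currentPage: 3, pageSize: 20 });
//   const findOptions = toFindListOptions(pagination);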
| toFindListOptions |
openapi_generated.go | // +build !ignore_autogenerated
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by openapi-gen. DO NOT EDIT.
// This file was autogenerated by openapi-gen. Do not edit it manually!
package v1beta1
import (
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
return map[string]common.OpenAPIDefinition{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.BitbucketInterceptor": schema_pkg_apis_triggers_v1beta1_BitbucketInterceptor(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.CELInterceptor": schema_pkg_apis_triggers_v1beta1_CELInterceptor(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.CELOverlay": schema_pkg_apis_triggers_v1beta1_CELOverlay(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.ClusterTriggerBinding": schema_pkg_apis_triggers_v1beta1_ClusterTriggerBinding(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.ClusterTriggerBindingList": schema_pkg_apis_triggers_v1beta1_ClusterTriggerBindingList(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.CustomResource": schema_pkg_apis_triggers_v1beta1_CustomResource(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListener": schema_pkg_apis_triggers_v1beta1_EventListener(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerConfig": schema_pkg_apis_triggers_v1beta1_EventListenerConfig(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerList": schema_pkg_apis_triggers_v1beta1_EventListenerList(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerSpec": schema_pkg_apis_triggers_v1beta1_EventListenerSpec(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerStatus": schema_pkg_apis_triggers_v1beta1_EventListenerStatus(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTrigger": schema_pkg_apis_triggers_v1beta1_EventListenerTrigger(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTriggerGroup": schema_pkg_apis_triggers_v1beta1_EventListenerTriggerGroup(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTriggerSelector": schema_pkg_apis_triggers_v1beta1_EventListenerTriggerSelector(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.GitHubInterceptor": schema_pkg_apis_triggers_v1beta1_GitHubInterceptor(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.GitLabInterceptor": schema_pkg_apis_triggers_v1beta1_GitLabInterceptor(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorParams": schema_pkg_apis_triggers_v1beta1_InterceptorParams(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorRef": schema_pkg_apis_triggers_v1beta1_InterceptorRef(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorRequest": schema_pkg_apis_triggers_v1beta1_InterceptorRequest(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorResponse": schema_pkg_apis_triggers_v1beta1_InterceptorResponse(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.KubernetesResource": schema_pkg_apis_triggers_v1beta1_KubernetesResource(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.NamespaceSelector": schema_pkg_apis_triggers_v1beta1_NamespaceSelector(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Param": schema_pkg_apis_triggers_v1beta1_Param(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.ParamSpec": schema_pkg_apis_triggers_v1beta1_ParamSpec(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.PodTemplate": schema_pkg_apis_triggers_v1beta1_PodTemplate(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Resources": schema_pkg_apis_triggers_v1beta1_Resources(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.SecretRef": schema_pkg_apis_triggers_v1beta1_SecretRef(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Status": schema_pkg_apis_triggers_v1beta1_Status(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.StatusError": schema_pkg_apis_triggers_v1beta1_StatusError(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Trigger": schema_pkg_apis_triggers_v1beta1_Trigger(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBinding": schema_pkg_apis_triggers_v1beta1_TriggerBinding(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingList": schema_pkg_apis_triggers_v1beta1_TriggerBindingList(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingSpec": schema_pkg_apis_triggers_v1beta1_TriggerBindingSpec(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingStatus": schema_pkg_apis_triggers_v1beta1_TriggerBindingStatus(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerContext": schema_pkg_apis_triggers_v1beta1_TriggerContext(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerInterceptor": schema_pkg_apis_triggers_v1beta1_TriggerInterceptor(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerList": schema_pkg_apis_triggers_v1beta1_TriggerList(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerResourceTemplate": schema_pkg_apis_triggers_v1beta1_TriggerResourceTemplate(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpec": schema_pkg_apis_triggers_v1beta1_TriggerSpec(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecBinding": schema_pkg_apis_triggers_v1beta1_TriggerSpecBinding(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecTemplate": schema_pkg_apis_triggers_v1beta1_TriggerSpecTemplate(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplate": schema_pkg_apis_triggers_v1beta1_TriggerTemplate(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateList": schema_pkg_apis_triggers_v1beta1_TriggerTemplateList(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateSpec": schema_pkg_apis_triggers_v1beta1_TriggerTemplateSpec(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateStatus": schema_pkg_apis_triggers_v1beta1_TriggerTemplateStatus(ref),
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.WebhookInterceptor": schema_pkg_apis_triggers_v1beta1_WebhookInterceptor(ref),
}
}
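// Illustrative lookup (sketch only): resolve references with a trivial
// callback and fetch one definition by its canonical name.
//
//	defs := GetOpenAPIDefinitions(func(path string) spec.Ref { return spec.MustCreateRef(path) })
//	_ = defs["github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListener"]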
func schema_pkg_apis_triggers_v1beta1_BitbucketInterceptor(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BitbucketInterceptor provides a webhook to intercept and pre-process events",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secretRef": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.SecretRef"),
},
},
"eventTypes": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.SecretRef"},
}
}
func schema_pkg_apis_triggers_v1beta1_CELInterceptor(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CELInterceptor provides a webhook to intercept and pre-process events",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"filter": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"overlays": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.CELOverlay"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.CELOverlay"},
}
}
func schema_pkg_apis_triggers_v1beta1_CELOverlay(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CELOverlay provides a way to modify the request body using CEL expressions",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"expression": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_ClusterTriggerBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ClusterTriggerBinding is a TriggerBinding with a cluster scope. ClusterTriggerBindings are used to represent TriggerBindings that should be publicly addressable from any namespace in the cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the ClusterTriggerBinding from the client",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingSpec", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_ClusterTriggerBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ClusterTriggerBindingList contains a list of ClusterTriggerBinding",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.ClusterTriggerBinding"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.ClusterTriggerBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_CustomResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListener(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListener exposes a service to accept HTTP event payloads.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the EventListener from the client",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerSpec", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListenerConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListenerConfig stores configuration for resources generated by the EventListener",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"generatedName": {
SchemaProps: spec.SchemaProps{
Description: "GeneratedResourceName is the name given to all resources reconciled by the EventListener",
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"generatedName"},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListenerList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListenerList contains a list of TriggerBinding",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListener"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListener", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListenerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListenerSpec defines the desired state of the EventListener, represented by a list of Triggers.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"triggers": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTrigger"),
},
},
},
},
},
"triggerGroups": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Trigger groups allow for centralized processing of an interceptor chain",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTriggerGroup"),
},
},
},
},
},
"namespaceSelector": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.NamespaceSelector"),
},
},
"labelSelector": {
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Resources"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTrigger", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTriggerGroup", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.NamespaceSelector", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Resources", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListenerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListenerStatus holds the status of the EventListener",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"observedGeneration": {
SchemaProps: spec.SchemaProps{
Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.",
Type: []string{"integer"},
Format: "int64",
},
},
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Conditions the latest available observations of a resource's current state.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("knative.dev/pkg/apis.Condition"),
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
"address": {
SchemaProps: spec.SchemaProps{
Ref: ref("knative.dev/pkg/apis/duck/v1beta1.Addressable"),
},
},
"configuration": {
SchemaProps: spec.SchemaProps{
Description: "Configuration stores configuration for the EventListener service",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerConfig"),
},
},
},
Required: []string{"configuration"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerConfig", "knative.dev/pkg/apis.Condition", "knative.dev/pkg/apis/duck/v1beta1.Addressable"},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListenerTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListenerTrigger represents a connection between TriggerBinding, Params, and TriggerTemplate; TriggerBinding provides extracted values for TriggerTemplate to then create resources from. TriggerRef can also be provided instead of TriggerBinding, Interceptors and TriggerTemplate",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"bindings": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecBinding"),
},
},
},
},
},
"template": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecTemplate"),
},
},
"triggerRef": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"interceptors": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerInterceptor"),
},
},
},
},
},
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Description: "ServiceAccountName optionally associates credentials with each trigger; more granular authorization for who is allowed to utilize the associated pipeline vs. defaulting to whatever permissions are associated with the entire EventListener and associated sink facilitates multi-tenant model based scenarios",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerInterceptor", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecBinding", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecTemplate"},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListenerTriggerGroup(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListenerTriggerGroup defines a group of Triggers that share a common set of interceptors",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"interceptors": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerInterceptor"),
},
},
},
},
},
"triggerSelector": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTriggerSelector"),
},
},
},
Required: []string{"name", "interceptors", "triggerSelector"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.EventListenerTriggerSelector", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerInterceptor"},
}
}
func schema_pkg_apis_triggers_v1beta1_EventListenerTriggerSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventListenerTriggerSelector defines ways to select a group of triggers using their metadata",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"namespaceSelector": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.NamespaceSelector"),
},
},
"labelSelector": {
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.NamespaceSelector", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
func schema_pkg_apis_triggers_v1beta1_GitHubInterceptor(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GitHubInterceptor provides a webhook to intercept and pre-process events",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secretRef": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.SecretRef"),
},
},
"eventTypes": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.SecretRef"},
}
}
func schema_pkg_apis_triggers_v1beta1_GitLabInterceptor(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GitLabInterceptor provides a webhook to intercept and pre-process events",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secretRef": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.SecretRef"),
},
},
"eventTypes": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.SecretRef"},
}
}
func schema_pkg_apis_triggers_v1beta1_InterceptorParams(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "InterceptorParams defines a key-value pair that can be passed on an interceptor",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"),
},
},
},
Required: []string{"name", "value"},
},
},
Dependencies: []string{
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"},
}
}
func schema_pkg_apis_triggers_v1beta1_InterceptorRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "InterceptorRef provides a Reference to a ClusterInterceptor",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "InterceptorKind indicates the kind of the Interceptor, namespaced or cluster scoped. Currently only InterceptorKind is ClusterInterceptor, so the only valid value is the default one",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "API version of the referent",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_InterceptorRequest(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Do not generate DeepCopy(). See #827",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"body": {
SchemaProps: spec.SchemaProps{
Description: "Body is the incoming HTTP event body. We use a \"string\" representation of the JSON body in order to preserve the body exactly as it was sent (including spaces etc.). This is necessary for some interceptors e.g. GitHub for validating the body with a signature. While []byte can also store an exact representation of the body, `json.Marshal` will compact []byte to a base64 encoded string which means that we will lose the spaces any time we marshal this struct.",
Type: []string{"string"},
Format: "",
},
},
"header": {
SchemaProps: spec.SchemaProps{
Description: "Header are the headers for the incoming HTTP event",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
"extensions": {
SchemaProps: spec.SchemaProps{
Description: "Extensions are extra values that are added by previous interceptors in a chain",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Format: "",
},
},
},
},
},
"interceptor_params": {
SchemaProps: spec.SchemaProps{
Description: "InterceptorParams are the user specified params for interceptor in the Trigger",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Format: "",
},
},
},
},
},
"context": {
SchemaProps: spec.SchemaProps{
Description: "Context contains additional metadata about the event being processed",
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerContext"),
},
},
},
Required: []string{"context"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerContext"},
}
}
func schema_pkg_apis_triggers_v1beta1_InterceptorResponse(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Do not generate Deepcopy(). See #827",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"extensions": {
SchemaProps: spec.SchemaProps{
Description: "Extensions are additional fields that is added to the interceptor event.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Format: "",
},
},
},
},
},
"continue": {
SchemaProps: spec.SchemaProps{
Description: "Continue indicates if the EventListener should continue processing the Trigger or not",
Default: false,
Type: []string{"boolean"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status is an Error status containing details on any interceptor processing errors",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Status"),
},
},
},
Required: []string{"continue", "status"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Status"},
}
}
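// Editorial note (hedged illustration, not generated code): on the wire, an
// InterceptorResponse matching the schema above carries an "extensions"
// object, a required "continue" flag, and a required "status"; the concrete
// field values below are made-up examples.
//
//	{
//	  "extensions": {"truncated_sha": "abc1234"},
//	  "continue": true,
//	  "status": {"code": 0, "message": ""}
//	}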
func schema_pkg_apis_triggers_v1beta1_KubernetesResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"replicas": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"serviceType": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"servicePort": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("knative.dev/pkg/apis/duck/v1.WithPodSpec"),
},
},
},
},
},
Dependencies: []string{
"knative.dev/pkg/apis/duck/v1.WithPodSpec"},
}
}
func schema_pkg_apis_triggers_v1beta1_NamespaceSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NamespaceSelector is a selector for selecting either all namespaces or a list of namespaces.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"matchNames": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of namespace names.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Param defines a string value to be used for a ParamSpec with the same name.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name", "value"},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_ParamSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ParamSpec defines an arbitrary named input whose value can be supplied by a `Param`.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name declares the name by which a parameter is referenced.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "Description is a user-facing description of the parameter that may be used to populate a UI.",
Type: []string{"string"},
Format: "",
},
},
"default": {
SchemaProps: spec.SchemaProps{
Description: "Default is the value a parameter takes if no input value via a Param is supplied.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name"},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_PodTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"tolerations": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "If specified, the pod's tolerations.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Toleration"},
}
}
func schema_pkg_apis_triggers_v1beta1_Resources(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kubernetesResource": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.KubernetesResource"),
},
},
"customResource": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.CustomResource"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.CustomResource", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.KubernetesResource"},
}
}
func schema_pkg_apis_triggers_v1beta1_SecretRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SecretRef contains the information required to reference a single secret string This is needed because the other secretRef types are not cross-namespace and do not actually contain the \"SecretName\" field, which allows us to access a single secret value.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secretKey": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"secretName": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_Status(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"code": {
SchemaProps: spec.SchemaProps{
Description: "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].",
Type: []string{"integer"},
Format: "int64",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "A developer-facing error message, which should be in English.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_StatusError(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"s": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Status"),
},
},
},
Required: []string{"s"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Status"},
}
}
func schema_pkg_apis_triggers_v1beta1_Trigger(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Trigger defines a mapping of an input event to parameters. This is used to extract information from events to be passed to TriggerTemplates within a Trigger.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the Trigger",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpec"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerBinding defines a mapping of an input event to parameters. This is used to extract information from events to be passed to TriggerTemplates within a Trigger.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the TriggerBinding",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingSpec", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerBindingList contains a list of TriggerBindings. We don't use this but it's required for certain codegen features.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBinding"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerBindingSpec defines the desired state of the TriggerBinding.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"params": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Params defines the parameter mapping from the given input event.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Param"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Param"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerBindingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerBindingStatus defines the observed state of TriggerBinding.",
Type: []string{"object"},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerContext(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"event_url": {
SchemaProps: spec.SchemaProps{
Description: "EventURL is the URL of the incoming event",
Type: []string{"string"},
Format: "",
},
},
"event_id": {
SchemaProps: spec.SchemaProps{
Description: "EventID is a unique ID assigned by Triggers to each event",
Type: []string{"string"},
Format: "",
},
},
"trigger_id": {
SchemaProps: spec.SchemaProps{
Description: "TriggerID is of the form namespace/$ns/triggers/$name",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerInterceptor(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerInterceptor provides a hook to intercept and pre-process events",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Optional name to identify the current interceptor configuration",
Type: []string{"string"},
Format: "",
},
},
"ref": {
SchemaProps: spec.SchemaProps{
Description: "Ref refers to the Interceptor to use",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorRef"),
},
},
"params": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Params are the params to send to the interceptor",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorParams"),
},
},
},
},
},
"webhook": {
SchemaProps: spec.SchemaProps{
Description: "WebhookInterceptor refers to an old style webhook interceptor service",
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.WebhookInterceptor"),
},
},
},
Required: []string{"ref"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorParams", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.InterceptorRef", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.WebhookInterceptor"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerList contains a list of Triggers. We don't use this but it's required for certain codegen features.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Trigger"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Trigger", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerResourceTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerResourceTemplate describes a resource to create",
Type: []string{"object"},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerSpec represents a connection between TriggerSpecBinding, and TriggerSpecTemplate; TriggerSpecBinding provides extracted values for TriggerSpecTemplate to then create resources from.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"bindings": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecBinding"),
},
},
},
},
},
"template": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecTemplate"),
},
},
"name": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"interceptors": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerInterceptor"),
},
},
},
},
},
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Description: "ServiceAccountName optionally associates credentials with each trigger; Unlike EventListeners, this should be scoped to the same namespace as the Trigger itself",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"bindings", "template"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerInterceptor", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecBinding", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpecTemplate"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerSpecBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of the binding param Mutually exclusive with Ref",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value is the value of the binding param. Can contain JSONPath Has to be pointer since \"\" is a valid value Required if Name is also specified.",
Type: []string{"string"},
Format: "",
},
},
"ref": {
SchemaProps: spec.SchemaProps{
Description: "Ref is a reference to a TriggerBinding kind. Mutually exclusive with Name",
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind can only be provided if Ref is also provided. Defaults to TriggerBinding",
Type: []string{"string"},
Format: "",
},
},
"apiversion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion of the binding ref",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerSpecTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ref": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"apiversion": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateSpec"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateSpec"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerTemplate takes parameters and uses them to create CRDs",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec holds the desired state of the TriggerTemplate from the client",
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateStatus"),
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateSpec", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplateStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerTemplateList contains a list of TriggerTemplate",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplate"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerTemplateSpec holds the desired state of TriggerTemplate",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"params": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.ParamSpec"),
},
},
},
},
},
"resourcetemplates": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerResourceTemplate"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.ParamSpec", "github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerResourceTemplate"},
}
}
func schema_pkg_apis_triggers_v1beta1_TriggerTemplateStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TriggerTemplateStatus describes the desired state of TriggerTemplate",
Type: []string{"object"},
},
},
}
}
func schema_pkg_apis_triggers_v1beta1_WebhookInterceptor(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "WebhookInterceptor provides a webhook to intercept and pre-process events",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"objectRef": {
SchemaProps: spec.SchemaProps{
Description: "ObjectRef is a reference to an object that will resolve to a cluster DNS name to use as the EventInterceptor. Either objectRef or url can be specified",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
"url": {
SchemaProps: spec.SchemaProps{
Ref: ref("knative.dev/pkg/apis.URL"),
},
},
"header": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "Header is a group of key-value pairs that can be appended to the interceptor request headers. This allows the interceptor to make decisions specific to an EventListenerTrigger.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "k8s.io/api/core/v1.ObjectReference", "knative.dev/pkg/apis.URL"},
}
}
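// Editorial note (hedged sketch): generated schema_* functions like those
// above are conventionally collected into one name-to-definition map that an
// OpenAPI builder consumes. The aggregator below follows the usual
// kube-openapi convention; its exact name and wiring are an assumption here,
// not a verbatim part of this file (only two entries shown).
//
//	func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
//		return map[string]common.OpenAPIDefinition{
//			"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.Trigger":     schema_pkg_apis_triggers_v1beta1_Trigger(ref),
//			"github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1.TriggerSpec": schema_pkg_apis_triggers_v1beta1_TriggerSpec(ref),
//		}
//	}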
mod.rs
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashMap;
use crate::recorder::localstorage::LocalStorage;
use crate::RawRecords;
pub mod cpu;
pub mod summary;
/// This trait defines a general framework that works at a certain frequency. Typically,
/// it describes the recorder(sampler) framework for a specific resource.
///
/// [Recorder] will maintain a list of sub-recorders, driving all sub-recorders to work
/// according to the behavior described in this trait.
pub trait SubRecorder: Send {
/// This function is called at a fixed frequency. (A typical frequency is 99 Hz.)
///
/// The [RawRecords] and [LocalStorage] map of all threads will be passed in through
/// parameters. We need to collect resources (may be from each `LocalStorage`) and
/// write them into `RawRecords`.
///
/// The implementation needs to sample the resource in this function (in general).
///
/// [RawRecords]: crate::model::RawRecords
/// [LocalStorage]: crate::localstorage::LocalStorage
fn tick(
&mut self,
_records: &mut RawRecords,
_thread_stores: &mut HashMap<usize, LocalStorage>,
) {
}
/// This function is called every time before reporting to Collector.
/// The default period is 1 second.
///
/// The [RawRecords] and [LocalStorage] map of all threads will be passed in through parameters.
/// `usize` is thread_id without platform dependency.
///
/// [RawRecords]: crate::model::RawRecords
/// [LocalStorage]: crate::localstorage::LocalStorage
fn collect(
&mut self,
_records: &mut RawRecords,
_thread_stores: &mut HashMap<usize, LocalStorage>,
) {
}
/// This function is called when we need to clean up data.
/// The default period is 5 minutes.
fn cleanup(
&mut self,
_records: &mut RawRecords,
_thread_stores: &mut HashMap<usize, LocalStorage>,
) {
}
/// This function is called before the [Recorder] thread pauses.
fn pause(
&mut self,
_records: &mut RawRecords,
_thread_stores: &mut HashMap<usize, LocalStorage>,
) {
}
/// This function is called when the [Recorder] thread resumes execution.
fn resume(
&mut self,
_records: &mut RawRecords,
_thread_stores: &mut HashMap<usize, LocalStorage>,
) {
}
/// This function is called when a new thread accesses thread-local-storage.
///
/// This function exists because the sampling work of `SubRecorder` may need
/// to be performed on all functions, and `SubRecorder` may wish to maintain
/// a thread-related data structure by itself.
fn thread_created(&mut self, _id: usize, _store: &LocalStorage) {}
}
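// Editorial sketch (hedged): `TickCounter` below is a hypothetical example
// implementation, not part of the original module. It relies on the trait's
// default no-op methods and only counts how many times `tick` fired.
pub struct TickCounter {
    /// Number of times the recorder thread has ticked this sub-recorder.
    pub ticks: u64,
}

impl SubRecorder for TickCounter {
    fn tick(
        &mut self,
        _records: &mut RawRecords,
        _thread_stores: &mut HashMap<usize, LocalStorage>,
    ) {
        // Just count the invocation; a real sub-recorder would sample a
        // resource here and fold the result into `_records`.
        self.ticks += 1;
    }
}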
aws_ec2_test.go
package e2e_test
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/solo-io/gloo/test/helpers"
"github.com/solo-io/solo-kit/pkg/api/v1/resources"
"github.com/aws/aws-sdk-go/aws/credentials"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/rotisserie/eris"
gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
glooec2 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/options/aws/ec2"
"github.com/solo-io/gloo/projects/gloo/pkg/defaults"
"github.com/solo-io/gloo/test/services"
"github.com/solo-io/solo-kit/pkg/api/v1/clients"
"github.com/solo-io/solo-kit/pkg/api/v1/resources/core"
)
/*
# Configure an EC2 instance for this test
- Do this if this test ever starts to fail because the EC2 instance that it tests against has become unavailable.
- Provision an EC2 instance
- Use an "amazon linux" image
- Configure the security group to allow http traffic on port 80
- Tag your instance with the following tags
- svc: worldwide-hello
- Set up your EC2 instance
- ssh into your instance
- download a demo app: an http response code echo app
- this app responds to requests with the corresponding response code
- ex: http://<my-instance-ip>/?code=404 produces a `404` response
- make the app executable
- run it in the background
```bash
wget https://mitch-solo-public.s3.amazonaws.com/echoapp2
chmod +x echoapp2
sudo ./echoapp2 --port 80 &
```
- Note: other dummy webservers will work fine - you may just need to update the path of the request
- Currently, we call the /metrics path during our tests
- Verify that you can reach the app
- `curl` the app, you should see a help menu for the app
```bash
curl http://<instance-public-ip>/
```
*/
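// Editorial note (hedged): to confirm the tagged instance is discoverable with
// the same tag filter the upstream below uses, the standard AWS CLI query is:
//
//	aws ec2 describe-instances --region us-east-1 \
//	    --filters "Name=tag:svc,Values=worldwide-hello"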
var _ = Describe("AWS EC2 Plugin utils test", func() {
const region = "us-east-1"
var (
ctx context.Context
cancel context.CancelFunc
testClients services.TestClients
envoyInstance *services.EnvoyInstance
secret *gloov1.Secret
upstream *gloov1.Upstream
roleArn string
)
addCredentials := func() {
localAwsCredentials := credentials.NewSharedCredentials("", "")
v, err := localAwsCredentials.Get()
if err != nil {
Skip("no AWS creds available")
}
// role arn format: "arn:aws:iam::[account_number]:role/[role_name]"
roleArn = os.Getenv("AWS_ARN_ROLE_1")
if roleArn == "" {
Skip("no AWS role ARN available")
}
var opts clients.WriteOpts
accessKey := v.AccessKeyID
secretKey := v.SecretAccessKey
secret = &gloov1.Secret{
Metadata: &core.Metadata{
Namespace: "default",
Name: region,
},
Kind: &gloov1.Secret_Aws{
Aws: &gloov1.AwsSecret{
AccessKey: accessKey,
SecretKey: secretKey,
},
},
}
_, err = testClients.SecretClient.Write(secret, opts)
Expect(err).NotTo(HaveOccurred())
}
addUpstream := func() {
secretRef := secret.Metadata.Ref()
upstream = &gloov1.Upstream{
Metadata: &core.Metadata{
Namespace: "default",
Name: region,
},
UpstreamType: &gloov1.Upstream_AwsEc2{
AwsEc2: &glooec2.UpstreamSpec{
Region: region,
SecretRef: secretRef,
RoleArn: roleArn,
Filters: []*glooec2.TagFilter{
{
Spec: &glooec2.TagFilter_KvPair_{
KvPair: &glooec2.TagFilter_KvPair{
Key: "svc",
Value: "worldwide-hello",
},
},
},
},
PublicIp: true,
Port: 80,
},
},
}
var opts clients.WriteOpts
_, err := testClients.UpstreamClient.Write(upstream, opts)
Expect(err).NotTo(HaveOccurred())
}
validateUrl := func(url, substring string) {
Eventually(func() (string, error) {
res, err := http.Get(url)
if err != nil {
return "", eris.Wrapf(err, "unable to call GET")
}
if res.StatusCode != http.StatusOK {
return "", eris.New(fmt.Sprintf("%v is not OK", res.StatusCode))
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", eris.Wrapf(err, "unable to read body")
}
return string(body), nil
}, "10s", "1s").Should(ContainSubstring(substring))
}
validateEc2Endpoint := func(envoyPort uint32, substring string) {
// first make sure that the instance is ready (to avoid false negatives)
By("verifying instance is ready - if this failed, you may need to restart the EC2 instance")
// Stitch the url together to avoid bot spam
// The IP address corresponds to the public ip of an EC2 instance managed by Solo.io for the purpose of
// verifying that the EC2 upstream works as expected.
// The port is where the app listens for connections. The instance has been configured with an inbound traffic
// rule that allows port 80.
// TODO[test enhancement] - create an EC2 instance on demand (or auto-skip the test) if the expected instance is unavailable
// See notes in the header of this file for instructions on how to restore the instance
ec2Port := 80
// This is an Elastic IP in us-east-1 and can be reassigned if the instance ever goes down
ec2Url := fmt.Sprintf("http://%v:%v/metrics", strings.Join([]string{"100", "24", "224", "6"}, "."), ec2Port)
validateUrl(ec2Url, substring)
// do the actual verification
By("verifying Gloo has routed to the instance")
gatewayUrl := fmt.Sprintf("http://%v:%v/metrics", "localhost", envoyPort)
validateUrl(gatewayUrl, substring)
}
AfterEach(func() {
if envoyInstance != nil {
_ = envoyInstance.Clean()
}
cancel()
})
// NOTE: you need to configure EC2 instances before running this
It("be able to call upstream function", func() {
err := envoyInstance.RunWithRoleAndRestXds(services.DefaultProxyName, testClients.GlooPort, testClients.RestXdsPort)
Expect(err).NotTo(HaveOccurred())
proxy := &gloov1.Proxy{
Metadata: &core.Metadata{
Name: "proxy",
Namespace: "default",
},
Listeners: []*gloov1.Listener{{
Name: "listener",
BindAddress: "::",
BindPort: defaults.HttpPort,
ListenerType: &gloov1.Listener_HttpListener{
HttpListener: &gloov1.HttpListener{
VirtualHosts: []*gloov1.VirtualHost{{
Name: "virt1",
Domains: []string{"*"},
Routes: []*gloov1.Route{{
Action: &gloov1.Route_RouteAction{
RouteAction: &gloov1.RouteAction{
Destination: &gloov1.RouteAction_Single{
Single: &gloov1.Destination{
DestinationType: &gloov1.Destination_Upstream{
Upstream: upstream.Metadata.Ref(),
},
},
},
},
},
}},
}},
},
},
}},
}
var opts clients.WriteOpts
_, err = testClients.ProxyClient.Write(proxy, opts)
Expect(err).NotTo(HaveOccurred())
helpers.EventuallyResourceAccepted(func() (resources.InputResource, error) {
return testClients.ProxyClient.Read(proxy.Metadata.Namespace, proxy.Metadata.Name, clients.ReadOpts{})
})
validateEc2Endpoint(defaults.HttpPort, "Counts")
})
BeforeEach(func() {
ctx, cancel = context.WithCancel(context.Background())
defaults.HttpPort = services.NextBindPort()
defaults.HttpsPort = services.NextBindPort()
testClients = services.RunGateway(ctx, false)
var err error
envoyInstance, err = envoyFactory.NewEnvoyInstance()
Expect(err).NotTo(HaveOccurred())
addCredentials()
addUpstream()
})
})
Recipe.tsx
import { useEffect, useState } from "react";
import sanityClient from "../client";
interface IRecipe {
title: string;
recipeName: string;
recipeDesc: string;
recipeImage: { asset: { _id: string; url: string } }[];
recipeVideo: string;
recipePart: string;
recipeToDo: string;
recipeIngredients: {
ingredientName: string;
quantity: string;
unit: string;
}[];
}
const Recipe = () => {
const queryString = window.location.search;
const urlParams = new URLSearchParams(queryString);
const code = urlParams.get("code") || "0";
console.log(code);
const [recipes, setRecipes] = useState<IRecipe[]>([]);
useEffect(() => {
sanityClient
.fetch(
`*[_type == "recipes"]{
title,
recipeName,
recipeDesc,
recipePart,
recipeToDo,
recipeIngredients[] {
ingredientName,
unit,
quantity
}
,
recipeImage[]{
asset->{
_id,
url
},
},
recipeVideo,
}`
)
.then((data) => setRecipes(data))
.catch(() => setRecipes([]));
}, []);
console.log(recipes);
let codHead = recipes[parseInt(code)];
console.log(codHead);
return (
<div className="flex flex-col items-center bg-blue-100">
<div className="flex items-center justify-around w-full mt-5 mb-10">
<a href="about-us" className="text-2xl text-black">
Om oss
</a>
<a href="/">
<img src="Svart logo.png" alt="logo" />
</a>
<a href="/recipes" className="text-2xl text-blue-600">
Oppskrifter
</a>
</div>
{codHead ? (
<div className="w-4/5 m-auto">
<img
className="h-[40rem] object-cover w-full"
src={codHead.recipeImage[0].asset.url}
alt="recipe"
/>
<h1 className="my-3 text-6xl text-black">{codHead.recipeName}</h1>
<p className="mt-3 mb-10">{codHead.recipeDesc}</p>
<div className="flex w-full mb-10">
<div className="w-2/5 divide-blue-600">
<h3 className="text-3xl">INGREDIENSER</h3>
<hr className="w-3/5 mb-5" />
{codHead.recipeIngredients.map((ingredient, i) => {
return (
<p key={i}>{`${ingredient.quantity ? ingredient.quantity : ""} ${
ingredient.ingredientName
}`}</p>
);
})}
</div>
<div className="w-3/5 divide-blue-600">
<h3 className="text-3xl">SLIK GJรR DU:</h3>
<hr className="w-3/5 mb-5" />
{codHead.recipeToDo
.replace("\n", "")
.split(/\d/)
.filter((elem) => elem.length >= 5)
.map((step, i) => {
return (
<div className="flex items-center">
<p className="mr-2 text-[#7C8EEE] text-5xl">{i + 1}</p>
<p>{step.replace(".", "")}</p>
</div>
);
})}
</div>
</div>
{codHead.recipeVideo ? (
<div className="divide-blue-600">
<h3 className="w-full text-3xl text-center">FREMGANGSMร
TE:</h3>
<hr className="w-3/5 mx-auto mb-5" />
<iframe
title="video"
className="w-full h-[40rem] mb-16"
src={codHead.recipeVideo}
/>
</div>
) : null}
</div>
) : null}
</div>
);
};
export default Recipe;
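// Editorial note (hedged sketch): the "../client" module imported above is not
// shown in this file; it is typically a configured @sanity/client instance
// along these lines. The project id, dataset, and API version below are
// placeholders, not values taken from this project.
//
//   import { createClient } from "@sanity/client";
//
//   export default createClient({
//     projectId: "your-project-id", // placeholder
//     dataset: "production",        // placeholder
//     apiVersion: "2021-10-21",     // placeholder date-based version
//     useCdn: true,
//   });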
js.js
var inputAction = "";
var display = document.getElementById('display').value;
//OUTPUT TO THE DISPLAY
function output(){
var display = document.getElementById('display').value;
if (display == "0"){display = "";}
//CLEAR ALL CHARACTERS
if (inputAction == "limpar"){
display = "0"
inputAction = "";
}
//CLEAR ONE CHARACTER
if (inputAction == "limparCaractere"){
display = display.slice(0, -1);
inputAction = "";
}
//EVALUATE (EQUALS)
if (inputAction == "igualar"){
inputAction = "";
try{
display = eval(display);
document.getElementById("display").style.borderColor = "gray";
document.getElementById("display").style.borderWidth = "1px";
}
catch(e){
document.getElementById("display").style.borderColor = "red";
document.getElementById("display").style.borderWidth = "1px";
}
}
//INVERT THE VALUE'S SIGN
if (inputAction == "inverter"){
display = display * -1;
inputAction = "";
}
//APPEND INPUT TO THE DISPLAY
var iA = inputAction;
var dlen = display.length;
if (iA == "+" || iA == "-" || iA == "*" || iA == "/" || iA == ".") {
if (display.substr(-1) == "+" || display.substr(-1) == "-" || display.substr(-1) == "*" || display.substr(-1) == "/" || display.substr(-1) == "."){
inputAction = "";
}
if (dlen < 2 && display == "0"){
inputAction = "";
}
}
display = display + inputAction;
if (display.length <= 10 && display.length < 14){
document.getElementById("display").style.fontSize = "50px";
}
if (display.length > 10 && display.length < 14){
document.getElementById("display").style.fontSize = "35px";
}
if (display.length > 15 && display.length < 43){
document.getElementById("display").style.fontSize = "25px";
}
if (display.length > 43){
document.getElementById("display").style.fontSize = "15px";
}
if (display == ""){display = "0";}
if (display == "/" || display == "+" || display == "*" || display == "*"){display = "0";}
//WRITE TO THE DISPLAY
document.getElementById('display').value = display;
}
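//EDITORIAL NOTE (hedged): eval() executes arbitrary JavaScript. With input
//coming only from the buttons below that is tolerable, but a stricter sketch
//would whitelist the expression before evaluating, e.g.:
// if (/^[0-9+\-*\/().\s]*$/.test(display)) { display = String(eval(display)); }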
//BUTTON FUNCTIONS
function b1(){
inputAction = "1";
output();
}
function b2(){
inputAction = "2";
output();
}
function b3(){
inputAction = "3";
output();
}
function b4(){
inputAction = "4";
output();
}
function b5(){
inputAction = "5";
output();
}
function b6(){
inputAction = "6";
output();
}
function b7(){
inputAction = "7";
output();
}
function b8(){
inputAction = "8";
output();
}
function b9(){
inputAction = "9";
output();
}
function b0(){
inputAction = "0";
output();
}
function somar(){
inputAction = "+";
output();
}
function subtrair(){
inputAction = "-";
output();
}
function dividir(){
inputAction = "/";
output();
}
function multiplicar(){
inputAction = "*";
output();
}
function igualar(){
inputAction = "igualar";
output();
}
function posFloat(){
inputAction = ".";
output();
}
function limpar(){
inputAction = "limpar";
output();
}
function limparCaractere(){
inputAction = "limparCaractere";
output();
}
function inverter(){
inputAction = "inverter";
output();
}
function percent(){
inputAction = "*(n/100)";
output();
}
|
cli.py
# pylint: disable-msg=W0613,W0612,W0212,W0511,R0912,C0322,W0704
# W0511 = XXX (my own todo's)
"""
cli.py
======
Desc: Command-line tool for listing Python packages installed by setuptools,
package metadata, package dependencies, and querying The Cheese Shop
(PyPI) for Python package release information such as which installed
packages have updates available.
Author: Rob Cakebread <gentoodev a t gmail.com>
License : BSD (See COPYING)
"""
__docformat__ = 'restructuredtext'
import inspect
import re
import pprint
import os
import sys
import optparse
import pkg_resources
import webbrowser
import logging
import platform
if platform.python_version().startswith('2'):
from xmlrpclib import Fault as XMLRPCFault
from urllib import urlretrieve
from urlparse import urlparse
else:
from xmlrpc.client import Fault as XMLRPCFault
from urllib.request import urlretrieve
from urllib.parse import urlparse
from distutils.sysconfig import get_python_lib
from yolk.metadata import get_metadata
from yolk.yolklib import get_highest_version, Distributions
from yolk.pypi import CheeseShop
from yolk.setuptools_support import get_download_uri, get_pkglist
from yolk.plugins import load_plugins
from yolk.utils import run_command, command_successful
from yolk.__init__ import __version__ as VERSION
class StdOut:
"""
Filter stdout or stderr from specific modules
So far this is just used for pkg_resources
"""
def __init__(self, stream, modulenames):
self.stdout = stream
#Modules to squelch
self.modulenames = modulenames
def __getattr__(self, attribute):
if attribute not in self.__dict__ or attribute == '__doc__':
return getattr(self.stdout, attribute)
return self.__dict__[attribute]
def flush(self):
"""Bug workaround for Python 3.2+:
Exception AttributeError: 'flush' in <yolk.cli.StdOut object...
"""
pass
def write(self, inline):
"""
Write a line to stdout if it isn't in a blacklist
Try to get the name of the calling module to see if we want
to filter it. If there is no calling module, use current
frame in case there's a traceback before there is any calling module
"""
frame = inspect.currentframe().f_back
if frame:
mod = frame.f_globals.get('__name__')
else:
mod = sys._getframe(0).f_globals.get('__name__')
if not mod in self.modulenames:
self.stdout.write(inline)
def writelines(self, inline):
"""Write multiple lines"""
for line in inline:
self.write(line)
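#Illustrative usage sketch (added for clarity, not part of the original
#module): once the filter is installed, writes whose calling frame belongs
#to a blacklisted module are dropped while everything else passes through:
#
#    sys.stdout = StdOut(sys.stdout, ['distutils.log'])
#    print("visible")    #caller is __main__, so this line is printed
#    #a write issued from inside distutils.log would be squelched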
class Yolk(object):
"""
Main class for yolk
"""
def __init__(self):
#PyPI project name with proper case
self.project_name = ""
#PyPI project version
self.version = ""
#List of all versions not hidden on PyPI
self.all_versions = []
self.pkg_spec = []
self.options = None
self.logger = logging.getLogger("yolk")
#Squelch output from setuptools
#Add future offenders to this list.
shut_up = ['distutils.log']
sys.stdout = StdOut(sys.stdout, shut_up)
sys.stderr = StdOut(sys.stderr, shut_up)
self.pypi = None
def get_plugin(self, method):
"""
Return plugin object if CLI option is activated and method exists
@param method: name of plugin's method we're calling
@type method: string
@returns: list of plugins with `method`
"""
all_plugins = []
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
plugin_obj = entry_point.load()
plugin = plugin_obj()
plugin.configure(self.options, None)
if plugin.enabled:
if not hasattr(plugin, method):
self.logger.warn("Error: plugin has no method: %s" % method)
plugin = None
else:
all_plugins.append(plugin)
return all_plugins
def set_log_level(self):
"""
Set log level according to command-line options
@returns: logger object
"""
if self.options.debug:
self.logger.setLevel(logging.DEBUG)
elif self.options.quiet:
self.logger.setLevel(logging.ERROR)
else:
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
return self.logger
def run(self):
"""
Perform actions based on CLI options
@returns: status code
"""
opt_parser = setup_opt_parser()
(self.options, remaining_args) = opt_parser.parse_args()
logger = self.set_log_level()
pkg_spec = validate_pypi_opts(opt_parser)
if not pkg_spec:
pkg_spec = remaining_args
self.pkg_spec = pkg_spec
if not self.options.pypi_search and (len(sys.argv) == 1 or\
len(remaining_args) > 2):
opt_parser.print_help()
return 2
#Options that depend on querying installed packages, not PyPI.
#We find the proper case for package names if they are installed,
#otherwise PyPI returns the correct case.
if self.options.show_deps or self.options.show_all or \
self.options.show_active or self.options.show_non_active or \
(self.options.show_updates and pkg_spec):
want_installed = True
else:
want_installed = False
#show_updates may or may not have a pkg_spec
if not want_installed or self.options.show_updates:
self.pypi = CheeseShop(self.options.debug)
#XXX: We should return 2 here if we couldn't create xmlrpc server
if pkg_spec:
(self.project_name, self.version, self.all_versions) = \
self.parse_pkg_ver(want_installed)
if want_installed and not self.project_name:
logger.error("%s is not installed." % pkg_spec[0])
return 1
#I could prefix all these with 'cmd_' and the methods also
#and then iterate over the `options` dictionary keys...
commands = ['show_deps', 'query_metadata_pypi', 'fetch',
'versions_available', 'show_updates', 'browse_website',
'show_download_links', 'pypi_search', 'show_pypi_changelog',
'show_pypi_releases', 'yolk_version', 'show_all',
'show_active', 'show_non_active', 'show_entry_map',
'show_entry_points']
#Run first command it finds, and only the first command, then return
#XXX: Check if more than one command was set in options and give error?
for action in commands:
if getattr(self.options, action):
return getattr(self, action)()
opt_parser.print_help()
def show_active(self):
"""
Show installed active packages
"""
return self.show_distributions("active")
def show_non_active(self):
"""
Show installed non-active packages
"""
return self.show_distributions("nonactive")
def show_all(self):
"""
Show all installed packages
"""
return self.show_distributions("all")
def show_updates(self):
"""
Check installed packages for available updates on PyPI
@param project_name: optional package name to check; checks every
installed package if none specified
@type project_name: string
@returns: None
"""
dists = Distributions()
if self.project_name:
#Check for a single package
pkg_list = [self.project_name]
else:
#Check for every installed package
pkg_list = get_pkglist()
found = None
for pkg in pkg_list:
for (dist, active) in dists.get_distributions("all", pkg,
dists.get_highest_installed(pkg)):
(project_name, versions) = \
self.pypi.query_versions_pypi(dist.project_name)
if versions:
#PyPI returns them in chronological order,
#but who knows if it's guaranteed in the API?
#Make sure we grab the highest version:
newest = get_highest_version(versions)
if newest != dist.version:
#We may have newer than what PyPI knows about
if pkg_resources.parse_version(dist.version) < \
pkg_resources.parse_version(newest):
found = True
print(" %s %s (%s)" % (project_name, dist.version,
newest))
if not found and self.project_name:
self.logger.info("You have the latest version installed.")
elif not found:
self.logger.info("No newer packages found at The Cheese Shop")
return 0
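#Note (added for clarity, not in the original source): parse_version is what
#makes the comparison above safe; it orders releases numerically, e.g.
#pkg_resources.parse_version("0.9.1") < pkg_resources.parse_version("0.10"),
#whereas a plain string comparison would rank "0.9.1" higher.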
def show_distributions(self, show):
"""
Show list of installed activated OR non-activated packages
@param show: type of pkgs to show (all, active or nonactive)
@type show: string
@returns: None or 2 if error
"""
show_metadata = self.options.metadata
#Search for any plugins with active CLI options with add_column() method
plugins = self.get_plugin("add_column")
#Some locations show false positive for 'development' packages:
ignores = ["/UNIONFS", "/KNOPPIX.IMG"]
#Check if we're in a workingenv
#See http://cheeseshop.python.org/pypi/workingenv.py
workingenv = os.environ.get('WORKING_ENV')
if workingenv:
ignores.append(workingenv)
dists = Distributions()
results = None
for (dist, active) in dists.get_distributions(show, self.project_name,
self.version):
metadata = get_metadata(dist)
for prefix in ignores:
if dist.location.startswith(prefix):
dist.location = dist.location.replace(prefix, "")
#Case-insensitive search because of Windows
if dist.location.lower().startswith(get_python_lib().lower()):
develop = ""
else:
develop = dist.location
if metadata:
add_column_text = ""
for my_plugin in plugins:
#See if package is 'owned' by a package manager such as
#portage, apt, rpm etc.
#add_column_text += my_plugin.add_column(filename) + " "
add_column_text += my_plugin.add_column(dist) + " "
self.print_metadata(metadata, develop, active, add_column_text)
else:
print(str(dist) + " has no metadata")
results = True
if not results and self.project_name:
if self.version:
pkg_spec = "%s==%s" % (self.project_name, self.version)
else:
pkg_spec = "%s" % self.project_name
if show == "all":
self.logger.error("There are no versions of %s installed." \
% pkg_spec)
else:
self.logger.error("There are no %s versions of %s installed." \
% \
(show, pkg_spec))
return 2
elif show == "all" and results and self.options.fields:
print("Versions with '*' are non-active.")
print("Versions with '!' are deployed in development mode.")
def print_metadata(self, metadata, develop, active, installed_by):
"""
Print out formatted metadata
@param metadata: package's metadata
@type metadata: pkg_resources Distribution obj
@param develop: path to pkg if its deployed in development mode
@type develop: string
@param active: show if package is activated or not
@type active: boolean
@param installed_by: Shows if pkg was installed by a package manager other
than setuptools
@type installed_by: string
@returns: None
"""
show_metadata = self.options.metadata
if self.options.fields:
fields = self.options.fields.split(',')
fields = list(map(str.strip, fields))
else:
fields = []
version = metadata['Version']
#When showing all packages, note which are not active:
if active:
if fields:
active_status = ""
else:
active_status = "active"
else:
if fields:
active_status = "*"
else:
active_status = "non-active"
if develop:
if fields:
development_status = "! (%s)" % develop
else:
development_status = "development (%s)" % develop
else:
development_status = installed_by
status = "%s %s" % (active_status, development_status)
if fields:
print('%s (%s)%s %s' % (metadata['Name'], version, active_status,
development_status))
else:
# Need intelligent justification
print(metadata['Name'].ljust(15) + " - " + version.ljust(12) + \
" - " + status)
if fields:
#Only show specific fields, using case-insensitive search
#materialize the map so membership tests in the loop don't consume it
fields = list(map(str.lower, fields))
for field in metadata.keys():
if field.lower() in fields:
print(' %s: %s' % (field, metadata[field]))
print()
elif show_metadata:
#Print all available metadata fields
for field in metadata.keys():
if field != 'Name' and field != 'Summary':
print(' %s: %s' % (field, metadata[field]))
def show_deps(self):
"""
Show dependencies for package(s)
@returns: 0 - success, 1 - no dependency info supplied
"""
pkgs = pkg_resources.Environment()
for pkg in pkgs[self.project_name]:
if not self.version:
print(pkg.project_name, pkg.version)
#dict views can't be indexed on Python 3, so materialize the values first
deps = list(pkg._dep_map.values())[0]
i = len(deps)
if i:
while i:
if not self.version or self.version and \
pkg.version == self.version:
if self.version and i == len(deps):
print(pkg.project_name, pkg.version)
print(" " + str(deps[i - 1]))
i -= 1
else:
self.logger.info(\
"No dependency information was supplied with the package.")
return 1
return 0
def show_pypi_changelog(self):
"""
Show detailed PyPI ChangeLog for the last `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
hours = self.options.show_pypi_changelog
if not hours.isdigit():
self.logger.error("Error: You must supply an integer.")
return 1
try:
changelog = self.pypi.changelog(int(hours))
except XMLRPCFault as err_msg:
self.logger.error(err_msg)
self.logger.error("ERROR: Couldn't retrieve changelog.")
return 1
last_pkg = ''
for entry in changelog:
pkg = entry[0]
if pkg != last_pkg:
print("%s %s\n\t%s" % (entry[0], entry[1], entry[3]))
last_pkg = pkg
else:
print("\t%s" % entry[3])
return 0
def show_pypi_releases(self):
"""
Show PyPI releases for the last number of `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
try:
hours = int(self.options.show_pypi_releases)
except ValueError:
self.logger.error("ERROR: You must supply an integer.")
return 1
try:
latest_releases = self.pypi.updated_releases(hours)
except XMLRPCFault as err_msg:
self.logger.error(err_msg)
self.logger.error("ERROR: Couldn't retrieve latest releases.")
return 1
for release in latest_releases:
print("%s %s" % (release[0], release[1]))
return 0
def show_download_links(self):
"""
Query PyPI for pkg download URI for a package
@returns: 0
"""
#In case they specify version as 'dev' instead of using -T svn,
#don't show three svn URI's
if self.options.file_type == "all" and self.version == "dev":
self.options.file_type = "svn"
if self.options.file_type == "svn":
version = "dev"
else:
if self.version:
version = self.version
else:
version = self.all_versions[0]
if self.options.file_type == "all":
#Search for source, egg, and svn
self.print_download_uri(version, True)
self.print_download_uri(version, False)
self.print_download_uri("dev", True)
else:
if self.options.file_type == "source":
source = True
else:
source = False
self.print_download_uri(version, source)
return 0
def print_download_uri(self, version, source):
"""
@param version: version number or 'dev' for svn
@type version: string
@param source: download source or egg
@type source: boolean
@returns: None
"""
if version == "dev":
pkg_type = "subversion"
source = True
elif source:
pkg_type = "source"
else:
pkg_type = "egg"
#Use setuptools monkey-patch to grab url
url = get_download_uri(self.project_name, version, source,
self.options.pypi_index)
if url:
print("%s" % url)
else:
self.logger.info("No download URL found for %s" % pkg_type)
def fetch(self):
"""
Download a package
@returns: 0 = success or 1 if failed download
"""
#Default type to download
source = True
directory = "."
if self.options.file_type == "svn":
version = "dev"
svn_uri = get_download_uri(self.project_name, \
"dev", True)
if svn_uri:
directory = self.project_name + "_svn"
return self.fetch_svn(svn_uri, directory)
else:
self.logger.error(\
"ERROR: No subversion repository found for %s" % \
self.project_name)
return 1
elif self.options.file_type == "source":
source = True
elif self.options.file_type == "egg":
source = False
uri = get_download_uri(self.project_name, self.version, source)
if uri:
return self.fetch_uri(directory, uri)
else:
self.logger.error("No %s URI found for package: %s " % \
(self.options.file_type, self.project_name))
return 1
def fetch_uri(self, directory, uri):
"""
Use ``urllib.urlretrieve`` to download package to file in sandbox dir.
@param directory: directory to download to
@type directory: string
@param uri: uri to download
@type uri: string
@returns: 0 = success or 1 for failed download
"""
filename = os.path.basename(urlparse(uri)[2])
if os.path.exists(filename):
self.logger.error("ERROR: File exists: " + filename)
return 1
try:
downloaded_filename, headers = urlretrieve(uri, filename)
self.logger.info("Downloaded ./" + filename)
except IOError as err_msg:
self.logger.error("Error downloading package %s from URL %s" \
% (filename, uri))
self.logger.error(str(err_msg))
return 1
#Python 2's mimetools message exposes gettype(); Python 3's email.message
#uses get_content_type()
content_type = (headers.gettype() if hasattr(headers, 'gettype')
else headers.get_content_type())
if content_type in ["text/html"]:
dfile = open(downloaded_filename)
if re.search("404 Not Found", "".join(dfile.readlines())):
dfile.close()
self.logger.error("'404 Not Found' error")
return 1
dfile.close()
return 0
def fetch_svn(self, svn_uri, directory):
"""
Fetch subversion repository
@param svn_uri: subversion repository uri to check out
@type svn_uri: string
@param directory: directory to download to
@type directory: string
@returns: 0 = success or 1 for failed download
"""
if not command_successful("svn --version"):
self.logger.error("ERROR: Do you have subversion installed?")
return 1
if os.path.exists(directory):
self.logger.error("ERROR: Checkout directory exists - %s" \
% directory)
return 1
try:
os.mkdir(directory)
except OSError as err_msg:
self.logger.error("ERROR: " + str(err_msg))
return 1
cwd = os.path.realpath(os.curdir)
os.chdir(directory)
self.logger.info("Doing subversion checkout for %s" % svn_uri)
status, output = run_command("/usr/bin/svn co %s" % svn_uri)
self.logger.info(output)
os.chdir(cwd)
self.logger.info("subversion checkout is in directory './%s'" \
% directory)
return 0
def browse_website(self, browser=None):
"""
Launch web browser at project's homepage
@param browser: name of web browser to use
@type browser: string
@returns: 0 if homepage found, 1 if no homepage found
"""
if len(self.all_versions):
metadata = self.pypi.release_data(self.project_name, \
self.all_versions[0])
self.logger.debug("DEBUG: browser: %s" % browser)
if "home_page" in metadata:
self.logger.info("Launching browser: %s" \
% metadata["home_page"])
if browser == 'konqueror':
browser = webbrowser.Konqueror()
else:
browser = webbrowser.get()
browser.open(metadata["home_page"], 2)
return 0
self.logger.error("No homepage URL found.")
return 1
def query_metadata_pypi(self):
"""
Show pkg metadata queried from PyPI
@returns: 0
"""
if self.version and self.version in self.all_versions:
metadata = self.pypi.release_data(self.project_name, self.version)
else:
#Give highest version
metadata = self.pypi.release_data(self.project_name, \
self.all_versions[0])
if metadata:
for key in metadata.keys():
if not self.options.fields or (self.options.fields and \
self.options.fields==key):
print("%s: %s" % (key, metadata[key]))
return 0
def versions_available(self):
"""
Query PyPI for a particular version or all versions of a package
@returns: 0 if version(s) found or 1 if none found
"""
if self.version:
spec = "%s==%s" % (self.project_name, self.version)
else:
spec = self.project_name
if self.all_versions and self.version in self.all_versions:
print_pkg_versions(self.project_name, [self.version])
elif not self.version and self.all_versions:
print_pkg_versions(self.project_name, self.all_versions)
else:
if self.version:
self.logger.error("No package found for version %s" \
% self.version)
else:
self.logger.error("No package found for %s" % self.project_name)
return 1
return 0
def parse_search_spec(self, spec):
"""
Parse search args and return spec dict for PyPI
* Owwww, my eyes! Re-write this.
@param spec: Cheese Shop package search spec
e.g.
name=Cheetah
license=ZPL
license=ZPL AND name=Cheetah
@type spec: string
@returns: tuple with spec and operator
"""
usage = \
"""You can search PyPI by the following:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
e.g. yolk -S name=Cheetah
yolk -S name=yolk AND license=PSF
"""
if not spec:
self.logger.error(usage)
return (None, None)
try:
spec = (" ").join(spec)
operator = 'AND'
first = second = ""
if " AND " in spec:
(first, second) = spec.split('AND')
elif " OR " in spec:
(first, second) = spec.split('OR')
operator = 'OR'
else:
first = spec
(key1, term1) = first.split('=')
key1 = key1.strip()
if second:
(key2, term2) = second.split('=')
key2 = key2.strip()
spec = {}
spec[key1] = term1
if second:
spec[key2] = term2
except:
self.logger.error(usage)
spec = operator = None
return (spec, operator)
def pypi_search(self):
"""
Search PyPI by metadata keyword
e.g. yolk -S name=yolk AND license=GPL
@param spec: Cheese Shop search spec
@type spec: list of strings
spec examples:
["name=yolk"]
["license=GPL"]
["name=yolk", "AND", "license=GPL"]
@returns: 0 on success or 1 if mal-formed search spec
"""
spec = self.pkg_spec
#Add remaining cli arguments to options.pypi_search
search_arg = self.options.pypi_search
spec.insert(0, search_arg.strip())
(spec, operator) = self.parse_search_spec(spec)
if not spec:
return 1
for pkg in self.pypi.search(spec, operator):
if pkg['summary']:
summary = pkg['summary'].encode('utf-8')
else:
summary = ""
print("""%s (%s):
%s
""" % (pkg['name'].encode('utf-8'), pkg["version"],
summary))
return 0
def show_entry_map(self):
"""
Show entry map for a package
@returns: 0 for success or 1 if error
"""
pprinter = pprint.PrettyPrinter()
try:
entry_map = pkg_resources.get_entry_map(self.options.show_entry_map)
if entry_map:
pprinter.pprint(entry_map)
except pkg_resources.DistributionNotFound:
self.logger.error("Distribution not found: %s" \
% self.options.show_entry_map)
return 1
return 0
def show_entry_points(self):
"""
Show entry points for a module
@returns: 0 for success or 1 if error
"""
found = False
for entry_point in \
pkg_resources.iter_entry_points(self.options.show_entry_points):
found = True
try:
plugin = entry_point.load()
print(plugin.__module__)
print(" %s" % entry_point)
if plugin.__doc__:
print(plugin.__doc__)
print()
except ImportError:
pass
if not found:
self.logger.error("No entry points found for %s" \
% self.options.show_entry_points)
return 1
return 0
def yolk_version(self):
"""
Show yolk's version
@returns: 0
"""
self.logger.info("yolk version %s" % VERSION)
return 0
def parse_pkg_ver(self, want_installed):
"""
Return tuple with project_name and version from CLI args
If the user gave the wrong case for the project name, this corrects it
@param want_installed: whether package we want is installed or not
@type want_installed: boolean
@returns: tuple(project_name, version, all_versions)
"""
all_versions = []
arg_str = ("").join(self.pkg_spec)
if "==" not in arg_str:
#No version specified
project_name = arg_str
version = None
else:
(project_name, version) = arg_str.split("==")
project_name = project_name.strip()
version = version.strip()
#Find proper case for package name
if want_installed:
dists = Distributions()
project_name = dists.case_sensitive_name(project_name)
else:
(project_name, all_versions) = \
self.pypi.query_versions_pypi(project_name)
if not len(all_versions):
msg = "I'm afraid we have no '%s' at " % project_name
msg += "The Cheese Shop. A little Red Leicester, perhaps?"
self.logger.error(msg)
sys.exit(2)
return (project_name, version, all_versions)
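#Worked example (illustrative): a PKG_SPEC of "paste==0.9" parses into
#project_name "paste" and version "0.9" (with the name case-corrected by
#PyPI or the local Distributions lookup), while a bare "paste" yields
#version None plus every visible release in all_versions.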
def setup_opt_parser():
"""
Setup the optparser
@returns: opt_parser.OptionParser
"""
#pylint: disable-msg=C0301
#line too long
usage = "usage: %prog [options]"
opt_parser = optparse.OptionParser(usage=usage)
opt_parser.add_option("--version", action='store_true', dest=
"yolk_version", default=False, help=
"Show yolk version and exit.")
opt_parser.add_option("--debug", action='store_true', dest=
"debug", default=False, help=
"Show debugging information.")
opt_parser.add_option("-q", "--quiet", action='store_true', dest=
"quiet", default=False, help=
"Show less output.")
group_local = optparse.OptionGroup(opt_parser,
"Query installed Python packages",
"The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9")
group_local.add_option("-l", "--list", action='store_true', dest=
"show_all", default=False, help=
"List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.")
group_local.add_option("-a", "--activated", action='store_true',
dest="show_active", default=False, help=
'List activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-n", "--non-activated", action='store_true',
dest="show_non_active", default=False, help=
'List non-activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-m", "--metadata", action='store_true', dest=
"metadata", default=False, help=
'Show all metadata for packages installed by ' +
'setuptools (use with -l -a or -n)')
group_local.add_option("-f", "--fields", action="store", dest=
"fields", default=False, help=
'Show specific metadata fields. ' +
'(use with -m or -M)')
group_local.add_option("-d", "--depends", action='store', dest=
"show_deps", metavar='PKG_SPEC',
help= "Show dependencies for a package installed by " +
"setuptools if they are available.")
group_local.add_option("--entry-points", action='store',
dest="show_entry_points", default=False, help=
'List entry points for a module. e.g. --entry-points nose.plugins',
metavar="MODULE")
group_local.add_option("--entry-map", action='store',
dest="show_entry_map", default=False, help=
'List entry map for a package. e.g. --entry-map yolk',
metavar="PACKAGE_NAME")
group_pypi = optparse.OptionGroup(opt_parser,
"PyPI (Cheese Shop) options",
"The following options query the Python Package Index:")
group_pypi.add_option("-C", "--changelog", action='store',
dest="show_pypi_changelog", metavar='HOURS',
default=False, help=
"Show detailed ChangeLog for PyPI for last n hours. ")
group_pypi.add_option("-D", "--download-links", action='store',
metavar="PKG_SPEC", dest="show_download_links",
default=False, help=
"Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.")
group_pypi.add_option("-F", "--fetch-package", action='store',
metavar="PKG_SPEC", dest="fetch",
default=False, help=
"Download package source or egg. You can specify a file type with -T")
group_pypi.add_option("-H", "--browse-homepage", action='store',
metavar="PKG_SPEC", dest="browse_website",
default=False, help=
"Launch web browser at home page for package.")
group_pypi.add_option("-I", "--pypi-index", action='store',
dest="pypi_index",
default=False, help=
"Specify PyPI mirror for package index.")
group_pypi.add_option("-L", "--latest-releases", action='store',
dest="show_pypi_releases", metavar="HOURS",
default=False, help=
"Show PyPI releases for last n hours. ")
group_pypi.add_option("-M", "--query-metadata", action='store',
dest="query_metadata_pypi", default=False,
metavar="PKG_SPEC", help=
"Show metadata for a package listed on PyPI. Use -f to show particular fields.")
group_pypi.add_option("-S", "", action="store", dest="pypi_search",
default=False, help=
"Search PyPI by spec and optional AND/OR operator.",
metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_option("-T", "--file-type", action="store", dest=
"file_type", default="all", help=
"You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_option("-U", "--show-updates", action='store_true',
dest="show_updates", metavar='<PKG_NAME>',
default=False, help=
"Check PyPI for updates on package(s).")
group_pypi.add_option("-V", "--versions-available", action=
'store', dest="versions_available",
default=False, metavar='PKG_SPEC',
help="Show available versions for given package " +
"listed on PyPI.")
opt_parser.add_option_group(group_local)
opt_parser.add_option_group(group_pypi)
# add opts from plugins
all_plugins = []
for plugcls in load_plugins(others=True):
plug = plugcls()
try:
plug.add_options(opt_parser)
except AttributeError:
pass
return opt_parser
def print_pkg_versions(project_name, versions):
"""
Print list of versions available for a package
@returns: None
"""
for ver in versions:
print("%s %s" % (project_name, ver))
def validate_pypi_opts(opt_parser):
"""
Check parse options that require pkg_spec
@returns: pkg_spec
"""
(options, remaining_args) = opt_parser.parse_args()
options_pkg_specs = [ options.versions_available,
options.query_metadata_pypi,
options.show_download_links,
options.browse_website,
options.fetch,
options.show_deps,
]
for pkg_spec in options_pkg_specs:
if pkg_spec:
return pkg_spec
def main():
"""
Let's do it.
"""
my_yolk = Yolk()
my_yolk.run()
if __name__ == "__main__":
sys.exit(main())
function.rs | use super::{Context, IntoValue, Local, V8};
use V8::Function;
impl Local<Function> {
pub fn call(
&mut self,
context: Local<Context>,
recv: &IntoValue,
argv: Vec<&IntoValue>,
) -> Local<V8::Value> {
let argc = argv.len() as i32;
let mut argv: Vec<V8::Local<V8::Value>> =
argv.iter().map(|&arg| arg.into_value().into()).collect();
unsafe {
self.inner_mut()
.Call(
context.into(),
recv.into_value().into(),
argc,
argv.as_mut_ptr(),
)
.to_local_checked()
.unwrap()
}
}
}
process.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::{pid_t, c_void, c_int};
use libc;
use std::io;
use std::mem;
use std::os;
use std::ptr;
use std::rt::rtio;
use p = std::io::process;
use super::IoResult;
use super::file;
use super::util;
#[cfg(windows)] use std::strbuf::StrBuf;
#[cfg(unix)] use super::c;
#[cfg(unix)] use super::retry;
#[cfg(unix)] use io::helper_thread::Helper;
#[cfg(unix)]
helper_init!(static mut HELPER: Helper<Req>)
/**
* A value representing a child process.
*
* The lifetime of this value is linked to the lifetime of the actual
* process - the Process destructor calls self.finish() which waits
* for the process to terminate.
*/
pub struct Process {
/// The unique id of the process (this should never be negative).
pid: pid_t,
/// A handle to the process - on unix this will always be NULL, but on
/// windows it will be a HANDLE to the process, which will prevent the
/// pid being re-used until the handle is closed.
handle: *(),
/// None until finish() is called.
exit_code: Option<p::ProcessExit>,
/// Manually delivered signal
exit_signal: Option<int>,
/// Deadline after which wait() will return
deadline: u64,
}
#[cfg(unix)]
enum Req {
NewChild(libc::pid_t, Sender<p::ProcessExit>, u64),
}
impl Process {
/// Creates a new process using native process-spawning abilities provided
/// by the OS. Operations on this process will be blocking instead of using
/// the runtime for sleeping just this current task.
///
/// # Arguments
///
/// * prog - the program to run
/// * args - the arguments to pass to the program, not including the program
/// itself
/// * env - an optional environment to specify for the child process. If
/// this value is `None`, then the child will inherit the parent's
/// environment
/// * cwd - an optionally specified current working directory of the child,
/// defaulting to the parent's current working directory
/// * stdin, stdout, stderr - These optionally specified file descriptors
/// dictate where the stdin/out/err of the child process will go. If
/// these are `None`, then this module will bind the input/output to an
/// os pipe instead. This process takes ownership of these file
/// descriptors, closing them upon destruction of the process.
pub fn spawn(config: p::ProcessConfig)
-> Result<(Process, Vec<Option<file::FileDesc>>), io::IoError>
{
// right now we only handle stdin/stdout/stderr.
if config.extra_io.len() > 0 {
return Err(super::unimpl());
}
fn get_io(io: p::StdioContainer, ret: &mut Vec<Option<file::FileDesc>>)
-> (Option<os::Pipe>, c_int)
{
match io {
p::Ignored => { ret.push(None); (None, -1) }
p::InheritFd(fd) => { ret.push(None); (None, fd) }
p::CreatePipe(readable, _writable) => {
let pipe = os::pipe();
let (theirs, ours) = if readable {
(pipe.input, pipe.out)
} else {
(pipe.out, pipe.input)
};
ret.push(Some(file::FileDesc::new(ours, true)));
(Some(pipe), theirs)
}
}
}
let mut ret_io = Vec::new();
let (in_pipe, in_fd) = get_io(config.stdin, &mut ret_io);
let (out_pipe, out_fd) = get_io(config.stdout, &mut ret_io);
let (err_pipe, err_fd) = get_io(config.stderr, &mut ret_io);
let env = config.env.map(|a| a.to_owned());
let cwd = config.cwd.map(|a| Path::new(a));
let res = spawn_process_os(config, env, cwd.as_ref(), in_fd, out_fd,
err_fd);
unsafe {
for pipe in in_pipe.iter() { let _ = libc::close(pipe.input); }
for pipe in out_pipe.iter() { let _ = libc::close(pipe.out); }
for pipe in err_pipe.iter() { let _ = libc::close(pipe.out); }
}
match res {
Ok(res) => {
Ok((Process {
pid: res.pid,
handle: res.handle,
exit_code: None,
exit_signal: None,
deadline: 0,
},
ret_io))
}
Err(e) => Err(e)
}
}
pub fn kill(pid: libc::pid_t, signum: int) -> IoResult<()> {
unsafe { killpid(pid, signum) }
}
}
impl rtio::RtioProcess for Process {
fn id(&self) -> pid_t { self.pid }
fn set_timeout(&mut self, timeout: Option<u64>) {
self.deadline = timeout.map(|i| i + ::io::timer::now()).unwrap_or(0);
}
fn wait(&mut self) -> IoResult<p::ProcessExit> {
match self.exit_code {
Some(code) => Ok(code),
None => {
let code = try!(waitpid(self.pid, self.deadline));
// On windows, waitpid will never return a signal. If a signal
// was successfully delivered to the process, however, we can
// consider it as having died via a signal.
let code = match self.exit_signal {
None => code,
Some(signal) if cfg!(windows) => p::ExitSignal(signal),
Some(..) => code,
};
self.exit_code = Some(code);
Ok(code)
}
}
}
fn kill(&mut self, signum: int) -> Result<(), io::IoError> {
// On linux (and possibly other unices), a process that has exited will
// continue to accept signals because it is "defunct". The delivery of
// signals will only fail once the child has been reaped. For this
// reason, if the process hasn't exited yet, then we attempt to collect
// their status with WNOHANG.
if self.exit_code.is_none() {
match waitpid_nowait(self.pid) {
Some(code) => { self.exit_code = Some(code); }
None => {}
}
}
// if the process has finished, and therefore had waitpid called,
// and we kill it, then on unix we might ending up killing a
// newer process that happens to have the same (re-used) id
match self.exit_code {
Some(..) => return Err(io::IoError {
kind: io::OtherIoError,
desc: "can't kill an exited process",
detail: None,
}),
None => {}
}
// A successfully delivered signal that isn't 0 (just a poll for being
// alive) is recorded for windows (see wait())
match unsafe { killpid(self.pid, signum) } {
Ok(()) if signum == 0 => Ok(()),
Ok(()) => { self.exit_signal = Some(signum); Ok(()) }
Err(e) => Err(e),
}
}
}
impl Drop for Process {
fn drop(&mut self) {
free_handle(self.handle);
}
}
#[cfg(windows)]
unsafe fn killpid(pid: pid_t, signal: int) -> Result<(), io::IoError> {
let handle = libc::OpenProcess(libc::PROCESS_TERMINATE |
libc::PROCESS_QUERY_INFORMATION,
libc::FALSE, pid as libc::DWORD);
if handle.is_null() {
return Err(super::last_error())
}
let ret = match signal {
// test for existence on signal 0
0 => {
let mut status = 0;
let ret = libc::GetExitCodeProcess(handle, &mut status);
if ret == 0 {
Err(super::last_error())
} else if status != libc::STILL_ACTIVE {
Err(io::IoError {
kind: io::OtherIoError,
desc: "process no longer alive",
detail: None,
})
} else {
Ok(())
}
}
io::process::PleaseExitSignal | io::process::MustDieSignal => {
let ret = libc::TerminateProcess(handle, 1);
super::mkerr_winbool(ret)
}
_ => Err(io::IoError {
kind: io::OtherIoError,
desc: "unsupported signal on windows",
detail: None,
})
};
let _ = libc::CloseHandle(handle);
return ret;
}
#[cfg(not(windows))]
unsafe fn killpid(pid: pid_t, signal: int) -> Result<(), io::IoError> {
let r = libc::funcs::posix88::signal::kill(pid, signal as c_int);
super::mkerr_libc(r)
}
struct SpawnProcessResult {
pid: pid_t,
handle: *(),
}
#[cfg(windows)]
fn spawn_process_os(config: p::ProcessConfig,
env: Option<~[(~str, ~str)]>,
dir: Option<&Path>,
in_fd: c_int, out_fd: c_int,
err_fd: c_int) -> IoResult<SpawnProcessResult> {
use libc::types::os::arch::extra::{DWORD, HANDLE, STARTUPINFO};
use libc::consts::os::extra::{
TRUE, FALSE,
STARTF_USESTDHANDLES,
INVALID_HANDLE_VALUE,
DUPLICATE_SAME_ACCESS
};
use libc::funcs::extra::kernel32::{
GetCurrentProcess,
DuplicateHandle,
CloseHandle,
CreateProcessW
};
use libc::funcs::extra::msvcrt::get_osfhandle;
use std::mem;
if config.gid.is_some() || config.uid.is_some() {
return Err(io::IoError {
kind: io::OtherIoError,
desc: "unsupported gid/uid requested on windows",
detail: None,
})
}
unsafe {
let mut si = zeroed_startupinfo();
si.cb = mem::size_of::<STARTUPINFO>() as DWORD;
si.dwFlags = STARTF_USESTDHANDLES;
let cur_proc = GetCurrentProcess();
if in_fd != -1 {
let orig_std_in = get_osfhandle(in_fd) as HANDLE;
if orig_std_in == INVALID_HANDLE_VALUE as HANDLE {
fail!("failure in get_osfhandle: {}", os::last_os_error());
}
if DuplicateHandle(cur_proc, orig_std_in, cur_proc, &mut si.hStdInput,
0, TRUE, DUPLICATE_SAME_ACCESS) == FALSE {
fail!("failure in DuplicateHandle: {}", os::last_os_error());
}
}
if out_fd != -1 {
let orig_std_out = get_osfhandle(out_fd) as HANDLE;
if orig_std_out == INVALID_HANDLE_VALUE as HANDLE {
fail!("failure in get_osfhandle: {}", os::last_os_error());
}
if DuplicateHandle(cur_proc, orig_std_out, cur_proc, &mut si.hStdOutput,
0, TRUE, DUPLICATE_SAME_ACCESS) == FALSE {
fail!("failure in DuplicateHandle: {}", os::last_os_error());
}
}
if err_fd != -1 {
let orig_std_err = get_osfhandle(err_fd) as HANDLE;
if orig_std_err == INVALID_HANDLE_VALUE as HANDLE {
fail!("failure in get_osfhandle: {}", os::last_os_error());
}
if DuplicateHandle(cur_proc, orig_std_err, cur_proc, &mut si.hStdError,
0, TRUE, DUPLICATE_SAME_ACCESS) == FALSE {
fail!("failure in DuplicateHandle: {}", os::last_os_error());
}
}
let cmd = make_command_line(config.program, config.args);
let mut pi = zeroed_process_information();
let mut create_err = None;
// stolen from the libuv code.
let mut flags = libc::CREATE_UNICODE_ENVIRONMENT;
if config.detach {
flags |= libc::DETACHED_PROCESS | libc::CREATE_NEW_PROCESS_GROUP;
}
with_envp(env, |envp| {
with_dirp(dir, |dirp| {
os::win32::as_mut_utf16_p(cmd, |cmdp| {
let created = CreateProcessW(ptr::null(), cmdp,
ptr::mut_null(), ptr::mut_null(), TRUE,
flags, envp, dirp, &mut si,
&mut pi);
if created == FALSE {
create_err = Some(super::last_error());
}
})
})
});
if in_fd != -1 { assert!(CloseHandle(si.hStdInput) != 0); }
if out_fd != -1 { assert!(CloseHandle(si.hStdOutput) != 0); }
if err_fd != -1 { assert!(CloseHandle(si.hStdError) != 0); }
match create_err {
Some(err) => return Err(err),
None => {}
}
// We close the thread handle because we don't care about keeping the
// thread id valid, and we aren't keeping the thread handle around to be
// able to close it later. We don't close the process handle however
// because std::we want the process id to stay valid at least until the
// calling code closes the process handle.
assert!(CloseHandle(pi.hThread) != 0);
Ok(SpawnProcessResult {
pid: pi.dwProcessId as pid_t,
handle: pi.hProcess as *()
})
}
}
#[cfg(windows)]
fn zeroed_startupinfo() -> libc::types::os::arch::extra::STARTUPINFO {
libc::types::os::arch::extra::STARTUPINFO {
cb: 0,
lpReserved: ptr::mut_null(),
lpDesktop: ptr::mut_null(),
lpTitle: ptr::mut_null(),
dwX: 0,
dwY: 0,
dwXSize: 0,
dwYSize: 0,
dwXCountChars: 0,
dwYCountChars: 0,
dwFillAttribute: 0,
dwFlags: 0,
wShowWindow: 0,
cbReserved2: 0,
lpReserved2: ptr::mut_null(),
hStdInput: ptr::mut_null(),
hStdOutput: ptr::mut_null(),
hStdError: ptr::mut_null()
}
}
#[cfg(windows)]
fn zeroed_process_information() -> libc::types::os::arch::extra::PROCESS_INFORMATION {
libc::types::os::arch::extra::PROCESS_INFORMATION {
hProcess: ptr::mut_null(),
hThread: ptr::mut_null(),
dwProcessId: 0,
dwThreadId: 0
}
}
#[cfg(windows)]
fn make_command_line(prog: &str, args: &[~str]) -> ~str {
let mut cmd = StrBuf::new();
append_arg(&mut cmd, prog);
for arg in args.iter() {
cmd.push_char(' ');
append_arg(&mut cmd, *arg);
}
return cmd.into_owned();
fn append_arg(cmd: &mut StrBuf, arg: &str) {
let quote = arg.chars().any(|c| c == ' ' || c == '\t');
if quote {
cmd.push_char('"');
}
let argvec: Vec<char> = arg.chars().collect();
for i in range(0u, argvec.len()) {
append_char_at(cmd, &argvec, i);
}
if quote {
cmd.push_char('"');
}
}
fn append_char_at(cmd: &mut StrBuf, arg: &Vec<char>, i: uint) {
match *arg.get(i) {
'"' => {
// Escape quotes.
cmd.push_str("\\\"");
}
'\\' => {
if backslash_run_ends_in_quote(arg, i) {
// Double all backslashes that are in runs before quotes.
cmd.push_str("\\\\");
} else {
// Pass other backslashes through unescaped.
cmd.push_char('\\');
}
}
c => {
cmd.push_char(c);
}
}
}
fn backslash_run_ends_in_quote(s: &Vec<char>, mut i: uint) -> bool {
while i < s.len() && *s.get(i) == '\\' {
i += 1;
}
return i < s.len() && *s.get(i) == '"';
}
}
#[cfg(unix)]
fn spawn_process_os(config: p::ProcessConfig,
env: Option<~[(~str, ~str)]>,
dir: Option<&Path>,
in_fd: c_int, out_fd: c_int,
err_fd: c_int) -> IoResult<SpawnProcessResult> {
use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
use libc::funcs::bsd44::getdtablesize;
use io::c;
mod rustrt {
extern {
pub fn rust_unset_sigprocmask();
}
}
#[cfg(target_os = "macos")]
unsafe fn set_environ(envp: *c_void) {
extern { fn _NSGetEnviron() -> *mut *c_void; }
*_NSGetEnviron() = envp;
}
#[cfg(not(target_os = "macos"))]
unsafe fn set_environ(envp: *c_void) {
extern { static mut environ: *c_void; }
environ = envp;
}
unsafe fn set_cloexec(fd: c_int) {
let ret = c::ioctl(fd, c::FIOCLEX);
assert_eq!(ret, 0);
}
let dirp = dir.map(|p| p.to_c_str());
let dirp = dirp.as_ref().map(|c| c.with_ref(|p| p)).unwrap_or(ptr::null());
with_envp(env, proc(envp) {
with_argv(config.program, config.args, proc(argv) unsafe {
let pipe = os::pipe();
let mut input = file::FileDesc::new(pipe.input, true);
let mut output = file::FileDesc::new(pipe.out, true);
set_cloexec(output.fd());
let pid = fork();
if pid < 0 {
fail!("failure in fork: {}", os::last_os_error());
} else if pid > 0 {
drop(output);
let mut bytes = [0, ..4];
return match input.inner_read(bytes) {
Ok(4) => {
// widen each byte before shifting so the high bits aren't lost
let errno = (bytes[0] as i32) << 24 |
(bytes[1] as i32) << 16 |
(bytes[2] as i32) << 8 |
(bytes[3] as i32) << 0;
Err(io::IoError::from_errno(errno as uint, false))
}
Err(e) => {
assert!(e.kind == io::BrokenPipe ||
e.kind == io::EndOfFile,
"unexpected error: {}", e);
Ok(SpawnProcessResult {
pid: pid,
handle: ptr::null()
})
}
Ok(..) => fail!("short read on the cloexec pipe"),
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
// threads do *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc or free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut file::FileDesc) -> ! {
let errno = os::errno();
let bytes = [
// big-endian encoding of errno; shift right to pick out each byte
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
];
assert!(output.inner_write(bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
if in_fd == -1 {
let _ = libc::close(libc::STDIN_FILENO);
} else if retry(|| dup2(in_fd, 0)) == -1 {
fail(&mut output);
}
if out_fd == -1 {
let _ = libc::close(libc::STDOUT_FILENO);
} else if retry(|| dup2(out_fd, 1)) == -1 {
fail(&mut output);
}
if err_fd == -1 {
let _ = libc::close(libc::STDERR_FILENO);
} else if retry(|| dup2(err_fd, 2)) == -1 {
fail(&mut output);
}
// close all other fds
for fd in range(3, getdtablesize()).rev() {
if fd != output.fd() {
let _ = close(fd as c_int);
}
}
match config.gid {
Some(u) => {
if libc::setgid(u as libc::gid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
match config.uid {
Some(u) => {
// When dropping privileges from root, the `setgroups` call will
// remove any extraneous groups. If we don't call this, then
// even though our uid has dropped, we may still have groups
// that enable us to do super-user things. This will fail if we
// aren't root, so don't bother checking the return value, this
// is just done as an optimistic privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, 0 as *libc::c_void);
if libc::setuid(u as libc::uid_t) != 0 {
fail(&mut output);
}
}
None => {}
}
if config.detach {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if !dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if !envp.is_null() {
set_environ(envp);
}
let _ = execvp(*argv, argv);
fail(&mut output);
})
})
}
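// Worked example (added for clarity): if execvp fails in the child with
// errno 13 (EACCES), the child writes the big-endian bytes [0, 0, 0, 13]
// into the cloexec pipe and _exits; the parent reads exactly 4 bytes,
// reassembles 13, and reports an IoError instead of a phantom spawn.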
#[cfg(unix)]
fn with_argv<T>(prog: &str, args: &[~str], cb: proc(**libc::c_char) -> T) -> T {
// We can't directly convert `str`s into `*char`s, as someone needs to hold
// a reference to the intermediary byte buffers. So first build an array to
// hold all the ~[u8] byte strings.
let mut tmps = Vec::with_capacity(args.len() + 1);
tmps.push(prog.to_c_str());
for arg in args.iter() {
tmps.push(arg.to_c_str());
}
// Next, convert each of the byte strings into a pointer. This is
// technically unsafe as the caller could leak these pointers out of our
// scope.
let mut ptrs: Vec<_> = tmps.iter().map(|tmp| tmp.with_ref(|buf| buf)).collect();
// Finally, make sure we add a null pointer.
ptrs.push(ptr::null());
cb(ptrs.as_ptr())
}
#[cfg(unix)]
fn with_envp<T>(env: Option<~[(~str, ~str)]>, cb: proc(*c_void) -> T) -> T {
// On posixy systems we can pass a char** for envp, which is a
// null-terminated array of "k=v\n" strings. Like `with_argv`, we have to
// have a temporary buffer to hold the intermediary `~[u8]` byte strings.
match env {
Some(env) => {
let mut tmps = Vec::with_capacity(env.len());
for pair in env.iter() {
let kv = format!("{}={}", *pair.ref0(), *pair.ref1());
tmps.push(kv.to_c_str());
}
// Once again, this is unsafe.
let mut ptrs: Vec<*libc::c_char> = tmps.iter()
.map(|tmp| tmp.with_ref(|buf| buf))
.collect();
ptrs.push(ptr::null());
cb(ptrs.as_ptr() as *c_void)
}
_ => cb(ptr::null())
}
}
#[cfg(windows)]
fn with_envp<T>(env: Option<~[(~str, ~str)]>, cb: |*mut c_void| -> T) -> T {
// On win32 we pass an "environment block" which is not a char**, but
// rather a concatenation of null-terminated k=v\0 sequences, with a final
// \0 to terminate.
match env {
Some(env) => {
let mut blk = Vec::new();
for pair in env.iter() {
let kv = format!("{}={}", *pair.ref0(), *pair.ref1());
blk.push_all(kv.to_utf16().as_slice());
blk.push(0);
}
blk.push(0);
cb(blk.as_mut_ptr() as *mut c_void)
}
_ => cb(ptr::mut_null())
}
}
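// Illustrative example (not from the original source): an env of
// [("PATH", "/bin"), ("HOME", "/root")] becomes the UTF-16 encoding of
// "PATH=/bin\0HOME=/root\0\0" -- one NUL after each entry and a second
// trailing NUL terminating the whole block.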
#[cfg(windows)]
fn with_dirp<T>(d: Option<&Path>, cb: |*u16| -> T) -> T {
match d {
Some(dir) => match dir.as_str() {
Some(dir_str) => os::win32::as_utf16_p(dir_str, cb),
None => cb(ptr::null())
},
None => cb(ptr::null())
}
}
#[cfg(windows)]
fn free_handle(handle: *()) {
assert!(unsafe {
libc::CloseHandle(mem::transmute(handle)) != 0
})
}
#[cfg(unix)]
fn free_handle(_handle: *()) {
// unix has no process handle object, just a pid
}
#[cfg(unix)]
fn translate_status(status: c_int) -> p::ProcessExit {
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0xff) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { (status >> 8) & 0xff }
pub fn WTERMSIG(status: i32) -> i32 { status & 0x7f }
}
#[cfg(target_os = "macos")]
#[cfg(target_os = "freebsd")]
mod imp {
pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 }
pub fn WEXITSTATUS(status: i32) -> i32 { status >> 8 }
pub fn WTERMSIG(status: i32) -> i32 { status & 0o177 }
}
if imp::WIFEXITED(status) {
p::ExitStatus(imp::WEXITSTATUS(status) as int)
} else {
p::ExitSignal(imp::WTERMSIG(status) as int)
}
}
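// Worked example (added for clarity, linux encoding): a raw status of
// 0x0b00 has a zero low byte, so WIFEXITED holds and WEXITSTATUS yields
// (0x0b00 >> 8) & 0xff == 11, i.e. ExitStatus(11); a raw status of 0x000b
// fails WIFEXITED and WTERMSIG(0x000b) == 11, i.e. ExitSignal(11).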
/**
* Waits for a process to exit and returns the exit code, failing
* if there is no process with the specified id.
*
* Note that this is private to avoid race conditions on unix where if
* a user calls waitpid(some_process.get_id()) then some_process.finish()
* and some_process.destroy() and some_process.finalize() will then either
* operate on a none-existent process or, even worse, on a newer process
* with the same id.
*/
#[cfg(windows)]
fn waitpid(pid: pid_t, deadline: u64) -> IoResult<p::ProcessExit> {
use libc::types::os::arch::extra::DWORD;
use libc::consts::os::extra::{
SYNCHRONIZE,
PROCESS_QUERY_INFORMATION,
FALSE,
STILL_ACTIVE,
INFINITE,
WAIT_TIMEOUT,
WAIT_OBJECT_0,
};
use libc::funcs::extra::kernel32::{
OpenProcess,
GetExitCodeProcess,
CloseHandle,
WaitForSingleObject,
};
unsafe {
let process = OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
FALSE,
pid as DWORD);
if process.is_null() {
return Err(super::last_error())
}
loop {
let mut status = 0;
if GetExitCodeProcess(process, &mut status) == FALSE {
let err = Err(super::last_error());
assert!(CloseHandle(process) != 0);
return err;
}
if status != STILL_ACTIVE {
assert!(CloseHandle(process) != 0);
return Ok(p::ExitStatus(status as int));
}
let interval = if deadline == 0 {
INFINITE
} else {
let now = ::io::timer::now();
if deadline < now {0} else {(deadline - now) as u32}
};
match WaitForSingleObject(process, interval) {
WAIT_OBJECT_0 => {}
WAIT_TIMEOUT => {
assert!(CloseHandle(process) != 0);
return Err(util::timeout("process wait timed out"))
}
_ => {
let err = Err(super::last_error());
assert!(CloseHandle(process) != 0);
return err
}
}
}
}
}
#[cfg(unix)]
fn waitpid(pid: pid_t, deadline: u64) -> IoResult<p::ProcessExit> {
use std::cmp;
use std::comm;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(pid, &mut status, 0) }) {
-1 => fail!("unknown waitpid error: {}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
unsafe { HELPER.boot(register_sigchld, waitpid_helper) }
match waitpid_nowait(pid) {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
unsafe { HELPER.send(NewChild(pid, tx, deadline)); }
return match rx.recv_opt() {
Ok(e) => Ok(e),
Err(()) => Err(util::timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut old: c::sigaction = mem::init();
let mut new: c::sigaction = mem::init();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
let mut pipes = [0, ..2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
util::set_nonblocking(pipes[0], true).unwrap();
util::set_nonblocking(pipes[1], true).unwrap();
WRITE_FD = pipes[1];
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
util::set_nonblocking(input, true).unwrap();
let mut set: c::fd_set = unsafe { mem::init() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<p::ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| *a.ref2()).enumerate().min_by(|p| {
p.val1()
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = ::io::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = util::ms_to_timeval(ms);
(&tv as *_, idx)
}
None => (ptr::null(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &set, ptr::null(), ptr::null(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as int => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => fail!("error in select {} ({})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
Err(comm::Disconnected) => {
assert!(active.len() == 0);
break 'outer;
}
Err(comm::Empty) => break,
}
}
}
// If a child exited (somehow received SIGCHLD), then poll all
// children to see if any of them exited.
//
// We also attempt to be responsible netizens when dealing with
// SIGCHLD by invoking any previous SIGCHLD handler instead of just
// ignoring any previous SIGCHLD handler. Note that we don't provide
// a 1:1 mapping of our handler invocations to the previous handler
// invocations because we drain the `read_fd` entirely. This is
// probably OK because the kernel is already allowed to coalesce
// simultaneous signals, we're just doing some extra coalescing.
//
// Another point of note is that this likely runs the signal handler
// on a different thread than the one that received the signal. I
// *think* this is ok at this time.
//
// The main reason for doing this is to allow stdtest to run native
// tests as well. Both libgreen and libnative are running around
// with process timeouts, but libgreen should get there first
// (currently libuv doesn't handle old signal handlers).
if drain(read_fd) {
let i: uint = unsafe { mem::transmute(old.sa_handler) };
if i != 0 {
assert!(old.sa_flags & c::SA_SIGINFO == 0);
(old.sa_handler)(c::SIGCHLD);
}
// FIXME: sure would be nice to not have to scan the entire
// array...
active.retain(|&(pid, ref tx, _)| {
match waitpid_nowait(pid) {
Some(msg) => { tx.send(msg); false }
None => true,
}
});
}
}
// Once this helper thread is done, we re-register the old sigchld
// handler and close our intermediate file descriptors.
unsafe {
assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::mut_null()), 0);
let _ = libc::close(read_fd);
let _ = libc::close(WRITE_FD);
WRITE_FD = -1;
}
}
// Drain all pending data from the file descriptor, returning if any data
// could be drained. This requires that the file descriptor is in
// nonblocking mode.
fn drain(fd: libc::c_int) -> bool {
let mut ret = false;
loop {
let mut buf = [0u8, ..1];
match unsafe {
libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void,
buf.len() as libc::size_t)
} {
n if n > 0 => { ret = true; }
0 => return true,
-1 if util::wouldblock() => return ret,
n => fail!("bad read {} ({})", os::last_os_error(), n),
}
}
}
// Signal handler for SIGCHLD signals, must be async-signal-safe!
//
// This function will write to the writing half of the "self pipe" to wake
// up the helper thread if it's waiting. Note that this write must be
// nonblocking because if it blocks and the reader is the thread we
// interrupted, then we'll deadlock.
//
// When writing, if the write returns EWOULDBLOCK then we choose to ignore
// it. At that point we're guaranteed that there's something in the pipe
// which will wake up the other end at some point, so we just allow this
// signal to be coalesced with the pending signals on the pipe.
extern fn sigchld_handler(_signum: libc::c_int) {
let mut msg = 1;
match unsafe {
libc::write(WRITE_FD, &mut msg as *mut _ as *libc::c_void, 1)
} {
1 => {}
-1 if util::wouldblock() => {} // see above comments
n => fail!("bad error on write fd: {} {}", n, os::errno()),
}
}
}
fn waitpid_nowait(pid: pid_t) -> Option<p::ProcessExit> {
return waitpid_os(pid);
// This code path isn't necessary on windows
#[cfg(windows)]
fn waitpid_os(_pid: pid_t) -> Option<p::ProcessExit> { None }
#[cfg(unix)]
fn waitpid_os(pid: pid_t) -> Option<p::ProcessExit> {
let mut status = 0 as c_int;
match retry(|| unsafe {
c::waitpid(pid, &mut status, c::WNOHANG)
}) {
n if n == pid => Some(translate_status(status)),
0 => None,
n => fail!("unknown waitpid error `{}`: {}", n, super::last_error()),
}
}
}
#[cfg(test)]
mod tests {
#[test] #[cfg(windows)]
fn test_make_command_line() {
use super::make_command_line;
assert_eq!(
make_command_line("prog", ["aaa".to_owned(), "bbb".to_owned(), "ccc".to_owned()]),
"prog aaa bbb ccc".to_owned()
);
assert_eq!(
make_command_line("C:\\Program Files\\blah\\blah.exe", ["aaa".to_owned()]),
"\"C:\\Program Files\\blah\\blah.exe\" aaa".to_owned()
);
assert_eq!(
make_command_line("C:\\Program Files\\test", ["aa\"bb".to_owned()]),
"\"C:\\Program Files\\test\" aa\\\"bb".to_owned()
);
assert_eq!(
make_command_line("echo", ["a b c".to_owned()]),
"echo \"a b c\"".to_owned()
);
assert_eq!(
make_command_line("\u03c0\u042f\u97f3\u00e6\u221e", []),
"\u03c0\u042f\u97f3\u00e6\u221e".to_owned()
);
}
}
errors.rs | use std::convert::Into;
use std::error::Error as StdError;
use std::fmt;
/// The kind of an error (non-exhaustive)
#[derive(Debug)]
pub enum ErrorKind {
/// Generic error
Msg(String),
/// A loop was found while looking up the inheritance chain
CircularExtend {
/// Name of the template with the loop
tpl: String,
/// All the parents templates we found so far
inheritance_chain: Vec<String>,
},
/// A template is extending a template that wasn't found in the Tera instance
MissingParent {
/// The template we are currently looking at
current: String,
/// The missing template
parent: String,
},
/// A template was missing (more generic version of MissingParent)
TemplateNotFound(String),
/// A filter wasn't found
FilterNotFound(String),
/// A test wasn't found
TestNotFound(String),
/// A macro was defined in the middle of a template
InvalidMacroDefinition(String),
/// A function wasn't found
FunctionNotFound(String),
/// An error happened while serializing JSON
Json(serde_json::Error),
/// This enum may grow additional variants, so this makes sure clients
/// don't count on exhaustive matching. (Otherwise, adding a new variant
/// could break existing code.)
#[doc(hidden)]
__Nonexhaustive,
}
/// The Error type
#[derive(Debug)]
pub struct Error {
/// Kind of error
pub kind: ErrorKind,
source: Option<Box<dyn StdError + Sync + Send>>,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.kind {
ErrorKind::Msg(ref message) => write!(f, "{}", message),
ErrorKind::CircularExtend { ref tpl, ref inheritance_chain } => write!(
f,
"Circular extend detected for template '{}'. Inheritance chain: `{:?}`",
tpl, inheritance_chain
),
ErrorKind::MissingParent { ref current, ref parent } => write!(
f,
"Template '{}' is inheriting from '{}', which doesn't exist or isn't loaded.",
current, parent
),
ErrorKind::TemplateNotFound(ref name) => write!(f, "Template '{}' not found", name),
ErrorKind::FilterNotFound(ref name) => write!(f, "Filter '{}' not found", name),
ErrorKind::TestNotFound(ref name) => write!(f, "Test '{}' not found", name),
ErrorKind::FunctionNotFound(ref name) => write!(f, "Function '{}' not found", name),
ErrorKind::InvalidMacroDefinition(ref info) => {
write!(f, "Invalid macro definition: `{}`", info)
}
ErrorKind::Json(ref e) => write!(f, "{}", e),
ErrorKind::__Nonexhaustive => write!(f, "Nonexhaustive"),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self.source.as_ref().map(|c| &**c as &(dyn StdError + 'static))
}
}
impl Error {
/// Creates generic error
pub fn msg(value: impl ToString) -> Self {
Self { kind: ErrorKind::Msg(value.to_string()), source: None }
}
/// Creates a circular extend error
pub fn circular_extend(tpl: impl ToString, inheritance_chain: Vec<String>) -> Self {
Self {
kind: ErrorKind::CircularExtend { tpl: tpl.to_string(), inheritance_chain },
source: None,
}
}
/// Creates a missing parent error
pub fn missing_parent(current: impl ToString, parent: impl ToString) -> Self {
Self {
kind: ErrorKind::MissingParent {
current: current.to_string(),
parent: parent.to_string(),
},
source: None,
}
}
/// Creates a template not found error
pub fn template_not_found(tpl: impl ToString) -> Self {
Self { kind: ErrorKind::TemplateNotFound(tpl.to_string()), source: None }
}
/// Creates a filter not found error
pub fn filter_not_found(name: impl ToString) -> Self {
Self { kind: ErrorKind::FilterNotFound(name.to_string()), source: None }
}
/// Creates a test not found error
pub fn test_not_found(name: impl ToString) -> Self {
Self { kind: ErrorKind::TestNotFound(name.to_string()), source: None }
}
/// Creates a function not found error
pub fn function_not_found(name: impl ToString) -> Self {
Self { kind: ErrorKind::FunctionNotFound(name.to_string()), source: None }
}
/// Creates generic error with a source
pub fn chain(value: impl ToString, source: impl Into<Box<dyn StdError + Send + Sync>>) -> Self {
Self { kind: ErrorKind::Msg(value.to_string()), source: Some(source.into()) }
}
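// Note: unlike `msg`, `chain` keeps the underlying cause, so callers can
// walk it later via `std::error::Error::source()`. Illustrative only
// (`io_err` is a hypothetical source error):
//     let err = Error::chain("failed to read template", io_err);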
/// Creates JSON error
pub fn json(value: serde_json::Error) -> Self {
Self { kind: ErrorKind::Json(value), source: None }
}
/// Creates an invalid macro definition error
pub fn invalid_macro_def(name: impl ToString) -> Self {
Self { kind: ErrorKind::InvalidMacroDefinition(name.to_string()), source: None }
}
}
impl From<&str> for Error {
fn from(e: &str) -> Self {
Self::msg(e)
}
}
impl From<String> for Error {
fn from(e: String) -> Self {
Self::msg(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Self::json(e)
}
}
/// Convenient wrapper around std::Result.
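///
/// Illustrative usage (the function name is hypothetical):
///
/// ```ignore
/// fn render_homepage() -> Result<String> {
///     Err(Error::template_not_found("home.html"))
/// }
/// ```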
pub type Result<T> = ::std::result::Result<T, Error>;
#[cfg(test)]
mod tests {
#[test]
fn test_error_is_send_and_sync() {
fn test_send_sync<T: Send + Sync>() {}
test_send_sync::<super::Error>();
}
}
VideoEncoderIncomingStreamTrack.js | const uuidV4 = require("uuid/v4");
const Native = require("./Native");
const EventEmitter = require("events").EventEmitter;
class VideoEncoderIncomingStreamTrack
{
/**
* @ignore
* @hideconstructor
* private constructor
*/
constructor(id,receiver,source)
{
//Create new id
this.id = id || uuidV4();
//Store track info
this.media = "video";
this.receiver = receiver;
//Attach counter
this.counter = 0;
//Create source map
this.encodings = new Map();
//Push new encoding
this.encodings.set("", {
source : source,
depacketizer : new Native.RTPIncomingMediaStreamDepacketizer(source)
});
//Set global depacketizer
this.depacketizer = this.encodings.get("").depacketizer;
//Create event emitter
this.emitter = new EventEmitter();
}
/**
* Get stats for all encodings
*
*
* @returns {Object} Stats keyed by encodingId
*/
getStats()
{
//Check if we have cached stats
if (!this.stats)
//Create new stats
this.stats = {};
//TODO
//Return a clone of cached stats;
return Object.assign({},this.stats);
}
/**
* Return ssrcs associated to this track
* @returns {Object}
*/
getSSRCs()
{
const ssrcs = {};
//For each encoding, keyed by its encoding id
for (const [id,encoding] of this.encodings.entries())
//Push new encoding
ssrcs[id] = {
media : encoding.source.media,
rtx : encoding.source.rtx,
};
//Return the stats array
return ssrcs;
}
/**
* Get active encodings and layers ordered by bitrate
* @returns {Object} Active layers object containing an array of active and inactive encodings and an array of all available layer info
*/
getActiveLayers()
{
const active = [];
const inactive = [];
const all = [];
//Return ordered info, highest bitrate first (sort comparators must return a number)
return {
active : active.sort((a, b) => b.bitrate - a.bitrate),
inactive : inactive,
layers : all.sort((a, b) => b.bitrate - a.bitrate)
};
}
/**
* Get track id as signaled on the SDP
*/
getId()
{
return this.id;
}
/**
* Get track media type
* @returns {String} "audio"|"video"
*/
getMedia()
{
return this.media;
}
/**
* Add event listener
* @param {String} event - Event name
* @param {function} listener - Event listener
* @returns {IncomingStreamTrack}
*/
on()
{
//Delegate event listeners to event emitter
this.emitter.on.apply(this.emitter, arguments);
//Return object so it can be chained
return this;
}
/**
* Add event listener once
* @param {String} event - Event name
* @param {function} listener - Event listener
* @returns {IncomingStreamTrack}
*/
once()
{
//Delegate event listeners to event emitter
this.emitter.once.apply(this.emitter, arguments);
//Return object so it can be chained
return this;
}
/**
* Remove event listener
* @param {String} event - Event name
* @param {function} listener - Event listener
* @returns {IncomingStreamTrack}
*/
off()
{
//Delegate event listeners to event emitter
this.emitter.removeListener.apply(this.emitter, arguments);
//Return object so it can be chained
return this;
}
/**
* Signal that this track has been attached.
* Internal use, you'd better know what you are doing before calling this method
*/
attached()
{
//Increase attach counter
this.counter++;
//If it is the first
if (this.counter===1)
/**
* IncomingStreamTrack attached event
*
* @name attached
* @memberof IncomingStreamTrack
* @kind event
* @argument {IncomingStreamTrack} incomingStreamTrack
*/
this.emitter.emit("attached",this);
}
/**
* Request an intra refresh on all sources
*/
refresh()
{
}
/**
* Signal that this track has been detached.
* Internal use, you'd better know what you are doing before calling this method
*/
detached()
{
//Decrease attach counter
this.counter--;
//If it is the last
if (this.counter===0)
/**
* IncomingStreamTrack detached event
*
* @name detached
* @memberof IncomingStreamTrack
* @kind event
* @argument {IncomingStreamTrack} incomingStreamTrack
*/
this.emitter.emit("detached",this);
}
/**
* Removes the track from the incoming stream and also detaches any attached outgoing track or recorder
*/
stop()
{
//Don't call it twice
if (!this.receiver) return;
/**
* IncomingStreamTrack stopped event
*
* @name stopped
* @memberof IncomingStreamTrack
* @kind event
* @argument {IncomingStreamTrack} incomingStreamTrack
*/
this.emitter.emit("stopped",this);
//remove encodings
this.encodings.clear();
//Remove transport reference, so destructor is called on GC
this.receiver = null;
}
}
module.exports = VideoEncoderIncomingStreamTrack;
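//Illustrative usage; `receiver` and `source` come from the native transport layer (names here are assumptions):
//  const track = new VideoEncoderIncomingStreamTrack("encoder", receiver, source);
//  track.once("stopped", () => console.log("track stopped"));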
lib.rs | mod event;
use near_sdk::borsh::{self, BorshDeserialize, BorshSerialize};
use near_sdk::collections::LookupMap;
use near_sdk::{AccountId, env, near_bindgen, BorshStorageKey};
#[derive(BorshStorageKey, BorshSerialize)]
enum StorageKey {
Records
}
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct StatusMessage {
records: LookupMap<AccountId, String>,
}
impl Default for StatusMessage {
fn default() -> Self {
Self {
records: LookupMap::new(StorageKey::Records),
}
}
}
#[near_bindgen]
impl StatusMessage {
pub fn set_status(&mut self, message: String) {
let account_id = env::predecessor_account_id();
self.records.insert(&account_id, &message);
event::emit::set_account_status(&account_id, message);
}
pub fn get_status(&self, account_id: AccountId) -> Option<String> {
self.records.get(&account_id)
}
}
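// Illustrative CLI calls against a deployed instance (account names are hypothetical):
//   near call status.demo.testnet set_status '{"message": "hello"}' --accountId alice.testnet
//   near view status.demo.testnet get_status '{"account_id": "alice.testnet"}'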
#[cfg(not(target_arch = "wasm32"))]
#[cfg(test)]
mod tests {
use super::*;
use near_sdk::test_utils::{accounts, VMContextBuilder};
use near_sdk::{testing_env};
fn get_context() -> VMContextBuilder {
let mut builder = VMContextBuilder::new();
builder
.current_account_id(accounts(0))
.signer_account_id(accounts(1))
.predecessor_account_id(accounts(2));
builder
}
#[test]
fn set_get_message() {
let context = get_context();
testing_env!(context.build());
let mut contract = StatusMessage::default();
contract.set_status("hello".to_string());
assert_eq!(
"hello".to_string(),
contract.get_status(accounts(2)).unwrap()
);
}
#[test]
fn get_nonexistent_message() {
let context = get_context();
testing_env!(context.build());
let contract = StatusMessage::default();
assert_eq!(None, contract.get_status(accounts(4)));
}
}
views.py | #!/usr/bin/env python
# coding=utf8
from flask import Blueprint
running = Blueprint('running', __name__)
from snapshot import *
from log import *
bitcoin_be_BY.ts | <TS language="be_BY" version="2.1">
<context>
<name>AddressBookPage</name>
<message>
<source>Right-click to edit address or label</source>
<translation>ะัะฐะฒั ะบะปัะบ, ะบะฐะฑ ััะดะฐะณะฐะฒะฐัั ะฐะดัะฐั ัั ะผะตัะบั</translation>
</message>
<message>
<source>Create a new address</source>
<translation>ะกัะฒะฐัััั ะฝะพะฒั ะฐะดัะฐั</translation>
</message>
<message>
<source>&New</source>
<translation>ะะพะฒั</translation>
</message>
<message>
<source>Copy the currently selected address to the system clipboard</source>
<translation>ะะฐะฟััะฒะฐัั ะฟะฐะทะฝะฐัะฐะฝั ะฐะดัะฐั ั ัััััะผะฝั ะฑััะตั ะฐะฑะผะตะฝั</translation>
</message>
<message>
<source>&Copy</source>
<translation>ะะฐะฟััะฒะฐัั</translation>
</message>
<message>
<source>C&lose</source>
<translation>ะะฐััะฝััั</translation>
</message>
<message>
<source>Delete the currently selected address from the list</source>
<translation>ะัะดะฐะปััั ะฐะฑัะฐะฝั ะฐะดัะฐั ัะฐ ัะฟััั</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>ะญะบัะฟะฐััะฐะฒะฐัั ะณัััั ะทะฒะตััะบั ั ัะฐะนะป</translation>
</message>
<message>
<source>&Export</source>
<translation>ะญะบัะฟะฐัั</translation>
</message>
<message>
<source>&Delete</source>
<translation>ะัะดะฐะปััั</translation>
</message>
<message>
<source>Choose the address to send coins to</source>
<translation>ะัะฑัะฐัั ะฐะดัะฐั, ะบัะดั ะฒััะปะฐัั ััะพะดะบั</translation>
</message>
<message>
<source>Choose the address to receive coins with</source>
<translation>ะัะฑัะฐัั ะฐะดัะฐั, ะฝะฐ ัะบั ะฐัััะผะฐัั ััะพะดะบั</translation>
</message>
<message>
<source>C&hoose</source>
<translation>ะัะฑัะฐัั</translation>
</message>
<message>
<source>Sending addresses</source>
<translation>ะฐะดัะฐัั ะะดะฟัะฐัะบั</translation>
</message>
<message>
<source>Receiving addresses</source>
<translation>ะฐะดัะฐัั ะััะผะฐะฝะฝั</translation>
</message>
<message>
<source>These are your Greenfrogcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>ะขัั ะทะฝะฐัะพะดะทัััะฐ Greenfrogcoin-ะฐะดัะฐัั ะดะปั ะฒัััะปะฐะฝะฝั ะฟะปะฐััะถะพั. ะะฐัััะดั ัะฟัะฐัะดะถะฒะฐะนัะต ะบะพะปัะบะฐััั ั ะฐะดัะฐั ะฟััะทะฝะฐััะฝะฝั ะฟะตัะฐะด ะทะดะทัะนัะฝะตะฝะฝะตะผ ััะฐะฝะทะฐะบััั.</translation>
</message>
<message>
<source>These are your Greenfrogcoin addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source>
<translation>ะขัั ะทะฝะฐัะพะดะทัััะฐ Greenfrogcoin-ะฐะดัะฐัั ะดะปั ะฟััะผะฐะฝะฝั ะฟะปะฐััะถะพั. ะะฐะถะฐะดะฐะฝะฐ ะฒัะบะฐััััะพัะฒะฐัั ะฝะพะฒั ะฐะดัะฐั ะดะปั ะบะพะถะฝะฐะน ััะฐะฝะทะฐะบััั.</translation>
</message>
<message>
<source>&Copy Address</source>
<translation>ะะฐะฟััะฒะฐัั ะฐะดัะฐั</translation>
</message>
<message>
<source>Copy &Label</source>
<translation>ะะฐะฟััะฒะฐัั ะะตัะบั</translation>
</message>
<message>
<source>&Edit</source>
<translation>ะ ัะดะฐะณะฐะฒะฐัั</translation>
</message>
<message>
<source>Export Address List</source>
<translation>ะญะบัะฟะฐััะฐะฒะฐัั ะกะฟัั ะะดัะฐัะพั</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>ะะพัะบะฐะผั ะฟะฐะดะทะตะปะตะฝั ัะฐะนะป (*.csv)</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>ะญะบัะฟะฐััะฐะฒะฐะฝะฝะต ะฝััะดะฐะปะฐะต</translation>
</message>
<message>
<source>There was an error trying to save the address list to %1. Please try again.</source>
<translation>ะะดะฑัะปะฐัั ะฟะฐะผัะปะบะฐ ะฟะฐะดัะฐั ัะฟัะพะฑั ะทะฐัะฐะฒะฐัั ะฐะดัะฐั ั %1. ะะฐัะฟัะฐะฑัะนัะต ะทะฝะพั.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<source>Label</source>
<translation>ะะตัะบะฐ</translation>
</message>
<message>
<source>Address</source>
<translation>ะะดัะฐั</translation>
</message>
<message>
<source>(no label)</source>
<translation>ะฝะตะฟะฐะทะฝะฐัะฐะฝั</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<source>Passphrase Dialog</source>
<translation>ะััะปะพะณ ัะฐะบัััะฝะฐะน ััะฐะทั</translation>
</message>
<message>
<source>Enter passphrase</source>
<translation>ะฃะฒัะดะทััะต ะบะพะดะฐะฒัั ััะฐะทั</translation>
</message>
<message>
<source>New passphrase</source>
<translation>ะะพะฒะฐั ะบะพะดะฐะฒะฐั ััะฐะทะฐ</translation>
</message>
<message>
<source>Repeat new passphrase</source>
<translation>ะะฐััะฐัััะต ะฝะพะฒัั ะบะพะดะฐะฒัั ััะฐะทั</translation>
</message>
<message>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>ะฃะฒัะดะทััะต ะฝะพะฒั ะฟะฐัะพะปั ะดะปั ะณะฐะผะฐะฝัะฐ.<br/>ะะฐัะพะปัะฝะฐั ััะฐะทะฐ ะฟะฐะฒะธะฝะฝะฐ ัะบะปะฐะดะฐััะฐ<b> ะฝะต ะผะตะฝัั ััะผ ะท ะดะทะตัััั ััะผะฒะฐะปะฐั</b>, ัั <b>ะฑะพะปัั ััะผ ะท ะฒะฐััะผั ัะปะพั</b>.</translation>
</message>
<message>
<source>Encrypt wallet</source>
<translation>ะะฐััััะฐะฒะฐัั ะณะฐะผะฐะฝะตั.</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>ะััะฐั ะฐะฟะตัะฐััั ะฟะฐััะฐะฑัะต ะบะพะดะฐะฒัั ััะฐะทั, ะบะฐะฑ ัะทะฑะปะฐะบะฐะฒะฐัั ะณะฐะผะฐะฝะตั.</translation>
</message>
<message>
<source>Unlock wallet</source>
<translation>ะ ะฐะทะฑะปะฐะบะฐะฒะฐัั ะณะฐะผะฐะฝะตั</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>ะััะฐั ะฐะฟะตัะฐััั ะฟะฐััะฐะฑัะต ะฟะฐัะพะปั ะบะฐะฑ ัะฐัััััะฐะฒะฐัั ะณะฐะผะฐะฝะตั.</translation>
</message>
<message>
<source>Decrypt wallet</source>
<translation>ะ ะฐัััััะฐะฒะฐัั ะณะฐะผะฐะฝะตั</translation>
</message>
<message>
<source>Change passphrase</source>
<translation>ะะผัะฝััั ะฟะฐัะพะปั</translation>
</message>
<message>
<source>Enter the old passphrase and new passphrase to the wallet.</source>
<translation>ะฃะฒัะดะทััะต ััะฐัั ะฟะฐัะพะปั ั ะฝะพะฒั ะฟะฐัะพะปั ะดะปั ะณะฐะผะฐะฝัะฐ.</translation>
</message>
<message>
<source>Confirm wallet encryption</source>
<translation>ะะฐัะฒะตัะดะทััะต ััััะฐะฒะฐะฝะฝะต ะณะฐะผะฐะฝัะฐ</translation>
</message>
<message>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR LITECOINS</b>!</source>
<translation>ะฃะฒะฐะณะฐ: ะบะฐะปั ะฒั ะทะฐัััััะตัะต ัะฒะพะน ะณะฐะผะฐะฝะตั ั ัััะฐัััะต ะฟะฐัะพะปัะฝัั ััะฐะทั, ัะพ <b>ะกะขะ ะะฆะะฆะ ะะกะ ะกะะะ ะะะขะะะะะซ</b>!</translation>
</message>
<message>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>ะฆั ัะฟััะฝะตะฝัั ะฒั, ััะพ ะถะฐะดะฐะตัะต ะทะฐััััะฐะฒะฐัั ัะฒะพะน ะณะฐะผะฐะฝะตั?</translation>
</message>
<message>
<source>Wallet encrypted</source>
<translation>ะะฐะผะฐะฝะตั ะทะฐััััะฐะฒะฐะฝั</translation>
</message>
<message>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>ะะะะะ: ะฃัะต ะฟะฐะฟัััะดะฝัั ะบะพะฟัั ะณะฐะผะฐะฝัะฐ ะฒะฐััะฐ ะทะฐะผัะฝััั ะฝะพะฒัะผ ะทะฐััััะฐะฒะฐะฝัะผ ัะฐะนะปะฐะผ. ะฃ ะผััะฐั ะฑััะฟะตะบั ะฟะฐะฟัััะดะฝัั ะบะพะฟัั ะฝะตะทะฐััััะฐะฒะฐะฝะฐะณะฐ ัะฐะนะปะฐ-ะณะฐะผะฐะฝัะฐ ััะฐะฝััั ะฝะตัะถัะฒะฐะปัะฝัะผั, ะบะฐะปั ะฒั ััะฐะฝะตัะต ะบะฐััััะฐััะฐ ะฝะพะฒัะผ ะทะฐััััะฐะฒะฐะฝัะผ ะณะฐะผะฐะฝัะพะผ.</translation>
</message>
<message>
<source>Wallet encryption failed</source>
<translation>ะจัััะฐะฒะฐะฝะฝะต ะณะฐะผะฐะฝัะฐ ะฝััะดะฐะปะฐะต</translation>
</message>
<message>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>ะจัััะฐะฒะฐะฝะฝะต ะณะฐะผะฐะฝัะฐ ะฝะต ะฐะดะฑัะปะพัั ะท-ะทะฐ ัะฝัััะฐะฝะฐะน ะฟะฐะผัะปะบั. ะะฐะผะฐะฝะตั ะฝะตะทะฐััััะฐะฒะฐะฝั.</translation>
</message>
<message>
<source>The supplied passphrases do not match.</source>
<translation>ะฃะฒะตะดะทะตะฝัั ะฟะฐัะพะปั ะฝะต ััะฟะฐะดะฐััั</translation>
</message>
<message>
<source>Wallet unlock failed</source>
<translation>ะ ะฐะทะฑะปะฐะบะฐะฒะฐะฝะฝะต ะณะฐะผะฐะฝัะฐ ะฝััะดะฐะปะฐะต</translation>
</message>
<message>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>ะฃะฒะตะดะทะตะฝั ะฟะฐัะพะปั ะดะปั ัะฐัััััะฐะฒะฐะฝะฝั ะณะฐะผะฐะฝัะฐ ะฟะฐะผัะปะบะพะฒั</translation>
</message>
<message>
<source>Wallet decryption failed</source>
<translation>ะ ะฐัััััะฐะฒะฐะฝะฝะต ะณะฐะผะฐะฝัะฐ ะฝััะดะฐะปะฐะต</translation>
</message>
<message>
<source>Wallet passphrase was successfully changed.</source>
<translation>ะะฐัะพะปัะฝะฐั ััะฐะทะฐ ะณะฐะผะฐะฝัะฐ ะฟะฐัะฟััะพะฒะฐ ะทะผะตะฝะตะฝะฐ.</translation>
</message>
<message>
<source>Warning: The Caps Lock key is on!</source>
<translation>ะฃะฒะฐะณะฐ: Caps Lock ัะบะปััะฐะฝั!</translation>
</message>
</context>
<context>
<name>BanTableModel</name>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<source>Sign &message...</source>
<translation>ะะฐะดะฟััะฐัั ะฟะฐะฒะตะดะฐะผะปะตะฝะฝะต...</translation>
</message>
<message>
<source>Synchronizing with network...</source>
<translation>ะกัะฝััะฐะฝัะทะฐััั ะท ัะตััะฒะฐะผ...</translation>
</message>
<message>
<source>&Overview</source>
<translation>ะะณะปัะด</translation>
</message>
<message>
<source>Node</source>
<translation>ะัะทะตะป</translation>
</message>
<message>
<source>Show general overview of wallet</source>
<translation>ะะฐะบะฐะทะฒะฐะต ะฐะณัะปัะฝัั ะทะฒะตััะบั ะฐะฑ ะณะฐะผะฐะฝัั</translation>
</message>
<message>
<source>&Transactions</source>
<translation>ะขัะฐะฝะทะฐะบััั</translation>
</message>
<message>
<source>Browse transaction history</source>
<translation>ะัะฐะณะปัะดะทะตัั ะณัััะพััั ััะฐะฝะทะฐะบััะน</translation>
</message>
<message>
<source>E&xit</source>
<translation>ะัะนััั</translation>
</message>
<message>
<source>Quit application</source>
<translation>ะัะนััั ะท ะฟัะฐะณัะฐะผั</translation>
</message>
<message>
<source>About &Qt</source>
<translation>ะะฑ Qt</translation>
</message>
<message>
<source>Show information about Qt</source>
<translation>ะะฐะบะฐะทะฐัั ัะฝัะฐัะผะฐััั ะฐะฑ Qt</translation>
</message>
<message>
<source>&Options...</source>
<translation>ะะฟััั...</translation>
</message>
<message>
<source>&Encrypt Wallet...</source>
<translation>ะะฐััััะฐะฒะฐัั ะะฐะผะฐะฝะตั...</translation>
</message>
<message>
<source>&Backup Wallet...</source>
<translation>ะกัะฒะฐัััั ะบะพะฟัั ะณะฐะผะฐะฝัะฐ...</translation>
</message>
<message>
<source>&Change Passphrase...</source>
<translation>&Change Passphrase...</translation>
</message>
<message>
<source>&Sending addresses...</source>
<translation>ะะดัะฐัั ะดะฐััะปะฐะฝะฝั...</translation>
</message>
<message>
<source>&Receiving addresses...</source>
<translation>ะะดัะฐัั ะฟััะผะฐะฝะฝั...</translation>
</message>
<message>
<source>Open &URI...</source>
<translation>ะะดััะฝะธัั &URI...</translation>
</message>
<message>
<source>Reindexing blocks on disk...</source>
<translation>ะะตัะฐัะฝะดัะบัะฐััั ะฑะปะพะบะฐั ะฝะฐ ะดััะบั...</translation>
</message>
<message>
<source>Send coins to a Greenfrogcoin address</source>
<translation>ะะฐัะปะฐัั ะผะฐะฝะตัั ะฝะฐ Greenfrogcoin-ะฐะดัะฐั</translation>
</message>
<message>
<source>Backup wallet to another location</source>
<translation>ะัะฐะฑััะต ะบะพะฟัั ะณะฐะผะฐะฝัะฐ ั ัะฝัะฐะต ะผะตััะฐ</translation>
</message>
<message>
<source>Change the passphrase used for wallet encryption</source>
<translation>ะะผัะฝััั ะฟะฐัะพะปั ััััะฐะฒะฐะฝะฝั ะณะฐะผะฐะฝัะฐ</translation>
</message>
<message>
<source>&Debug window</source>
<translation>ะะฐะบะฝะพ ะฐะดะปะฐะดะบั</translation>
</message>
<message>
<source>Open debugging and diagnostic console</source>
<translation>ะะดะบัััั ะบะฐะฝัะพะปั ะดััะณะฝะพัััะบั ั ะฐะดะปะฐะดะบั</translation>
</message>
<message>
<source>&Verify message...</source>
<translation>ะัะฐะฒะตัััั ะฟะฐะฒะตะดะฐะผะปะตะฝะฝะต...</translation>
</message>
<message>
<source>Greenfrogcoin</source>
<translation>Greenfrogcoin</translation>
</message>
<message>
<source>Wallet</source>
<translation>ะะฐะผะฐะฝะตั</translation>
</message>
<message>
<source>&Send</source>
<translation>ะะฐัะปะฐัั</translation>
</message>
<message>
<source>&Receive</source>
<translation>ะัััะผะฐัั</translation>
</message>
<message>
<source>&Show / Hide</source>
<translation>&ะะฐะบะฐะทะฐัั / ะกัะฐะฒะฐัั</translation>
</message>
<message>
<source>Show or hide the main Window</source>
<translation>ะะฐะบะฐะทะฐัั ะฐะปัะฑะพ ััะฐะฒะฐัั ะณะฐะปะพัะฝะฐะต ะฒะฐะบะฝะพ</translation>
</message>
<message>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>ะะฐััััะฐะฒะฐัั ะฟััะฒะฐัะฝัั ะบะปััั, ัะบะธั ะฝะฐะปะตะถะฐัั ะฒะฐัะฐะผั ะณะฐะผะฐะฝัั</translation>
</message>
<message>
<source>Sign messages with your Greenfrogcoin addresses to prove you own them</source>
<translation>ะะฐะดะฟััะฐัั ะฟะฐะฒะตะดะฐะผะปะตะฝะฝะต ะท ะดะฐะฟะฐะผะพะณะฐะน Greenfrogcoin-ะฐะดัะฐัะฐ ะบะฐะฑ ะดะฐะบะฐะทะฐัั, ััะพ ัะฝะพ ะฝะฐะปะตะถััั ะฒะฐะผ</translation>
</message>
<message>
<source>Verify messages to ensure they were signed with specified Greenfrogcoin addresses</source>
<translation>ะกะฟัะฐัะดะทััั ะฟะฐะฒะตะดะฐะผะปะตะฝะฝะต ะท ะดะฐะฟะฐะผะพะณะฐะน Greenfrogcoin-ะฐะดัะฐัะฐ ะบะฐะฑ ะดะฐะบะฐะทะฐัั, ััะพ ัะฝะพ ะฝะฐะปะตะถััั ะฒะฐะผ</translation>
</message>
<message>
<source>&File</source>
<translation>ะค&ะฐะนะป</translation>
</message>
<message>
<source>&Settings</source>
<translation>ะะฐะปะฐะดะบั</translation>
</message>
<message>
<source>&Help</source>
<translation>ะะฐะฟะฐะผะพะณะฐ</translation>
</message>
<message>
<source>Request payments (generates QR codes and greenfrogcoin: URIs)</source>
<translation>ะะฐะฟะฐััะฐะฑะฐะฒะฐัั ะฟะปะฐััะถ (ะณะตะฝะตััะตััะฐ QR-ะบะพะด ะดะปั greenfrogcoin URI)</translation>
</message>
<message>
<source>Show the list of used sending addresses and labels</source>
<translation>ะะฐะบะฐะทะฐัั ัะฟัั ะฐะดัะฐัะพั ั ะผะตัะฐะบ ะดะปั ะดะฐััะปะฐะฝะฝั</translation>
</message>
<message>
<source>Show the list of used receiving addresses and labels</source>
<translation>ะะฐะบะฐะทะฐัั ัะฟัั ะฐะดัะฐัะพั ั ะผะตัะฐะบ ะดะปั ะฟััะผะฐะฝะฝั</translation>
</message>
<message>
<source>Open a greenfrogcoin: URI or payment request</source>
<translation>ะะดะบัััั greenfrogcoin: URI ัั ะทะฐะฟัั ะฟะปะฐััะถั</translation>
</message>
<message>
<source>&Command-line options</source>
<translation>ะะฟััั ะบะฐะผะฐะฝะดะฝะฐะณะฐ ัะฐะดะบะฐ</translation>
</message>
<message>
<source>%1 behind</source>
<translation>%1 ัะฐะผั</translation>
</message>
<message>
<source>Last received block was generated %1 ago.</source>
<translation>ะะฟะพัะฝั ะฟััะฝััั ะฑะปะพะบ ะณะตะฝะตัะฐะฒะฐะฝั %1 ัะฐะผั.</translation>
</message>
<message>
<source>Transactions after this will not yet be visible.</source>
<translation>ะขัะฐะฝะทะฐะบััะธ ะฟะฐัะปั ะณััะฐะน ะฝะต ะฑัะดััั ะฑะฐัะฝัั.</translation>
</message>
<message>
<source>Error</source>
<translation>ะะฐะผัะปะบะฐ</translation>
</message>
<message>
<source>Warning</source>
<translation>ะฃะฒะฐะณะฐ</translation>
</message>
<message>
<source>Information</source>
<translation>ะะฝัะฐัะผะฐััั</translation>
</message>
<message>
<source>Up to date</source>
<translation>ะกัะฝััะฐะฝัะทะฐะฒะฐะฝะฐ</translation>
</message>
<message>
<source>Catching up...</source>
<translation>ะะฐะณะฐะฝัะตะผ...</translation>
</message>
<message>
<source>Date: %1
</source>
<translation>ะะฐัะฐ: %1
</translation>
</message>
<message>
<source>Amount: %1
</source>
<translation>ะะพะปัะบะฐััั: %1
</translation>
</message>
<message>
<source>Type: %1
</source>
<translation>ะขัะฟ: %1
</translation>
</message>
<message>
<source>Label: %1
</source>
<translation>ะะตัะบะฐ: %1
</translation>
</message>
<message>
<source>Address: %1
</source>
<translation>ะะดัะฐั: %1
</translation>
</message>
<message>
<source>Sent transaction</source>
<translation>ะะฐัะปะฐะฝัั ััะฐะฝะทะฐะบััั</translation>
</message>
<message>
<source>Incoming transaction</source>
<translation>ะััะฝัััั ััะฐะฝะทะฐะบััั</translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>ะะฐะผะฐะฝะตั <b>ะทะฐััััะฐะฒะฐะฝั</b> ั ะทะฐัะฐะท <b>ัะฐะทะฑะปะฐะบะฐะฒะฐะฝั</b></translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>ะะฐะผะฐะฝะตั <b>ะทะฐััััะฐะฒะฐะฝั</b> ั ะทะฐัะฐะท <b>ะทะฐะฑะปะฐะบะฐะฒะฐะฝั</b></translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<source>Quantity:</source>
<translation>ะะพะปัะบะฐััั:</translation>
</message>
<message>
<source>Bytes:</source>
<translation>ะะฐะนัะฐั:</translation>
</message>
<message>
<source>Amount:</source>
<translation>ะะพะปัะบะฐััั:</translation>
</message>
<message>
<source>Fee:</source>
<translation>ะะฐะผัััั:</translation>
</message>
<message>
<source>Dust:</source>
<translation>ะัะป:</translation>
</message>
<message>
<source>After Fee:</source>
<translation>ะะฐัะปั ะบะฐะผัััั:</translation>
</message>
<message>
<source>(un)select all</source>
<translation>(ะฝะต)ะฒัะฑัะฐัั ััั</translation>
</message>
<message>
<source>Tree mode</source>
<translation>ะ ัะถัะผ ะดััะฒะฐ</translation>
</message>
<message>
<source>List mode</source>
<translation>ะ ัะถัะผ ัะฟััะฐ</translation>
</message>
<message>
<source>Amount</source>
<translation>ะะพะปัะบะฐััั</translation>
</message>
<message>
<source>Received with label</source>
<translation>ะััะฝััั ะฟัะฐะท ะผะตัะบั</translation>
</message>
<message>
<source>Received with address</source>
<translation>ะััะฝััั ะฟัะฐะท ะฐะดัะฐั</translation>
</message>
<message>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<source>Confirmations</source>
<translation>ะะฐัะฒะตัะดะถะฐะฝะฝัั</translation>
</message>
<message>
<source>Confirmed</source>
<translation>ะะฐัะฒะตัะดะถะฐะฝะฐ</translation>
</message>
<message>
<source>Copy address</source>
<translation>ะะฐะฟััะฒะฐัั ะฐะดัะฐั</translation>
</message>
<message>
<source>Copy label</source>
<translation>ะะฐะฟััะฒะฐัั ะฟะฐะทะฝะฐะบั</translation>
</message>
<message>
<source>Copy amount</source>
<translation>ะะฐะฟััะฒะฐัั ะบะพะปัะบะฐััั</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>ะะฐะฟััะฒะฐัั ID ััะฐะฝะทะฐะบััั</translation>
</message>
<message>
<source>Lock unspent</source>
<translation>ะะฐะผะบะฝััั ะฝะตะฟะฐััะฐัะฐะฝะฐะต</translation>
</message>
<message>
<source>Unlock unspent</source>
<translation>ะะดะฐะผะบะฝััั ะฝะตะฟะฐััะฐัะฐะฝะฐะต</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>ะะฐะฟััะฒะฐัั ะบะพะปัะบะฐััั</translation>
</message>
<message>
<source>Copy fee</source>
<translation>ะะฐะฟััะฒะฐัั ะบะฐะผัััั</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>ะะฐะฟััะฒะฐัั ะท ะฒัะฝััะบะฐะผ ะบะฐะผัััั</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>ะะฐะฟััะฒะฐัั ะฑะฐะนัั</translation>
</message>
<message>
<source>Copy dust</source>
<translation>ะะฐะฟััะฒะฐัั ะฟัะป</translation>
</message>
<message>
<source>yes</source>
<translation>ัะฐะบ</translation>
</message>
<message>
<source>no</source>
<translation>ะฝะต</translation>
</message>
<message>
<source>(no label)</source>
<translation>ะฝะตะฟะฐะทะฝะฐัะฐะฝั</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<source>Edit Address</source>
<translation>ะ ัะดะฐะณะฐะฒะฐัั ะะดัะฐั</translation>
</message>
<message>
<source>&Label</source>
<translation>ะะตัะบะฐ</translation>
</message>
<message>
<source>&Address</source>
<translation>ะะดัะฐั</translation>
</message>
<message>
<source>New receiving address</source>
<translation>ะะพะฒั ะฐะดัะฐั ะดะปั ะฐัััะผะฐะฝะฝั</translation>
</message>
<message>
<source>New sending address</source>
<translation>ะะพะฒั ะฐะดัะฐั ะดะปั ะดะฐััะปะฐะฝะฝั</translation>
</message>
<message>
<source>Edit receiving address</source>
<translation>ะ ัะดะฐะณะฐะฒะฐัั ะฐะดัะฐั ะฟััะผะฐะฝะฝั</translation>
</message>
<message>
<source>Edit sending address</source>
<translation>ะ ัะดะฐะณะฐะฒะฐัั ะฐะดัะฐั ะดะฐััะปะฐะฝะฝั</translation>
</message>
<message>
<source>The entered address "%1" is already in the address book.</source>
<translation>ะฃะฒะตะดะทะตะฝั ะฐะดัะฐั "%1" ัะถะพ ั ะบะฝัะณะต ะฐะดัะฐัะพั</translation>
</message>
<message>
<source>Could not unlock wallet.</source>
<translation>ะะตะผะฐะณััะผะฐ ัะฐะทะฑะปะฐะบะฐะฒะฐัั ะณะฐะผะฐะฝะตั</translation>
</message>
<message>
<source>New key generation failed.</source>
<translation>ะะตะฝะตัะฐััั ะฝะพะฒะฐะณะฐ ะบะปััะฐ ะฝััะดะฐะปะฐั</translation>
</message>
</context>
<context>
<name>FreespaceChecker</name>
<message>
<source>A new data directory will be created.</source>
<translation>ะัะดะทะต ััะฒะพัะฐะฝั ะฝะพะฒั ะบะฐัะฐะปะพะณ ะท ะดะฐะฝัะผั.</translation>
</message>
<message>
<source>name</source>
<translation>ัะผั</translation>
</message>
<message>
<source>Directory already exists. Add %1 if you intend to create a new directory here.</source>
<translation>ะะฐัะฐะปะพะณ ัะถะพ ััะฝัะต. ะะฐะดะฐะนัะต %1 ะบะฐะปั ะฒั ะทะฑััะฐะตัะตัั ััะฒะฐัััั ััั ะฝะพะฒั ะบะฐัะฐะปะพะณ.</translation>
</message>
</context>
<context>
<name>HelpMessageDialog</name>
<message>
<source>(%1-bit)</source>
<translation>(%1-ะฑัั)</translation>
</message>
<message>
<source>Command-line options</source>
<translation>ะะฟััั ะบะฐะผะฐะฝะดะฝะฐะณะฐ ัะฐะดะบะฐ</translation>
</message>
<message>
<source>Usage:</source>
<translation>ะฃะถัะฒะฐะฝะฝะต:</translation>
</message>
<message>
<source>command-line options</source>
<translation>ะพะฟััั ะบะฐะผะฐะฝะดะฝะฐะณะฐ ัะฐะดะบะฐ</translation>
</message>
<message>
<source>Start minimized</source>
<translation>ะกัะฐััะฐะฒะฐัั ะผะผัะฝัะผัะทะฐะฒะฐะฝะฐะน</translation>
</message>
</context>
<context>
<name>Intro</name>
<message>
<source>Welcome</source>
<translation>ะััะฐะตะผ</translation>
</message>
<message>
<source>Greenfrogcoin</source>
<translation>Greenfrogcoin</translation>
</message>
<message>
<source>Error</source>
<translation>ะะฐะผัะปะบะฐ</translation>
</message>
</context>
<context>
<name>ModalOverlay</name>
<message>
<source>Form</source>
<translation>ะคะพัะผะฐ</translation>
</message>
</context>
<context>
<name>OpenURIDialog</name>
<message>
<source>Open URI</source>
<translation>ะะดะบัััั URI</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<source>Options</source>
<translation>ะะฟััั</translation>
</message>
<message>
<source>MB</source>
<translation>ะะฑ</translation>
</message>
<message>
<source>W&allet</source>
<translation>ะะฐะผะฐะฝะตั</translation>
</message>
<message>
<source>Error</source>
<translation>ะะฐะผัะปะบะฐ</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<source>Form</source>
<translation>ะคะพัะผะฐ</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
</context>
<context>
<name>PeerTableModel</name>
</context>
<context>
<name>QObject</name>
<message>
<source>Amount</source>
<translation>ะะพะปัะบะฐััั</translation>
</message>
<message>
<source>%1 and %2</source>
<translation>%1 ั %2</translation>
</message>
<message>
<source>unknown</source>
<translation>ะฝะตะฒัะดะพะผะฐ</translation>
</message>
</context>
<context>
<name>QObject::QObject</name>
</context>
<context>
<name>QRImageWidget</name>
</context>
<context>
<name>RPCConsole</name>
<message>
<source>&Information</source>
<translation>ะะฝัะฐัะผะฐััั</translation>
</message>
<message>
<source>Debug window</source>
<translation>ะะฐะบะฝะพ ะฐะดะปะฐะดะบั</translation>
</message>
</context>
<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>&Amount:</source>
<translation>&ะะพะปัะบะฐััั:</translation>
</message>
<message>
<source>&Label:</source>
<translation>ะะตัะบะฐ:</translation>
</message>
<message>
<source>Copy label</source>
<translation>ะะฐะฟััะฒะฐัั ะฟะฐะทะฝะฐะบั</translation>
</message>
<message>
<source>Copy amount</source>
<translation>ะะฐะฟััะฒะฐัั ะบะพะปัะบะฐััั</translation>
</message>
</context>
<context>
<name>ReceiveRequestDialog</name>
<message>
<source>Copy &Address</source>
<translation>ะะฐะฟััะฒะฐัั ะฐะดัะฐั</translation>
</message>
<message>
<source>Address</source>
<translation>ะะดัะฐั</translation>
</message>
<message>
<source>Amount</source>
<translation>ะะพะปัะบะฐััั</translation>
</message>
<message>
<source>Label</source>
<translation>ะะตัะบะฐ</translation>
</message>
<message>
<source>Message</source>
<translation>ะะฐะฒะตะดะฐะผะปะตะฝะฝะต</translation>
</message>
</context>
<context>
<name>RecentRequestsTableModel</name>
<message>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<source>Label</source>
<translation>ะะตัะบะฐ</translation>
</message>
<message>
<source>Message</source>
<translation>ะะฐะฒะตะดะฐะผะปะตะฝะฝะต</translation>
</message>
<message>
<source>(no label)</source>
<translation>ะฝะตะฟะฐะทะฝะฐัะฐะฝั</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<source>Send Coins</source>
<translation>ะะฐัะปะฐัั ะะฐะฝะตัั</translation>
</message>
<message>
<source>Insufficient funds!</source>
<translation>ะะตะดะฐััะฐัะบะพะฒะฐ ััะพะดะบะฐั</translation>
</message>
<message>
<source>Quantity:</source>
<translation>ะะพะปัะบะฐััั:</translation>
</message>
<message>
<source>Bytes:</source>
<translation>ะะฐะนัะฐั:</translation>
</message>
<message>
<source>Amount:</source>
<translation>ะะพะปัะบะฐััั:</translation>
</message>
<message>
<source>Fee:</source>
<translation>ะะฐะผัััั:</translation>
</message>
<message>
<source>After Fee:</source>
<translation>ะะฐัะปั ะบะฐะผัััั:</translation>
</message>
<message>
<source>Send to multiple recipients at once</source>
<translation>ะะฐัะปะฐัั ะฐะดัะฐะทั ะฝะตะบะฐะปัะบัะผ ะฐัััะผะฐะปัะฝัะบะฐะผ</translation>
</message>
<message>
<source>Dust:</source>
<translation>ะัะป:</translation>
</message>
<message>
<source>Balance:</source>
<translation>ะะฐะปะฐะฝั:</translation>
</message>
<message>
<source>Confirm the send action</source>
<translation>ะะฐัะฒะตัะดะทััั ะดะฐััะปะฐะฝะฝะต</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>ะะฐะฟััะฒะฐัั ะบะพะปัะบะฐััั</translation>
</message>
<message>
<source>Copy amount</source>
<translation>ะะฐะฟััะฒะฐัั ะบะพะปัะบะฐััั</translation>
</message>
<message>
<source>Copy fee</source>
<translation>ะะฐะฟััะฒะฐัั ะบะฐะผัััั</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>ะะฐะฟััะฒะฐัั ะท ะฒัะฝััะบะฐะผ ะบะฐะผัััั</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>ะะฐะฟััะฒะฐัั ะฑะฐะนัั</translation>
</message>
<message>
<source>Copy dust</source>
<translation>ะะฐะฟััะฒะฐัั ะฟัะป</translation>
</message>
<message>
<source>Confirm send coins</source>
<translation>ะะฐัะฒะตัะดะทััั ะดะฐััะปะฐะฝะฝะต ะผะฐะฝะตั</translation>
</message>
<message>
<source>The amount to pay must be larger than 0.</source>
<translation>ะะตะปัััะฝั ะฟะปะฐััะถั ะผะฐะต ะฑััั ะฑะพะปัั ะทะฐ 0.</translation>
</message>
<message>
<source>(no label)</source>
<translation>ะฝะตะฟะฐะทะฝะฐัะฐะฝั</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<source>A&mount:</source>
<translation>ะะพะปัะบะฐััั:</translation>
</message>
<message>
<source>Pay &To:</source>
<translation>ะะฐะฟะปะฐัััั ะดะฐ:</translation>
</message>
<message>
<source>&Label:</source>
<translation>ะะตัะบะฐ:</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>ะฃััะฐะฒััั ะฐะดัะฐั ะท ะฑััะตัั ะฐะฑะผะตะฝะฐ</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Message:</source>
<translation>ะะฐะฒะตะดะฐะผะปะตะฝะฝะต:</translation>
</message>
<message>
<source>Pay To:</source>
<translation>ะะฐะฟะปะฐัััั ะดะฐ:</translation>
</message>
<message>
<source>Memo:</source>
<translation>ะะฐะผััะบะฐ:</translation>
</message>
<message>
<source>Enter a label for this address to add it to your address book</source>
<translation>ะฃะฒัะดะทััะต ะฟะฐะทะฝะฐะบั ะณััะฐะผั ะฐะดัะฐัั, ะบะฐะฑ ะดะฐะดะฐัั ัะณะพ ั ะฐะดัะฐัะฝัั ะบะฝัะณั</translation>
</message>
</context>
<context>
<name>SendConfirmationDialog</name>
</context>
<context>
<name>ShutdownWindow</name>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>ะฃััะฐะฒััั ะฐะดัะฐั ะท ะฑััะตัั ะฐะฑะผะตะฝะฐ</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<source>KB/s</source>
<translation>ะะฑ/ั</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<source>%1/offline</source>
<translation>%1/offline</translation>
</message>
<message>
<source>%1/unconfirmed</source>
<translation>%1/ะฝะตะฟะฐัะฒะตัะดะถะฐะฝะฐ</translation>
</message>
<message>
<source>%1 confirmations</source>
<translation>%1 ะฟะฐัะฒะตัะดะถะฐะฝะฝัั</translation>
</message>
<message>
<source>Status</source>
<translation>ะกัะฐััั</translation>
</message>
<message>
<source>, has not been successfully broadcast yet</source>
<translation>, ะฟะฐะบัะปั ะฝะต ะฑัะปะพ ะฟะฐัะฟััะพะฒะฐ ััะฐะฝัะปัะฒะฐะฝะฐ</translation>
</message>
<message>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<source>unknown</source>
<translation>ะฝะตะฒัะดะพะผะฐ</translation>
</message>
<message>
<source>Message</source>
<translation>ะะฐะฒะตะดะฐะผะปะตะฝะฝะต</translation>
</message>
<message>
<source>Comment</source>
<translation>ะะฐะผะตะฝัะฐั</translation>
</message>
<message>
<source>Transaction ID</source>
<translation>ID</translation>
</message>
<message>
<source>Amount</source>
<translation>ะะพะปัะบะฐััั</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<source>This pane shows a detailed description of the transaction</source>
<translation>ะััะฐั ะฟะฐะฝัะปั ะฟะฐะบะฐะทะฒะฐะต ะดััะฐะปัะฝะฐะต ะฐะฟััะฐะฝะฝะต ััะฐะฝะทะฐะบััั</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<source>Type</source>
<translation>ะขัะฟ</translation>
</message>
<message>
<source>Label</source>
<translation>ะะตัะบะฐ</translation>
</message>
<message>
<source>Confirmed (%1 confirmations)</source>
<translation>ะะฐัะฒะตัะดะถะฐะฝะฐ (%1 ะฟะฐัะฒะตัะดะถะฐะฝะฝัั)</translation>
</message>
<message>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>ะััั ะฑะปะพะบ ะฝะต ะฑัั ะฟััะฝััั ัะฝััะผั ะฒัะทะปะฐะผั ั ะผะฐะณััะผะฐ ะฝะต ะฑัะดะทะต ััะฒะฐะปะตะฝั!</translation>
</message>
<message>
<source>Generated but not accepted</source>
<translation>ะะณะตะฝะตัะฐะฒะฐะฝะฐ, ะฐะปะต ะฝะต ะฟััะฝััะฐ</translation>
</message>
<message>
<source>Received with</source>
<translation>ะััะฝััะฐ ะท</translation>
</message>
<message>
<source>Received from</source>
<translation>ะััะฝััะฐ ะฐะด</translation>
</message>
<message>
<source>Sent to</source>
<translation>ะะฐัะปะฐะฝะฐ ะดะฐ</translation>
</message>
<message>
<source>Payment to yourself</source>
<translation>ะะปะฐััะถ ัะฐะผะพะผั ัะฐะฑะต</translation>
</message>
<message>
<source>Mined</source>
<translation>ะะดะฐะฑััะฐ</translation>
</message>
<message>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<source>(no label)</source>
<translation>ะฝะตะฟะฐะทะฝะฐัะฐะฝั</translation>
</message>
<message>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>ะกัะฐััั ััะฐะฝะทะฐะบััั. ะะฐะฒัะดะทััะต ะบัััะฐั ะฝะฐ ะณััะฐะต ะฟะพะปะต, ะบะฐะฑ ะฟะฐะบะฐะทะฐัั ะบะพะปัะบะฐััั ะฟะฐัะฒะตัะดะถะฐะฝะฝัั.</translation>
</message>
<message>
<source>Date and time that the transaction was received.</source>
<translation>ะะฐัะฐ ั ัะฐั, ะบะฐะปั ััะฐะฝะทะฐะบััั ะฑัะปะฐ ะฟััะฝััะฐ.</translation>
</message>
<message>
<source>Type of transaction.</source>
<translation>ะขัะฟ ััะฐะฝะทะฐะบััั</translation>
</message>
<message>
<source>Amount removed from or added to balance.</source>
<translation>ะะพะปัะบะฐััั ะฐะดะฝััะฐั ัั ะดะฐะดะฐะฝะฐั ะดะฐ ะฑะฐะปะฐะฝัั.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<source>All</source>
<translation>ะฃัั</translation>
</message>
<message>
<source>Today</source>
<translation>ะกัะฝะฝั</translation>
</message>
<message>
<source>This week</source>
<translation>ะััั ััะดะทะตะฝั</translation>
</message>
<message>
<source>This month</source>
<translation>ะััั ะผะตััั</translation>
</message>
<message>
<source>Last month</source>
<translation>ะัะฝัะปั ะผะตััั</translation>
</message>
<message>
<source>This year</source>
<translation>ะััั ะณะพะด</translation>
</message>
<message>
<source>Range...</source>
<translation>ะัะฐะผะตะถะฐะบ...</translation>
</message>
<message>
<source>Received with</source>
<translation>ะััะฝััะฐ ะท</translation>
</message>
<message>
<source>Sent to</source>
<translation>ะะฐัะปะฐะฝะฐ ะดะฐ</translation>
</message>
<message>
<source>To yourself</source>
<translation>ะะฐ ััะฑะต</translation>
</message>
<message>
<source>Mined</source>
<translation>ะะดะฐะฑััะฐ</translation>
</message>
<message>
<source>Other</source>
<translation>ะะฝััั</translation>
</message>
<message>
<source>Min amount</source>
<translation>ะัะฝ. ะบะพะปัะบะฐััั</translation>
</message>
<message>
<source>Copy address</source>
<translation>ะะฐะฟััะฒะฐัั ะฐะดัะฐั</translation>
</message>
<message>
<source>Copy label</source>
<translation>ะะฐะฟััะฒะฐัั ะฟะฐะทะฝะฐะบั</translation>
</message>
<message>
<source>Copy amount</source>
<translation>ะะฐะฟััะฒะฐัั ะบะพะปัะบะฐััั</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>ะะฐะฟััะฒะฐัั ID ััะฐะฝะทะฐะบััั</translation>
</message>
<message>
<source>Edit label</source>
<translation>ะ ัะดะฐะณะฐะฒะฐัั ะฟะฐะทะฝะฐะบั</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>ะะพัะบะฐะผั ะฟะฐะดะทะตะปะตะฝั ัะฐะนะป (*.csv)</translation>
</message>
<message>
<source>Confirmed</source>
<translation>ะะฐัะฒะตัะดะถะฐะฝะฐ</translation>
</message>
<message>
<source>Date</source>
<translation>ะะฐัะฐ</translation>
</message>
<message>
<source>Type</source>
<translation>ะขัะฟ</translation>
</message>
<message>
<source>Label</source>
<translation>ะะตัะบะฐ</translation>
</message>
<message>
<source>Address</source>
<translation>ะะดัะฐั</translation>
</message>
<message>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>ะญะบัะฟะฐััะฐะฒะฐะฝะฝะต ะฝััะดะฐะปะฐะต</translation>
</message>
<message>
<source>Range:</source>
<translation>ะัะฐะผะตะถะฐะบ:</translation>
</message>
<message>
<source>to</source>
<translation>ะดะฐ</translation>
</message>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
</context>
<context>
<name>WalletFrame</name>
</context>
<context>
<name>WalletModel</name>
<message>
<source>Send Coins</source>
<translation>ะะฐัะปะฐัั ะะฐะฝะตัั</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<source>&Export</source>
<translation>ะญะบัะฟะฐัั</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>ะญะบัะฟะฐััะฐะฒะฐัั ะณัััั ะทะฒะตััะบั ั ัะฐะนะป</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<source>Options:</source>
<translation>ะะฟััั:</translation>
</message>
<message>
<source>Specify data directory</source>
<translation>ะัะทะฝะฐัััั ะบะฐัะฐะปะพะณ ะดะฐะฝัั</translation>
</message>
<message>
<source>Accept command line and JSON-RPC commands</source>
<translation>ะััะผะฐัั ะบะฐะผะฐะฝะดะฝั ัะฐะดะพะบ ั JSON-RPC ะบะฐะผะฐะฝะดั</translation>
</message>
<message>
<source>Run in the background as a daemon and accept commands</source>
<translation>ะะฐะฟัััััั ั ัะพะฝะต ัะบ ะดัะผะฐะฝ ั ะฟััะผะฐัั ะบะฐะผะฐะฝะดั</translation>
</message>
<message>
<source>Greenfrogcoin Core</source>
<translation>Greenfrogcoin Core</translation>
</message>
<message>
<source>Do you want to rebuild the block database now?</source>
<translation>ะฆั ะถะฐะดะฐะตัะต ะฒั ะฟะตัะฐะฑัะดะฐะฒะฐัั ะทะฐัะฐะท ะฑะฐะทั ะทะฒะตััะฐะบ ะฑะปะพะบะฐั?</translation>
</message>
<message>
<source>Error initializing block database</source>
<translation>ะะฐะผัะปะบะฐ ัะฝััััะปัะทะฐััั ะฑะฐะทะฒั ะทะฒะตััะฐะบ ะฑะปะพะบะฐั</translation>
</message>
<message>
<source>Error initializing wallet database environment %s!</source>
<translation>ะะฐะผะฐะปะบะฐ ัะฝััััะปัะทะฐััั ะฐัััะพะดะดะทั ะฑะฐะทั ะทะฒะตััะฐะบ ะณะฐะผะฐะฝัะฐ %s!</translation>
</message>
<message>
<source>Error loading block database</source>
<translation>ะะฐะผัะปะบะฐ ะทะฐะณััะทะบั ะฑะฐะทะฒั ะทะฒะตััะฐะบ ะฑะปะพะบะฐั</translation>
</message>
<message>
<source>Error opening block database</source>
<translation>ะะฐะผัะปะบะฐ ะฐะดััะฝะตะฝะฝั ะฑะฐะทั ะทะฒะตััะฐะบ ะฑะปะพะบะฐั</translation>
</message>
<message>
<source>Error: Disk space is low!</source>
<translation>ะะฐะผัะปะบะฐ: ะะฐะผะฐะปะฐ ะฒะพะปัะฝะฐะณะฐ ะผะตััะฐ ะฝะฐ ะดััะบั!</translation>
</message>
<message>
<source>Importing...</source>
<translation>ะะผะฟะฐััะฐะฒะฐะฝะฝะต...</translation>
</message>
<message>
<source>Not enough file descriptors available.</source>
<translation>ะะต ัะฐะฟะฐะต ัะฐะนะปะฐะฒัั ะดััะบััะฟัะฐัะฐั.</translation>
</message>
<message>
<source>Use UPnP to map the listening port (default: %u)</source>
<translation>Use UPnP to map the listening port (default: %u)</translation>
</message>
<message>
<source>Verifying blocks...</source>
<translation>ะัะฐะฒะตัะบะฐ ะฑะปะพะบะฐั...</translation>
</message>
<message>
<source>Wallet options:</source>
<translation>ะะฟััั ะณะฐะผะฐะฝัะฐ:</translation>
</message>
<message>
<source>Information</source>
<translation>ะะฝัะฐัะผะฐััั</translation>
</message>
<message>
<source>RPC server options:</source>
<translation>ะะฟััั RPC ัะตัะฒะตัะฐ:</translation>
</message>
<message>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>ะกะปะฐัั trace/debug ะทะฒะตััะบั ั ะบะฐะฝัะพะปั ะทะฐะผะตัั ัะฐะนะปะฐ debug.log</translation>
</message>
<message>
<source>Signing transaction failed</source>
<translation>ะะฐะผัะปะบะฐ ะฟะพะดะฟััั ััะฐะฝะทะฐะบััั</translation>
</message>
<message>
<source>This is experimental software.</source>
<translation>ะััะฐ ัะบัะฟะตััะผะตะฝัะฐะปัะฝะฐั ะฟัะฐะณัะฐะผะฐ.</translation>
</message>
<message>
<source>Transaction amount too small</source>
<translation>ะขัะฐะฝะทะฐะบััั ะทะฐะฝะฐะดัะฐ ะผะฐะปะฐั</translation>
</message>
<message>
<source>Transaction too large</source>
<translation>ะขัะฐะฝะทะฐะบััั ะทะฐะฝะฐะดัะฐ ะฒัะปัะบะฐั</translation>
</message>
<message>
<source>Username for JSON-RPC connections</source>
<translation>ะะผั ะบะฐััััะฐะปัะฝะธะบะฐ ะดะปั JSON-RPC ะทะปัััะฝะฝัั</translation>
</message>
<message>
<source>Warning</source>
<translation>ะฃะฒะฐะณะฐ</translation>
</message>
<message>
<source>Password for JSON-RPC connections</source>
<translation>ะะฐัะพะปั ะดะปั JSON-RPC ะทะปัััะฝะฝัั</translation>
</message>
<message>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>ะัะบะฐะฝะฐัั ะบะฐะผะฐะฝะดั ะบะฐะปั ะปะตะฟัั ะฑะปะพะบ ะทะผะตะฝัััะฐ (%s ะทะฐะผัะฝัะตััะฐ ะฝะฐ ััั ะฑะปะพะบะฐ)</translation>
</message>
<message>
<source>Insufficient funds</source>
<translation>ะะตะดะฐััะฐัะบะพะฒะฐ ััะพะดะบะฐั</translation>
</message>
<message>
<source>Loading block index...</source>
<translation>ะะฐะณััะถะฐะตะผ ัะฝะดัะบั ะฑะปะพะบะฐั...</translation>
</message>
<message>
<source>Loading wallet...</source>
<translation>ะะฐะณััะถะฐะตะผ ะณะฐะผะฐะฝะตั...</translation>
</message>
<message>
<source>Cannot downgrade wallet</source>
<translation>ะะตะผะฐะณััะผะฐ ััะณัััะฐะฒะฐัั ะณะฐะผะฐะฝะตั</translation>
</message>
<message>
<source>Rescanning...</source>
<translation>ะะตัะฐัะบะฐะฝะฐะฒะฐะฝะฝะต...</translation>
</message>
<message>
<source>Done loading</source>
<translation>ะะฐะณััะทะบะฐ ะฒัะบะฐะฝะฐะฝะฐ</translation>
</message>
<message>
<source>Error</source>
<translation>ะะฐะผัะปะบะฐ</translation>
</message>
</context>
</TS>
display.stories.tsx | import React from 'react';
import { storiesOf } from '@storybook/react';
import { Display } from './display';
import { Property } from 'interfaces/Property';
// import more addons
const story = storiesOf('Products.Display', module);
const mockProperty: Property[] = [
{
name: 'Rumah 1',
address: 'Jalan rumah 1',
propertyType: 'house',
saleType: 'sell',
imgs: [
{
image:
'https://res.cloudinary.com/dsvdffre0/image/upload/v1597137791/webaliser-_TPTXZd9mOo-unsplash_pqtm1r.jpg',
imgAlt: 'Beautiful house',
},
],
description: 'Deskripsi bum bum bum',
location: 'Penang',
price: 200,
area: 125,
},
{
name: 'Rumah 2',
address: 'Jalan apartment 2',
propertyType: 'apartment',
saleType: 'rent',
imgs: [
{
image:
'https://res.cloudinary.com/dsvdffre0/image/upload/v1597137791/jacques-bopp-Hh18POSx5qk-unsplash_lweaht.jpg',
imgAlt: 'Beautiful house 2',
},
],
description: 'Deskripsi bum bum bum',
location: 'PIK',
price: 500,
area: 250,
},
{
name: 'Rumah 10',
address: 'Jalan ke dufan',
propertyType: 'home-office',
saleType: 'rent',
imgs: [
{
image:
'https://res.cloudinary.com/dsvdffre0/image/upload/v1597137792/digital-marketing-agency-ntwrk-g39p1kDjvSY-unsplash_kh6gti.jpg',
imgAlt: 'Beautiful house 3',
},
],
description: 'Deskripsi bum bum bum',
location: 'Jakarta Timur',
price: 700,
area: 500,
},
];
story.add('Component', () => (
<Display
display={mockProperty}
blur={true}
handleSelectLocations={() => {
return;
}}
handleSelectPropertyType={() => {
return;
}}
handleSelectSaleType={() => {
return;
}}
applyFilters={() => {
return;
}}
/>
));
markdown_it_extensions.py | #
# VAZ Projects
#
#
# Author: Marcelo Tellier Sartori Vaz <marcelotsvaz@gmail.com>
from functools import partial
import re
from django.template import loader
def linkAttributes( self, tokens, index, options, env ):
'''
Add a rel="noopener" attribute to rendered links.
'''
tokens[index].attrSet( 'rel', 'noopener' )
return self.renderToken( tokens, index, options, env )
def imageGalleryPlugin( md, markdownImages ):
'''
Plugin for rendering image galleries using Django UserImage.
Syntax: #[cssClass1 cssClass2](identifier1, identifier2, identifier3)
'''
md.block.ruler.before(
'paragraph',
'imageGallery',
partial( imageGallery, markdownImages = markdownImages ),
{ 'alt': [ 'paragraph', 'reference', 'blockquote', 'list' ] }
)
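# Illustrative wiring (assumes markdown-it-py; `images` is the list of UserImage-like
# objects to expose as `markdownImages`):
#   from markdown_it import MarkdownIt
#   md = MarkdownIt().use( partial( imageGalleryPlugin, markdownImages = images ) )
#   html = md.render( '#[gallery wide](*)' )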
def imageGallery( state, startLine, endLine, silent, markdownImages ):
'''
Rule for image gallery.
'''
lineContent = state.getLines( startLine, startLine + 1, 0, False ).strip()
# Only run the regex if the first two characters match.
if not lineContent.startswith( '#[' ):
return False
match = re.match( r'^#\[(.*)\]\((.*)\)$', lineContent )
if not match:
return False
cssClasses = match[1]
identifiers = match[2]
if not silent:
state.line = startLine + 1
if identifiers.strip() == '*':
images = markdownImages
else:
identifiers = { identifier.strip() for identifier in identifiers.split( ',' ) }
images = [ image for image in markdownImages if image.identifier in identifiers ]
renderedTemplate = loader.render_to_string(
'commonApp/image_gallery.html',
{ 'images': images, 'cssClasses': cssClasses },
)
token = state.push( 'html_block', '', 0 )
token.content = renderedTemplate
token.map = [startLine, state.line]
return True
mklldeps.py | # Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import os
import sys
import subprocess
import itertools
from os import path
f = open(sys.argv[1], 'wb')
components = sys.argv[2].split(' ')
components = [i for i in components if i] # ignore extra whitespaces
enable_static = sys.argv[3]
f.write("""// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// WARNING: THIS IS A GENERATED FILE, DO NOT MODIFY
// take a look at src/etc/mklldeps.py if you're interested
""")
def run(args):
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if err:
print("failed to run llconfig: args = `{}`".format(args))
print(err)
sys.exit(1)
return out
for llconfig in sys.argv[4:]:
f.write("\n")
out = run([llconfig, '--host-target'])
arch, os = out.split('-', 1)
arch = 'x86' if arch == 'i686' or arch == 'i386' else arch
if 'darwin' in os:
os = 'macos'
elif 'linux' in os:
os = 'linux'
elif 'freebsd' in os:
os = 'freebsd'
elif 'android' in os:
os = 'android'
elif 'win' in os or 'mingw' in os:
os = 'win32'
cfg = [
"target_arch = \"" + arch + "\"",
"target_os = \"" + os + "\"",
]
f.write("#[cfg(" + ', '.join(cfg) + ")]\n")
version = run([llconfig, '--version']).strip()
# LLVM libs
if version < '3.5':
args = [llconfig, '--libs']
else:
args = [llconfig, '--libs', '--system-libs']
args.extend(components)
out = run(args)
for lib in out.strip().replace("\n", ' ').split(' '):
        lib = lib.strip()[2:] # chop off the leading '-l'
f.write("#[link(name = \"" + lib + "\"")
# LLVM libraries are all static libraries
if 'LLVM' in lib:
f.write(", kind = \"static\"")
f.write(")]\n")
# llvm-config before 3.5 didn't have a system-libs flag
if version < '3.5':
if os == 'win32':
f.write("#[link(name = \"imagehlp\")]")
# LLVM ldflags
out = run([llconfig, '--ldflags'])
for lib in out.strip().split(' '):
if lib[:2] == "-l":
f.write("#[link(name = \"" + lib[2:] + "\")]\n")
# C++ runtime library
out = run([llconfig, '--cxxflags'])
if enable_static == '1':
assert('stdlib=libc++' not in out)
f.write("#[link(name = \"stdc++\", kind = \"static\")]\n")
else:
if 'stdlib=libc++' in out:
f.write("#[link(name = \"c++\")]\n")
else:
f.write("#[link(name = \"stdc++\")]\n")
# Attach everything to an extern block
f.write("extern {}\n")
| run |
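# --- Editor's note (not part of the original script) ---
# The `version < '3.5'` checks above compare strings lexicographically, which
# happens to work for the LLVM 3.x releases this script targets but misorders
# multi-digit components ('10.0.0' < '3.5' is True as strings). A safer
# comparison, assuming purely numeric leading components, is sketched below:
def version_at_least(version, minimum):
    def parse(v):
        # Compare the first two dotted components numerically.
        return tuple(int(p) for p in v.split('.')[:2])
    return parse(version) >= parse(minimum)

assert version_at_least('3.5.0', '3.5')
assert not version_at_least('3.4.2', '3.5')
assert version_at_least('10.0.0', '3.5')  # the string comparison gets this wrong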
preprocessing_images.py | #Copyright 2021 Google LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tempfile
import pdb
import copy
import warnings
warnings.filterwarnings(action='ignore')
import functools
from itertools import combinations
from collections import defaultdict
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
############################################################################################
# data pipelines and feature engg here
# pre-defined TF2 Keras models and your own models here
from deep_autoviml.data_load.classify_features import check_model_options
# Utils
############################################################################################
# TensorFlow ≥ 2.4 is required
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras import layers
from tensorflow import keras
from tensorflow.keras.layers.experimental.preprocessing import Normalization, StringLookup, Hashing
from tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, CategoryEncoding, CategoryCrossing
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization, Discretization
from tensorflow.keras.layers import Embedding, Flatten
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras import regularizers
import tensorflow_hub as hub
import tensorflow_text as text
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error
from IPython.core.display import Image, display
import pickle
#############################################################################################
##### Suppress all TF2 and TF1.x warnings ###################
try:
tf.logging.set_verbosity(tf.logging.ERROR)
except:
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
############################################################################################
from tensorflow.keras.layers import Reshape, MaxPooling1D, MaxPooling2D, AveragePooling2D, AveragePooling1D
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, Dropout, Conv1D
############################################################################################
def preprocessing_images(train_ds, model_options):
"""
This produces a preprocessing layer for an incoming tf.data.Dataset. It can be images only.
You need to just send in a tf.data.DataSet from the training folder and a model_options dictionary.
It will return a full-model-ready layer that you can add to your Keras Functional model as image layer!
########### Motivation and suggestions for coding for Image processing came from this blog #########
Greatly indebted to Srivatsan for his Github and notebooks: https://github.com/srivatsan88/YouTubeLI
####################################################################################################
"""
try:
####### L O A D F E A T U R E E X T R A C T O R ################
url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
feature_extractor = check_model_options(model_options, "tf_hub_model", url)
img_height = model_options["image_height"]
img_width = model_options["image_width"]
image_channels = model_options["image_channels"]
num_predicts = model_options["num_predicts"]
try:
feature_extractor_layer = hub.KerasLayer(feature_extractor, input_shape=(
img_height,img_width,image_channels))
except:
print('Loading model from Tensorflow Hub failed. Check the URL and try again...')
return
feature_extractor_layer.trainable = False
normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
tf.random.set_seed(111)
model = tf.keras.Sequential([
normalization_layer,
feature_extractor_layer,
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(num_predicts,activation='softmax')
])
model.compile(
optimizer='adam',
            # The final Dense layer already applies softmax, so the loss must
            # not expect raw logits.
            loss=tf.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
except:
print(' Error: Failed image preprocessing layer. Returning...')
return
    return model
group.py | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from .accountinfo import AccountInfo
from .addressableaccount import AddressableAccount
from .cache import Cache, PerpMarketCache, MarketCache
from .constants import SYSTEM_PROGRAM_ADDRESS
from .context import Context
from .instrumentlookup import InstrumentLookup
from .instrumentvalue import InstrumentValue
from .layouts import layouts
from .lotsizeconverter import LotSizeConverter, RaisingLotSizeConverter
from .marketlookup import MarketLookup
from .metadata import Metadata
from .token import Instrument, Token
from .tokenbank import TokenBank
from .version import Version
# # 🥭 GroupSlotSpotMarket class
#
class GroupSlotSpotMarket:
def __init__(self, address: PublicKey, maint_asset_weight: Decimal, init_asset_weight: Decimal, maint_liab_weight: Decimal, init_liab_weight: Decimal) -> None:
        self.address: PublicKey = address
        self.maint_asset_weight: Decimal = maint_asset_weight
        self.init_asset_weight: Decimal = init_asset_weight
        self.maint_liab_weight: Decimal = maint_liab_weight
        self.init_liab_weight: Decimal = init_liab_weight
    @staticmethod
def from_layout(layout: typing.Any) -> "GroupSlotSpotMarket":
spot_market: PublicKey = layout.spot_market
maint_asset_weight: Decimal = round(layout.maint_asset_weight, 8)
init_asset_weight: Decimal = round(layout.init_asset_weight, 8)
maint_liab_weight: Decimal = round(layout.maint_liab_weight, 8)
init_liab_weight: Decimal = round(layout.init_liab_weight, 8)
return GroupSlotSpotMarket(spot_market, maint_asset_weight, init_asset_weight, maint_liab_weight, init_liab_weight)
@staticmethod
def from_layout_or_none(layout: typing.Any) -> typing.Optional["GroupSlotSpotMarket"]:
if (layout.spot_market is None) or (layout.spot_market == SYSTEM_PROGRAM_ADDRESS):
return None
return GroupSlotSpotMarket.from_layout(layout)
def __str__(self) -> str:
        return f"""« GroupSlotSpotMarket [{self.address}]
Asset Weights:
Initial: {self.init_asset_weight}
Maintenance: {self.maint_asset_weight}
Liability Weights:
Initial: {self.init_liab_weight}
Maintenance: {self.maint_liab_weight}
»"""
def __repr__(self) -> str:
return f"{self}"
# # 🥭 GroupSlotPerpMarket class
#
class GroupSlotPerpMarket:
def __init__(self, address: PublicKey, maint_asset_weight: Decimal, init_asset_weight: Decimal, maint_liab_weight: Decimal, init_liab_weight: Decimal, liquidation_fee: Decimal, base_lot_size: Decimal, quote_lot_size: Decimal) -> None:
self.address: PublicKey = address
self.maint_asset_weight: Decimal = maint_asset_weight
self.init_asset_weight: Decimal = init_asset_weight
self.maint_liab_weight: Decimal = maint_liab_weight
self.init_liab_weight: Decimal = init_liab_weight
self.liquidation_fee: Decimal = liquidation_fee
self.base_lot_size: Decimal = base_lot_size
self.quote_lot_size: Decimal = quote_lot_size
@staticmethod
def from_layout(layout: typing.Any) -> "GroupSlotPerpMarket":
perp_market: PublicKey = layout.perp_market
maint_asset_weight: Decimal = round(layout.maint_asset_weight, 8)
init_asset_weight: Decimal = round(layout.init_asset_weight, 8)
maint_liab_weight: Decimal = round(layout.maint_liab_weight, 8)
init_liab_weight: Decimal = round(layout.init_liab_weight, 8)
liquidation_fee: Decimal = round(layout.liquidation_fee, 8)
base_lot_size: Decimal = layout.base_lot_size
quote_lot_size: Decimal = layout.quote_lot_size
return GroupSlotPerpMarket(perp_market, maint_asset_weight, init_asset_weight, maint_liab_weight, init_liab_weight, liquidation_fee, base_lot_size, quote_lot_size)
@staticmethod
def from_layout_or_none(layout: typing.Any) -> typing.Optional["GroupSlotPerpMarket"]:
if (layout.perp_market is None) or (layout.perp_market == SYSTEM_PROGRAM_ADDRESS):
return None
return GroupSlotPerpMarket.from_layout(layout)
def __str__(self) -> str:
        return f"""« GroupSlotPerpMarket [{self.address}]
Asset Weights:
Initial: {self.init_asset_weight}
Maintenance: {self.maint_asset_weight}
Liability Weights:
Initial: {self.init_liab_weight}
Maintenance: {self.maint_liab_weight}
Liquidation Fee: {self.liquidation_fee}
Base Lot Size: {self.base_lot_size}
Quote Lot Size: {self.quote_lot_size}
»"""
def __repr__(self) -> str:
return f"{self}"
# # 🥭 GroupSlot class
#
# `GroupSlot` gathers indexed slot items together instead of separate arrays.
#
class GroupSlot:
def __init__(self, index: int, base_instrument: Instrument, base_token_bank: typing.Optional[TokenBank], quote_token_bank: TokenBank, spot_market_info: typing.Optional[GroupSlotSpotMarket], perp_market_info: typing.Optional[GroupSlotPerpMarket], perp_lot_size_converter: LotSizeConverter, oracle: PublicKey) -> None:
self.index: int = index
self.base_instrument: Instrument = base_instrument
self.base_token_bank: typing.Optional[TokenBank] = base_token_bank
self.quote_token_bank: TokenBank = quote_token_bank
self.spot_market: typing.Optional[GroupSlotSpotMarket] = spot_market_info
self.perp_market: typing.Optional[GroupSlotPerpMarket] = perp_market_info
self.perp_lot_size_converter: LotSizeConverter = perp_lot_size_converter
self.oracle: PublicKey = oracle
def __str__(self) -> str:
base_token_bank = f"{self.base_token_bank}".replace("\n", "\n ")
quote_token_bank = f"{self.quote_token_bank}".replace("\n", "\n ")
spot_market_info = f"{self.spot_market}".replace("\n", "\n ")
perp_market_info = f"{self.perp_market}".replace("\n", "\n ")
        return f"""« GroupSlot[{self.index}] {self.base_instrument}
Base Token Info:
{base_token_bank}
Quote Token Info:
{quote_token_bank}
Oracle: {self.oracle}
Spot Market:
{spot_market_info}
Perp Market:
{perp_market_info}
»"""
def __repr__(self) -> str:
return f"{self}"
# # 🥭 Group class
#
# `Group` defines root functionality for Mango Markets.
#
class Group(AddressableAccount):
def __init__(self, account_info: AccountInfo, version: Version, name: str,
meta_data: Metadata,
shared_quote: TokenBank,
slot_indices: typing.Sequence[bool],
slots: typing.Sequence[GroupSlot],
signer_nonce: Decimal, signer_key: PublicKey,
admin: PublicKey, serum_program_address: PublicKey, cache: PublicKey, valid_interval: Decimal,
insurance_vault: PublicKey, srm_vault: PublicKey, msrm_vault: PublicKey, fees_vault: PublicKey) -> None:
super().__init__(account_info)
self.version: Version = version
self.name: str = name
self.meta_data: Metadata = meta_data
self.shared_quote: TokenBank = shared_quote
self.slot_indices: typing.Sequence[bool] = slot_indices
self.slots: typing.Sequence[GroupSlot] = slots
self.signer_nonce: Decimal = signer_nonce
self.signer_key: PublicKey = signer_key
self.admin: PublicKey = admin
self.serum_program_address: PublicKey = serum_program_address
self.cache: PublicKey = cache
self.valid_interval: Decimal = valid_interval
self.insurance_vault: PublicKey = insurance_vault
self.srm_vault: PublicKey = srm_vault
self.msrm_vault: PublicKey = msrm_vault
self.fees_vault: PublicKey = fees_vault
@property
def shared_quote_token(self) -> Token:
return Token.ensure(self.shared_quote.token)
@property
def liquidity_incentive_token_bank(self) -> TokenBank:
for token_bank in self.tokens:
if token_bank.token.symbol_matches("MNGO"):
return token_bank
raise Exception(f"Could not find token info for symbol 'MNGO' in group {self.address}")
@property
def liquidity_incentive_token(self) -> Token:
return Token.ensure(self.liquidity_incentive_token_bank.token)
@property
def tokens(self) -> typing.Sequence[TokenBank]:
return [*self.base_tokens, self.shared_quote]
@property
def tokens_by_index(self) -> typing.Sequence[typing.Optional[TokenBank]]:
return [*self.base_tokens_by_index, self.shared_quote]
@property
def slots_by_index(self) -> typing.Sequence[typing.Optional[GroupSlot]]:
mapped_items: typing.List[typing.Optional[GroupSlot]] = []
slot_counter = 0
for available in self.slot_indices:
if available:
mapped_items += [self.slots[slot_counter]]
slot_counter += 1
else:
mapped_items += [None]
return mapped_items
@property
def base_tokens(self) -> typing.Sequence[TokenBank]:
return [slot.base_token_bank for slot in self.slots if slot.base_token_bank is not None]
@property
def base_tokens_by_index(self) -> typing.Sequence[typing.Optional[TokenBank]]:
return [slot.base_token_bank if slot is not None else None for slot in self.slots_by_index]
@property
def oracles(self) -> typing.Sequence[PublicKey]:
return [slot.oracle for slot in self.slots if slot.oracle is not None]
@property
def oracles_by_index(self) -> typing.Sequence[typing.Optional[PublicKey]]:
return [slot.oracle if slot is not None else None for slot in self.slots_by_index]
@property
def spot_markets(self) -> typing.Sequence[GroupSlotSpotMarket]:
return [slot.spot_market for slot in self.slots if slot.spot_market is not None]
@property
def spot_markets_by_index(self) -> typing.Sequence[typing.Optional[GroupSlotSpotMarket]]:
return [slot.spot_market if slot is not None else None for slot in self.slots_by_index]
@property
def perp_markets(self) -> typing.Sequence[GroupSlotPerpMarket]:
return [slot.perp_market for slot in self.slots if slot.perp_market is not None]
@property
def perp_markets_by_index(self) -> typing.Sequence[typing.Optional[GroupSlotPerpMarket]]:
return [slot.perp_market if slot is not None else None for slot in self.slots_by_index]
@staticmethod
def from_layout(layout: typing.Any, name: str, account_info: AccountInfo, version: Version, instrument_lookup: InstrumentLookup, market_lookup: MarketLookup) -> "Group":
meta_data: Metadata = Metadata.from_layout(layout.meta_data)
tokens: typing.List[typing.Optional[TokenBank]] = [
TokenBank.from_layout_or_none(t, instrument_lookup) for t in layout.tokens]
# By convention, the shared quote token is always at the end.
quote_token_bank: typing.Optional[TokenBank] = tokens[-1]
if quote_token_bank is None:
raise Exception("Could not find quote token info at end of group tokens.")
slots: typing.List[GroupSlot] = []
in_slots: typing.List[bool] = []
for index in range(len(tokens) - 1):
spot_market_info: typing.Optional[GroupSlotSpotMarket] = GroupSlotSpotMarket.from_layout_or_none(
layout.spot_markets[index])
perp_market_info: typing.Optional[GroupSlotPerpMarket] = GroupSlotPerpMarket.from_layout_or_none(
layout.perp_markets[index])
if (spot_market_info is None) and (perp_market_info is None):
in_slots += [False]
else:
perp_lot_size_converter: LotSizeConverter = RaisingLotSizeConverter()
base_token_bank: typing.Optional[TokenBank] = tokens[index]
base_instrument: Instrument
if base_token_bank is not None:
base_instrument = base_token_bank.token
else:
# It's possible there's no underlying SPL token and we have a pure PERP market.
if perp_market_info is None:
raise Exception(f"Cannot find base token or perp market info for index {index}")
perp_market = market_lookup.find_by_address(perp_market_info.address)
if perp_market is None:
raise Exception(f"Cannot find base token or perp market for index {index}")
base_instrument = perp_market.base
if perp_market_info is not None:
perp_lot_size_converter = LotSizeConverter(
base_instrument, perp_market_info.base_lot_size, quote_token_bank.token, perp_market_info.quote_lot_size)
oracle: PublicKey = layout.oracles[index]
slot: GroupSlot = GroupSlot(index, base_instrument, base_token_bank, quote_token_bank,
spot_market_info, perp_market_info, perp_lot_size_converter, oracle)
slots += [slot]
in_slots += [True]
signer_nonce: Decimal = layout.signer_nonce
signer_key: PublicKey = layout.signer_key
admin: PublicKey = layout.admin
serum_program_address: PublicKey = layout.serum_program_address
cache_address: PublicKey = layout.cache
valid_interval: Decimal = layout.valid_interval
insurance_vault: PublicKey = layout.insurance_vault
srm_vault: PublicKey = layout.srm_vault
msrm_vault: PublicKey = layout.msrm_vault
fees_vault: PublicKey = layout.fees_vault
return Group(account_info, version, name, meta_data, quote_token_bank, in_slots, slots, signer_nonce, signer_key, admin, serum_program_address, cache_address, valid_interval, insurance_vault, srm_vault, msrm_vault, fees_vault)
@staticmethod
def parse(account_info: AccountInfo, name: str, instrument_lookup: InstrumentLookup, market_lookup: MarketLookup) -> "Group":
data = account_info.data
if len(data) != layouts.GROUP.sizeof():
raise Exception(
f"Group data length ({len(data)}) does not match expected size ({layouts.GROUP.sizeof()})")
layout = layouts.GROUP.parse(data)
return Group.from_layout(layout, name, account_info, Version.V3, instrument_lookup, market_lookup)
@staticmethod
def parse_with_context(context: Context, account_info: AccountInfo) -> "Group":
name = context.lookup_group_name(account_info.address)
return Group.parse(account_info, name, context.instrument_lookup, context.market_lookup)
@staticmethod
def load(context: Context, address: typing.Optional[PublicKey] = None) -> "Group":
group_address: PublicKey = address or context.group_address
account_info = AccountInfo.load(context, group_address)
if account_info is None:
raise Exception(f"Group account not found at address '{group_address}'")
name = context.lookup_group_name(account_info.address)
return Group.parse(account_info, name, context.instrument_lookup, context.market_lookup)
def slot_by_spot_market_address(self, spot_market_address: PublicKey) -> GroupSlot:
for slot in self.slots:
if slot.spot_market is not None and slot.spot_market.address == spot_market_address:
return slot
raise Exception(f"Could not find spot market {spot_market_address} in group {self.address}")
def slot_by_perp_market_address(self, perp_market_address: PublicKey) -> GroupSlot:
for slot in self.slots:
if slot.perp_market is not None and slot.perp_market.address == perp_market_address:
return slot
raise Exception(f"Could not find perp market {perp_market_address} in group {self.address}")
def slot_by_instrument_or_none(self, instrument: Instrument) -> typing.Optional[GroupSlot]:
for slot in self.slots:
if slot.base_instrument == instrument:
return slot
return None
def slot_by_instrument(self, instrument: Instrument) -> GroupSlot:
slot: typing.Optional[GroupSlot] = self.slot_by_instrument_or_none(instrument)
if slot is not None:
return slot
raise Exception(f"Could not find slot for {instrument} in group {self.address}")
def token_bank_by_instrument(self, instrument: Instrument) -> TokenBank:
for token_bank in self.tokens:
if token_bank.token == instrument:
return token_bank
raise Exception(f"Could not find token {instrument} in group {self.address}")
def token_price_from_cache(self, cache: Cache, token: Instrument) -> InstrumentValue:
market_cache: MarketCache = self.market_cache_from_cache(cache, token)
return market_cache.adjusted_price(token, self.shared_quote_token)
def perp_market_cache_from_cache(self, cache: Cache, token: Instrument) -> typing.Optional[PerpMarketCache]:
market_cache: MarketCache = self.market_cache_from_cache(cache, token)
return market_cache.perp_market
def market_cache_from_cache(self, cache: Cache, instrument: Instrument) -> MarketCache:
slot: GroupSlot = self.slot_by_instrument(instrument)
instrument_index: int = slot.index
return cache.market_cache_for_index(instrument_index)
def fetch_cache(self, context: Context) -> Cache:
return Cache.load(context, self.cache)
def __str__(self) -> str:
slot_count = len(self.slots)
slots = "\n ".join([f"{item}".replace("\n", "\n ") for item in self.slots])
        return f"""« Group {self.version} [{self.address}]
{self.meta_data}
Name: {self.name}
Signer [Nonce: {self.signer_nonce}]: {self.signer_key}
Admin: {self.admin}
DEX Program ID: {self.serum_program_address}
Cache: {self.cache}
Insurance Vault: {self.insurance_vault}
SRM Vault: {self.srm_vault}
MSRM Vault: {self.msrm_vault}
Fees Vault: {self.fees_vault}
Valid Interval: {self.valid_interval}
Basket [{slot_count} markets]:
{slots}
»"""
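# # 🥭 Usage sketch
#
# A minimal illustration of the `Group` API above (not part of the original
# file), assuming a mango `Context` has already been constructed elsewhere.
def describe_group(context: Context) -> None:
    # Load the context's default group and walk its configured slots.
    group = Group.load(context)
    print(f"Group '{group.name}' [{group.address}] has {len(group.slots)} active slots")
    for slot in group.slots:
        kind = "spot" if slot.spot_market is not None else "perp-only"
        print(f"  [{slot.index}] {slot.base_instrument} ({kind})")
    # Pricing data lives in a separate cache account referenced by the group.
    cache = group.fetch_cache(context)
    print(f"  cache account: {cache.address}")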
sorting.go | // We sort numbers using a heap (heapsort).
package heapsort
var index int
var size int = 10
var numbers [10]int
var global_heap [1000]int
var global_heapSize int
func main(){
numbers[0] = 0
numbers[1] = 17
numbers[2] = 13
numbers[3] = 8
numbers[4] = 79
numbers[5] = 65
numbers[6] = 44
numbers[7] = 72
numbers[8] = 66
numbers[9] = 53
	heapSort(10, numbers[:]) // pass the array as a slice
	sorted := getSortedList()
	_ = sorted // sorted now contains our numbers in descending order
}
func heapSort(size int, numbers []int) {
var lastIndex int = 1
	var current = 1
var newIndex int
var keepLooping bool
var myHeap [1000]int
var temp int
myHeap[1] = numbers[0]
	for i := 1; i < size; i++ {
lastIndex++
myHeap[lastIndex] = numbers[i]
keepLooping = true
current = lastIndex
for keepLooping {
newIndex = current/2
if newIndex > 0 && myHeap[newIndex] < myHeap[current] {
temp = myHeap[newIndex]
myHeap[newIndex] = myHeap[current]
myHeap[current] = temp
current = newIndex
} else if newIndex == 0 {
keepLooping = false;
}
}
}
global_heap = myHeap
global_heapSize = lastIndex
}
func getSortedList() []int{
var sorted []int
var keepLooping bool
var current int
	var temp int
	for global_heapSize > 0 {
		// Move the max (root) into the output, then re-heapify.
		sorted = append(sorted, global_heap[1])
		global_heap[1] = global_heap[global_heapSize]
		global_heapSize--
		keepLooping = true
		current = 1
		for keepLooping {
			// Sift down: swap with the larger child and descend until the
			// heap property is restored.
			largest := current
			if current*2 <= global_heapSize && global_heap[largest] < global_heap[current*2] {
				largest = current * 2
			}
			if current*2+1 <= global_heapSize && global_heap[largest] < global_heap[current*2+1] {
				largest = current*2 + 1
			}
			if largest == current {
				keepLooping = false
			} else {
				temp = global_heap[current]
				global_heap[current] = global_heap[largest]
				global_heap[largest] = temp
				current = largest
			}
		}
}
return sorted
}
connector-line.ts | import { createElement, isNullOrUndefined, isObject, remove } from '@syncfusion/ej2-base';
import { Gantt } from '../base/gantt';
import * as cls from '../base/css-constants';
import { IGanttData, ITaskData, IConnectorLineObject, IPredecessor } from '../base/interface';
import { isScheduledTask } from '../base/utils';
/**
* To render the connector line in Gantt
*/
export class ConnectorLine {
private parent: Gantt;
public dependencyViewContainer: HTMLElement;
private lineColor: string;
private lineStroke: number;
public tooltipTable: HTMLElement;
/**
* @hidden
*/
public expandedRecords: IGanttData[];
constructor(ganttObj?: Gantt) {
this.expandedRecords = [];
this.parent = ganttObj;
this.dependencyViewContainer =
createElement('div', { className: cls.dependencyViewContainer });
this.initPublicProp();
}
/**
* To get connector line gap.
*
* @param {IConnectorLineObject} data .
* @returns {number} .
* @private
*/
private getconnectorLineGap(data: IConnectorLineObject): number {
let width: number = 0;
width = (data.milestoneChild ?
((this.parent.chartRowsModule.milestoneMarginTop / 2) + (this.parent.chartRowsModule.milestoneHeight / 2)) :
((this.parent.chartRowsModule.taskBarMarginTop / 2) + (this.parent.chartRowsModule.taskBarHeight / 2)));
return width;
}
/**
* To initialize the public property.
*
* @returns {void}
* @private
*/
public initPublicProp(): void {
this.lineColor = this.parent.connectorLineBackground;
this.lineStroke = (this.parent.connectorLineWidth) > 4 ? 4 : this.parent.connectorLineWidth;
this.createConnectorLineTooltipTable();
}
private getTaskbarMidpoint(isMilestone: boolean): number {
return Math.floor(isMilestone ?
(this.parent.chartRowsModule.milestoneMarginTop + (this.parent.chartRowsModule.milestoneHeight / 2)) :
(this.parent.chartRowsModule.taskBarMarginTop + (this.parent.chartRowsModule.taskBarHeight / 2))) + 1;
}
/**
     * To create the connector line object for a predecessor link.
     *
     * @param {IGanttData} parentGanttData .
     * @param {IGanttData} childGanttData .
     * @param {IPredecessor} predecessor .
     * @returns {IConnectorLineObject} .
* @private
*/
public createConnectorLineObject(parentGanttData: IGanttData, childGanttData: IGanttData, predecessor: IPredecessor):
IConnectorLineObject {
const connectorObj: IConnectorLineObject = {} as IConnectorLineObject;
const updatedRecords: IGanttData[] = this.parent.pdfExportModule && this.parent.pdfExportModule.isPdfExport ?
this.parent.flatData : this.expandedRecords;
const parentIndex: number = updatedRecords.indexOf(parentGanttData);
const childIndex: number = updatedRecords.indexOf(childGanttData);
const parentGanttRecord: ITaskData = parentGanttData.ganttProperties;
const childGanttRecord: ITaskData = childGanttData.ganttProperties;
const currentData: IGanttData[] = this.parent.virtualScrollModule && this.parent.enableVirtualization ?
this.parent.currentViewData : this.parent.getExpandedRecords(this.parent.currentViewData);
connectorObj.parentIndexInCurrentView = currentData.indexOf(parentGanttData);
connectorObj.childIndexInCurrentView = currentData.indexOf(childGanttData);
const isVirtualScroll: boolean = this.parent.virtualScrollModule && this.parent.enableVirtualization;
if ((!isVirtualScroll && (connectorObj.parentIndexInCurrentView === -1 || connectorObj.childIndexInCurrentView === -1)) ||
connectorObj.parentIndexInCurrentView === -1 && connectorObj.childIndexInCurrentView === -1) {
return null;
} else {
connectorObj.parentLeft = parentGanttRecord.isMilestone ?
parentGanttRecord.left - (this.parent.chartRowsModule.milestoneHeight / 2) : parentGanttRecord.left;
connectorObj.childLeft = childGanttRecord.isMilestone ?
childGanttRecord.left - (this.parent.chartRowsModule.milestoneHeight / 2) : childGanttRecord.left;
connectorObj.parentWidth = parentGanttRecord.width === 0 || parentGanttRecord.isMilestone ?
(Math.floor(this.parent.chartRowsModule.milestoneHeight)) : parentGanttRecord.width;
connectorObj.childWidth = childGanttRecord.width === 0 || childGanttRecord.isMilestone ?
(Math.floor(this.parent.chartRowsModule.milestoneHeight)) : childGanttRecord.width;
connectorObj.parentIndex = parentIndex;
connectorObj.childIndex = childIndex;
const rowHeight: number = this.parent.ganttChartModule.getChartRows()[0] &&
this.parent.ganttChartModule.getChartRows()[0].getBoundingClientRect().height;
connectorObj.rowHeight = rowHeight && !isNaN(rowHeight) ? rowHeight : this.parent.rowHeight;
connectorObj.type = predecessor.type;
            const parentId: string = this.parent.viewType === 'ResourceView' ? parentGanttRecord.taskId : parentGanttRecord.rowUniqueID;
            const childId: string = this.parent.viewType === 'ResourceView' ? childGanttRecord.taskId : childGanttRecord.rowUniqueID;
            connectorObj.connectorLineId = 'parent' + parentId + 'child' + childId;
connectorObj.milestoneParent = parentGanttRecord.isMilestone ? true : false;
connectorObj.milestoneChild = childGanttRecord.isMilestone ? true : false;
if (isNullOrUndefined(isScheduledTask(parentGanttRecord)) || isNullOrUndefined(isScheduledTask(childGanttRecord))) {
return null;
} else {
return connectorObj;
}
}
}
/**
* To render connector line.
*
* @param {IConnectorLineObject} connectorLinesCollection .
* @returns {void}
* @private
*/
public renderConnectorLines(connectorLinesCollection: IConnectorLineObject[]): void {
let connectorLine: string = '';
const ariaConnector : IConnectorLineObject[] = [];
for (let index: number = 0; index < connectorLinesCollection.length; index++) {
connectorLine = connectorLine + this.getConnectorLineTemplate(connectorLinesCollection[index]);
ariaConnector.push(connectorLinesCollection[index]);
}
this.dependencyViewContainer.innerHTML = connectorLine;
const childNodes: NodeList = this.parent.connectorLineModule.dependencyViewContainer.childNodes;
for (let i: number = 0; i < childNodes.length; i++) {
const innerChild: NodeList = childNodes[i].childNodes;
for (let j: number = 0; j < innerChild.length; j++) {
const ariaString: string = this.parent.connectorLineModule.generateAriaLabel(ariaConnector[i]);
(<HTMLElement>innerChild[j]).setAttribute('aria-label', ariaString);
}
}
this.parent.ganttChartModule.chartBodyContent.appendChild(this.dependencyViewContainer);
}
/**
* To get parent position.
*
* @param {IConnectorLineObject} data .
     * @returns {string} .
* @private
*/
private getParentPosition(data: IConnectorLineObject): string {
if (data.parentIndex < data.childIndex) {
if (data.type === 'FF') {
if ((data.childLeft + data.childWidth) >= (data.parentLeft + data.parentWidth)) {
return 'FFType2';
} else {
return 'FFType1';
}
} else if ((data.parentLeft < data.childLeft) && (data.childLeft > (data.parentLeft + data.parentWidth + 25))) {
if (data.type === 'FS') {
return 'FSType1';
}
if (data.type === 'SF') {
return 'SFType1';
} else if (data.type === 'SS') {
return 'SSType2';
} else if (data.type === 'FF') {
return 'FFType2';
}
} else if ((data.parentLeft < data.childLeft && (data.childLeft < (data.parentLeft + data.parentWidth)))
|| (data.parentLeft === data.childLeft || data.parentLeft > data.childLeft)) {
if (data.parentLeft > (data.childLeft + data.childWidth + 25)) {
if (data.type === 'SF') {
return 'SFType2';
}
}
if (data.parentLeft > data.childLeft) {
if (data.type === 'SS') {
return 'SSType1';
}
if (data.type === 'SF') {
return 'SFType1';
}
if (data.type === 'FF') {
return 'FFType1';
}
} else if ((data.childLeft + data.childWidth) > (data.parentLeft + data.parentWidth)) {
if (data.type === 'FF') {
return 'FFType2';
}
}
if (data.type === 'FS') {
return 'FSType2';
} else if (data.type === 'SS') {
return 'SSType2';
} else if (data.type === 'FF') {
return 'FFType1';
} else if (data.type === 'SF') {
return 'SFType1';
}
} else if ((data.parentLeft) < data.childLeft) {
if (data.type === 'FS') {
return 'FSType2';
} else if (data.type === 'FF') {
return 'FFType2';
} else if (data.type === 'SS') {
return 'SSType2';
} else if (data.type === 'SF') {
return 'SFType1';
}
}
} else if (data.parentIndex > data.childIndex) {
if ((data.parentLeft < data.childLeft) && (data.childLeft > (data.parentLeft + data.parentWidth))) {
if (data.type === 'FS') {
if (30 >= (data.childLeft - (data.milestoneParent ?
(data.parentLeft + data.parentWidth + 4) : (data.parentLeft + data.parentWidth)))) {
return 'FSType3';
} else {
return 'FSType4';
}
}
if (data.parentLeft < data.childLeft || ((data.childLeft + data.childWidth) > (data.parentLeft + data.parentWidth))) {
if (data.type === 'SS') {
return 'SSType4';
}
if (data.type === 'FF') {
return 'FFType4';
}
if (data.type === 'SF') {
return 'SFType4';
}
// eslint-disable-next-line
} else if ((data.childLeft + data.childWidth) > (data.parentLeft + data.parentWidth)) {
if (data.type === 'FF') {
return 'FFType4';
}
}
} else if ((data.parentLeft < data.childLeft && (data.childLeft < (data.parentLeft + data.parentWidth)))
|| (data.parentLeft === data.childLeft || data.parentLeft > data.childLeft)) {
if ((data.childLeft + data.childWidth) <= (data.parentLeft + data.parentWidth)) {
if (data.type === 'FF') {
return 'FFType3';
}
if (data.type === 'SF') {
if ((data.childLeft + data.childWidth + 25) < (data.parentLeft)) {
return 'SFType3';
} else {
return 'SFType4';
}
}
if (data.type === 'SS') {
if (data.childLeft <= data.parentLeft) {
return 'SSType3';
} else {
return 'SSType4';
}
}
} else if ((data.childLeft + data.childWidth) > (data.parentLeft + data.parentWidth)) {
if (data.type === 'FF') {
return 'FFType4';
}
if (data.type === 'SF') {
return 'SFType4';
}
if (data.type === 'SS') {
if (data.childLeft <= data.parentLeft) {
return 'SSType3';
} else {
return 'SSType4';
}
}
}
if (data.type === 'FS') {
return 'FSType3';
}
} else if (data.parentLeft < data.childLeft) {
if (data.type === 'FS') {
return 'FSType3';
}
if (data.type === 'SS') {
return 'SSType4';
}
if (data.type === 'FF') {
return 'FFType4';
}
if (data.type === 'SF') {
return 'SFType4';
}
}
}
return null;
}
/**
* To get line height.
*
* @param {IConnectorLineObject} data .
     * @returns {number} .
* @private
*/
private getHeightValue(data: IConnectorLineObject): number {
return (data.parentIndex * data.rowHeight) > (data.childIndex * data.rowHeight) ?
((data.parentIndex * data.rowHeight) - (data.childIndex * data.rowHeight)) :
((data.childIndex * data.rowHeight) - (data.parentIndex * data.rowHeight));
}
/**
* To get sstype2 inner element width.
*
* @param {IConnectorLineObject} data .
     * @returns {number} .
* @private
*/
private getInnerElementWidthSSType2(data: IConnectorLineObject): number {
if (data.parentLeft === data.childLeft) {
return 10;
}
return (data.childLeft - data.parentLeft);
}
/**
* To get sstype2 inner element left.
*
* @param {IConnectorLineObject} data .
     * @returns {number} .
* @private
*/
private getInnerElementLeftSSType2(data: IConnectorLineObject): number {
if (data.parentLeft === data.childLeft) {
return (data.parentLeft - 20);
}
return (data.parentLeft - 10);
}
/**
* To get sstype2 inner child element width.
*
* @param {IConnectorLineObject} data .
     * @returns {number} .
* @private
*/
private getInnerChildWidthSSType2(data: IConnectorLineObject): number {
if ((data.parentLeft + data.parentWidth) < data.childLeft) {
return 10;
}
if (data.parentLeft === data.childLeft) {
return 20;
}
if ((data.parentLeft + data.parentWidth) >= data.childLeft) {
return 10;
}
return (data.childLeft - data.parentLeft);
}
private getBorderStyles(cssType: string, unit: number): string {
const borderWidth: string = 'border-' + cssType + '-width:' + unit + 'px;';
const borderStyle: string = 'border-' + cssType + '-style:solid;';
const borderColor: string = !isNullOrUndefined(this.lineColor) ? 'border-' + cssType + '-color:' + this.lineColor + ';' : '';
return (borderWidth + borderStyle + borderColor);
}
/**
* To get connector line template.
*
* @param {IConnectorLineObject} data .
     * @returns {string} .
* @private
*/
public getConnectorLineTemplate(data: IConnectorLineObject): string {
const setInnerChildWidthSSType2: number = this.getInnerChildWidthSSType2(data);
const setInnerElementWidthSSType2: number = this.getInnerElementWidthSSType2(data);
const setInnerElementLeftSSType2: number = this.getInnerElementLeftSSType2(data);
const height: number = this.getHeightValue(data);
const isMilestoneParent: boolean = data.milestoneParent ? true : false;
const isMilestone: boolean = data.milestoneChild ? true : false;
let connectorContainer: string = '';
const isVirtual: boolean = this.parent.virtualScrollModule && this.parent.enableVirtualization;
const connectorLine: { top: number, height: number } = this.getPosition(data, this.getParentPosition(data), height);
const heightValue: number = isVirtual ? connectorLine.height : height;
if (this.getParentPosition(data)) {
connectorContainer = '<div id="ConnectorLine' + data.connectorLineId + '" style="background-color:black">';
let div: string = '<div class="' + cls.connectorLineContainer +
'" tabindex="-1" style="';
const eLine: string = '<div class="' + cls.connectorLine + '" style="' +
(!isNullOrUndefined(this.lineColor) ? 'outline-color:' + this.lineColor + ';' : '');
const rightArrow: string = '<div class="' + cls.connectorLineRightArrow + '" style="' +
(!isNullOrUndefined(this.lineColor) ? 'outline-color:' + this.lineColor + ';' : '');
const leftArrow: string = '<div class="' + cls.connectorLineLeftArrow + '" style="' +
(!isNullOrUndefined(this.lineColor) ? 'outline-color:' + this.lineColor + ';' : '');
const duplicateStingOne: string = leftArrow + (isMilestone ? 'left:0px;' : '') +
this.getBorderStyles('right', 10) +
'top:' + (-5 - this.lineStroke + (this.lineStroke - 1)) + 'px;border-bottom-width:' + (5 + this.lineStroke) + 'px;' +
'border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;position:relative;"></div>';
const duplicateStingTwo: string = this.getBorderStyles('left', 10) +
'top:' + (-6) + 'px;border-bottom-width:' + (5 + this.lineStroke) + 'px;' +
'border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;position:relative;"></div>';
const duplicateStingThree: string = this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>' + eLine +
'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - (this.lineStroke - 1))) + 'position:relative;"></div>';
const duplicateStingFour: string = leftArrow + 'left:' +
(((data.childLeft + data.childWidth) - (data.parentLeft)) + 10) + 'px;' +
this.getBorderStyles('right', 10);
const duplicateStingFive: string = 'top:' + (-(6 + (5 + this.lineStroke) + (this.lineStroke / 2))) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
if (this.getParentPosition(data) === 'FSType1') {
div = div + 'left:' + (data.parentLeft + data.parentWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.parentIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FSType1">';
div = div + eLine;
div = div + 'left:' + (isMilestoneParent ? -1 : 0) + 'px;width:' + (isMilestoneParent ?
((((data.childLeft - (data.parentLeft + data.parentWidth + 10)) + this.lineStroke) - 10) + 1) :
(((data.childLeft - (data.parentLeft + data.parentWidth + 10)) + this.lineStroke) - 10)) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + ((data.childLeft - (data.parentLeft + data.parentWidth + 10)) - 10) + 'px;' +
'width:0px;' + this.getBorderStyles('right', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + ((data.childLeft - (data.parentLeft + data.parentWidth + 10)) - 10) + 'px;width:10px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + rightArrow;
div = div + 'left:' + (data.childLeft - (data.parentLeft + data.parentWidth + 10)) + 'px;' +
this.getBorderStyles('left', 10) + 'top:' + (-6 - this.lineStroke) + 'px;border-bottom-width:' + (5 + this.lineStroke) +
'px;border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'FSType2') {
div = div + 'left:' + data.parentLeft + 'px;top:' + (isVirtual ? connectorLine.top : ((data.parentIndex * data.rowHeight) +
this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FSType2">';
div = div + eLine;
div = div + 'left:' + (isMilestoneParent ? data.parentWidth - 1 : data.parentWidth) + 'px;width:' +
(isMilestoneParent ? 11 : 10) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (data.parentWidth + 10 - this.lineStroke) + 'px;' +
this.getBorderStyles('left', this.lineStroke) + 'width:0px;' +
this.getBorderStyles(
'top', (heightValue - this.getconnectorLineGap(data) - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (data.parentWidth - (((data.parentLeft + data.parentWidth) - data.childLeft) + 20)) + 'px;' +
'width:' + (((data.parentLeft + data.parentWidth) - data.childLeft) + 30) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (data.parentWidth - (((data.parentLeft +
data.parentWidth) - data.childLeft) + 20)) + 'px;width:0px;' +
this.getBorderStyles('top', (this.getconnectorLineGap(data) - this.lineStroke)) +
this.getBorderStyles('left', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (data.parentWidth - (((data.parentLeft +
data.parentWidth) - data.childLeft) + 20)) + 'px;width:10px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + rightArrow;
div = div + 'left:' + (data.parentWidth - (((data.parentLeft + data.parentWidth) - data.childLeft) + 10)) + 'px;' +
this.getBorderStyles('left', 10) + 'border-bottom-width:' + (5 + this.lineStroke) + 'px;' +
'border-top-width:' + (5 + this.lineStroke) + 'px;top:' + (-6 - this.lineStroke) +
'px;width:0;height:0;position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'FSType3') {
div = div + 'left:' + (data.childLeft - 20) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestoneParent) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FSType3">';
div = div + rightArrow;
div = div + 'left:10px;' + this.getBorderStyles('left', 10) +
'border-bottom-width:' + (5 + this.lineStroke) + 'px;border-top-width:' + (5 + this.lineStroke) + 'px;' +
'top:' + (-6) + 'px;width:0;height:0;position:relative;"></div>';
div = div + eLine;
div = div + 'width:10px;' + this.getBorderStyles('top', this.lineStroke) +
'position:relative;top:' + (-(6 + (5 + this.lineStroke) + Math.round(this.lineStroke / 2))) + 'px;"></div>';
div = div + eLine;
div = div + 'width:' + this.lineStroke + 'px;' + this.getBorderStyles(
'top', (heightValue - this.getconnectorLineGap(data) - this.lineStroke + 1)) +
'position:relative;top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;"></div>';
div = div + eLine;
div = div + 'width:' + (((data.parentLeft + data.parentWidth) - data.childLeft) + 30) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;top:' +
(- (13 + ((this.lineStroke - 1) * 2))) + 'px;"></div>';
div = div + eLine;
div = div + 'left:' + (((data.parentLeft + data.parentWidth) - data.childLeft) + (30 - this.lineStroke)) +
'px;width:0px;' + 'height:' + (this.getconnectorLineGap(data) - this.lineStroke) + 'px;' +
this.getBorderStyles('left', this.lineStroke) + 'position:relative;' +
'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;"></div>';
div = div + eLine;
div = div + (isMilestoneParent ? 'left:' + (((data.parentLeft +
data.parentWidth) - data.childLeft) + (18 - this.lineStroke)) + 'px;width:' + (12 + this.lineStroke) + 'px;' : 'left:' +
(((data.parentLeft + data.parentWidth) - data.childLeft) + 20) + 'px;width:10px;') +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;top:' +
(- (13 + ((this.lineStroke - 1) * 2))) + 'px;"></div></div>';
}
if (this.getParentPosition(data) === 'FSType4') {
div = div + 'left:' + (data.parentLeft + data.parentWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FSType4">';
div = div + rightArrow;
div = div + 'left:' + (data.childLeft - (data.parentLeft + data.parentWidth + 10)) + 'px;' +
this.getBorderStyles('left', 10) + 'top:' + (-6) + 'px;' +
'border-bottom-width:' + (5 + this.lineStroke) + 'px;border-top-width:' +
(5 + this.lineStroke) + 'px;width:0;height:0;position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (data.childLeft - (data.parentLeft + data.parentWidth) - 20) +
'px;top:' + (-(6 + (5 + this.lineStroke) + Math.round(this.lineStroke / 2))) + 'px;width:10px;' +
this.getBorderStyles('top', this.lineStroke) +
'position:relative;"></div>';
div = div + eLine;
div = div + 'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;left:' +
(data.childLeft - (data.parentLeft + data.parentWidth) - 20) + 'px;width:0px;' +
this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke + 1)) + 'position:relative;"></div>';
div = div + eLine;
div = div + (isMilestoneParent ? 'left:-1px;' : '') + 'top:' +
(- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:' +
(isMilestoneParent ? ((data.childLeft - (data.parentLeft + data.parentWidth + 20) + 1) + this.lineStroke) :
((data.childLeft - (data.parentLeft + data.parentWidth + 20)) + this.lineStroke)) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SSType4') {
div = div + 'left:' + (data.parentLeft - 10) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="SSType4">';
div = div + rightArrow;
div = div + 'left:' + (data.childLeft - data.parentLeft) + 'px;' + duplicateStingTwo;
div = div + eLine;
div = div + 'top:' + (-(6 + (5 + this.lineStroke) + (this.lineStroke / 2))) + 'px;width:' +
(data.childLeft - data.parentLeft) + 'px;' + duplicateStingThree;
div = div + eLine;
div = div + 'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:10px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SSType3') {
div = div + 'left:' + (data.childLeft - 20) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="SSType3">';
div = div + rightArrow;
div = div + 'left:10px;' + duplicateStingTwo;
div = div + eLine;
div = div + 'top:' + (-(6 + (5 + this.lineStroke) + (this.lineStroke / 2))) + 'px;width:10px;' + duplicateStingThree;
div = div + eLine;
div = div + 'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:' +
(data.parentLeft - data.childLeft + 21) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SSType2') {
div = div + 'left:' + setInnerElementLeftSSType2 + 'px;top:' + (isVirtual ? connectorLine.top :
((data.parentIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestoneParent) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="SSType2">';
div = div + eLine;
div = div + 'width:' + (setInnerChildWidthSSType2 + 1) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'width:' + setInnerElementWidthSSType2 + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + rightArrow;
div = div + 'left:' + setInnerElementWidthSSType2 + 'px;' +
this.getBorderStyles('left', 10) + 'top:' + (-6 - this.lineStroke) + 'px;' +
'border-bottom-width:' + (5 + this.lineStroke) + 'px;border-top-width:' +
(5 + this.lineStroke) + 'px;width:0;' +
'height:0;position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SSType1') {
div = div + 'left:' + (data.childLeft - 20) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.parentIndex * data.rowHeight) +
this.getTaskbarMidpoint(isMilestoneParent) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="SSType1">';
div = div + eLine;
div = div + 'width:' + (data.parentLeft - data.childLeft + 21) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'width:10px;' + this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + rightArrow;
div = div + 'left:10px;' + this.getBorderStyles('left', 10) +
'top:' + (-6 - this.lineStroke) + 'px;border-bottom-width:' + (5 + this.lineStroke) + 'px;' +
'border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'FFType1') {
div = div + 'left:' + (data.childLeft + data.childWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.parentIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestoneParent) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FFType1">';
div = div + eLine;
div = div + 'left:' + (isMilestoneParent ? (((data.parentLeft + data.parentWidth) -
(data.childLeft + data.childWidth)) - 1) : ((data.parentLeft + data.parentWidth) -
(data.childLeft + data.childWidth))) + 'px;' +
'width:' + (isMilestoneParent ? (21 + this.lineStroke) : (20 + this.lineStroke)) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (((data.parentLeft + data.parentWidth) -
(data.childLeft + data.childWidth)) + 20) + 'px;width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (isMilestone ? 4 : 10) + 'px;width:' + (isMilestone ?
(((data.parentLeft + data.parentWidth) - (data.childLeft + data.childWidth)) + (16 + this.lineStroke)) :
(((data.parentLeft + data.parentWidth) - (data.childLeft + data.childWidth)) + (10 + this.lineStroke))) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + leftArrow;
div = div + (isMilestone ? 'left:0px;' : '') + this.getBorderStyles('right', 10) +
'top:' + (-6 - this.lineStroke) + 'px;border-bottom-width:' + (5 + this.lineStroke) + 'px;' +
'border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'FFType2') {
div = div + 'left:' + (data.parentLeft + data.parentWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.parentIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestoneParent) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FFType2">';
div = div + eLine;
div = div + (isMilestoneParent ? 'left:-1px;' : '') + 'width:' +
(isMilestoneParent ? (((data.childLeft + data.childWidth) - (data.parentLeft + data.parentWidth)) +
(21 + this.lineStroke)) : (((data.childLeft + data.childWidth) -
(data.parentLeft + data.parentWidth)) + (20 + this.lineStroke))) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (((data.childLeft + data.childWidth) - (data.parentLeft + data.parentWidth)) + 20) +
'px;width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke)) +
'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (isMilestone ? (((data.childLeft + data.childWidth) - (data.parentLeft + data.parentWidth)) + 4) :
(((data.childLeft + data.childWidth) - (data.parentLeft + data.parentWidth)) + 10)) + 'px;' +
'width:' + (isMilestone ? (16 + this.lineStroke) : (10 + this.lineStroke)) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + leftArrow;
div = div + 'left:' + ((data.childLeft + data.childWidth) - (data.parentLeft + data.parentWidth)) + 'px;' +
this.getBorderStyles('right', 10) + 'top:' + (-6 - this.lineStroke) + 'px;' +
'border-bottom-width:' + (5 + this.lineStroke) + 'px;border-top-width:' + (5 + this.lineStroke) +
'px;width:0;height:0;position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'FFType3') {
div = div + 'left:' + (data.childLeft + data.childWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FFType3">';
div = div + duplicateStingOne;
div = div + eLine;
div = div + (isMilestone ? ('left:4px;width:' +
(((data.parentLeft + data.parentWidth) - (data.childLeft + data.childWidth)) + 16)) :
('left:10px;width:' + (((data.parentLeft + data.parentWidth) -
(data.childLeft + data.childWidth)) + 10))) + 'px;top:' + (-(6 + (5 + this.lineStroke) +
(this.lineStroke / 2))) + 'px;' + this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (((data.parentLeft + data.parentWidth) - (data.childLeft + data.childWidth)) + 20) +
'px;top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;' +
'width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke + 1)) + 'position:relative;"></div>';
div = div + eLine;
div = div + (isMilestoneParent ? ('left:' + (((data.parentLeft + data.parentWidth) -
(data.childLeft + data.childWidth)) - 1) + 'px;width:21') : ('left:' + ((data.parentLeft + data.parentWidth) -
(data.childLeft + data.childWidth)) + 'px;width:20')) +
'px;top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'FFType4') {
div = div + 'left:' + (data.parentLeft + data.parentWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="FFType4">';
div = div + leftArrow;
div = div + ('left:' + ((data.childLeft + data.childWidth) -
(data.parentLeft + data.parentWidth))) + 'px;' +
this.getBorderStyles('right', 10) + 'top:' + (-5 - this.lineStroke + (this.lineStroke - 1)) + 'px;' +
'border-bottom-width:' + (5 + this.lineStroke) +
'px;border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;' +
'position:relative;"></div>';
div = div + eLine;
div = div + (isMilestone ? ('left:' + (((data.childLeft + data.childWidth) -
(data.parentLeft + data.parentWidth)) + 4) +
'px;width:' + (16 + this.lineStroke)) : ('left:' + (((data.childLeft + data.childWidth) -
(data.parentLeft + data.parentWidth)) + 10) + 'px;width:' + (10 + this.lineStroke))) +
'px;' + duplicateStingFive;
div = div + eLine;
div = div + 'left:' + (((data.childLeft + data.childWidth) -
(data.parentLeft + data.parentWidth)) + 20) + 'px;top:' + (- (13 + ((this.lineStroke - 1) * 2))) +
'px;width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke + 1)) + 'position:relative;"></div>';
div = div + eLine;
div = div + (isMilestoneParent ? ('left:-1px;width:' + (((data.childLeft + data.childWidth) -
(data.parentLeft + data.parentWidth)) + (21 + this.lineStroke))) : ('width:' + (((data.childLeft + data.childWidth) -
(data.parentLeft + data.parentWidth)) + (20 + this.lineStroke)))) + 'px;top:' +
(- (13 + ((this.lineStroke - 1) * 2))) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SFType4') {
div = div + 'left:' + (data.parentLeft - 10) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;width:1px;' +
'height:' + heightValue + 'px;position:absolute" data-connectortype="SFType4">';
div = div + duplicateStingFour + 'top:' + (-5 - this.lineStroke + (this.lineStroke - 1)) + 'px;' +
'border-bottom-width:' + (5 + this.lineStroke) +
'px;border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;' +
'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (isMilestone ? ((((data.childLeft + data.childWidth) - (data.parentLeft)) + (14 + this.lineStroke)) +
'px;width:16') : ((((data.childLeft + data.childWidth) - (data.parentLeft)) + 20) + 'px;width:' +
(10 + this.lineStroke))) + 'px;' + duplicateStingFive;
div = div + eLine;
div = div + 'left:' + (((data.childLeft + data.childWidth) - (data.parentLeft)) + 30) + 'px;top:' +
(- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles(
'top', (heightValue - this.getconnectorLineGap(data) - (this.lineStroke - 1))) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:' +
(((data.childLeft + data.childWidth) - (data.parentLeft)) + (30 + this.lineStroke)) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:0px;' +
this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (this.getconnectorLineGap(data) - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'top:' + (- (13 + ((this.lineStroke - 1) * 2))) + 'px;width:11px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SFType3') {
div = div + 'left:' + (data.childLeft + data.childWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.childIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="SFType3">';
div = div + duplicateStingOne;
div = div + eLine;
div = div + (isMilestone ? 'left:4px;width:' + (16 + this.lineStroke) : 'left:10px;width:' +
(10 + this.lineStroke)) + 'px;top:' + (-(13 + ((this.lineStroke - 1) * 2) - 1)) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:20px;top:' + (-(13 + ((this.lineStroke - 1) * 2))) + 'px;width:0px;' +
this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - (this.lineStroke - 1))) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:20px;top:' + (-(13 + ((this.lineStroke - 1) * 2))) + 'px;width:' +
((data.parentLeft - (data.childLeft + data.childWidth + 20)) + this.lineStroke) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SFType1') {
div = div + 'left:' + (data.parentLeft - 10) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.parentIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="SFType1">';
div = div + eLine;
div = div + 'width:11px;' + this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles(
'top', (heightValue - this.getconnectorLineGap(data) - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'width:' + (((data.childLeft + data.childWidth) - (data.parentLeft)) + (30 + this.lineStroke)) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (((data.childLeft + data.childWidth) - (data.parentLeft)) + 30) +
'px;width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (this.getconnectorLineGap(data) - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + (isMilestone ? ('left:' + (((data.childLeft + data.childWidth) -
(data.parentLeft)) + 15) + 'px;width:' + (15 + this.lineStroke)) : ('left:' +
(((data.childLeft + data.childWidth) - (data.parentLeft)) + 20) + 'px;width:' + (10 + this.lineStroke))) + 'px;' +
this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + duplicateStingFour + 'top:' + (-6 - this.lineStroke) + 'px;' +
'border-bottom-width:' + (5 + this.lineStroke) + 'px;border-top-width:' +
(5 + this.lineStroke) + 'px;position:relative;"></div></div>';
}
if (this.getParentPosition(data) === 'SFType2') {
div = div + 'left:' + (data.childLeft + data.childWidth) + 'px;top:' + (isVirtual ? connectorLine.top :
((data.parentIndex * data.rowHeight) + this.getTaskbarMidpoint(isMilestoneParent) - (this.lineStroke - 1))) + 'px;' +
'width:1px;height:' + heightValue + 'px;position:absolute" data-connectortype="SFType2">';
div = div + eLine;
div = div + 'left:' + (((data.parentLeft) - (data.childLeft + data.childWidth)) - 10) +
'px;width:11px;' + this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + eLine;
div = div + 'left:' + (((data.parentLeft) - (data.childLeft + data.childWidth)) - 10) +
'px;width:0px;' + this.getBorderStyles('left', this.lineStroke) +
this.getBorderStyles('top', (heightValue - this.lineStroke)) + 'position:relative;"></div>';
div = div + eLine;
div = div + (isMilestone ? ('left:4px;width:' + (((data.parentLeft) - (data.childLeft + data.childWidth))
- (14 - this.lineStroke))) : ('left:10px;width:' + (((data.parentLeft) -
(data.childLeft + data.childWidth)) - (20 - this.lineStroke)))) +
'px;' + this.getBorderStyles('top', this.lineStroke) + 'position:relative;"></div>';
div = div + leftArrow;
div = div + 'left:0px;' + this.getBorderStyles('right', 10) +
'top:' + (-6 - this.lineStroke) + 'px;border-bottom-width:' + (5 + this.lineStroke) +
'px;border-top-width:' + (5 + this.lineStroke) + 'px;width:0;height:0;position:relative;"></div></div>';
}
connectorContainer += div;
connectorContainer += '</div>';
}
return connectorContainer;
}
/**
* @param {IConnectorLineObject} data .
* @param {string} type .
* @param {number} heightValue .
* @returns {number} .
* @private
*/
private getPosition(data: IConnectorLineObject, type: string, heightValue: number): { top: number, height: number } {
let topPosition: number = 0; let lineHeight: number = 0;
if (this.parent.virtualScrollModule && this.parent.enableVirtualization) {
const isMilestoneParent: boolean = data.milestoneParent ? true : false;
const isMilestone: boolean = data.milestoneChild ? true : false;
const midPointParent: number = this.getTaskbarMidpoint(isMilestoneParent) - (this.lineStroke - 1);
const midPoint: number = this.getTaskbarMidpoint(isMilestone) - (this.lineStroke - 1);
const isParentIndex: boolean = data.parentIndexInCurrentView !== -1;
const isChildIndex: boolean = data.childIndexInCurrentView !== -1;
const lastRowIndex: number = this.parent.currentViewData.length - 1;
if (type === 'SSType1' || type === 'SSType2' || type === 'FFType1' || type === 'FFType2' || type === 'SFType2') {
topPosition = isParentIndex ? (data.parentIndexInCurrentView * data.rowHeight) + midPointParent : 0;
lineHeight = (isParentIndex && isChildIndex) ? heightValue : isChildIndex ?
(data.childIndexInCurrentView * data.rowHeight) + midPointParent : (lastRowIndex * data.rowHeight) + midPointParent;
} else if (type === 'SSType3' || type === 'SSType4' || type === 'FSType4' || type === 'FFType3' ||
type === 'FFType4' || type === 'SFType4' || type === 'SFType3') {
topPosition = isChildIndex ? (data.childIndexInCurrentView * data.rowHeight) + midPoint : 0;
lineHeight = (isParentIndex && isChildIndex) ? heightValue : isParentIndex ?
(data.parentIndexInCurrentView * data.rowHeight) + midPoint :
(lastRowIndex * data.rowHeight) + midPoint;
} else if (type === 'FSType3') {
topPosition = isChildIndex ? (data.childIndexInCurrentView * data.rowHeight) + midPointParent : 0;
lineHeight = (isParentIndex && isChildIndex) ? heightValue : isParentIndex ?
(data.parentIndexInCurrentView * data.rowHeight) + midPoint :
(lastRowIndex * data.rowHeight) + midPointParent;
} else if (type === 'SFType1' || type === 'FSType1' || type === 'FSType2') {
topPosition = isParentIndex ? (data.parentIndexInCurrentView * data.rowHeight) + midPoint : 0;
lineHeight = (isParentIndex && isChildIndex) ? heightValue : isChildIndex ?
(data.childIndexInCurrentView * data.rowHeight) + midPoint :
(lastRowIndex * data.rowHeight) + midPoint;
}
}
return { top: topPosition, height: lineHeight };
}
/**
* @returns {void} .
* @private
*/
public createConnectorLineTooltipTable(): void {
this.tooltipTable = createElement(
'table', { className: 'e-tooltiptable', styles: 'margin-top:0px', attrs: { 'cellspacing': '2px', 'cellpadding': '2px' } });
const tooltipBody: HTMLElement = createElement('tbody');
tooltipBody.innerHTML = '';
this.tooltipTable.appendChild(tooltipBody);
}
/**
* @param {string} fromTaskName .
* @param {string} fromPredecessorText .
* @param {string} toTaskName .
* @param {string} toPredecessorText .
* @returns {string} .
* @private
*/
public getConnectorLineTooltipInnerTd(
fromTaskName: string, fromPredecessorText: string, toTaskName?: string, toPredecessorText?: string): string {
let innerTd: string = '<tr id="fromPredecessor"><td >' + this.parent.localeObj.getConstant('from') + '</td><td> ';
innerTd = innerTd + fromTaskName + ' </td><td> ' + this.parent.localeObj.getConstant(fromPredecessorText) + ' </td> </tr>';
innerTd = innerTd + '<tr id="toPredecessor"><td>' + this.parent.localeObj.getConstant('to') + '</td><td> ' + toTaskName;
innerTd = innerTd + ' </td><td> ' + this.parent.localeObj.getConstant(toPredecessorText) + ' </td></tr></tbody></table>';
return innerTd;
}
/**
* Generate aria-label for connectorline
*
* @param {IConnectorLineObject} data .
* @returns {string} .
* @private
*/
public generateAriaLabel(data: IConnectorLineObject): string {
const type: string = data.type;
const updatedRecords: IGanttData[] = this.expandedRecords;
const fromName: string = updatedRecords[data.parentIndex].ganttProperties.taskName;
const toName: string = updatedRecords[data.childIndex].ganttProperties.taskName;
const start: string = this.parent.localeObj.getConstant('start');
const finish: string = this.parent.localeObj.getConstant('finish');
let value: string = '';
if (type === 'FS') {
value = fromName + ' ' + finish + ' to ' + toName + ' ' + start;
} else if (type === 'FF') {
value = fromName + ' ' + finish + ' to ' + toName + ' ' + finish;
} else if (type === 'SS') {
value = fromName + ' ' + start + ' to ' + toName + ' ' + start;
} else {
value = fromName + ' ' + start + ' to ' + toName + ' ' + finish;
}
return value;
}
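// Example (illustrative): for an 'FS' predecessor from a task named "Design"
// to a task named "Build", the method above produces "Design finish to Build start",
// with "start"/"finish" resolved through the locale constants.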
/**
* To get the record based on the predecessor value
*
* @param {string} id .
* @returns {IGanttData} .
* @private
*/
public getRecordByID(id: string): IGanttData {
if (isNullOrUndefined(id)) {
return null;
}
return this.parent.viewType === 'ResourceView' ? this.parent.flatData[this.parent.getTaskIds().indexOf('T' + id.toString())] :
this.parent.flatData[this.parent.ids.indexOf(id.toString())];
}
/**
* Method to remove connector line from DOM
*
* @param {IGanttData[] | object} records .
* @returns {void} .
* @private
*/
public removePreviousConnectorLines(records: IGanttData[] | object): void {
const isObjectType: boolean = isObject(records) === true;
const length: number = isObjectType ? Object.keys(records).length : (records as IGanttData[]).length;
const keys: string[] = Object.keys(records);
for (let i: number = 0; i < length; i++) {
let data: IGanttData;
if (isObjectType) {
const uniqueId: string = keys[i];
data = records[uniqueId] as IGanttData;
} else {
data = records[i];
}
const predecessors: IPredecessor[] = data.ganttProperties && data.ganttProperties.predecessor;
if (predecessors && predecessors.length > 0) {
for (let pre: number = 0; pre < predecessors.length; pre++) {
const lineId: string = 'parent' + predecessors[pre].from + 'child' + predecessors[pre].to;
this.removeConnectorLineById(lineId);
}
}
}
}
/**
* @param {string} id .
* @returns {void} .
* @private
*/
public removeConnectorLineById(id: string): void {
const element: Element = this.parent.connectorLineModule.dependencyViewContainer.querySelector('#ConnectorLine' + id);
if (!isNullOrUndefined(element)) {
remove(element);
}
}
} | const childId: string = this.parent.viewType === 'ResourceView' ? childGanttRecord.taskId : childGanttRecord.rowUniqueID; |
regexremapdotconfig_test.go | package atscfg
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"strings"
"testing"
"github.com/apache/trafficcontrol/lib/go-tc"
)
func TestMakeRegexRemapDotConfig(t *testing.T) {
cdnName := tc.CDNName("mycdn")
toToolName := "my-to"
toURL := "my-to.example.net"
fileName := "regex_remap_myds.config"
dses := map[tc.DeliveryServiceName]CDNDS{
"myds": CDNDS{
OrgServerFQDN: "https://myorigin.example.net", // DS "origin_server_fqdn" is actually a URL including the scheme, the name is wrong.
QStringIgnore: 0,
CacheURL: "https://mycacheurl.net",
RegexRemap: "myregexremap",
},
}
txt := MakeRegexRemapDotConfig(cdnName, toToolName, toURL, fileName, dses)
if !strings.Contains(txt, string(cdnName)) {
t.Errorf("expected: cdnName '" + string(cdnName) + "', actual: missing")
}
if !strings.Contains(txt, toToolName) {
t.Errorf("expected: toToolName '" + toToolName + "', actual: missing")
}
if !strings.Contains(txt, toURL) {
t.Errorf("expected: toURL '" + toURL + "', actual: missing")
}
if !strings.HasPrefix(strings.TrimSpace(txt), "#") {
t.Errorf("expected: header comment, actual: missing")
}
if strings.Contains(txt, "mycacheurl") {
t.Errorf("expected: regex remap to not contain cacheurl, actual: '%v'", txt)
}
if strings.Contains(txt, "myorigin") {
t.Errorf("expected: regex remap to not contain org server fqdn, actual: '%v'", txt)
}
if !strings.Contains(txt, "myregexremap") {
t.Errorf("expected: regex remap to contain regex remap, actual: '%v'", txt)
}
}
func TestMakeRegexRemapDotConfigUnusedDS(t *testing.T) {
cdnName := tc.CDNName("mycdn")
toToolName := "my-to"
toURL := "my-to.example.net"
fileName := "regex_remap_myds.config"
dses := map[tc.DeliveryServiceName]CDNDS{
"myds": CDNDS{
OrgServerFQDN: "https://myorigin.example.net", // DS "origin_server_fqdn" is actually a URL including the scheme, the name is wrong.
QStringIgnore: 0,
CacheURL: "https://mycacheurl.net",
RegexRemap: "myregexremap",
},
"otherds": CDNDS{
OrgServerFQDN: "https://otherorigin.example.net", // DS "origin_server_fqdn" is actually a URL including the scheme, the name is wrong.
QStringIgnore: 0,
CacheURL: "https://othercacheurl.net",
RegexRemap: "otherregexremap",
},
}
txt := MakeRegexRemapDotConfig(cdnName, toToolName, toURL, fileName, dses)
if !strings.Contains(txt, string(cdnName)) {
t.Errorf("expected: cdnName '" + string(cdnName) + "', actual: missing")
}
if !strings.Contains(txt, toToolName) {
t.Errorf("expected: toToolName '" + toToolName + "', actual: missing")
}
if !strings.Contains(txt, toURL) {
t.Errorf("expected: toURL '" + toURL + "', actual: missing")
}
if !strings.HasPrefix(strings.TrimSpace(txt), "#") {
t.Errorf("expected: header comment, actual: missing")
}
if strings.Contains(txt, "mycacheurl") {
t.Errorf("expected: regex remap to not contain cacheurl, actual: '%v'", txt)
}
if strings.Contains(txt, "myorigin") {
t.Errorf("expected: regex remap to not contain org server fqdn, actual: '%v'", txt)
}
if !strings.Contains(txt, "myregexremap") {
t.Errorf("expected: regex remap to contain regex remap, actual: '%v'", txt)
}
if strings.Contains(txt, "othercacheurl") {
t.Errorf("expected: regex remap to not contain other cacheurl, actual: '%v'", txt)
}
if strings.Contains(txt, "otherorigin") {
t.Errorf("expected: regex remap to not contain other org server fqdn, actual: '%v'", txt)
}
if strings.Contains(txt, "otherregexremap") {
t.Errorf("expected: regex remap to not contain other regex remap, actual: '%v'", txt)
}
}
func TestMakeRegexRemapDotConfigReplaceReturns(t *testing.T) {
cdnName := tc.CDNName("mycdn")
toToolName := "my-to"
toURL := "my-to.example.net"
fileName := "regex_remap_myds.config"
dses := map[tc.DeliveryServiceName]CDNDS{
"myds": CDNDS{
OrgServerFQDN: "https://myorigin.example.net", // DS "origin_server_fqdn" is actually a URL including the scheme, the name is wrong.
QStringIgnore: 0,
CacheURL: "https://mycacheurl.net",
RegexRemap: "myregexremap__RETURN__mypostnewline",
},
}
txt := MakeRegexRemapDotConfig(cdnName, toToolName, toURL, fileName, dses)
if !strings.Contains(txt, string(cdnName)) {
t.Errorf("expected: cdnName '" + string(cdnName) + "', actual: missing")
}
if !strings.Contains(txt, toToolName) {
t.Errorf("expected: toToolName '" + toToolName + "', actual: missing")
}
if !strings.Contains(txt, toURL) {
t.Errorf("expected: toURL '" + toURL + "', actual: missing")
}
if !strings.HasPrefix(strings.TrimSpace(txt), "#") {
t.Errorf("expected: header comment, actual: missing")
}
if strings.Contains(txt, "mycacheurl") {
t.Errorf("expected: regex remap to not contain cacheurl, actual: '%v'", txt)
}
if strings.Contains(txt, "myorigin") {
t.Errorf("expected: regex remap to not contain org server fqdn, actual: '%v'", txt)
}
if !strings.Contains(txt, "myregexremap") {
t.Errorf("expected: regex remap to contain regex remap, actual: '%v'", txt)
}
if strings.Contains(txt, "__RETURN__") {
t.Errorf("expected: regex remap to replace __RETURN__, actual: '%v'", txt)
}
if !strings.Contains(txt, "myregexremap\nmypostnewline") |
}
| {
t.Errorf("expected: regex remap to replace __RETURN__ with newline, actual: '%v'", txt)
} |
space.rs | use model::{Space, NewSpace};
use managers::db_manager::*;
use schema::spaces::dsl::*;
use diesel::{LoadDsl, FilterDsl, ExpressionMethods, ExecuteDsl};
use diesel::result::Error;
use diesel::result::Error::{NotFound, DatabaseError};
use diesel::result::DatabaseErrorKind::UniqueViolation;
use diesel;
use std::ops::Deref;
use rustc_serialize::{Encodable, Encoder};
use errors::INError;
impl Encodable for Space {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_struct("Space", 2, |s| {
try!(s.emit_struct_field("name", 0, |s| {
s.emit_str(&self.name)
}));
try!(s.emit_struct_field("owner", 1, |s| {
s.emit_str(&self.owner)
}));
Ok(())
})
}
}
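// Note: the impl above emits only `name` and `owner`; the `public` flag is not
// part of the encoded form. As an illustration (hypothetical values), a
// Space { name: "demo", owner: "alice", public: true } run through a JSON
// encoder would therefore serialize to something like {"name":"demo","owner":"alice"}.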
impl Space {
pub fn list_public_spaces() -> Result<Vec<Space>, INError> {
let db = DB_CONNECTION.lock().unwrap();
let results = spaces.filter(public.eq(true)).load(db.deref());
match results {
Err(_) => Err(INError::fatal(1, "An error occurred while accessing the database.")),
Ok(res) => Ok(res),
}
}
pub fn add_space(space: &NewSpace) -> Option<INError> {
Space::_add_space(space, true)
}
pub fn add_private_space(space: &NewSpace) -> Option<INError> {
Space::_add_space(space, false)
}
fn _add_space(space: &NewSpace, is_public: bool) -> Option<INError> {
let db = DB_CONNECTION.lock().unwrap();
let new_space = Space {
name: space.name.to_string(),
owner: space.owner.to_string(),
public: is_public,
};
let result: Result<String, Error> = diesel::insert(&new_space)
.into(spaces)
.returning(name)
.get_result(db.deref());
match result {
Err(err) => match err {
DatabaseError(kind, _) => match kind {
UniqueViolation => Some(INError::new(300, "This space is already registered.")),
_ => Some(INError::fatal(1, "An error occurred while accessing the database.")),
},
_ => Some(INError::fatal(1, "An error occurred while accessing the database.")),
},
Ok(_) => None,
}
}
pub fn delete_space(space_name: String) -> Option<INError> {
let db = DB_CONNECTION.lock().unwrap();
let result: Result<Space, Error> = spaces
.filter(name.eq(&space_name))
.get_result(db.deref());
match result {
Err(err) => match err {
NotFound => return Some(INError::new(301, "This space doesn't exist.")),
_ => return Some(INError::fatal(1, "An error occurred while accessing the database.")),
},
_ => {},
} | .execute(db.deref());
None
}
} |
diesel::delete(spaces.filter(public.eq(true))
.filter(name.eq(&space_name))) |
strings.go | package conditions
import (
"fmt"
"reflect"
"strings"
shipper "github.com/bookingcom/shipper/pkg/apis/shipper/v1alpha1"
)
func | (ci interface{}) string {
if ci == nil || reflect.ValueOf(ci).IsNil() {
return ""
}
var chunks []string
switch c := ci.(type) {
case *shipper.ApplicationCondition:
chunks = []string{
fmt.Sprintf("%v", c.Type),
fmt.Sprintf("%v", c.Status),
c.Reason,
c.Message,
}
case *shipper.ReleaseCondition:
chunks = []string{
fmt.Sprintf("%v", c.Type),
fmt.Sprintf("%v", c.Status),
c.Reason,
c.Message,
}
case *shipper.ClusterCapacityCondition:
chunks = []string{
fmt.Sprintf("%v", c.Type),
fmt.Sprintf("%v", c.Status),
c.Reason,
c.Message,
}
case *shipper.ClusterInstallationCondition:
chunks = []string{
fmt.Sprintf("%v", c.Type),
fmt.Sprintf("%v", c.Status),
c.Reason,
c.Message,
}
case *shipper.ClusterTrafficCondition:
chunks = []string{
fmt.Sprintf("%v", c.Type),
fmt.Sprintf("%v", c.Status),
c.Reason,
c.Message,
}
default:
chunks = []string{fmt.Sprintf("Condition %v is not classified", ci)}
}
b := strings.Builder{}
for _, ch := range chunks {
if len(ch) > 0 {
if b.Len() > 0 {
b.WriteByte(' ')
}
b.WriteString(ch)
}
}
return b.String()
}
| CondStr |
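// Example (illustrative): a *shipper.ApplicationCondition with Type=Ready,
// Status=True, an empty Reason, and Message="all clear" yields the string
// "Ready True all clear"; empty chunks are skipped before joining, so the
// result never contains double spaces.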
lib.deno.ns.d.ts | // Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
/// <reference no-default-lib="true" />
/// <reference lib="esnext" />
declare namespace Deno {
/** A set of error constructors that are raised by Deno APIs. */
export const errors: {
NotFound: ErrorConstructor;
PermissionDenied: ErrorConstructor;
ConnectionRefused: ErrorConstructor;
ConnectionReset: ErrorConstructor;
ConnectionAborted: ErrorConstructor;
NotConnected: ErrorConstructor;
AddrInUse: ErrorConstructor;
AddrNotAvailable: ErrorConstructor;
BrokenPipe: ErrorConstructor;
AlreadyExists: ErrorConstructor;
InvalidData: ErrorConstructor;
TimedOut: ErrorConstructor;
Interrupted: ErrorConstructor;
WriteZero: ErrorConstructor;
UnexpectedEof: ErrorConstructor;
BadResource: ErrorConstructor;
Http: ErrorConstructor;
Busy: ErrorConstructor;
};
/** The current process id of the runtime. */
export let pid: number;
/** Reflects the `NO_COLOR` environment variable.
*
* See: https://no-color.org/ */
export let noColor: boolean;
export interface TestDefinition {
fn: () => void | Promise<void>;
name: string;
ignore?: boolean;
/** Check that the number of async ops completed after the test is the same
* as the number of ops dispatched. Defaults to `true`. */
sanitizeOps?: boolean;
/** Ensure the test case does not "leak" resources - i.e. the resource table
* after the test has exactly the same contents as before the test. Defaults
* to `true`. */
sanitizeResources?: boolean;
}
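// Example (illustrative): opting out of the op sanitizer for a test that
// intentionally leaves an async op pending; both sanitizers default to true,
// so they only need to be spelled out when switched off.
//
//   Deno.test({
//     name: "fire and forget",
//     sanitizeOps: false,
//     fn() {
//       setTimeout(() => {}, 10_000); // timer op still pending when fn returns
//     },
//   });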
/** Register a test which will be run when `deno test` is used on the command
* line and the containing module looks like a test module.
* `fn` can be async if required.
* ```ts
* import {assert, fail, assertEquals} from "https://deno.land/std/testing/asserts.ts";
*
* Deno.test({
* name: "example test",
* fn(): void {
* assertEquals("world", "world");
* },
* });
*
* Deno.test({
* name: "example ignored test",
* ignore: Deno.build.os === "windows",
* fn(): void {
* // This test is ignored only on Windows machines
* },
* });
*
* Deno.test({
* name: "example async test",
* async fn() {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world")
* }
* });
* ```
*/
export function test(t: TestDefinition): void;
/** Register a test which will be run when `deno test` is used on the command
* line and the containing module looks like a test module.
* `fn` can be async if required.
*
* ```ts
* import {assert, fail, assertEquals} from "https://deno.land/std/testing/asserts.ts";
*
* Deno.test("My test description", ():void => {
* assertEquals("hello", "hello");
* });
*
* Deno.test("My async test description", async ():Promise<void> => {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world")
* });
* ```
* */
export function test(name: string, fn: () => void | Promise<void>): void;
/** Exit the Deno process with optional exit code. If no exit code is supplied
* then Deno will exit with return code of 0.
*
* ```ts
* Deno.exit(5);
* ```
*/
export function exit(code?: number): never;
export const env: {
/** Retrieve the value of an environment variable. Returns undefined if that
* key doesn't exist.
*
* ```ts
* console.log(Deno.env.get("HOME")); // e.g. outputs "/home/alice"
* console.log(Deno.env.get("MADE_UP_VAR")); // outputs "undefined"
* ```
* Requires `allow-env` permission. */
get(key: string): string | undefined;
/** Set the value of an environment variable.
*
* ```ts
* Deno.env.set("SOME_VAR", "Value");
* Deno.env.get("SOME_VAR"); // outputs "Value"
* ```
*
* Requires `allow-env` permission. */
set(key: string, value: string): void;
/** Returns a snapshot of the environment variables at invocation.
*
* ```ts
* Deno.env.set("TEST_VAR", "A");
* const myEnv = Deno.env.toObject();
* console.log(myEnv.SHELL);
* Deno.env.set("TEST_VAR", "B");
* console.log(myEnv.TEST_VAR); // outputs "A"
* ```
*
* Requires `allow-env` permission. */
toObject(): { [index: string]: string };
};
/**
* Returns the path to the current deno executable.
*
* ```ts
* console.log(Deno.execPath()); // e.g. "/home/alice/.local/bin/deno"
* ```
*
* Requires `allow-read` permission.
*/
export function execPath(): string;
/**
* Change the current working directory to the specified path.
*
* ```ts
* Deno.chdir("/home/userA");
* Deno.chdir("../userB");
* Deno.chdir("C:\\Program Files (x86)\\Java");
* ```
*
* Throws `Deno.errors.NotFound` if directory not found.
* Throws `Deno.errors.PermissionDenied` if the user does not have access
* rights.
*
* Requires --allow-read.
*/
export function chdir(directory: string): void;
/**
* Return a string representing the current working directory.
*
* If the current directory can be reached via multiple paths (due to symbolic
* links), `cwd()` may return any one of them.
*
* ```ts
* const currentWorkingDirectory = Deno.cwd();
* ```
*
* Throws `Deno.errors.NotFound` if directory not available.
*
* Requires --allow-read
*/
export function cwd(): string;
export enum SeekMode {
Start = 0,
Current = 1,
End = 2,
}
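// Note: the numeric values match the POSIX `whence` constants
// (SEEK_SET = 0, SEEK_CUR = 1, SEEK_END = 2), so a `SeekMode` passes through
// APIs that expect the raw numbers unchanged.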
export interface Reader {
/** Reads up to `p.byteLength` bytes into `p`. It resolves to the number of
* bytes read (`0` < `n` <= `p.byteLength`) and rejects if any error is
* encountered. Even if `read()` resolves to `n` < `p.byteLength`, it may
* use all of `p` as scratch space during the call. If some data is
* available but not `p.byteLength` bytes, `read()` conventionally resolves
* to what is available instead of waiting for more.
*
* When `read()` encounters end-of-file condition, it resolves to EOF
* (`null`).
*
* When `read()` encounters an error, it rejects with an error.
*
* Callers should always process the `n` > `0` bytes returned before
* considering the EOF (`null`). Doing so correctly handles I/O errors that
* happen after reading some bytes and also both of the allowed EOF
* behaviors.
*
* Implementations should not retain a reference to `p`.
*
* Use Deno.iter() to turn a Reader into an AsyncIterator.
*/
read(p: Uint8Array): Promise<number | null>;
}
export interface ReaderSync {
/** Reads up to `p.byteLength` bytes into `p`. It returns the number
* of bytes read (`0` < `n` <= `p.byteLength`) and throws if any error is
* encountered. Even if `readSync()` returns `n` < `p.byteLength`, it may use
* all of `p` as scratch space during the call. If some data is available
* but not `p.byteLength` bytes, `readSync()` conventionally returns what is
* available instead of waiting for more.
*
* When `readSync()` encounters end-of-file condition, it returns EOF
* (`null`).
*
* When `readSync()` encounters an error, it throws with an error.
*
* Callers should always process the `n` > `0` bytes returned before
* considering the EOF (`null`). Doing so correctly handles I/O errors that happen
* after reading some bytes and also both of the allowed EOF behaviors.
*
* Implementations should not retain a reference to `p`.
*
* Use Deno.iterSync() to turn a ReaderSync into an Iterator.
*/
readSync(p: Uint8Array): number | null;
}
export interface Writer {
/** Writes `p.byteLength` bytes from `p` to the underlying data stream. It
* resolves to the number of bytes written from `p` (`0` <= `n` <=
* `p.byteLength`) or reject with the error encountered that caused the
* write to stop early. `write()` must reject with a non-null error if
* would resolve to `n` < `p.byteLength`. `write()` must not modify the
* slice data, even temporarily.
*
* Implementations should not retain a reference to `p`.
*/
write(p: Uint8Array): Promise<number>;
}
export interface WriterSync {
/** Writes `p.byteLength` bytes from `p` to the underlying data
* stream. It returns the number of bytes written from `p` (`0` <= `n`
* <= `p.byteLength`) or throws the error encountered that caused the write to
* stop early. `writeSync()` must throw a non-null error if it returns `n` <
* `p.byteLength`. `writeSync()` must not modify the slice data, even
* temporarily.
*
* Implementations should not retain a reference to `p`.
*/
writeSync(p: Uint8Array): number;
}
export interface Closer {
close(): void;
}
export interface Seeker {
/** Seek sets the offset for the next `read()` or `write()` to offset,
* interpreted according to `whence`: `Start` means relative to the
* start of the file, `Current` means relative to the current offset,
* and `End` means relative to the end. Seek resolves to the new offset
* relative to the start of the file.
*
* Seeking to an offset before the start of the file is an error. Seeking to
* any positive offset is legal, but the behavior of subsequent I/O
* operations on the underlying object is implementation-dependent.
* It resolves to the new cursor position.
*/
seek(offset: number, whence: SeekMode): Promise<number>;
}
export interface SeekerSync {
/** Seek sets the offset for the next `readSync()` or `writeSync()` to
* offset, interpreted according to `whence`: `Start` means relative
* to the start of the file, `Current` means relative to the current
* offset, and `End` means relative to the end.
*
* Seeking to an offset before the start of the file is an error. Seeking to
* any positive offset is legal, but the behavior of subsequent I/O
* operations on the underlying object is implementation-dependent.
*/
seekSync(offset: number, whence: SeekMode): number;
}
/** Copies from `src` to `dst` until either EOF (`null`) is read from `src` or
* an error occurs. It resolves to the number of bytes copied or rejects with
* the first error encountered while copying.
*
* ```ts
* const source = await Deno.open("my_file.txt");
* const buffer = new Deno.Buffer()
* const bytesCopied1 = await Deno.copy(source, Deno.stdout);
* const bytesCopied2 = await Deno.copy(source, buffer);
* ```
*
* @param src The source to copy from
* @param dst The destination to copy to
* @param options Can be used to tune size of the buffer. Default size is 32kB
*/
export function copy(
src: Reader,
dst: Writer,
options?: {
bufSize?: number;
}
): Promise<number>;
/** Turns a Reader, `r`, into an async iterator.
*
* ```ts
* let f = await Deno.open("/etc/passwd");
* for await (const chunk of Deno.iter(f)) {
* console.log(chunk);
* }
* f.close();
* ```
*
* Second argument can be used to tune size of a buffer.
* Default size of the buffer is 32kB.
*
* ```ts
* let f = await Deno.open("/etc/passwd");
* const iter = Deno.iter(f, {
* bufSize: 1024 * 1024
* });
* for await (const chunk of iter) {
* console.log(chunk);
* }
* f.close();
* ```
*
* Iterator uses an internal buffer of fixed size for efficiency; it returns
* a view on that buffer on each iteration. It is therefore the caller's
* responsibility to copy the contents of the buffer if needed; otherwise the
* next iteration will overwrite the contents of the previously returned chunk.
*/
export function iter(
r: Reader,
options?: {
bufSize?: number;
}
): AsyncIterableIterator<Uint8Array>;
/** Turns a ReaderSync, `r`, into an iterator.
*
* ```ts
* let f = Deno.openSync("/etc/passwd");
* for (const chunk of Deno.iterSync(reader)) {
* console.log(chunk);
* }
* f.close();
* ```
*
* Second argument can be used to tune size of a buffer.
* Default size of the buffer is 32kB.
*
* ```ts
* let f = await Deno.open("/etc/passwd");
* const iter = Deno.iterSync(f, {
* bufSize: 1024 * 1024
* });
* for (const chunk of iter) {
* console.log(chunk);
* }
* f.close();
* ```
*
* Iterator uses an internal buffer of fixed size for efficiency; it returns
* a view on that buffer on each iteration. It is therefore the caller's
* responsibility to copy the contents of the buffer if needed; otherwise the
* next iteration will overwrite the contents of the previously returned chunk.
*/
export function iterSync(
r: ReaderSync,
options?: {
bufSize?: number;
}
): IterableIterator<Uint8Array>;
/** Synchronously open a file and return an instance of `Deno.File`. The
* file does not need to previously exist if using the `create` or `createNew`
* open options. It is the caller's responsibility to close the file when finished
* with it.
*
* ```ts
* const file = Deno.openSync("/foo/bar.txt", { read: true, write: true });
* // Do work with file
* Deno.close(file.rid);
* ```
*
* Requires `allow-read` and/or `allow-write` permissions depending on options.
*/
export function openSync(path: string, options?: OpenOptions): File;
/** Open a file and resolve to an instance of `Deno.File`. The
* file does not need to previously exist if using the `create` or `createNew`
* open options. It is the caller's responsibility to close the file when finished
* with it.
*
* ```ts
* const file = await Deno.open("/foo/bar.txt", { read: true, write: true });
* // Do work with file
* Deno.close(file.rid);
* ```
*
* Requires `allow-read` and/or `allow-write` permissions depending on options.
*/
export function open(path: string, options?: OpenOptions): Promise<File>;
/** Creates a file if none exists or truncates an existing file and returns
* an instance of `Deno.File`.
*
* ```ts
* const file = Deno.createSync("/foo/bar.txt");
* ```
*
* Requires `allow-read` and `allow-write` permissions.
*/
export function createSync(path: string): File;
/** Creates a file if none exists or truncates an existing file and resolves to
* an instance of `Deno.File`.
*
* ```ts
* const file = await Deno.create("/foo/bar.txt");
* ```
*
* Requires `allow-read` and `allow-write` permissions.
*/
export function create(path: string): Promise<File>;
/** Synchronously read from a resource ID (`rid`) into an array buffer (`buffer`).
*
* Returns either the number of bytes read during the operation or EOF
* (`null`) if there was nothing more to read.
*
* It is possible for a read to successfully return with `0` bytes. This does
* not indicate EOF.
*
* ```ts
* // if "/foo/bar.txt" contains the text "hello world":
* const file = Deno.openSync("/foo/bar.txt");
* const buf = new Uint8Array(100);
* const numberOfBytesRead = Deno.readSync(file.rid, buf); // 11 bytes
* const text = new TextDecoder().decode(buf); // "hello world"
* Deno.close(file.rid);
* ```
*/
export function readSync(rid: number, buffer: Uint8Array): number | null;
/** Read from a resource ID (`rid`) into an array buffer (`buffer`).
*
* Resolves to either the number of bytes read during the operation or EOF
* (`null`) if there was nothing more to read.
*
* It is possible for a read to successfully return with `0` bytes. This does
* not indicate EOF.
*
* ```ts
* // if "/foo/bar.txt" contains the text "hello world":
* const file = await Deno.open("/foo/bar.txt");
* const buf = new Uint8Array(100);
* const numberOfBytesRead = await Deno.read(file.rid, buf); // 11 bytes
* const text = new TextDecoder().decode(buf); // "hello world"
* Deno.close(file.rid);
* ```
*/
export function read(rid: number, buffer: Uint8Array): Promise<number | null>;
/** Synchronously write to the resource ID (`rid`) the contents of the array
* buffer (`data`).
*
* Returns the number of bytes written.
*
* ```ts
* const encoder = new TextEncoder();
* const data = encoder.encode("Hello world");
* const file = Deno.openSync("/foo/bar.txt");
* const bytesWritten = Deno.writeSync(file.rid, data); // 11
* Deno.close(file.rid);
* ```
*/
export function writeSync(rid: number, data: Uint8Array): number;
/** Write to the resource ID (`rid`) the contents of the array buffer (`data`).
*
* Resolves to the number of bytes written.
*
* ```ts
* const encoder = new TextEncoder();
* const data = encoder.encode("Hello world");
* const file = await Deno.open("/foo/bar.txt");
* const bytesWritten = await Deno.write(file.rid, data); // 11
* Deno.close(file.rid);
* ```
*/
export function write(rid: number, data: Uint8Array): Promise<number>;
/** Synchronously seek a resource ID (`rid`) to the given `offset` under mode
* given by `whence`. The new position within the resource (bytes from the
* start) is returned.
*
* ```ts
* const file = Deno.openSync('hello.txt', {read: true, write: true, truncate: true, create: true});
* Deno.writeSync(file.rid, new TextEncoder().encode("Hello world"));
* // advance cursor 6 bytes
* const cursorPosition = Deno.seekSync(file.rid, 6, Deno.SeekMode.Start);
* console.log(cursorPosition); // 6
* const buf = new Uint8Array(100);
* file.readSync(buf);
* console.log(new TextDecoder().decode(buf)); // "world"
* ```
*
* The seek modes work as follows:
*
* ```ts
* // Given file.rid pointing to file with "Hello world", which is 11 bytes long:
* // Seek 6 bytes from the start of the file
* console.log(Deno.seekSync(file.rid, 6, Deno.SeekMode.Start)); // "6"
* // Seek 2 more bytes from the current position
* console.log(Deno.seekSync(file.rid, 2, Deno.SeekMode.Current)); // "8"
* // Seek backwards 2 bytes from the end of the file
* console.log(Deno.seekSync(file.rid, -2, Deno.SeekMode.End)); // "9" (e.g. 11-2)
* ```
*/
export function seekSync(
rid: number,
offset: number,
whence: SeekMode
): number;
/** Seek a resource ID (`rid`) to the given `offset` under mode given by `whence`.
* The call resolves to the new position within the resource (bytes from the start).
*
* ```ts
* const file = await Deno.open('hello.txt', {read: true, write: true, truncate: true, create: true});
* await Deno.write(file.rid, new TextEncoder().encode("Hello world"));
* // advance cursor 6 bytes
* const cursorPosition = await Deno.seek(file.rid, 6, Deno.SeekMode.Start);
* console.log(cursorPosition); // 6
* const buf = new Uint8Array(100);
* await file.read(buf);
* console.log(new TextDecoder().decode(buf)); // "world"
* ```
*
* The seek modes work as follows:
*
* ```ts
* // Given file.rid pointing to file with "Hello world", which is 11 bytes long:
* // Seek 6 bytes from the start of the file
* console.log(await Deno.seek(file.rid, 6, Deno.SeekMode.Start)); // "6"
* // Seek 2 more bytes from the current position
* console.log(await Deno.seek(file.rid, 2, Deno.SeekMode.Current)); // "8"
* // Seek backwards 2 bytes from the end of the file
* console.log(await Deno.seek(file.rid, -2, Deno.SeekMode.End)); // "9" (e.g. 11-2)
* ```
*/
export function seek(
rid: number,
offset: number,
whence: SeekMode
): Promise<number>;
/** Close the given resource ID (rid) which has been previously opened, such
* as via opening or creating a file. Closing a file when you are finished
* with it is important to avoid leaking resources.
*
* ```ts
* const file = await Deno.open("my_file.txt");
* // do work with "file" object
* Deno.close(file.rid);
* ````
*/
export function close(rid: number): void;
/** The Deno abstraction for reading and writing files. */
export class File
implements
Reader,
ReaderSync,
Writer,
WriterSync,
Seeker,
SeekerSync,
Closer {
readonly rid: number;
constructor(rid: number);
write(p: Uint8Array): Promise<number>;
writeSync(p: Uint8Array): number;
read(p: Uint8Array): Promise<number | null>;
readSync(p: Uint8Array): number | null;
seek(offset: number, whence: SeekMode): Promise<number>;
seekSync(offset: number, whence: SeekMode): number;
close(): void;
}
/** A handle for `stdin`. */
export const stdin: Reader & ReaderSync & Closer & { rid: number };
/** A handle for `stdout`. */
export const stdout: Writer & WriterSync & Closer & { rid: number };
/** A handle for `stderr`. */
export const stderr: Writer & WriterSync & Closer & { rid: number };
export interface OpenOptions {
/** Sets the option for read access. This option, when `true`, means that the
* file should be read-able if opened. */
read?: boolean;
/** Sets the option for write access. This option, when `true`, means that
* the file should be write-able if opened. If the file already exists,
* any write calls on it will overwrite its contents, by default without
* truncating it. */
write?: boolean;
/** Sets the option for the append mode. This option, when `true`, means that
* writes will append to a file instead of overwriting previous contents.
* Note that setting `{ write: true, append: true }` has the same effect as
* setting only `{ append: true }`. */
append?: boolean;
/** Sets the option for truncating a previous file. If a file is
* successfully opened with this option set it will truncate the file to `0`
* size if it already exists. The file must be opened with write access
* for truncate to work. */
truncate?: boolean;
/** Sets the option to allow creating a new file, if one doesn't already
* exist at the specified path. Requires write or append access to be
* used. */
create?: boolean;
/** Defaults to `false`. If set to `true`, no file, directory, or symlink is
* allowed to exist at the target location. Requires write or append
* access to be used. When createNew is set to `true`, create and truncate
* are ignored. */
createNew?: boolean;
/** Permissions to use if creating the file (defaults to `0o666`, before
* the process's umask).
* Ignored on Windows. */
mode?: number;
}
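// Example (illustrative): combining the options above to append to a log file,
// creating it on first use with owner-only permissions.
//
//   const log = await Deno.open("app.log", {
//     write: true,
//     create: true,
//     append: true,
//     mode: 0o600,
//   });
//   await Deno.writeAll(log, new TextEncoder().encode("started\n"));
//   Deno.close(log.rid);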
/**
*
* Check if a given resource id (`rid`) is a TTY.
*
* ```ts
* // This example is system and context specific
* const nonTTYRid = Deno.openSync("my_file.txt").rid;
* const ttyRid = Deno.openSync("/dev/tty6").rid;
* console.log(Deno.isatty(nonTTYRid)); // false
* console.log(Deno.isatty(ttyRid)); // true
* Deno.close(nonTTYRid);
* Deno.close(ttyRid);
* ```
*/
export function isatty(rid: number): boolean;
/** A variable-sized buffer of bytes with `read()` and `write()` methods.
*
* Deno.Buffer is almost always used with some I/O like files and sockets. It
* allows one to buffer up a download from a socket. Buffer grows and shrinks
* as necessary.
*
* Deno.Buffer is NOT the same thing as Node's Buffer. Node's Buffer was
* created in 2009 before JavaScript had the concept of ArrayBuffers. It's
* simply a non-standard ArrayBuffer.
*
* ArrayBuffer is a fixed memory allocation. Deno.Buffer is implemented on top
* of ArrayBuffer.
*
* Based on [Go Buffer](https://golang.org/pkg/bytes/#Buffer). */
export class Buffer implements Reader, ReaderSync, Writer, WriterSync {
constructor(ab?: ArrayBuffer);
/** Returns a slice holding the unread portion of the buffer.
*
* The slice is valid for use only until the next buffer modification (that
* is, only until the next call to a method like `read()`, `write()`,
* `reset()`, or `truncate()`). The slice aliases the buffer content at
* least until the next buffer modification, so immediate changes to the
* slice will affect the result of future reads. */
bytes(): Uint8Array;
/** Returns whether the unread portion of the buffer is empty. */
empty(): boolean;
/** A read only number of bytes of the unread portion of the buffer. */
readonly length: number;
/** The read only capacity of the buffer's underlying byte slice, that is,
* the total space allocated for the buffer's data. */
readonly capacity: number;
/** Discards all but the first `n` unread bytes from the buffer but
* continues to use the same allocated storage. It throws if `n` is
* negative or greater than the length of the buffer. */
truncate(n: number): void;
/** Resets the buffer to be empty, but it retains the underlying storage for
* use by future writes. `.reset()` is the same as `.truncate(0)`. */
reset(): void;
/** Reads the next `p.length` bytes from the buffer or until the buffer is
* drained. Returns the number of bytes read. If the buffer has no data to
* return, the return is EOF (`null`). */
readSync(p: Uint8Array): number | null;
/** Reads the next `p.length` bytes from the buffer or until the buffer is
* drained. Resolves to the number of bytes read. If the buffer has no
* data to return, resolves to EOF (`null`).
*
* NOTE: This method reads bytes synchronously; it's provided for
* compatibility with the `Reader` interface.
*/
read(p: Uint8Array): Promise<number | null>;
writeSync(p: Uint8Array): number;
/** NOTE: This method writes bytes synchronously; it's provided for
* compatibility with the `Writer` interface. */
write(p: Uint8Array): Promise<number>;
/** Grows the buffer's capacity, if necessary, to guarantee space for
* another `n` bytes. After `.grow(n)`, at least `n` bytes can be written to
* the buffer without another allocation. If `n` is negative, `.grow()` will
* throw. If the buffer can't grow it will throw an error.
*
* Based on Go Lang's
* [Buffer.Grow](https://golang.org/pkg/bytes/#Buffer.Grow). */
grow(n: number): void;
/** Reads data from `r` until EOF (`null`) and appends it to the buffer,
* growing the buffer as needed. It resolves to the number of bytes read.
* If the buffer becomes too large, `.readFrom()` will reject with an error.
*
* Based on Go Lang's
* [Buffer.ReadFrom](https://golang.org/pkg/bytes/#Buffer.ReadFrom). */
readFrom(r: Reader): Promise<number>;
/** Reads data from `r` until EOF (`null`) and appends it to the buffer,
* growing the buffer as needed. It returns the number of bytes read. If the
* buffer becomes too large, `.readFromSync()` will throw an error.
*
* Based on Go Lang's
* [Buffer.ReadFrom](https://golang.org/pkg/bytes/#Buffer.ReadFrom). */
readFromSync(r: ReaderSync): number;
}
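// Example (illustrative): Deno.Buffer as an in-memory sink and source.
//
//   const buf = new Deno.Buffer();
//   await Deno.writeAll(buf, new TextEncoder().encode("hello"));
//   console.log(buf.length); // 5
//   const out = new Uint8Array(5);
//   await buf.read(out); // drains the unread portion
//   console.log(new TextDecoder().decode(out)); // "hello"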
/** Read Reader `r` until EOF (`null`) and resolve to the content as
* Uint8Array`.
*
* ```ts
* // Example from stdin
* const stdinContent = await Deno.readAll(Deno.stdin);
*
* // Example from file
* const file = await Deno.open("my_file.txt", {read: true});
* const myFileContent = await Deno.readAll(file);
* Deno.close(file.rid);
*
* // Example from buffer
* const myData = new Uint8Array(100);
* // ... fill myData array with data
* const reader = new Deno.Buffer(myData.buffer as ArrayBuffer);
* const bufferContent = await Deno.readAll(reader);
* ```
*/
export function readAll(r: Reader): Promise<Uint8Array>;
/** Synchronously reads Reader `r` until EOF (`null`) and returns the content
* as `Uint8Array`.
*
* ```ts
* // Example from stdin
* const stdinContent = Deno.readAllSync(Deno.stdin);
*
* // Example from file
* const file = Deno.openSync("my_file.txt", {read: true});
* const myFileContent = Deno.readAllSync(file);
* Deno.close(file.rid);
*
* // Example from buffer
* const myData = new Uint8Array(100);
* // ... fill myData array with data
* const reader = new Deno.Buffer(myData.buffer as ArrayBuffer);
* const bufferContent = Deno.readAllSync(reader);
* ```
*/
export function readAllSync(r: ReaderSync): Uint8Array;
/** Write all the content of the array buffer (`arr`) to the writer (`w`).
*
* ```ts
* // Example writing to stdout
* const contentBytes = new TextEncoder().encode("Hello World");
* await Deno.writeAll(Deno.stdout, contentBytes);
*
* // Example writing to file
* const contentBytes = new TextEncoder().encode("Hello World");
* const file = await Deno.open('test.file', {write: true});
* await Deno.writeAll(file, contentBytes);
* Deno.close(file.rid);
*
* // Example writing to buffer
* const contentBytes = new TextEncoder().encode("Hello World");
* const writer = new Deno.Buffer();
* await Deno.writeAll(writer, contentBytes);
* console.log(writer.bytes().length); // 11
* ```
*/
export function writeAll(w: Writer, arr: Uint8Array): Promise<void>;
/** Synchronously write all the content of the array buffer (`arr`) to the
* writer (`w`).
*
* ```ts
* // Example writing to stdout
* const contentBytes = new TextEncoder().encode("Hello World");
* Deno.writeAllSync(Deno.stdout, contentBytes);
*
* // Example writing to file
* const contentBytes = new TextEncoder().encode("Hello World");
* const file = Deno.openSync('test.file', {write: true});
* Deno.writeAllSync(file, contentBytes);
* Deno.close(file.rid);
*
* // Example writing to buffer
* const contentBytes = new TextEncoder().encode("Hello World");
* const writer = new Deno.Buffer();
* Deno.writeAllSync(writer, contentBytes);
* console.log(writer.bytes().length); // 11
* ```
*/
export function writeAllSync(w: WriterSync, arr: Uint8Array): void;
export interface MkdirOptions {
/** Defaults to `false`. If set to `true`, means that any intermediate
* directories will also be created (as with the shell command `mkdir -p`).
* Intermediate directories are created with the same permissions.
* When recursive is set to `true`, succeeds silently (without changing any
* permissions) if a directory already exists at the path, or if the path
* is a symlink to an existing directory. */
recursive?: boolean;
/** Permissions to use when creating the directory (defaults to `0o777`,
* before the process's umask).
* Ignored on Windows. */
mode?: number;
}
/** Synchronously creates a new directory with the specified path.
*
* ```ts
* Deno.mkdirSync("new_dir");
* Deno.mkdirSync("nested/directories", { recursive: true });
* Deno.mkdirSync("restricted_access_dir", { mode: 0o700 });
* ```
*
* Defaults to throwing error if the directory already exists.
*
* Requires `allow-write` permission. */
export function mkdirSync(path: string, options?: MkdirOptions): void;
/** Creates a new directory with the specified path.
*
* ```ts
* await Deno.mkdir("new_dir");
* await Deno.mkdir("nested/directories", { recursive: true });
* await Deno.mkdir("restricted_access_dir", { mode: 0o700 });
* ```
*
* Defaults to throwing error if the directory already exists.
*
* Requires `allow-write` permission. */
export function mkdir(path: string, options?: MkdirOptions): Promise<void>;
export interface MakeTempOptions {
/** Directory where the temporary directory should be created (defaults to
* the env variable TMPDIR, or the system's default, usually /tmp). */
dir?: string;
/** String that should precede the random portion of the temporary
* directory's name. */
prefix?: string;
/** String that should follow the random portion of the temporary
* directory's name. */
suffix?: string;
}
/** Synchronously creates a new temporary directory in the default directory
* for temporary files (see also `Deno.dir("temp")`), unless `dir` is specified.
* Other optional options include prefixing and suffixing the directory name
* with `prefix` and `suffix` respectively.
*
* The full path to the newly created directory is returned.
*
* Multiple programs calling this function simultaneously will create different
* directories. It is the caller's responsibility to remove the directory when
* no longer needed.
*
* ```ts
* const tempDirName0 = Deno.makeTempDirSync(); // e.g. /tmp/2894ea76
* const tempDirName1 = Deno.makeTempDirSync({ prefix: 'my_temp' }); // e.g. /tmp/my_temp339c944d
* ```
*
* Requires `allow-write` permission. */
// TODO(ry) Doesn't check permissions.
export function makeTempDirSync(options?: MakeTempOptions): string;
/** Creates a new temporary directory in the default directory for temporary
* files (see also `Deno.dir("temp")`), unless `dir` is specified. Other
* optional options include prefixing and suffixing the directory name with
* `prefix` and `suffix` respectively.
*
* This call resolves to the full path to the newly created directory.
*
* Multiple programs calling this function simultaneously will create different
* directories. It is the caller's responsibility to remove the directory when
* no longer needed.
*
* ```ts
* const tempDirName0 = await Deno.makeTempDir(); // e.g. /tmp/2894ea76
* const tempDirName1 = await Deno.makeTempDir({ prefix: 'my_temp' }); // e.g. /tmp/my_temp339c944d
* ```
*
* Requires `allow-write` permission. */
// TODO(ry) Doesn't check permissions.
export function makeTempDir(options?: MakeTempOptions): Promise<string>;
/** Synchronously creates a new temporary file in the default directory for
* temporary files (see also `Deno.dir("temp")`), unless `dir` is specified.
* Other optional options include prefixing and suffixing the directory name
* with `prefix` and `suffix` respectively.
*
* The full path to the newly created file is returned.
*
* Multiple programs calling this function simultaneously will create different
* files. It is the caller's responsibility to remove the file when no longer
* needed.
*
* ```ts
* const tempFileName0 = Deno.makeTempFileSync(); // e.g. /tmp/419e0bf2
* const tempFileName1 = Deno.makeTempFileSync({ prefix: 'my_temp' }); // e.g. /tmp/my_temp754d3098
* ```
*
* Requires `allow-write` permission. */
export function makeTempFileSync(options?: MakeTempOptions): string;
/** Creates a new temporary file in the default directory for temporary
* files (see also `Deno.dir("temp")`), unless `dir` is specified. Other
* optional options include prefixing and suffixing the directory name with
* `prefix` and `suffix` respectively.
*
* This call resolves to the full path to the newly created file.
*
* Multiple programs calling this function simultaneously will create different
* files. It is the caller's responsibility to remove the file when no longer
* needed.
*
* ```ts
* const tmpFileName0 = await Deno.makeTempFile(); // e.g. /tmp/419e0bf2
* const tmpFileName1 = await Deno.makeTempFile({ prefix: 'my_temp' }); // e.g. /tmp/my_temp754d3098
* ```
*
* Requires `allow-write` permission. */
export function makeTempFile(options?: MakeTempOptions): Promise<string>;
/** Synchronously changes the permission of a specific file/directory of
* specified path. Ignores the process's umask.
*
* ```ts
* Deno.chmodSync("/path/to/file", 0o666);
* ``` | *
* Requires `allow-write` permission. */
export function chmodSync(path: string, mode: number): void;
/** Changes the permission of a specific file/directory of specified path.
* Ignores the process's umask.
*
* ```ts
* await Deno.chmod("/path/to/file", 0o666);
* ```
*
* The mode is a sequence of 3 octal numbers. The first/left-most number
* specifies the permissions for the owner. The second number specifies the
* permissions for the group. The last/right-most number specifies the
* permissions for others. For example, with a mode of 0o764, the owner (7) can
* read/write/execute, the group (6) can read/write and everyone else (4) can
* read only.
*
* | Number | Description |
* | ------ | ----------- |
* | 7 | read, write, and execute |
* | 6 | read and write |
* | 5 | read and execute |
* | 4 | read only |
* | 3 | write and execute |
* | 2 | write only |
* | 1 | execute only |
* | 0 | no permission |
*
* NOTE: This API currently throws on Windows
*
* Requires `allow-write` permission. */
export function chmod(path: string, mode: number): Promise<void>;
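/* Editor's illustration of the octal table above (the path is hypothetical):
* a mode of 0o764 gives the owner read/write/execute (7), the group
* read/write (6), and everyone else read only (4).
*
* ```ts
* await Deno.chmod("/path/to/script.sh", 0o764);
* ```
*/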
/** Synchronously change owner of a regular file or directory. This functionality
* is not available on Windows.
*
* ```ts
* Deno.chownSync("myFile.txt", 1000, 1002);
* ```
*
* Requires `allow-write` permission.
*
* Throws Error (not implemented) if executed on Windows
*
* @param path path to the file
* @param uid user id (UID) of the new owner
* @param gid group id (GID) of the new owner
*/
export function chownSync(path: string, uid: number, gid: number): void;
/** Change owner of a regular file or directory. This functionality
* is not available on Windows.
*
* ```ts
* await Deno.chown("myFile.txt", 1000, 1002);
* ```
*
* Requires `allow-write` permission.
*
* Throws Error (not implemented) if executed on Windows
*
* @param path path to the file
* @param uid user id (UID) of the new owner
* @param gid group id (GID) of the new owner
*/
export function chown(path: string, uid: number, gid: number): Promise<void>;
export interface RemoveOptions {
/** Defaults to `false`. If set to `true`, path will be removed even if
* it's a non-empty directory. */
recursive?: boolean;
}
/** Synchronously removes the named file or directory.
*
* ```ts
* Deno.removeSync("/path/to/empty_dir/or/file");
* Deno.removeSync("/path/to/populated_dir/or/file", { recursive: true });
* ```
*
* Throws error if permission denied, path not found, or path is a non-empty
* directory and the `recursive` option isn't set to `true`.
*
* Requires `allow-write` permission. */
export function removeSync(path: string, options?: RemoveOptions): void;
/** Removes the named file or directory.
*
* ```ts
* await Deno.remove("/path/to/empty_dir/or/file");
* await Deno.remove("/path/to/populated_dir/or/file", { recursive: true });
* ```
*
* Throws error if permission denied, path not found, or path is a non-empty
* directory and the `recursive` option isn't set to `true`.
*
* Requires `allow-write` permission. */
export function remove(path: string, options?: RemoveOptions): Promise<void>;
/** Synchronously renames (moves) `oldpath` to `newpath`. Paths may be files or
* directories. If `newpath` already exists and is not a directory,
* `renameSync()` replaces it. OS-specific restrictions may apply when
* `oldpath` and `newpath` are in different directories.
*
* ```ts
* Deno.renameSync("old/path", "new/path");
* ```
*
* On Unix, this operation does not follow symlinks at either path.
*
* It varies between platforms when the operation throws errors, and if so what
* they are. It's always an error to rename anything to a non-empty directory.
*
* Requires `allow-read` and `allow-write` permissions. */
export function renameSync(oldpath: string, newpath: string): void;
/** Renames (moves) `oldpath` to `newpath`. Paths may be files or directories.
* If `newpath` already exists and is not a directory, `rename()` replaces it.
* OS-specific restrictions may apply when `oldpath` and `newpath` are in
* different directories.
*
* ```ts
* await Deno.rename("old/path", "new/path");
* ```
*
* On Unix, this operation does not follow symlinks at either path.
*
* It varies between platforms when the operation throws errors, and if so what
* they are. It's always an error to rename anything to a non-empty directory.
*
* Requires `allow-read` and `allow-write` permission. */
export function rename(oldpath: string, newpath: string): Promise<void>;
/** Synchronously reads and returns the entire contents of a file as a utf8
* encoded string. Reading a directory returns an empty string.
*
* ```ts
* const data = Deno.readTextFileSync("hello.txt");
* console.log(data);
* ```
*
* Requires `allow-read` permission. */
export function readTextFileSync(path: string): string;
/** Asynchronously reads and returns the entire contents of a file as a utf8
* encoded string. Reading a directory returns an empty string.
*
* ```ts
* const data = await Deno.readTextFile("hello.txt");
* console.log(data);
* ```
*
* Requires `allow-read` permission. */
export function readTextFile(path: string): Promise<string>;
/** Synchronously reads and returns the entire contents of a file as an array
* of bytes. `TextDecoder` can be used to transform the bytes to string if
* required. Reading a directory returns an empty data array.
*
* ```ts
* const decoder = new TextDecoder("utf-8");
* const data = Deno.readFileSync("hello.txt");
* console.log(decoder.decode(data));
* ```
*
* Requires `allow-read` permission. */
export function readFileSync(path: string): Uint8Array;
/** Reads and resolves to the entire contents of a file as an array of bytes.
* `TextDecoder` can be used to transform the bytes to string if required.
* Reading a directory returns an empty data array.
*
* ```ts
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello.txt");
* console.log(decoder.decode(data));
* ```
*
* Requires `allow-read` permission. */
export function readFile(path: string): Promise<Uint8Array>;
/** A FileInfo describes a file and is returned by `stat`, `lstat`,
* `statSync`, `lstatSync`. */
export interface FileInfo {
/** True if this is info for a regular file. Mutually exclusive to
* `FileInfo.isDirectory` and `FileInfo.isSymlink`. */
isFile: boolean;
/** True if this is info for a regular directory. Mutually exclusive to
* `FileInfo.isFile` and `FileInfo.isSymlink`. */
isDirectory: boolean;
/** True if this is info for a symlink. Mutually exclusive to
* `FileInfo.isFile` and `FileInfo.isDirectory`. */
isSymlink: boolean;
/** The size of the file, in bytes. */
size: number;
/** The last modification time of the file. This corresponds to the `mtime`
* field from `stat` on Linux/Mac OS and `ftLastWriteTime` on Windows. This
* may not be available on all platforms. */
mtime: Date | null;
/** The last access time of the file. This corresponds to the `atime`
* field from `stat` on Unix and `ftLastAccessTime` on Windows. This may not
* be available on all platforms. */
atime: Date | null;
/** The creation time of the file. This corresponds to the `birthtime`
* field from `stat` on Mac/BSD and `ftCreationTime` on Windows. This may
* not be available on all platforms. */
birthtime: Date | null;
/** ID of the device containing the file.
*
* _Linux/Mac OS only._ */
dev: number | null;
/** Inode number.
*
* _Linux/Mac OS only._ */
ino: number | null;
/** **UNSTABLE**: Match behavior with Go on Windows for `mode`.
*
* The underlying raw `st_mode` bits that contain the standard Unix
* permissions for this file/directory. */
mode: number | null;
/** Number of hard links pointing to this file.
*
* _Linux/Mac OS only._ */
nlink: number | null;
/** User ID of the owner of this file.
*
* _Linux/Mac OS only._ */
uid: number | null;
/** Group ID of the owner of this file.
*
* _Linux/Mac OS only._ */
gid: number | null;
/** Device ID of this file.
*
* _Linux/Mac OS only._ */
rdev: number | null;
/** Blocksize for filesystem I/O.
*
* _Linux/Mac OS only._ */
blksize: number | null;
/** Number of blocks allocated to the file, in 512-byte units.
*
* _Linux/Mac OS only._ */
blocks: number | null;
}
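/* Editor's sketch of consuming the FileInfo fields above; the file name is
* hypothetical, and `statSync` is declared further below.
*
* ```ts
* const info = Deno.statSync("hello.txt");
* if (info.isFile) {
*   console.log(`size: ${info.size} bytes`);
*   console.log(`modified: ${info.mtime}`); // may be null on some platforms
* }
* ```
*/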
/** Returns absolute normalized path, with symbolic links resolved.
*
* ```ts
* // e.g. given /home/alice/file.txt and current directory /home/alice
* Deno.symlinkSync("file.txt", "symlink_file.txt");
* const realPath = Deno.realPathSync("./file.txt");
* const realSymLinkPath = Deno.realPathSync("./symlink_file.txt");
* console.log(realPath); // outputs "/home/alice/file.txt"
* console.log(realSymLinkPath); // outputs "/home/alice/file.txt"
* ```
*
* Requires `allow-read` permission. */
export function realPathSync(path: string): string;
/** Resolves to the absolute normalized path, with symbolic links resolved.
*
* ```ts
* // e.g. given /home/alice/file.txt and current directory /home/alice
* await Deno.symlink("file.txt", "symlink_file.txt");
* const realPath = await Deno.realPath("./file.txt");
* const realSymLinkPath = await Deno.realPath("./symlink_file.txt");
* console.log(realPath); // outputs "/home/alice/file.txt"
* console.log(realSymLinkPath); // outputs "/home/alice/file.txt"
* ```
*
* Requires `allow-read` permission. */
export function realPath(path: string): Promise<string>;
export interface DirEntry {
name: string;
isFile: boolean;
isDirectory: boolean;
isSymlink: boolean;
}
/** Synchronously reads the directory given by `path` and returns an iterable
* of `Deno.DirEntry`.
*
* ```ts
* for (const dirEntry of Deno.readDirSync("/")) {
* console.log(dirEntry.name);
* }
* ```
*
* Throws error if `path` is not a directory.
*
* Requires `allow-read` permission. */
export function readDirSync(path: string): Iterable<DirEntry>;
/** Reads the directory given by `path` and returns an async iterable of
* `Deno.DirEntry`.
*
* ```ts
* for await (const dirEntry of Deno.readDir("/")) {
* console.log(dirEntry.name);
* }
* ```
*
* Throws error if `path` is not a directory.
*
* Requires `allow-read` permission. */
export function readDir(path: string): AsyncIterable<DirEntry>;
/** Synchronously copies the contents and permissions of one file to another
* specified path, by default creating a new file if needed, else overwriting.
* Fails if target path is a directory or is unwritable.
*
* ```ts
* Deno.copyFileSync("from.txt", "to.txt");
* ```
*
* Requires `allow-read` permission on fromPath.
* Requires `allow-write` permission on toPath. */
export function copyFileSync(fromPath: string, toPath: string): void;
/** Copies the contents and permissions of one file to another specified path,
* by default creating a new file if needed, else overwriting. Fails if target
* path is a directory or is unwritable.
*
* ```ts
* await Deno.copyFile("from.txt", "to.txt");
* ```
*
* Requires `allow-read` permission on fromPath.
* Requires `allow-write` permission on toPath. */
export function copyFile(fromPath: string, toPath: string): Promise<void>;
/** Returns the full path destination of the named symbolic link.
*
* ```ts
* Deno.symlinkSync("./test.txt", "./test_link.txt");
* const target = Deno.readLinkSync("./test_link.txt"); // full path of ./test.txt
* ```
*
* Throws TypeError if called with a hard link
*
* Requires `allow-read` permission. */
export function readLinkSync(path: string): string;
/** Resolves to the full path destination of the named symbolic link.
*
* ```ts
* await Deno.symlink("./test.txt", "./test_link.txt");
* const target = await Deno.readLink("./test_link.txt"); // full path of ./test.txt
* ```
*
* Throws TypeError if called with a hard link
*
* Requires `allow-read` permission. */
export function readLink(path: string): Promise<string>;
/** Resolves to a `Deno.FileInfo` for the specified `path`. If `path` is a
* symlink, information for the symlink will be returned instead of what it
* points to.
*
* ```ts
* const fileInfo = await Deno.lstat("hello.txt");
* assert(fileInfo.isFile);
* ```
*
* Requires `allow-read` permission. */
export function lstat(path: string): Promise<FileInfo>;
/** Synchronously returns a `Deno.FileInfo` for the specified `path`. If
* `path` is a symlink, information for the symlink will be returned instead of
* what it points to.
*
* ```ts
* const fileInfo = Deno.lstatSync("hello.txt");
* assert(fileInfo.isFile);
* ```
*
* Requires `allow-read` permission. */
export function lstatSync(path: string): FileInfo;
/** Resolves to a `Deno.FileInfo` for the specified `path`. Will always
* follow symlinks.
*
* ```ts
* const fileInfo = await Deno.stat("hello.txt");
* assert(fileInfo.isFile);
* ```
*
* Requires `allow-read` permission. */
export function stat(path: string): Promise<FileInfo>;
/** Synchronously returns a `Deno.FileInfo` for the specified `path`. Will
* always follow symlinks.
*
* ```ts
* const fileInfo = Deno.statSync("hello.txt");
* assert(fileInfo.isFile);
* ```
*
* Requires `allow-read` permission. */
export function statSync(path: string): FileInfo;
/** Options for writing to a file. */
export interface WriteFileOptions {
/** Defaults to `false`. If set to `true`, will append to a file instead of
* overwriting previous contents. */
append?: boolean;
/** Sets the option to allow creating a new file, if one doesn't already
* exist at the specified path (defaults to `true`). */
create?: boolean;
/** Permissions always applied to file. */
mode?: number;
}
/** Synchronously write `data` to the given `path`, by default creating a new
* file if needed, else overwriting.
*
* ```ts
* const encoder = new TextEncoder();
* const data = encoder.encode("Hello world\n");
* Deno.writeFileSync("hello1.txt", data); // overwrite "hello1.txt" or create it
* Deno.writeFileSync("hello2.txt", data, {create: false}); // only works if "hello2.txt" exists
* Deno.writeFileSync("hello3.txt", data, {mode: 0o777}); // set permissions on new file
* Deno.writeFileSync("hello4.txt", data, {append: true}); // add data to the end of the file
* ```
*
* Requires `allow-write` permission, and `allow-read` if `options.create` is
* `false`.
*/
export function writeFileSync(
path: string,
data: Uint8Array,
options?: WriteFileOptions
): void;
/** Write `data` to the given `path`, by default creating a new file if needed,
* else overwriting.
*
* ```ts
* const encoder = new TextEncoder();
* const data = encoder.encode("Hello world\n");
* await Deno.writeFile("hello1.txt", data); // overwrite "hello1.txt" or create it
* await Deno.writeFile("hello2.txt", data, {create: false}); // only works if "hello2.txt" exists
* await Deno.writeFile("hello3.txt", data, {mode: 0o777}); // set permissions on new file
* await Deno.writeFile("hello4.txt", data, {append: true}); // add data to the end of the file
* ```
*
* Requires `allow-write` permission, and `allow-read` if `options.create` is `false`.
*/
export function writeFile(
path: string,
data: Uint8Array,
options?: WriteFileOptions
): Promise<void>;
/** Synchronously write string `data` to the given `path`, by default creating a new file if needed,
* else overwriting.
*
* ```ts
* Deno.writeTextFileSync("hello1.txt", "Hello world\n"); // overwrite "hello1.txt" or create it
* ```
*
* Requires `allow-write` permission, and `allow-read` if `options.create` is `false`.
*/
export function writeTextFileSync(path: string, data: string): void;
/** Asynchronously write string `data` to the given `path`, by default creating a new file if needed,
* else overwriting.
*
* ```ts
* await Deno.writeTextFile("hello1.txt", "Hello world\n"); // overwrite "hello1.txt" or create it
* ```
*
* Requires `allow-write` permission, and `allow-read` if `options.create` is `false`.
*/
export function writeTextFile(path: string, data: string): Promise<void>;
/** Synchronously truncates or extends the specified file, to reach the
* specified `len`. If `len` is not specified then the entire file contents
* are truncated.
*
* ```ts
* // truncate the entire file
* Deno.truncateSync("my_file.txt");
*
* // truncate part of the file
* const file = Deno.makeTempFileSync();
* Deno.writeFileSync(file, new TextEncoder().encode("Hello World"));
* Deno.truncateSync(file, 7);
* const data = Deno.readFileSync(file);
* console.log(new TextDecoder().decode(data));
* ```
*
* Requires `allow-write` permission. */
export function truncateSync(name: string, len?: number): void;
/** Truncates or extends the specified file, to reach the specified `len`. If
* `len` is not specified then the entire file contents are truncated.
*
* ```ts
* // truncate the entire file
* await Deno.truncate("my_file.txt");
*
* // truncate part of the file
* const file = await Deno.makeTempFile();
* await Deno.writeFile(file, new TextEncoder().encode("Hello World"));
* await Deno.truncate(file, 7);
* const data = await Deno.readFile(file);
* console.log(new TextDecoder().decode(data)); // "Hello W"
* ```
*
* Requires `allow-write` permission. */
export function truncate(name: string, len?: number): Promise<void>;
export interface NetAddr {
transport: "tcp" | "udp";
hostname: string;
port: number;
}
export interface UnixAddr {
transport: "unix" | "unixpacket";
path: string;
}
export type Addr = NetAddr | UnixAddr;
/** A generic network listener for stream-oriented protocols. */
export interface Listener extends AsyncIterable<Conn> {
/** Waits for and resolves to the next connection to the `Listener`. */
accept(): Promise<Conn>;
/** Close closes the listener. Any pending accept promises will be rejected
* with errors. */
close(): void;
/** Return the address of the `Listener`. */
readonly addr: Addr;
[Symbol.asyncIterator](): AsyncIterableIterator<Conn>;
}
export interface Conn extends Reader, Writer, Closer {
/** The local address of the connection. */
readonly localAddr: Addr;
/** The remote address of the connection. */
readonly remoteAddr: Addr;
/** The resource ID of the connection. */
readonly rid: number;
/** Shuts down (`shutdown(2)`) the writing side of the TCP connection. Most
* callers should just use `close()`.
*
* **Unstable** because of lack of testing and because Deno.shutdown is also
* unstable.
* */
closeWrite(): void;
}
export interface ListenOptions {
/** The port to listen on. */
port: number;
/** A literal IP address or host name that can be resolved to an IP address.
* If not specified, defaults to `0.0.0.0`. */
hostname?: string;
}
/** Listen announces on the local transport address.
*
* ```ts
* const listener1 = Deno.listen({ port: 80 })
* const listener2 = Deno.listen({ hostname: "192.0.2.1", port: 80 })
* const listener3 = Deno.listen({ hostname: "[2001:db8::1]", port: 80 });
* const listener4 = Deno.listen({ hostname: "golang.org", port: 80, transport: "tcp" });
* ```
*
* Requires `allow-net` permission. */
export function listen(
options: ListenOptions & { transport?: "tcp" }
): Listener;
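/* Editor's sketch: a `Listener` is an async iterable (see the interface
* above), so a minimal accept loop can be written with `for await`. The
* port is arbitrary.
*
* ```ts
* const listener = Deno.listen({ port: 8080 });
* for await (const conn of listener) {
*   const buf = new Uint8Array(1024);
*   const n = await conn.read(buf); // Conn implements Reader
*   if (n !== null) await conn.write(buf.subarray(0, n)); // echo back
*   conn.close();
* }
* ```
*/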
export interface ListenTlsOptions extends ListenOptions {
/** Server certificate file. */
certFile: string;
/** Server private key file. */
keyFile: string;
transport?: "tcp";
}
/** Listen announces on the local transport address over TLS (transport layer
* security).
*
* ```ts
* const lstnr = Deno.listenTls({ port: 443, certFile: "./server.crt", keyFile: "./server.key" });
* ```
*
* Requires `allow-net` permission. */
export function listenTls(options: ListenTlsOptions): Listener;
export interface ConnectOptions {
/** The port to connect to. */
port: number;
/** A literal IP address or host name that can be resolved to an IP address.
* If not specified, defaults to `127.0.0.1`. */
hostname?: string;
transport?: "tcp";
}
/**
* Connects to the hostname (default is "127.0.0.1") and port on the named
* transport (default is "tcp"), and resolves to the connection (`Conn`).
*
* ```ts
* const conn1 = await Deno.connect({ port: 80 });
* const conn2 = await Deno.connect({ hostname: "192.0.2.1", port: 80 });
* const conn3 = await Deno.connect({ hostname: "[2001:db8::1]", port: 80 });
* const conn4 = await Deno.connect({ hostname: "golang.org", port: 80, transport: "tcp" });
* const conn5 = await Deno.connect({ path: "/foo/bar.sock", transport: "unix" });
* ```
*
* Requires `allow-net` permission for "tcp" and `allow-read` for unix. */
export function connect(options: ConnectOptions): Promise<Conn>;
export interface ConnectTlsOptions {
/** The port to connect to. */
port: number;
/** A literal IP address or host name that can be resolved to an IP address.
* If not specified, defaults to `127.0.0.1`. */
hostname?: string;
/** Server certificate file. */
certFile?: string;
}
/** Establishes a secure connection over TLS (transport layer security) using
* an optional cert file, hostname (default is "127.0.0.1") and port. The
* cert file is optional and if not included Mozilla's root certificates will
* be used (see also https://github.com/ctz/webpki-roots for specifics)
*
* ```ts
* const conn1 = await Deno.connectTls({ port: 80 });
* const conn2 = await Deno.connectTls({ certFile: "./certs/my_custom_root_CA.pem", hostname: "192.0.2.1", port: 80 });
* const conn3 = await Deno.connectTls({ hostname: "[2001:db8::1]", port: 80 });
* const conn4 = await Deno.connectTls({ certFile: "./certs/my_custom_root_CA.pem", hostname: "golang.org", port: 80});
* ```
*
* Requires `allow-net` permission.
*/
export function connectTls(options: ConnectTlsOptions): Promise<Conn>;
export interface Metrics {
opsDispatched: number;
opsDispatchedSync: number;
opsDispatchedAsync: number;
opsDispatchedAsyncUnref: number;
opsCompleted: number;
opsCompletedSync: number;
opsCompletedAsync: number;
opsCompletedAsyncUnref: number;
bytesSentControl: number;
bytesSentData: number;
bytesReceived: number;
}
/** Receive metrics from the privileged side of Deno. This is primarily used
* in the development of Deno. 'Ops', also called 'bindings', are the
* interface between Deno's JavaScript and Rust sides.
*
* > console.table(Deno.metrics())
* ┌─────────────────────────┬────────┐
* │         (index)         │ Values │
* ├─────────────────────────┼────────┤
* │      opsDispatched      │   3    │
* │    opsDispatchedSync    │   2    │
* │   opsDispatchedAsync    │   1    │
* │ opsDispatchedAsyncUnref │   0    │
* │      opsCompleted       │   3    │
* │    opsCompletedSync     │   2    │
* │    opsCompletedAsync    │   1    │
* │ opsCompletedAsyncUnref  │   0    │
* │    bytesSentControl     │   73   │
* │      bytesSentData      │   0    │
* │      bytesReceived      │  375   │
* └─────────────────────────┴────────┘
*/
export function metrics(): Metrics;
interface ResourceMap {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
[rid: number]: any;
}
/** Returns a map of open resource ids (rid) along with their string
* representations. This is an internal API and as such resource
* representation has `any` type; that means it can change any time.
*
* ```ts
* console.log(Deno.resources());
* // { 0: "stdin", 1: "stdout", 2: "stderr" }
* Deno.openSync('../test.file');
* console.log(Deno.resources());
* // { 0: "stdin", 1: "stdout", 2: "stderr", 3: "fsFile" }
* ```
*/
export function resources(): ResourceMap;
export interface FsEvent {
kind: "any" | "access" | "create" | "modify" | "remove";
paths: string[];
}
/** Watch for file system events against one or more `paths`, which can be files
* or directories. These paths must exist already. One user action (e.g.
* `touch test.file`) can generate multiple file system events. Likewise,
* one user action can result in multiple file paths in one event (e.g. `mv
* old_name.txt new_name.txt`). The recursive option is `true` by default and,
* for directories, will watch the specified directory and all subdirectories.
* Note that the exact ordering of the events can vary between operating systems.
*
* ```ts
* const watcher = Deno.watchFs("/");
* for await (const event of watcher) {
* console.log(">>>> event", event);
* // { kind: "create", paths: [ "/foo.txt" ] }
* }
*```
*
* Requires `allow-read` permission.
*/
export function watchFs(
paths: string | string[],
options?: { recursive: boolean }
): AsyncIterableIterator<FsEvent>;
export class Process {
readonly rid: number;
readonly pid: number;
readonly stdin?: Writer & Closer;
readonly stdout?: Reader & Closer;
readonly stderr?: Reader & Closer;
/** Resolves to the current status of the process. */
status(): Promise<ProcessStatus>;
/** Buffer the stdout until EOF and return it as `Uint8Array`.
*
* You must set stdout to `"piped"` when creating the process.
*
* This calls `close()` on stdout after it's done. */
output(): Promise<Uint8Array>;
/** Buffer the stderr until EOF and return it as `Uint8Array`.
*
* You must set stderr to `"piped"` when creating the process.
*
* This calls `close()` on stderr after it's done. */
stderrOutput(): Promise<Uint8Array>;
close(): void;
/** **UNSTABLE**: The `signo` argument may change to require the Deno.Signal
* enum.
*
* Send a signal to process. This functionality currently only works on
* Linux and Mac OS.
*/
kill(signo: number): void;
}
export type ProcessStatus =
| {
success: true;
code: 0;
signal?: undefined;
}
| {
success: false;
code: number;
signal?: number;
};
export interface RunOptions {
/** Arguments to pass. Note, the first element needs to be a path to the
* binary */
cmd: string[];
cwd?: string;
env?: {
[key: string]: string;
};
stdout?: "inherit" | "piped" | "null" | number;
stderr?: "inherit" | "piped" | "null" | number;
stdin?: "inherit" | "piped" | "null" | number;
}
/** Spawns new subprocess. RunOptions must contain at a minimum the `opt.cmd`,
* an array of program arguments, the first of which is the binary.
*
* ```ts
* const p = Deno.run({
* cmd: ["echo", "hello"],
* });
* ```
*
* The subprocess uses the same working directory as the parent process
* unless `opt.cwd` is specified.
*
* Environment variables for the subprocess can be specified using the
* `opt.env` mapping.
*
* By default the subprocess inherits the stdio of the parent process. To
* change that, `opt.stdout`, `opt.stderr` and `opt.stdin` can each be set
* independently to the rid of an open file, or to "inherit", "piped" or
* "null":
*
* `"inherit"` The default if unspecified. The child inherits from the
* corresponding parent descriptor.
*
* `"piped"` A new pipe should be arranged to connect the parent and child
* sub-processes.
*
* `"null"` This stream will be ignored. This is the equivalent of attaching
* the stream to `/dev/null`.
*
* Details of the spawned process are returned.
*
* Requires `allow-run` permission. */
export function run(opt: RunOptions): Process;
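/* Editor's sketch tying together `run`, a `"piped"` stdout and the
* `Process` API documented above to capture output and exit status.
*
* ```ts
* const p = Deno.run({ cmd: ["echo", "hello"], stdout: "piped" });
* const raw = await p.output();    // buffers stdout until EOF, then closes it
* const status = await p.status(); // e.g. { success: true, code: 0 }
* console.log(new TextDecoder().decode(raw), status.code);
* p.close();
* ```
*/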
interface InspectOptions {
depth?: number;
}
/** Converts the input into a string that has the same format as printed by
* `console.log()`.
*
* ```ts
* const obj = { propA: 10, propB: "hello" };
* const objAsString = Deno.inspect(obj); // { propA: 10, propB: "hello" }
* console.log(obj); // prints same value as objAsString, e.g. { propA: 10, propB: "hello" }
* ```
*
* You can also register custom inspect functions, via the `customInspect` Deno
* symbol on objects, to control and customize the output.
*
* ```ts
* class A {
* x = 10;
* y = "hello";
* [Deno.customInspect](): string {
* return "x=" + this.x + ", y=" + this.y;
* }
* }
* ```
*
* ```ts
* const inStringFormat = Deno.inspect(new A()); // "x=10, y=hello"
* console.log(inStringFormat); // prints "x=10, y=hello"
* ```
*
* Finally, a number of output options are also available.
*
* ```ts
* const out = Deno.inspect(obj, {showHidden: true, depth: 4, colors: true, indentLevel: 2});
* ```
*/
export function inspect(value: unknown, options?: InspectOptions): string;
/** Build related information. */
export const build: {
/** The LLVM target triple */
target: string;
/** Instruction set architecture */
arch: "x86_64";
/** Operating system */
os: "darwin" | "linux" | "windows";
/** Computer vendor */
vendor: string;
/** Optional environment */
env?: string;
};
interface Version {
deno: string;
v8: string;
typescript: string;
}
/** Version related information. */
export const version: Version;
/** Returns the script arguments to the program. If for example we run a
* program:
*
* deno run --allow-read https://deno.land/std/examples/cat.ts /etc/passwd
*
* Then `Deno.args` will contain:
*
* [ "/etc/passwd" ]
*/
export const args: string[];
/** A symbol which can be used as a key for a custom method which will be
* called when `Deno.inspect()` is called, or when the object is logged to
* the console. */
export const customInspect: unique symbol;
} | *
* For a full description, see [chmod](#chmod)
*
* NOTE: This API currently throws on Windows |
rdf.py | # !/usr/bin/env python
# -*-coding:utf-8-*-
"""
@author: xhades
@Date: 2017/12/28
"""
# Random forest classifier
import numpy as np
from numpy import reshape, array
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pickle
from sklearn.ensemble import RandomForestClassifier as RDF
np.set_printoptions(threshold=np.inf)
# Train/test split: 70% train, 30% test
def train(xFile, yFile):
with | xFile, "rb") as file_r:
X = pickle.load(file_r)
X = reshape(X, (212841, -1))  # reshape to (212841, 30*128)
# Read the label data and encode it
with open(yFile, "r") as yFile_r:
labelLines = [_.strip("\n") for _ in yFile_r.readlines()]
values = array(labelLines)
labelEncoder = LabelEncoder()
integerEncoded = labelEncoder.fit_transform(values)
integerEncoded = integerEncoded.reshape(len(integerEncoded), 1)
# print(integerEncoded)
# Get the encoded labels
Y = integerEncoded.reshape(212841, )
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
# Random forest classifier
clf = RDF(criterion="gini")
# criterion can be "gini" or "entropy": the former uses the Gini impurity,
# the latter information gain. The default "gini" (i.e. the CART algorithm)
# is usually fine, unless you prefer ID3/C4.5-style optimal feature selection.
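# (Editor's sketch) Other commonly tuned RandomForestClassifier parameters,
# e.g. ensemble size and parallelism:
# clf = RDF(criterion="gini", n_estimators=100, n_jobs=-1)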
clf.fit(X_train, Y_train)
# ๆต่ฏๆฐๆฎ
predict = clf.predict(X_test)
count = 0
for p, t in zip(predict, Y_test):
if p == t:
count += 1
print("RandomForest Accuracy is:", count/len(Y_test))
if __name__ == "__main__":
xFile = "Res/char_embedded.pkl"
yFile = "data/label.txt"
print("Start Training.....")
train(xFile, yFile)
print("End.....")
| open( |
fabfile.py | from fabric.api import cd, env, lcd, local, hosts, prompt, run
from fabric.decorators import runs_once
import os
import time
env.runtime = 'production'
env.hosts = ['newchimera.readthedocs.com',
'newbuild.readthedocs.com',
'newasgard.readthedocs.com']
env.user = 'docs'
env.code_dir = '/home/docs/checkouts/readthedocs.org'
env.virtualenv = '/home/docs/'
env.rundir = '/home/docs/run'
fabfile_dir = os.path.dirname(__file__)
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def remove_project(project):
run('rm -rf %s/user_builds/%s' % (env.code_dir, project))
def ntpdate():
run('ntpdate-debian')
def wheelhouse():
for host in ['newchimera.readthedocs.com', 'newasgard.readthedocs.com']:
run('rsync -av wheelhouse/ root@%s:/home/docs/checkouts/readthedocs.org/media/wheelhouse/' % host)
## Logging Awesomeness
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def nginx_logs():
env.user = "root"
run("tail -F /var/log/nginx/*.log")
@hosts(['newbuild.readthedocs.com'])
def celery_logs():
env.user = "docs"
run("tail -F ~/log/celery.err")
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def logs():
|
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def postcommit_logs():
env.user = "docs"
run("tail -F %s/logs/postcommit.log" % env.code_dir)
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def cat_postcommit_logs():
env.user = "docs"
run("cat %s/logs/postcommit.log" % env.code_dir)
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def api_logs():
env.user = "docs"
run("tail -F %s/logs/api.log" % env.code_dir)
@hosts(['newasgard.readthedocs.com', 'newchimera.readthedocs.com'])
def web_logs(type):
"""
Get logs from the web servers::
fab -P web_logs:middleware
"""
env.user = "docs"
run("tail -F %s/logs/%s.log" % (env.code_dir, type))
## Normal bits
@hosts(['localhost'])
def i18n():
with lcd('readthedocs'):
local('rm -rf rtd_tests/tests/builds/')
local('tx pull')
local('./manage.py makemessages --all')
local('tx push -s')
local('./manage.py compilemessages')
@hosts(['localhost'])
def i18n_docs():
with lcd('docs'):
# Update our translations
local('tx pull -a')
local('sphinx-intl build')
# Push new ones
local('make gettext')
local('tx push -s')
def push():
"Push new code, but don't restart/reload."
local('git push origin master')
with cd(env.code_dir):
run('git fetch')
run('git reset --hard origin/master')
def update_requirements():
"Update requirements in the virtualenv."
run("%s/bin/pip install -r %s/deploy_requirements.txt" % (env.virtualenv, env.code_dir))
@hosts(['newchimera.readthedocs.com'])
def migrate(project=None):
if project:
run('django-admin.py migrate %s' % project)
else:
run('django-admin.py migrate')
@hosts(['newchimera.readthedocs.com'])
def syncdb(project=None):
run('django-admin.py syncdb')
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def static():
"Restart (or just start) the server"
run('django-admin.py collectstatic --noinput')
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def restart():
"Restart (or just start) the server"
env.user = "docs"
run("supervisorctl restart web")
#so it has time to reload
time.sleep(3)
@hosts(['newchimera.readthedocs.com', 'newasgard.readthedocs.com'])
def reload():
"Reload (or just start) the server"
run("supervisorctl update")
@hosts(['newbuild.readthedocs.com'])
def celery():
"Restart (or just start) the server"
run("supervisorctl restart celery")
def pull():
"Pull new code"
with cd(env.code_dir):
run('git fetch')
run('git reset --hard origin/master')
@runs_once
def spider():
local('patu.py -d1 readthedocs.org')
def _aws_wrapper(f, *args, **kwargs):
"get AWS credentials if not defined"
#these are normally defined in ~/.fabricrc
@hosts('run_once') # so fab doesn't go crazy
def wrapped(*args, **kwargs):
from boto.cloudfront.exception import CloudFrontServerError
from boto.cloudfront import CloudFrontConnection
if not hasattr(env, 'aws_access_key_id'):
    prompt('AWS Access Key ID: ', key='aws_access_key_id')
if not hasattr(env, 'aws_secret_access_key'):
    prompt('AWS Secret Access Key: ', key='aws_secret_access_key')
c = CloudFrontConnection(env.aws_access_key_id,
                         env.aws_secret_access_key)
try:
return f(c, *args, **kwargs)
except CloudFrontServerError as e:
print "Error: \n", e.error_message
return wrapped
@_aws_wrapper
def to_cdn(c, slug):
"Create a new Distribution object on CloudFront"
from boto.cloudfront.origin import CustomOrigin
# the authenticated connection `c` is supplied by the _aws_wrapper decorator
d = c.create_distribution(
origin=CustomOrigin(slug + '.cdn.readthedocs.org',
origin_protocol_policy='http-only'),
enabled=True,
comment='Slug: ' + slug,
cnames=[slug + '.readthedocs.org']
)
print "Created: " + d.domain_name + " for " + slug
list_cdn()
@_aws_wrapper
def list_cdn(c):
"List Distributions on CloudFront"
distributions = c.get_all_distributions()
for d in distributions:
print "%3s %4s %40s %30s" % ('Ena' if d.enabled else 'Dis',
d.status[:4], d.origin.dns_name,
d.domain_name)
@_aws_wrapper
def disable_cdn(c, *args):
"Sets a Distribution entry to disabled. Required before deletion."
distributions = c.get_all_distributions()
for distro in distributions:
dist_slug = distro.origin.dns_name.split('.')[0]
if dist_slug in args:
print "Disabling:", dist_slug
#this is broken as of boto 2.0b4.
#fix is to comment out lines 347-352 in cloudfront/distribution.py
distro.get_distribution().disable()
@_aws_wrapper
def delete_cdn(c):
"Deletes all Distributions in the 'Disabled' state."
distributions = c.get_all_distributions()
for distro in distributions:
if not distro.enabled and distro.status == "Deployed":
print "Deleting", distro.origin.dns_name
distro.get_distribution().delete()
def full_deploy():
#HACK
#Call this again at the top-level so the hosts decorator
#effects the hosts it runs against for each command.
run('fab push update_requirements migrate restart celery')
#push()
#update_requirements()
#migrate()
#restart()
#celery()
@hosts(['newchimera.readthedocs.com'])
def uptime():
run('uptime')
@hosts(['newchimera.readthedocs.com'])
def update_index():
run('django-admin.py update_index')
@hosts('None')
def update_theme():
theme_dir = os.path.join(fabfile_dir, 'readthedocs', 'templates', 'sphinx')
if not os.path.exists('/tmp/sphinx_rtd_theme'):
local('git clone https://github.com/snide/sphinx_rtd_theme.git /tmp/sphinx_rtd_theme')
with lcd('/tmp/sphinx_rtd_theme'):
local('git remote update')
local('git reset --hard origin/master ')
local('cp -r /tmp/sphinx_rtd_theme/sphinx_rtd_theme %s' % theme_dir)
local('cp -r /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/fonts/ %s' % os.path.join(fabfile_dir, 'media', 'font'))
local('cp /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/css/badge_only.css %s' % os.path.join(fabfile_dir, 'media', 'css'))
local('cp /tmp/sphinx_rtd_theme/sphinx_rtd_theme/static/css/theme.css %s' % os.path.join(fabfile_dir, 'media', 'css', 'sphinx_rtd_theme.css'))
| env.user = "docs"
run("tail -F %s/logs/*.log" % env.code_dir) |
salesforceConnection.ts | import * as jsforce from 'jsforce';
import * as vscode from 'vscode';
import { SalesforceConfig } from './salesforceConfig';
import { l } from '../strings/Strings';
export class | {
private static validConnection: jsforce.Connection | undefined;
private connection: jsforce.Connection;
constructor(public config: SalesforceConfig) {
const createConnection = () => {
if (config.doValidation()) {
this.connection = new jsforce.Connection({
loginUrl: config.getOrganizationUrl()
});
}
};
createConnection();
vscode.workspace.onDidChangeConfiguration(() => {
this.invalidateConnection();
createConnection();
});
}
public async login(): Promise<jsforce.Connection> {
if (SalesforceConnection.validConnection) {
return Promise.resolve(SalesforceConnection.validConnection);
} else {
return <Promise<jsforce.Connection>>vscode.window.withProgress(
{
title: l('SalesforceConnection'),
location: vscode.ProgressLocation.Window
},
async (progress): Promise<jsforce.Connection> => {
if (this.config.doValidation()) {
const username = this.config.getUsername();
const password = this.config.getPassword();
const securityToken = this.config.getSecurityToken();
if (username && password) {
progress.report({ message: l('SalesforceConnection') });
await this.connection.login(username, password + securityToken);
SalesforceConnection.validConnection = this.connection;
return SalesforceConnection.validConnection;
} else {
return Promise.reject(l('SalesforceInvalidLoginConfig'));
}
} else {
this.config.doValidation(false);
return Promise.reject(l('SalesforceInvalidLoginConfig'));
}
}
);
}
}
public invalidateConnection() {
if (SalesforceConnection.validConnection) {
SalesforceConnection.validConnection.logout();
}
SalesforceConnection.validConnection = undefined;
}
}
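// Editor's sketch of the intended usage (assuming SalesforceConfig has a
// no-argument constructor, which is not shown here):
//
//   const connection = new SalesforceConnection(new SalesforceConfig());
//   const session = await connection.login(); // cached on success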
| SalesforceConnection |
template-param-usage-6.rs | /* automatically generated by rust-bindgen */
#![allow(dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals)]
|
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct DoesNotUseTemplateParameter {
pub x: ::std::os::raw::c_int,
}
pub type DoesNotUseTemplateParameter_ButAliasDoesUseIt<T> = T; | |
e.go | package e
type S struct{}
func (s *S) foo() {
go func() { // want "no defer"
println("foo")
}()
}
func (s S) bar() {
go func() { // want "no recover"
defer func() {
}()
println("bar") | }()
} |
|
css.js | export default (function CrappyStyleSheets() {
const prototype = {};
function property(name, value) {
return [name, ':', value, ';'].join('');
}
[
'accelerator',
'animation',
'azimuth',
'background',
'background-attachment',
'background-color',
'background-image',
'background-position',
'background-position-x',
'background-position-y',
'background-repeat',
'behavior',
'border',
'border-bottom',
'border-bottom-color',
'border-bottom-style',
'border-bottom-width',
'border-collapse',
'border-color',
'border-left',
'border-left-color',
'border-left-style',
'border-left-width',
'border-right',
'border-right-color',
'border-right-style',
'border-right-width',
'border-spacing',
'border-style',
'border-top',
'border-top-color',
'border-top-style',
'border-top-width',
'border-width',
'bottom',
'box-sizing',
'caption-side',
'clear',
'clip',
'color',
'content',
'counter-increment',
'counter-reset',
'cue',
'cue-after',
'cue-before',
'cursor',
'direction',
'display',
'elevation',
'empty-cells',
'filter',
'float',
'font',
'font-family',
'font-size',
'font-size-adjust',
'font-stretch',
'font-style',
'font-variant',
'font-weight',
'height',
'ime-mode',
'include-source',
'layer-background-color',
'layer-background-image',
'layout-flow',
'layout-grid',
'layout-grid-char',
'layout-grid-char-spacing',
'layout-grid-line',
'layout-grid-mode',
'layout-grid-type',
'left',
'letter-spacing',
'line-break',
'line-height',
'list-style',
'list-style-image',
'list-style-position',
'list-style-type',
'margin',
'margin-bottom',
'margin-left',
'margin-right',
'margin-top',
'marker-offset',
'marks',
'max-height',
'max-width',
'min-height',
'min-width',
'-moz-binding',
'-moz-border-radius',
'-moz-border-radius-topleft',
'-moz-border-radius-topright',
'-moz-border-radius-bottomright',
'-moz-border-radius-bottomleft',
'-moz-border-top-colors',
'-moz-border-right-colors',
'-moz-border-bottom-colors',
'-moz-border-left-colors',
'-moz-opacity',
'-moz-outline',
'-moz-outline-color',
'-moz-outline-style',
'-moz-outline-width',
'-moz-user-focus',
'-moz-user-input',
'-moz-user-modify',
'-moz-user-select',
'opacity',
'orphans',
'outline',
'outline-color',
'outline-style',
'outline-width',
'overflow',
'overflow-X',
'overflow-Y',
'padding',
'padding-bottom',
'padding-left',
'padding-right',
'padding-top',
'page',
'page-break-after',
'page-break-before',
'page-break-inside',
'pause',
'pause-after',
'pause-before',
'pitch',
'pitch-range',
'play-during',
'pointer-events',
'position',
'quotes',
'-replace',
'richness',
'right',
'ruby-align',
'ruby-overhang',
'ruby-position',
'-set-link-source',
'size',
'speak',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speech-rate',
'stress',
'scrollbar-arrow-color',
'scrollbar-base-color',
'scrollbar-dark-shadow-color',
'scrollbar-face-color',
'scrollbar-highlight-color',
'scrollbar-shadow-color',
'scrollbar-3d-light-color',
'scrollbar-track-color',
'table-layout',
'text-align',
'text-align-last',
'text-decoration',
'text-indent',
'text-justify',
'text-overflow',
'text-shadow',
'text-transform',
'text-autospace',
'text-kashida-space',
'text-underline-position',
'top',
'transition',
'unicode-bidi',
'-use-link-source',
'vertical-align',
'visibility',
'voice-family',
'volume',
'white-space',
'widows',
'width',
'word-break',
'word-spacing',
'word-wrap',
'writing-mode',
'z-index',
'zoom',
'-webkit-transition',
'-webkit-animation',
].forEach(function add_to_prototype(_) {
let name = _.replace(/-/g, '_');
prototype[name] = function(value) {
return property(_, value);
}
});
const __css = {};
const next_name = (function generator() {
const letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';
const max_position = letters.length - 1;
let position = 0;
let length = 1;
return function next() {
if(position === max_position) {
position = 0;
}
let result = "";
for(let i = 0; i < length; i++) {
result += letters[position];
position += 1;
}
return result;
}
}());
// Returns the class name.
function define(styles) {
const class_name = next_name();
__css[`.${class_name}`] = ['{', styles['_'].join('\n') ,'}\n'].join('\n');
delete styles['_'];
let selectors = Object.getOwnPropertyNames(styles);
selectors.forEach(function(_) {
let selector = `.${class_name} ${_}`;
__css[selector] = ['{', styles[_].join('\n') ,'}\n'].join('\n');
});
return class_name;
}
function define_keyframes(name, properties) {
__css[`@keyframes ${name}`] = properties;
__css[`@-webkit-keyframes ${name}`] = properties;
}
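// Editor's sketch of define/define_keyframes (assuming this module's frozen
// prototype is imported as `css`; `someElement` is hypothetical):
//
//   const cls = css.define({ _: [css.color('#333'), css.padding('1em')] });
//   css.define_keyframes('fade', '{ from { opacity: 0; } to { opacity: 1; } }');
//   someElement.className = cls;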
// returns the generated css.
function generate() {
const classes = Object.getOwnPropertyNames(__css);
let the_css = "";
classes.forEach(function(_) {
let properties = __css[_];
the_css += `${_} ${properties}`;
});
// `html` is expected to be a hyperscript-style helper available in scope
return html.style(undefined, the_css);
}
function style(element) {
return (function Styler() {
let constructor = function renderStyle() {
const class_name = next_name();
__css[`.${class_name}`] = ['{', element._style, '}\n'].join('\n');
element.className += ' ' + class_name;
};
element._style = '';
[ 'color',
'display',
'font_size',
'font_weight',
'padding',
'position',
'pointer_events',
'left',
'top',
'width',
'border',
'margin_bottom',
'box_sizing',
'border_bottom',
'height',
'opacity'
].forEach(function(_) {
constructor[_] = function(value) {
element._style += css[_](value);
return constructor;
}
});
return constructor;
}());
}
function style_state(element) {
return (function StateStyler() {
let styles = [];
let constructor = function compile() {
let class_name = element.className.trim().split(' ')[0];
styles.forEach(function add_css(_) {
__css[`.${class_name}:${_.pseudo}`] = [
'{',
_.properties.join(''),
'}\n'
].join('\n');
});
};
constructor.on = function On(pseudo_state) {
let pseudo_style = {
pseudo: pseudo_state
};
return { | style: function Style(...properties) {
pseudo_style.properties = properties;
styles.push(pseudo_style);
return constructor;
}
}
};
return constructor;
}());
}
function animate(element) {
return (function Animator() {
let transition = undefined;
let from_properties = undefined;
let to_properties = undefined;
let on_element = undefined;
let constructor = function compile() {
const class_name = next_name();
let selectors = [];
on_element.pseudo.forEach(function makeSelector(_) {
selectors.push(`.${class_name}:${_} ~ .${element.className.trim()}`);
});
if(from_properties) {
// Create the @key-frame structure.
const name = next_name();
__css[`@-webkit-keyframes ${name}`] = [
'{',
'from {',
from_properties,
'}',
'to {',
to_properties,
'}}\n'
].join('\n');
__css[`${selectors.join(',')}`] = [
'{',
css._webkit_animation(`${name} ${transition}`),
'}\n'].join('\n');
}
else {
element.style = css.transition(transition);
__css[`${selectors.join(',')}`] = ['{', to_properties, '}\n'].join('\n');
}
on_element.element.className = [on_element.element.className,
class_name].join(' ');
};
constructor.transition = function(value) {
transition = value;
return constructor;
};
constructor.from = function(...properties) {
from_properties = properties.join('');
return {
to: function(...properties) {
to_properties = properties.join('');
return constructor;
}
}
};
constructor.to = function(...properties) {
to_properties = properties.join('');
return constructor;
};
constructor.on = function(element, ...pseudo) {
on_element = {
'element': element,
'pseudo': pseudo
};
return constructor;
};
return constructor;
}());
}
prototype.define = define;
prototype.define_keyframes = define_keyframes;
prototype.generate = generate;
prototype.raw = function(_) { return _;};
prototype.style = style;
prototype.style_state = style_state;
prototype.animate = animate;
return Object.freeze(prototype);
}()); |