// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT.
// +build linux
// +build 386 amd64
package unix
import "unsafe"
// PtraceRegs386 is the registers used by 386 binaries.
type PtraceRegs386 struct {
Ebx int32
Ecx int32
Edx int32
Esi int32
Edi int32
Ebp int32
Eax int32
Xds int32
Xes int32
Xfs int32
Xgs int32
Orig_eax int32
Eip int32
Xcs int32
Eflags int32
Esp int32
Xss int32
}
// PtraceGetRegs386 fetches the registers used by 386 binaries.
func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error {
return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
}
// PtraceSetRegs386 sets the registers used by 386 binaries.
func PtraceSetRegs386(pid int, regs *PtraceRegs386) error {
return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
}
// PtraceRegsAmd64 is the registers used by amd64 binaries.
type PtraceRegsAmd64 struct {
R15 uint64
R14 uint64
R13 uint64
R12 uint64
Rbp uint64
Rbx uint64
R11 uint64
R10 uint64
R9 uint64
R8 uint64
Rax uint64
Rcx uint64
Rdx uint64
Rsi uint64
Rdi uint64
Orig_rax uint64
Rip uint64
Cs uint64
Eflags uint64
Rsp uint64
Ss uint64
Fs_base uint64
Gs_base uint64
Ds uint64
Es uint64
Fs uint64
Gs uint64
}
// PtraceGetRegsAmd64 fetches the registers used by amd64 binaries.
func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error {
return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
}
// PtraceSetRegsAmd64 sets the registers used by amd64 binaries.
func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error {
return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
}
| {
"pile_set_name": "Github"
} |
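A minimal usage sketch for the generated wrappers above, on a linux/amd64 build, assuming a tracee that has already been attached and stopped (pid handling and error paths are abbreviated):

```go
package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/unix"
)

// dumpRegs prints a few registers of an amd64 tracee that is already
// stopped under ptrace (e.g. after unix.PtraceAttach and unix.Wait4).
func dumpRegs(pid int) error {
	// ptrace requests must be issued from the thread that attached.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var regs unix.PtraceRegsAmd64
	if err := unix.PtraceGetRegsAmd64(pid, &regs); err != nil {
		return err
	}
	fmt.Printf("rip=%#x rsp=%#x rax=%#x\n", regs.Rip, regs.Rsp, regs.Rax)
	return nil
}
```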
K 14
svn:executable
V 1
*
K 13
svn:mime-type
V 24
application/octet-stream
END
| {
"pile_set_name": "Github"
} |
// +build linux,386 linux,arm linux,mips linux,mipsle
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unix
func init() {
// On 32-bit Linux systems, the fcntl syscall that matches Go's
// Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
fcntl64Syscall = SYS_FCNTL64
}
| {
"pile_set_name": "Github"
} |
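The syscall-number switch above matters for wrappers such as FcntlFlock, which take Go's Flock_t. A minimal sketch of a whole-file write lock (blocking variant; error handling is left to the caller):

```go
package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// lockWholeFile takes an exclusive lock over the entire file. On the
// 32-bit targets listed above, this goes through SYS_FCNTL64.
func lockWholeFile(f *os.File) error {
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK, // exclusive (write) lock
		Whence: 0,            // offsets relative to start of file
		Start:  0,
		Len:    0, // 0 means "through end of file"
	}
	return unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk) // waits if contended
}
```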
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
brave_locale_source_patterns = [
"${root_gen_dir}/components/brave_components_strings_",
"${root_gen_dir}/brave/brave_generated_resources_",
]
brave_locale_deps = [
"//brave/app:brave_generated_resources_grit",
"//brave/components/resources:strings",
]
| {
"pile_set_name": "Github"
} |
# Copyright (c) 2015 iXsystems, Inc.
# All rights reserved.
# This file is a part of TrueNAS
# and may not be copied and/or distributed
# without the express permission of iXsystems.
from middlewared.client import Client, ClientException, CallTimeout
from middlewared.schema import accepts, Any, Bool, Dict, Int, List, Str
from middlewared.service import CallError, Service, job, private
from middlewared.utils import start_daemon_thread
from middlewared.utils.osc import set_thread_name
from collections import defaultdict
from functools import partial
import errno
import json
import logging
import requests
import socket
import threading
import time
logger = logging.getLogger('failover.remote')
class RemoteClient(object):
def __init__(self):
self.client = None
self.connected = threading.Event()
self.middleware = None
self.remote_ip = None
self._subscribe_lock = threading.Lock()
self._subscriptions = defaultdict(list)
self._on_connect_callbacks = []
self._on_disconnect_callbacks = []
def run(self):
set_thread_name('ha_connection')
retry = 5
refused = False
while True:
try:
self.connect_and_wait()
refused = False
except ConnectionRefusedError:
if not refused:
logger.error(f'Persistent connection refused, retrying every {retry} seconds')
refused = True
except Exception:
logger.error('Remote connection failed', exc_info=True)
refused = False
time.sleep(retry)
def connect_and_wait(self):
try:
with Client(f'ws://{self.remote_ip}:6000/websocket', reserved_ports=True) as c:
self.client = c
self.connected.set()
# Subscribe to all events on connection
with self._subscribe_lock:
for name in self._subscriptions:
self.client.subscribe(name, partial(self._sub_callback, name))
self._on_connect()
c._closed.wait()
except OSError as e:
if e.errno in (
errno.EPIPE, # Happens when failover is configured on cxl device that has no link
errno.ENETDOWN, errno.EHOSTDOWN, errno.ENETUNREACH, errno.EHOSTUNREACH,
errno.ECONNREFUSED,
) or isinstance(e, socket.timeout):
raise ConnectionRefusedError()
raise
finally:
if self.connected.is_set():
# Only happens if we have successfully connected once
self._on_disconnect()
self.client = None
self.connected.clear()
def is_connected(self):
return self.connected.is_set()
def register_connect(self, cb):
"""
        Register a callback to be called every time we connect to the other node.
"""
self._on_connect_callbacks.append(cb)
def _on_connect(self):
"""
        Called every time a connection has been established.
"""
for cb in self._on_connect_callbacks:
try:
cb(self.middleware)
except Exception:
logger.error('Failed to run on_connect for remote client', exc_info=True)
def register_disconnect(self, cb):
"""
        Register a callback to be called every time we disconnect from the other node.
"""
self._on_disconnect_callbacks.append(cb)
def _on_disconnect(self):
"""
        Called every time the connection is closed, for whatever reason.
"""
for cb in self._on_disconnect_callbacks:
try:
cb(self.middleware)
except Exception:
logger.error('Failed to run on_disconnect for remote client', exc_info=True)
def call(self, *args, **kwargs):
try:
if not self.connected.wait(timeout=20):
if self.remote_ip is None:
raise CallError('Unable to determine remote node IP', errno.EHOSTUNREACH)
raise CallError('Remote connection unavailable', errno.ECONNREFUSED)
return self.client.call(*args, **kwargs)
except AttributeError as e:
# ws4py traceback which can happen when connection is lost
if "'NoneType' object has no attribute 'text_message'" in str(e):
raise CallError('Remote connection closed.', errno.ECONNRESET)
else:
raise
except ClientException as e:
raise CallError(str(e), e.errno or errno.EFAULT)
def subscribe(self, name, callback):
# Only subscribe if we are already connected, otherwise simply register it
if name not in self._subscriptions and self.is_connected():
with self._subscribe_lock:
self.client.subscribe(name, partial(self._sub_callback, name))
self._subscriptions[name].append(callback)
def _sub_callback(self, name, type_, **message):
for callback in self._subscriptions.get(name, []):
try:
callback(self.middleware, type_, **message)
except Exception:
logger.warning('Failed to run callback for %s', name, exc_info=True)
def sendfile(self, token, local_path, remote_path):
r = requests.post(
f'http://{self.remote_ip}:6000/_upload/',
files=[
('data', json.dumps({
'method': 'filesystem.put',
'params': [remote_path],
})),
('file', open(local_path, 'rb')),
],
headers={
'Authorization': f'Token {token}',
},
)
job_id = r.json()['job_id']
# TODO: use event subscription in the client instead of polling
while True:
rjob = self.client.call('core.get_jobs', [('id', '=', job_id)])
if rjob:
rjob = rjob[0]
if rjob['state'] == 'FAILED':
raise CallError(
                        f'Failed to send {local_path} to Standby Controller: {rjob["error"]}.'
)
elif rjob['state'] == 'ABORTED':
raise CallError(
f'Failed to send {local_path} to Standby Controller, job aborted by user.'
)
elif rjob['state'] == 'SUCCESS':
break
time.sleep(0.5)
class FailoverService(Service):
CLIENT = RemoteClient()
@private
async def remote_ip(self):
node = await self.middleware.call('failover.node')
if node == 'A':
remote = '169.254.10.2'
elif node == 'B':
remote = '169.254.10.1'
else:
raise CallError(f'Node {node} invalid for call_remote', errno.EHOSTUNREACH)
return remote
@accepts(
Str('method'),
List('args', default=[]),
Dict(
'options',
Int('timeout'),
Bool('job', default=False),
Bool('job_return', default=None, null=True),
Any('callback'),
),
)
def call_remote(self, method, args, options=None):
"""
Call a method in the other node.
"""
options = options or {}
job_return = options.get('job_return')
if job_return is not None:
options['job'] = 'RETURN'
try:
return self.CLIENT.call(method, *args, **options)
except CallTimeout:
raise CallError('Call timeout', errno.ETIMEDOUT)
@private
def sendfile(self, token, src, dst):
self.CLIENT.sendfile(token, src, dst)
@private
async def ensure_remote_client(self):
if self.CLIENT.remote_ip is not None:
return
try:
self.CLIENT.remote_ip = await self.middleware.call('failover.remote_ip')
self.CLIENT.middleware = self.middleware
start_daemon_thread(target=self.CLIENT.run)
except CallError:
pass
@private
def remote_connected(self):
return self.CLIENT.is_connected()
@private
def remote_subscribe(self, name, callback):
self.CLIENT.subscribe(name, callback)
@private
def remote_on_connect(self, callback):
self.CLIENT.register_connect(callback)
@private
def remote_on_disconnect(self, callback):
self.CLIENT.register_disconnect(callback)
async def setup(middleware):
if await middleware.call('failover.licensed'):
await middleware.call('failover.ensure_remote_client')
| {
"pile_set_name": "Github"
} |
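As a rough usage sketch, another middleware plugin could reach the standby node through failover.call_remote; the method name 'system.info' and the timeout value are illustrative, not taken from the file above:

```python
from middlewared.service import CallError, Service


class ExampleService(Service):

    async def standby_info(self):
        try:
            return await self.middleware.call(
                'failover.call_remote', 'system.info', [], {'timeout': 10},
            )
        except CallError as e:
            # call_remote surfaces ETIMEDOUT / ECONNREFUSED style errors
            # when the peer is down or the connection never came up.
            self.logger.warning('Standby node unreachable: %s', e)
            return None
```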
/**
* @license
* Copyright (c) 2018 The Polymer Project Authors. All rights reserved.
* This code may only be used under the BSD style license found at
* http://polymer.github.io/LICENSE.txt
* The complete set of authors may be found at
* http://polymer.github.io/AUTHORS.txt
* The complete set of contributors may be found at
* http://polymer.github.io/CONTRIBUTORS.txt
* Code distributed by Google as part of the polymer project is also
* subject to an additional IP rights grant found at
* http://polymer.github.io/PATENTS.txt
*/
import * as astTypes from 'ast-types';
import {NodePath} from 'ast-types';
import * as estree from 'estree';
/**
 * Remove any top-level "use strict" directives from a program. Function and
 * class bodies are deliberately left alone, since "use strict" remains
 * meaningful inside them.
 */
export function removeToplevelUseStrict(program: estree.Program) {
astTypes.visit(program, {
// Don't delve into any function or class bodies.
visitFunctionDeclaration() {
return false;
},
visitFunctionExpression() {
return false;
},
visitClassBody() {
return false;
},
visitThisExpression() {
return false;
},
visitLiteral(path: NodePath<estree.SimpleLiteral>) {
// A sloppy way of detecting if the script is intended to be strict mode.
if (path.node.value === 'use strict' && path.parent &&
path.parent.node.type === 'ExpressionStatement' &&
path.parent.parent && path.parent.parent.node.type === 'Program') {
path.prune();
}
return false;
},
});
}
| {
"pile_set_name": "Github"
} |
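A hedged usage sketch; the choice of recast for parsing and printing is an assumption (the original project wires this function into its own AST pipeline):

```ts
import * as recast from 'recast';
import * as estree from 'estree';
// assuming removeToplevelUseStrict is imported from the module above

const file = recast.parse(`'use strict';\ndoWork();`);
removeToplevelUseStrict(file.program as estree.Program);
console.log(recast.print(file).code); // doWork(); — directive pruned
```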
HOST=$(or $(host),127.0.0.1)
PORT=$(or $(port),19530)
build:
docker build --network=host -t milvusdb/mishards .
push:
docker push milvusdb/mishards
pull:
docker pull milvusdb/mishards
deploy: clean_deploy
cd all_in_one && docker-compose -f all_in_one.yml up -d && cd -
clean_deploy:
cd all_in_one && docker-compose -f all_in_one.yml down && cd -
probe_deploy:
docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py"
deploy_m: clean_deploy_m
cd all_in_one_with_mysql && docker-compose -f all_in_one.yml up -d && cd -
clean_deploy_m:
cd all_in_one_with_mysql && docker-compose -f all_in_one.yml down && cd -
probe_deploy_m:
docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one_with_mysql/probe_test.py"
cluster:
cd kubernetes_demo;./start.sh baseup;sleep 10;./start.sh appup;cd -
clean_cluster:
cd kubernetes_demo;./start.sh cleanup;cd -
cluster_status:
kubectl get pods -n milvus -o wide
probe_cluster:
@echo
$(shell kubectl get service -n mishards | grep mishards-proxy-servers | awk {'print $$4,$$5'} | awk -F"[: ]" {'print "docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c \"python all_in_one/probe_test.py --port="$$2" --host="$$1"\""'})
probe:
docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py --port=${PORT} --host=${HOST}"
clean_coverage:
rm -rf cov_html
clean: clean_coverage clean_deploy clean_cluster clean_deploy_m
style:
pycodestyle --config=.
coverage:
pytest --cov-report html:cov_html --cov=mishards
test:
pytest
| {
"pile_set_name": "Github"
} |
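Typical invocations, for reference; `host` and `port` feed the `$(or ...)` defaults at the top of the Makefile:

```sh
make deploy                                # all-in-one stack via docker-compose
make probe host=192.168.1.10 port=19530    # probe a non-default endpoint
make clean                                 # tear down deployments, drop coverage output
```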
/*
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/if.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "debugfs.h"
#include "debugfs_netdev.h"
static ssize_t ieee80211_if_read(
struct ieee80211_sub_if_data *sdata,
char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int))
{
char buf[70];
ssize_t ret = -EINVAL;
read_lock(&dev_base_lock);
if (sdata->dev->reg_state == NETREG_REGISTERED)
ret = (*format)(sdata, buf, sizeof(buf));
read_unlock(&dev_base_lock);
if (ret != -EINVAL)
ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
return ret;
}
static ssize_t ieee80211_if_write(
struct ieee80211_sub_if_data *sdata,
const char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
{
u8 *buf;
ssize_t ret;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = -EFAULT;
if (copy_from_user(buf, userbuf, count))
goto freebuf;
ret = -ENODEV;
rtnl_lock();
if (sdata->dev->reg_state == NETREG_REGISTERED)
ret = (*write)(sdata, buf, count);
rtnl_unlock();
freebuf:
kfree(buf);
return ret;
}
#define IEEE80211_IF_FMT(name, field, format_string) \
static ssize_t ieee80211_if_fmt_##name( \
const struct ieee80211_sub_if_data *sdata, char *buf, \
int buflen) \
{ \
return scnprintf(buf, buflen, format_string, sdata->field); \
}
#define IEEE80211_IF_FMT_DEC(name, field) \
IEEE80211_IF_FMT(name, field, "%d\n")
#define IEEE80211_IF_FMT_HEX(name, field) \
IEEE80211_IF_FMT(name, field, "%#x\n")
#define IEEE80211_IF_FMT_SIZE(name, field) \
IEEE80211_IF_FMT(name, field, "%zd\n")
#define IEEE80211_IF_FMT_ATOMIC(name, field) \
static ssize_t ieee80211_if_fmt_##name( \
const struct ieee80211_sub_if_data *sdata, \
char *buf, int buflen) \
{ \
return scnprintf(buf, buflen, "%d\n", atomic_read(&sdata->field));\
}
#define IEEE80211_IF_FMT_MAC(name, field) \
static ssize_t ieee80211_if_fmt_##name( \
const struct ieee80211_sub_if_data *sdata, char *buf, \
int buflen) \
{ \
return scnprintf(buf, buflen, "%pM\n", sdata->field); \
}
#define IEEE80211_IF_FMT_DEC_DIV_16(name, field) \
static ssize_t ieee80211_if_fmt_##name( \
const struct ieee80211_sub_if_data *sdata, \
char *buf, int buflen) \
{ \
return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \
}
#define __IEEE80211_IF_FILE(name, _write) \
static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
return ieee80211_if_read(file->private_data, \
userbuf, count, ppos, \
ieee80211_if_fmt_##name); \
} \
static const struct file_operations name##_ops = { \
.read = ieee80211_if_read_##name, \
.write = (_write), \
.open = mac80211_open_file_generic, \
}
#define __IEEE80211_IF_FILE_W(name) \
static ssize_t ieee80211_if_write_##name(struct file *file, \
const char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
return ieee80211_if_write(file->private_data, userbuf, count, \
ppos, ieee80211_if_parse_##name); \
} \
__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)
#define IEEE80211_IF_FILE(name, field, format) \
IEEE80211_IF_FMT_##format(name, field) \
__IEEE80211_IF_FILE(name, NULL)
/* common attributes */
IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
HEX);
IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
HEX);
/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_local *local = sdata->local;
int err;
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
smps_mode == IEEE80211_SMPS_STATIC)
return -EINVAL;
/* auto should be dynamic if in PS mode */
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
(smps_mode == IEEE80211_SMPS_DYNAMIC ||
smps_mode == IEEE80211_SMPS_AUTOMATIC))
return -EINVAL;
/* supported only on managed interfaces for now */
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
mutex_lock(&local->iflist_mtx);
err = __ieee80211_request_smps(sdata, smps_mode);
mutex_unlock(&local->iflist_mtx);
return err;
}
static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
[IEEE80211_SMPS_AUTOMATIC] = "auto",
[IEEE80211_SMPS_OFF] = "off",
[IEEE80211_SMPS_STATIC] = "static",
[IEEE80211_SMPS_DYNAMIC] = "dynamic",
};
static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
char *buf, int buflen)
{
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
return snprintf(buf, buflen, "request: %s\nused: %s\n",
smps_modes[sdata->u.mgd.req_smps],
smps_modes[sdata->u.mgd.ap_smps]);
}
static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
const char *buf, int buflen)
{
enum ieee80211_smps_mode mode;
for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
if (strncmp(buf, smps_modes[mode], buflen) == 0) {
int err = ieee80211_set_smps(sdata, mode);
if (!err)
return buflen;
return err;
}
}
return -EINVAL;
}
__IEEE80211_IF_FILE_W(smps);
/* AP attributes */
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
static ssize_t ieee80211_if_fmt_num_buffered_multicast(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
return scnprintf(buf, buflen, "%u\n",
skb_queue_len(&sdata->u.ap.ps_bc_buf));
}
__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
/* WDS attributes */
IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
#ifdef CONFIG_MAC80211_MESH
/* Mesh stats attributes */
IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
u.mesh.mshstats.dropped_frames_no_route, DEC);
IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
/* Mesh parameters */
IEEE80211_IF_FILE(dot11MeshMaxRetries,
u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
IEEE80211_IF_FILE(dot11MeshRetryTimeout,
u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
IEEE80211_IF_FILE(path_refresh_time,
u.mesh.mshcfg.path_refresh_time, DEC);
IEEE80211_IF_FILE(min_discovery_timeout,
u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
#endif
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
sdata, &name##_ops);
#define DEBUGFS_ADD_MODE(name, mode) \
debugfs_create_file(#name, mode, sdata->debugfs.dir, \
sdata, &name##_ops);
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(bssid);
DEBUGFS_ADD(aid);
DEBUGFS_ADD(last_beacon);
DEBUGFS_ADD(ave_beacon);
DEBUGFS_ADD_MODE(smps, 0600);
}
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(num_sta_ps);
DEBUGFS_ADD(dtim_count);
DEBUGFS_ADD(num_buffered_multicast);
}
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(peer);
}
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
}
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
{
}
#ifdef CONFIG_MAC80211_MESH
static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir = debugfs_create_dir("mesh_stats",
sdata->debugfs.dir);
#define MESHSTATS_ADD(name)\
debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
MESHSTATS_ADD(fwded_mcast);
MESHSTATS_ADD(fwded_unicast);
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
MESHSTATS_ADD(estab_plinks);
#undef MESHSTATS_ADD
}
static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir = debugfs_create_dir("mesh_config",
sdata->debugfs.dir);
#define MESHPARAMS_ADD(name) \
debugfs_create_file(#name, 0600, dir, sdata, &name##_ops);
MESHPARAMS_ADD(dot11MeshMaxRetries);
MESHPARAMS_ADD(dot11MeshRetryTimeout);
MESHPARAMS_ADD(dot11MeshConfirmTimeout);
MESHPARAMS_ADD(dot11MeshHoldingTimeout);
MESHPARAMS_ADD(dot11MeshTTL);
MESHPARAMS_ADD(auto_open_plinks);
MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
MESHPARAMS_ADD(min_discovery_timeout);
#undef MESHPARAMS_ADD
}
#endif
static void add_files(struct ieee80211_sub_if_data *sdata)
{
if (!sdata->debugfs.dir)
return;
switch (sdata->vif.type) {
case NL80211_IFTYPE_MESH_POINT:
#ifdef CONFIG_MAC80211_MESH
add_mesh_stats(sdata);
add_mesh_config(sdata);
#endif
break;
case NL80211_IFTYPE_STATION:
add_sta_files(sdata);
break;
case NL80211_IFTYPE_ADHOC:
/* XXX */
break;
case NL80211_IFTYPE_AP:
add_ap_files(sdata);
break;
case NL80211_IFTYPE_WDS:
add_wds_files(sdata);
break;
case NL80211_IFTYPE_MONITOR:
add_monitor_files(sdata);
break;
case NL80211_IFTYPE_AP_VLAN:
add_vlan_files(sdata);
break;
default:
break;
}
}
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
{
char buf[10+IFNAMSIZ];
sprintf(buf, "netdev:%s", sdata->name);
sdata->debugfs.dir = debugfs_create_dir(buf,
sdata->local->hw.wiphy->debugfsdir);
add_files(sdata);
}
void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
{
if (!sdata->debugfs.dir)
return;
debugfs_remove_recursive(sdata->debugfs.dir);
sdata->debugfs.dir = NULL;
}
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir;
char buf[10 + IFNAMSIZ];
dir = sdata->debugfs.dir;
if (!dir)
return;
sprintf(buf, "netdev:%s", sdata->name);
if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
"dir to %s\n", buf);
}
| {
"pile_set_name": "Github"
} |
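For readers untangling the macro layering above, this is approximately what a single IEEE80211_IF_FILE(aid, u.mgd.aid, DEC) invocation expands to (whitespace added):

```c
/* IEEE80211_IF_FMT_DEC(aid, u.mgd.aid) — the format helper */
static ssize_t ieee80211_if_fmt_aid(
	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
	return scnprintf(buf, buflen, "%d\n", sdata->u.mgd.aid);
}

/* __IEEE80211_IF_FILE(aid, NULL) — the read wrapper and read-only fops */
static ssize_t ieee80211_if_read_aid(struct file *file, char __user *userbuf,
				     size_t count, loff_t *ppos)
{
	return ieee80211_if_read(file->private_data, userbuf, count, ppos,
				 ieee80211_if_fmt_aid);
}

static const struct file_operations aid_ops = {
	.read  = ieee80211_if_read_aid,
	.write = NULL,
	.open  = mac80211_open_file_generic,
};
```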
{
"CREATE_EContractCreateNEContractInInit_Tr" : {
"_info" : {
"comment" : "",
"filling-rpc-server" : "Geth-1.9.6-unstable-63b18027-20190920",
"filling-tool-version" : "retesteth-0.0.1+commit.0ae18aef.Linux.g++",
"lllcversion" : "Version: 0.5.12-develop.2019.9.13+commit.2d601a4f.Linux.g++",
"source" : "src/GeneralStateTestsFiller/stCreateTest/CREATE_EContractCreateNEContractInInit_TrFiller.json",
"sourceHash" : "8ad630782495fb82560a17a8f2975fe03869db860b24ee44686ec3582e6f1db4"
},
"env" : {
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty" : "0x020000",
"currentGasLimit" : "0x989680",
"currentNumber" : "0x01",
"currentTimestamp" : "0x03e8",
"previousHash" : "0x5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
},
"post" : {
"Istanbul" : [
{
"indexes" : {
"data" : 0,
"gas" : 0,
"value" : 0
},
"hash" : "0xae03eb73853e0d4d4d859e1312290fe8b0b6566a2cc4ca678e2f7cf49c60f649",
"logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
}
]
},
"pre" : {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0xe8d4a51000",
"code" : "0x",
"nonce" : "0x00",
"storage" : {
}
},
"0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0xe8d4a51000",
"code" : "0x600c60015500",
"nonce" : "0x00",
"storage" : {
}
}
},
"transaction" : {
"data" : [
"0x6000600060006000600073c94f5374fce5edbc8e2a8697c15331677e6ebf0b61ea60f1506d64600c6000556000526005601bf3600052600e60126000f0"
],
"gasLimit" : [
"0x0927c0"
],
"gasPrice" : "0x01",
"nonce" : "0x00",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"to" : "",
"value" : [
"0x00"
]
}
}
} | {
"pile_set_name": "Github"
} |
MIIP6gYJKoZIhvcNAQcCoIIP2zCCD9cCAQExCzAJBgUrDgMCGgUAMH8GCSqGSIb3DQEHAaByBHB7
InRlYW1JZCI6IjhRNUY2TTNUTlMiLCJkb21haW4iOiJuLXBsdWdpbi10ZXN0LmZpcmViYXNlYXBw
LmNvbSIsImRhdGVDcmVhdGVkIjoiMjAxOS0xMS0xOCwxMDozNjo1MCIsInZlcnNpb24iOjF9oIIM
sjCCA_MwggLboAMCAQICARcwDQYJKoZIhvcNAQEFBQAwYjELMAkGA1UEBhMCVVMxEzARBgNVBAoT
CkFwcGxlIEluYy4xJjAkBgNVBAsTHUFwcGxlIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRYwFAYD
VQQDEw1BcHBsZSBSb290IENBMB4XDTA3MDQxMjE3NDMyOFoXDTIyMDQxMjE3NDMyOFoweTELMAkG
A1UEBhMCVVMxEzARBgNVBAoTCkFwcGxlIEluYy4xJjAkBgNVBAsTHUFwcGxlIENlcnRpZmljYXRp
b24gQXV0aG9yaXR5MS0wKwYDVQQDEyRBcHBsZSBpUGhvbmUgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCjHr7wR8C0nhBbRqS4IbhPhiFwKEVg
XBzDyApkY4j7_Gnu-FT86Vu3Bk4EL8NrM69ETOpLgAm0h_ZbtP1k3bNy4BOz_RfZvOeo7cKMYcIq
-ezOpV7WaetkC40Ij7igUEYJ3Bnk5bCUbbv3mZjE6JtBTtTxZeMbUnrc6APZbh3aEFWGpClYSQzq
R9cVNDP2wKBESnC-LLUqMDeMLhXr0eRslzhVVrE1K1jqRKMmhe7IZkrkz4nwPWOtKd6tulqz3KWj
mqcJToAWNWWkhQ1jez5jitp9SkbsozkYNLnGKGUYvBNgnH9XrBTJie2htodoUraETrjIg-z5nhmr
s8ELhsefAgMBAAGjgZwwgZkwDgYDVR0PAQH_BAQDAgGGMA8GA1UdEwEB_wQFMAMBAf8wHQYDVR0O
BBYEFOc0Ki4i3jlga7SUzneDYS8xoHw1MB8GA1UdIwQYMBaAFCvQaUeUdgn-9GuNLkCm90dNfwhe
MDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cuYXBwbGUuY29tL2FwcGxlY2Evcm9vdC5jcmww
DQYJKoZIhvcNAQEFBQADggEBAB3R1XvddE7XF_yCLQyZm15CcvJp3NVrXg0Ma0s-exQl3rOU6KD6
D4CJ8hc9AAKikZG-dFfcr5qfoQp9ML4AKswhWev9SaxudRnomnoD0Yb25_awDktJ-qO3QbrX0eNW
oX2Dq5eu-FFKJsGFQhMmjQNUZhBeYIQFEjEra1TAoMhBvFQe51StEwDSSse7wYqvgQiO8EYKvyem
vtzPOTqAcBkjMqNrZl2eTahHSbJ7RbVRM6d0ZwlOtmxvSPcsuTMFRGtFvnRLb7KGkbQ-JSglnrPC
UYb8T-WvO6q7RCwBSeJ0szT6RO8UwhHyLRkaUYnTCEpBbFhW3ps64QVX5WLP0g8wggP4MIIC4KAD
AgECAgg9ciDjz4zyJTANBgkqhkiG9w0BAQUFADB5MQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBw
bGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxLTArBgNVBAMT
JEFwcGxlIGlQaG9uZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNDA3MTEwMTM1MjVaFw0y
MjA0MTIxNzQzMjhaMFkxCzAJBgNVBAYTAlVTMRMwEQYDVQQKDApBcHBsZSBJbmMuMTUwMwYDVQQD
DCxBcHBsZSBpUGhvbmUgT1MgUHJvdmlzaW9uaW5nIFByb2ZpbGUgU2lnbmluZzCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAOfZmsMXo8npB9XHmaS0dSFMEQNoHzAsB5x3iDFIyEQEjYHN
esb40_ZHHG1O7rrmFIVxxO95s0t12miFpnNVosaHUXvXHIG1AWrjjJHueir8z5Ve-XGgKH75q9Th
zg5PlfPK7beVCjL_JZk29pidJItkV7b1_b5FIfmuRHa36rA7aZ9tf37XEZuy6kOi5f0mR87MxAfi
53XG2_x-FrWkk8Z8rz293cAvgHh2Ok582GRPKiVRh0F2Dm7gk6Qhqj5dyl-niwtApS-zs2pKx8ZT
tR9cLIqI7uSQL5_dUj4WQcY4HmgkjzEt22lxz6DzQhooEUp0nKbWeElYDcS8HFvxPXsCAwEAAaOB
ozCBoDAdBgNVHQ4EFgQUpF5rO_x6R3KRcAnBJL0vO8l7oL4wDAYDVR0TAQH_BAIwADAfBgNVHSME
GDAWgBTnNCouIt45YGu0lM53g2EvMaB8NTAwBgNVHR8EKTAnMCWgI6Ahhh9odHRwOi8vY3JsLmFw
cGxlLmNvbS9pcGhvbmUuY3JsMAsGA1UdDwQEAwIHgDARBgsqhkiG92NkBgICAQQCBQAwDQYJKoZI
hvcNAQEFBQADggEBAIq2Vk5B0rHzIUOdC9nH_7SYWJntQacw8e_b2oBtIbazXNy-h_E5IbzEodom
0u2m8e3AEZUZrEe4Kg5pmNTm5s5r6iLBK6cBbkFMLB3jI4yGJ6OMF5zMG-7YZDMPRA6LO0hiE2JU
03FNki2BOv-my45cQ3FsiDMiPCA_HXi5_xoqIehzac-boaHhPekMF7ypc9fpUrrCth-hIoU-uFwa
spp7n8zLUDr-lsf8SEf0JKKtPkz7SttnnANxFSc_g1L7svQZFqk-qewU7F7CCqfzTdEwqtStuDKh
UC9NVchCJ6wcznJk8CzgCeRMuQsgNTec1QuRxDEd0CviXIK9fdD-CJkwggS7MIIDo6ADAgECAgEC
MA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBcHBsZSBJbmMuMSYwJAYD
VQQLEx1BcHBsZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEWMBQGA1UEAxMNQXBwbGUgUm9vdCBD
QTAeFw0wNjA0MjUyMTQwMzZaFw0zNTAyMDkyMTQwMzZaMGIxCzAJBgNVBAYTAlVTMRMwEQYDVQQK
EwpBcHBsZSBJbmMuMSYwJAYDVQQLEx1BcHBsZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEWMBQG
A1UEAxMNQXBwbGUgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOSRqQkf
kdseR1DrBe1eeYQt6zaiV0xV7IsZid75S2z1B6siMALoGD74UAnTf0GomPnRymacJGsR0KO75Bsq
wx-VnnoMpEeLW9QWNzPLxA9NzhRp0ckZcvVdDtV_X5vyJQO6VY9NXQ3xZDUjFUsVWR2zlPf2nJ7P
ULrBWFBnjwi0IPfLrCwgb3C2PwEwjLdDzw-dPfMrSSgayP7OtbkO2V4c1ss9tTqt9A8OAJILsSEW
LnTVPA3bYharo3GSR1NVwa8vQbP4--NwzeajTEV-H0xrUJZBicR0YgsQg0GHM4qBsTBY7FoEMoxo
s48d3mVz_2deZbxJ2HafMxRloXeUyS0CAwEAAaOCAXowggF2MA4GA1UdDwEB_wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH_MB0GA1UdDgQWBBQr0GlHlHYJ_vRrjS5ApvdHTX8IXjAfBgNVHSMEGDAWgBQr
0GlHlHYJ_vRrjS5ApvdHTX8IXjCCAREGA1UdIASCAQgwggEEMIIBAAYJKoZIhvdjZAUBMIHyMCoG
CCsGAQUFBwIBFh5odHRwczovL3d3dy5hcHBsZS5jb20vYXBwbGVjYS8wgcMGCCsGAQUFBwICMIG2
GoGzUmVsaWFuY2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2Nl
cHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5kIGNvbmRpdGlv
bnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRpZmljYXRpb24gcHJhY3RpY2Ug
c3RhdGVtZW50cy4wDQYJKoZIhvcNAQEFBQADggEBAFw2mUwteLftjJvc83eb8nbSdzBPwR-Fg4Ub
mT1HN_Kpm0COLNSxkBLYvvRzm-7SZA_LeU802KI--Xj_a8gH7H05g4tTINM4xLG_mk8Ka_8r_Fmn
BQl8F0BWER5007eLIztHo9VvJOLr0bdw3w9F4SfK8W147ee1Fxeo3H4iNcol1dkP1mvUoiQjEfeh
rI9zgWDGG1sJL5Ky-ERI8GA4nhX1PSZnIIozavcNgs_e66Mv-VNqW2TAYzN39zoHLFbr2g8hDtq6
cxlPtdk2f8GHVdmnmbkyQvvY1XGefqFStxu9k0IkEirHDx22TZxeY8hLgBdQqorV2uT80AkHN7B1
dSExggKMMIICiAIBATCBhTB5MQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEmMCQG
A1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxLTArBgNVBAMTJEFwcGxlIGlQaG9u
ZSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eQIIPXIg48-M8iUwCQYFKw4DAhoFAKCB3DAYBgkqhkiG
9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xOTExMTgxMDM2NTBaMCMGCSqGSIb3
DQEJBDEWBBTHKvPnCvbCQ1qTFVUswmju5UIDozApBgkqhkiG9w0BCTQxHDAaMAkGBSsOAwIaBQCh
DQYJKoZIhvcNAQEBBQAwUgYJKoZIhvcNAQkPMUUwQzAKBggqhkiG9w0DBzAOBggqhkiG9w0DAgIC
AIAwDQYIKoZIhvcNAwICAUAwBwYFKw4DAgcwDQYIKoZIhvcNAwICASgwDQYJKoZIhvcNAQEBBQAE
ggEAxVzPObQJuj9zEX3d867UO2sSbytzXfJawKtOJXuhEhfoS0sVsXBk6IpKH6I1ZIYBrt64vjeG
WNXkUOpkX2ZMGpITdmtuivmRKZtLW7VgR26xUaNScJoeDdX4o8ApheebPqq00i0wsurkfuWPxghK
d_RM-1fW5xjSfJ0kl-aKysP0oATSj9LrDbO1cR6kPxNzUFnA1R7Wguq3o6KRJdd43qL9g48jZU6O
UWjlSAGiohCu1i_MbydWjjniBlOFk3mw4LqwadyPk4-er_NHFWv2G0VRoZTZtO1cu2MEbycuPIdv
aBch97v3J8McglXfItgE0aYRxdoPM80nT8JfQ6xJ7g
| {
"pile_set_name": "Github"
} |
after_IN
sixteen_CD
years_NNS
francis_NP
ford_NP
copolla_NN
has_HVZ
again_RB
returned_VBN
to_IN
his_PP$
favorite_JJ
project_NN
,_,
making_VBG
the_DT
third_OD
installment_NN
in_IN
the_DT
godfather-trilogy_NN
._.
this_DT
new_JJ
film_NN
has_HVZ
been_BEN
underrated_VBN
for_IN
no_DT
reason_NN
._.
it_PP
is_BEZ
as_IN
intellectual_JJ
and_CC
majestically_RB
made_VBN
as_IN
copolla_NN
's_POS
pervious_JJ
films_NNS
._.
it_PP
is_BEZ
also_RB
more_DT
psychological_JJ
,_,
pessimistic_JJ
and_CC
more_DT
tragic_JJ
than_CS
the_DT
first_OD
two_CD
._.
the_DT
only_JJ
regret_NN
is_BEZ
the_DT
unconvincing_JJ
performance_NN
by_IN
the_DT
newcomer_NN
sofia_NP
copolla_NN
and_CC
some_DT
"_"
unfinished_JJ
"_"
developments_NNS
of_IN
some_DT
characters_NNS
._.
the_DT
film_NN
elegantly_RB
begins_VBZ
with_IN
nino_???
rota_NN
's_HVZ
recognizable_JJ
musical_JJ
score_NN
,_,
the_DT
beautiful_JJ
skyscrapers_NNS
of_IN
new_JJ
york_NP
and_CC
michael_NP
's_POS
voice_NN
as_IN
he_PP
is_BEZ
writing_VBG
a_DT
letter_NN
to_IN
his_PP$
children_NNS
:_:
"_"
the_DT
only_RB
wealth_NN
in_IN
this_DT
world_NN
is_BEZ
children_NNS
._.
more_DT
than_CS
all_PDT
money_NN
and_CC
power_NN
on_IN
earth_NN
,_,
you_PP
are_BER
my_PP$
treasure_NN
"_"
._.
the_DT
year_NN
is_BEZ
1979_CD
and_CC
michael_NP
corleone_NN
has_HVZ
used_VBN
the_DT
time_NN
since_CS
the_DT
ending_NN
of_IN
"_"
part_NN
ii_CD
"_"
to_IN
make_NN
his_PP$
father_NN
's_POS
dream_NN
come_VB
true_JJ
-_-
making_VBG
the_DT
corleone_NN
family_NN
legitimate_JJ
._.
michael_NP
sold_VBD
all_PDT
his_PP$
casinos_NNS
and_CC
invests_VBZ
only_RB
in_IN
gambling_NN
._.
constantly_RB
haunted_VBN
by_IN
the_DT
past_NN
,_,
his_PP$
only_JJ
reason_NN
to_IN
live_JJ
is_BEZ
his_PP$
children_NNS
._.
the_DT
family_NN
has_HVZ
amassed_VBD
unimaginable_JJ
wealth_NN
,_,
and_CC
as_CS
the_DT
film_NN
opens_VBZ
michael_NP
corleone_PN
(_(
al_FW
pacino_NP
)_)
is_BEZ
being_NN
invested_VBN
with_IN
a_DT
great_JJ
honor_NN
by_IN
the_DT
church_NN
._.
later_RBR
that_CS
day_NN
,_,
at_IN
a_DT
reception_NN
,_,
his_PP$
daughter_NN
announces_VBZ
a_DT
corleone_NN
family_NN
gift_NN
to_IN
the_DT
church_NN
and_CC
the_DT
charities_NNS
of_IN
sicily_NN
,_,
"_"
a_DT
check_NN
in_IN
the_DT
amount_NN
of_IN
$_???
100_CD
million_NN
._.
"_"
but_CC
the_DT
corleones_NNS
are_BER
about_IN
to_TO
find_VB
,_,
as_CS
others_NNS
have_HV
throughout_IN
history_NN
,_,
that_CS
you_PP
cannot_NN
buy_VB
forgiveness_NN
._.
sure_JJ
,_,
you_PP
can_MD
do_DO
business_NN
with_IN
evil_JJ
men_NNS
inside_IN
the_DT
church_NN
,_,
for_IN
all_PDT
men_NNS
are_BER
fallible_JJ
and_CC
capable_JJ
of_IN
sin_NN
._.
but_CC
god_NP
does_DOZ
not_XNOT
take_VB
payoffs_NNS
._.
the_DT
plot_NN
of_IN
the_DT
movie_NN
,_,
concocted_VBN
by_IN
coppola_NP
and_CC
mario_NP
puzo_???
in_CD
a_NP
screenplay_NN
inspired_VBN
by_IN
headlines_NNS
,_,
brings_VBZ
the_DT
corleone_NN
family_NN
into_IN
the_DT
inner_JJ
circles_NNS
of_IN
corruption_NN
in_IN
the_DT
vatican_NP
._.
there_EX
is_BEZ
a_DT
moment_NN
in_IN
"_"
godfather_NN
iii_CD
"_"
where_CS
michael_NP
says_VBZ
:_:
"_"
all_PDT
my_PP$
life_NN
i_PP
have_HV
been_BEN
trying_VBG
to_IN
go_NN
up_RP
in_IN
society_NN
,_,
where_CS
everything_PN
was_BEDZ
legal_JJ
._.
but_CC
the_DT
higher_JJR
i_PP
go_VB
,_,
the_DT
crookier_RBR
it_PP
becomes_VBZ
._.
._.
"_"
._.
visually_RB
this_DT
film_NN
is_BEZ
as_CS
spectacular_JJ
as_IN
the_DT
first_OD
two_CD
._.
gordon_NP
willis_NP
'_"
rich_JJ
cinematography_NN
,_,
carmine_NN
copolla_NN
's_POS
beautiful_JJ
composition_NN
and_CC
alex_NP
tavoularis_NP
'_"
wonderful_JJ
art_NN
direction_NN
could_MD
not_XNOT
be_BE
better_JJR
._.
but_CC
copolla_NN
's_POS
first_OD
two_CD
godfather-films_NNS
were_BED
more_DT
famous_JJ
for_IN
their_PP$
deep_JJ
,_,
intellectual_JJ
plots_NNS
,_,
tree_NN
dimensional_JJ
characters_NNS
and_CC
incredible_JJ
acting_JJ
,_,
than_CS
for_IN
their_PP$
visual_JJ
perfection_NN
._.
the_DT
third_OD
installment_NN
has_HVZ
only_RB
the_DT
plot_NN
and_CC
visuals_NNS
._.
some_DT
characters_NNS
could_MD
be_BE
much_DT
more_DT
developed_JJ
and_CC
the_DT
acting_JJ
,_,
although_CS
good_JJ
,_,
never_RB
accomplishes_NNS
to_IN
reach_NN
the_DT
same_DT
height_NN
of_IN
the_DT
first_OD
two_CD
films_NNS
._.
the_DT
biggest_JJS
miscasting_NN
is_BEZ
sofia_NP
copolla_NN
,_,
who_WP
is_BEZ
so_RB
unconvincing_JJ
and_CC
unemotional_JJ
that_CS
she_PP
manages_VBZ
to_IN
ruin_NN
several_DT
scenes_NNS
throughout_IN
the_DT
movie_NN
,_,
that_CS
could_MD
have_HV
been_BEN
grander_NN
and_CC
more_DT
emotional_JJ
._.
the_DT
best_JJS
performance_NN
comes_VBZ
unsurprisingly_RB
from_IN
al_NP
pacino_NP
,_,
who_WP
should_MD
have_HV
got_VBN
a_DT
nomination_NN
for_IN
best_JJS
actor_NN
at_IN
the_DT
oscars_NNS
._.
andy_NP
garcia_NP
is_BEZ
powerful_JJ
as_CS
sonny_NN
's_BEZ
son_NN
,_,
strong_JJ
,_,
focused_VBN
and_CC
loyal_JJ
._.
violence_NN
is_BEZ
natural_JJ
to_TO
him_PP
._.
he_PP
suffers_VBZ
no_DT
pangs_NNS
of_IN
conscience_NN
when_CS
he_PP
takes_VBZ
revenge_NN
on_IN
his_PP$
family_NN
's_POS
behalf_NN
,_,
and_CC
in_IN
this_DT
he_PP
is_BEZ
supposed_JJ
to_TO
be_BE
strong_JJ
in_IN
the_DT
uncomplicated_VBN
way_NN
don_NP
vito_???
corleone_NN
was_BEDZ
._.
however_RB
both_RB
kay_NP
(_(
diane_NP
keaton_NP
)_)
and_CC
connie_NP
(_(
talia_NP
shire_NN
)_)
are_BER
useless_JJ
._.
and_CC
characters_NNS
like_IN
vito_???
corleone_NN
and_CC
tom_NP
hagen_NP
are_BER
really_RB
missed_VBN
._.
the_DT
good_JJ
part_NN
is_BEZ
that_CS
michael_NP
is_BEZ
again_RB
reunited_VBN
with_IN
old_JJ
friends_NNS
,_,
that_CS
you_PP
remember_VB
from_IN
the_DT
first_OD
and_CC
second_OD
films_NNS
._.
in_IN
the_DT
third_OD
film_NN
michael_NP
has_HVZ
become_VBN
almost_RB
like_IN
his_PP$
father_NN
,_,
vito_???
in_CD
the_DT
first_OD
film_NN
and_CC
vincenzo_NP
resembles_VBZ
michael_NP
when_CS
he_PP
was_BEDZ
much_DT
younger_JJR
._.
this_DT
parallel_JJ
could_MD
be_BE
more_DT
interesting_JJ
if_CS
vincenzo_NP
's_POS
character_NN
was_BEDZ
more_DT
developed_JJ
._.
many_DT
have_HV
pointed_VBN
out_RP
that_CS
making_VBG
the_DT
third_OD
film_NN
,_,
was_BEDZ
unnecessary_JJ
._.
i_PP
disagree_VB
._.
it_PP
is_BEZ
a_DT
beautiful_JJ
film_NN
of_IN
great_JJ
importance_NN
,_,
completing_VBG
the_DT
tragic_JJ
saga_NN
of_IN
the_DT
corleone_NN
family_NN
._.
the_DT
first_OD
film_NN
showed_VBD
some_DT
horrible_JJ
results_NNS
of_IN
corleone_PN
's_POS
life_NN
._.
it_PP
showed_VBD
michael_NP
making_VBG
a_DT
choice_NN
;_;
the_DT
second_NN
showed_VBD
a_DT
man_NN
damning_VBG
himself_PPX
for_IN
his_PP$
choices_NNS
and_CC
feeling_NN
the_DT
impact_NN
of_IN
changing_JJ
times_NNS
._.
a_DT
man_NN
desperately_RB
trying_VBG
to_IN
keep_NN
his_PP$
balance_NN
,_,
focus_NN
,_,
family_NN
and_CC
sanity_NN
,_,
while_CS
everything_PN
is_BEZ
crashing_JJ
all_RB
around_IN
him_PP
._.
the_DT
third_OD
film_NN
is_BEZ
a_DT
terrifying_JJ
conclusion_NN
-_-
a_DT
result_NN
of_IN
michael_NP
's_POS
life_NN
._.
the_DT
life_NN
he_PP
chose_VBD
for_IN
himself_PPX
is_BEZ
like_IN
quicksand_NN
-_-
one_CD
wrong_JJ
step_NN
and_CC
you_PP
are_BER
doomed_VBN
._.
there_EX
is_BEZ
no_DT
turning_NN
back_NN
._.
and_CC
no_DT
matter_NN
how_WRB
hard_JJ
you_PP
try_VB
to_TO
get_VB
out_RP
of_IN
it_PP
,_,
to_IN
free_JJ
yourself_PPX
,_,
no_DT
matter_NN
how_WRB
powerful_JJ
and_CC
wealthy_JJ
you_PP
are_BER
,_,
you_PP
are_BER
helpless_JJ
-_-
sinking_VBG
deeper_JJR
and_CC
deeper_JJR
till_IN
it_PP
swallows_NNS
you_PP
completely_RB
._.
the_DT
beautifully_RB
directed_JJ
last_OD
sequence_NN
is_BEZ
also_RB
the_DT
powerful_JJ
climax_NN
of_IN
the_DT
film_NN
,_,
when_CS
michael_NP
is_BEZ
sitting_VBG
alone_RB
in_IN
his_PP$
chair_NN
,_,
left_VBD
by_IN
everyone_PN
,_,
surrounded_VBN
by_IN
emptiness_NN
and_CC
memories_NNS
of_IN
his_PP$
friends_NNS
and_CC
family_NN
members_NNS
long_JJ
dead_JJ
._.
here_RB
he_PP
dies_VBZ
-_-
alone_RB
,_,
miserable_JJ
and_CC
unforgiven_JJ
| {
"pile_set_name": "Github"
} |
<view class="weui-cell__ft">
<view wx:if="{{!disabled}}" class="weui-vcode-btn" catchtap="_onTap">{{str}}</view>
<view wx:else class="weui-vcode-btn" style="color: #888;">{{str}}</view>
</view> | {
"pile_set_name": "Github"
} |
# $Id$
# Authority: dag
%{?el4:%define _without_selinux 1}
%{?el3:%define _without_selinux 1}
%{?rh9:%define _without_selinux 1}
%{?rh7:%define _without_selinux 1}
%{?el2:%define _without_ldap 1}
%{?el2:%define _without_pgsql 1}
%{?el2:%define _without_selinux 1}
%{?el2:%define _without_tls 1}
Summary: Lightweight, fast and secure FTP server
Name: pure-ftpd
Version: 1.0.36
Release: 1%{?dist}
License: BSD
Group: System Environment/Daemons
URL: http://www.pureftpd.org/
Source0: http://download.pureftpd.org/pub/pure-ftpd/releases/pure-ftpd-%{version}.tar.bz2
Source1: pure-ftpd.init
Source6: pure-ftpd.README.SELinux
Source7: pure-ftpd.pureftpd.te
Patch0: pure-ftpd-1.0.27-config.patch
Patch1: pure-ftpd-paminclude.patch
Provides: ftpserver
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
BuildRequires: libcap-devel
BuildRequires: pam-devel
BuildRequires: perl
BuildRequires: python
%{!?_without_ldap:BuildRequires: openldap-devel}
%{!?_without_mysql:BuildRequires: mysql-devel}
%{!?_without_pgsql:BuildRequires: postgresql-devel}
%{!?_without_selinux:BuildRequires: checkpolicy, selinux-policy-devel}
%{!?_without_tls:BuildRequires: openssl-devel}
Requires: chkconfig
Requires: initscripts
Requires: logrotate
Requires: usermode
%description
Pure-FTPd is a fast, production-quality, standard-conformant FTP server,
based upon Troll-FTPd. Unlike other popular FTP servers, it has no known
security flaw, it is really trivial to set up and it is especially designed
for modern Linux and FreeBSD kernels (setfsuid, sendfile, capabilities).
Features include PAM support, IPv6, chroot()ed home directories, virtual
domains, built-in LS, anti-warez system, bandwidth throttling, FXP, bounded
ports for passive downloads, UL/DL ratios, native LDAP and SQL support,
Apache log files and more.
Rebuild switches:
--without extauth disable external authentication
--without ldap disable ldap support
--without mysql disable mysql support
--without pgsql disable postgresql support
--without tls disable SSL/TLS
%package selinux
Summary: SELinux support for Pure-FTPD
Group: System Environment/Daemons
Requires: %{name} = %{version}-%{release}
Requires: policycoreutils
Requires: initscripts
%description selinux
This package adds SELinux enforcement to Pure-FTPD. Install it if you want
Pure-FTPd to be protected in the same way other FTP servers are in Fedora
(e.g. VSFTPd and ProFTPd)
%prep
%setup
%patch0 -p0 -b .config
%patch1 -p0 -b .paminclude
%{__cat} <<EOF >pure-ftpd.logrotate
/var/log/pureftpd.log {
weekly
notifempty
missingok
}
EOF
%{__cat} <<EOF >pure-ftpd.xinetd
# default: off
# description: pure-ftpd server, xinetd version. \
# Don't run the standalone version if you run \
# this and remember do set "Daemonize" to "no" \
# in /etc/pure-ftpd/pure-ftpd.conf
service ftp
{
disable = yes
socket_type = stream
wait = no
user = root
server = /usr/sbin/pure-config.pl
server_args = /etc/pure-ftpd/pure-ftpd.conf
log_on_success += DURATION USERID
log_on_failure += USERID
nice = 10
}
EOF
%{__cat} <<EOF >pure-ftpwho.pam
#%PAM-1.0
auth sufficient pam_rootok.so
auth required pam_localuser.so
account required pam_permit.so
EOF
%{__cat} <<EOF >pure-ftpwho.consoleapps
USER=root
PROGRAM=%{_sbindir}/pure-ftpwho
GUI=no
EOF
%{__install} -Dp -m0644 %{SOURCE6} README.SELinux
%{__install} -Dp -m0644 %{SOURCE7} selinux/pureftpd.te
%{__cat} <<EOF >selinux/pureftpd.fc
%{_sbindir}/pure-ftpd system_u:object_r:ftpd_exec_t:s0
%{_localstatedir}/log/pureftpd.log system_u:object_r:xferlog_t:s0
EOF
touch selinux/pureftpd.if
%build
%configure \
--sysconfdir="%{_sysconfdir}/pure-ftpd" \
--without-bonjour \
--with-altlog \
--with-capabilities \
--with-cookie \
--with-diraliases \
%{!?_without_extauth:--with-extauth} \
--with-ftpwho \
%{!?_without_ldap:--with-ldap} \
%{!?_without_mysql:--with-mysql} \
--with-pam \
--with-paranoidmsg \
--with-peruserlimits \
%{!?_without_pgsql:--with-pgsql} \
--with-privsep \
--with-puredb \
--with-quotas \
--with-ratios \
--with-rfc2640 \
--with-sendfile \
--with-throttling \
%{!?_without_tls:--with-tls --with-certfile="%{_sysconfdir}/pki/pure-ftpd/pure-ftpd.pem"} \
--with-uploadscript \
--with-virtualchroot \
--with-virtualhosts \
--with-welcomemsg
%{__make} %{?_smp_mflags}
%{!?_without_selinux:%{__make} -C selinux -f %{_datadir}/selinux/devel/Makefile}
%install
%{__rm} -rf %{buildroot}
%{__make} install DESTDIR="%{buildroot}"
%{__install} -Dp -m0755 configuration-file/pure-config.pl %{buildroot}%{_sbindir}/pure-config.pl
%{__install} -Dp -m0644 configuration-file/pure-ftpd.conf %{buildroot}%{_sysconfdir}/pure-ftpd/pure-ftpd.conf
%{__install} -Dp -m0755 configuration-file/pure-config.py %{buildroot}%{_sbindir}/pure-config.py
%{__install} -Dp -m0644 pureftpd-ldap.conf %{buildroot}%{_sysconfdir}/pure-ftpd/pureftpd-ldap.conf
%{__install} -Dp -m0644 pureftpd-mysql.conf %{buildroot}%{_sysconfdir}/pure-ftpd/pureftpd-mysql.conf
%{__install} -Dp -m0644 pureftpd-pgsql.conf %{buildroot}%{_sysconfdir}/pure-ftpd/pureftpd-pgsql.conf
%{__install} -Dp -m0644 man/pure-ftpd.8 %{buildroot}%{_mandir}/man8/pure-ftpd.8
%{__install} -Dp -m0644 man/pure-ftpwho.8 %{buildroot}%{_mandir}/man8/pure-ftpwho.8
%{__install} -Dp -m0644 man/pure-mrtginfo.8 %{buildroot}%{_mandir}/man8/pure-mrtginfo.8
%{__install} -Dp -m0644 man/pure-uploadscript.8 %{buildroot}%{_mandir}/man8/pure-uploadscript.8
%{__install} -Dp -m0644 man/pure-pw.8 %{buildroot}%{_mandir}/man8/pure-pw.8
%{__install} -Dp -m0644 man/pure-pwconvert.8 %{buildroot}%{_mandir}/man8/pure-pwconvert.8
%{__install} -Dp -m0644 man/pure-statsdecode.8 %{buildroot}%{_mandir}/man8/pure-statsdecode.8
%{__install} -Dp -m0644 man/pure-quotacheck.8 %{buildroot}%{_mandir}/man8/pure-quotacheck.8
%{__install} -Dp -m0644 man/pure-authd.8 %{buildroot}%{_mandir}/man8/pure-authd.8
%{__install} -Dp -m0755 %{SOURCE1} %{buildroot}%{_initrddir}/pure-ftpd
%{__install} -Dp -m0644 pam/pure-ftpd %{buildroot}%{_sysconfdir}/pam.d/pure-ftpd
%{__install} -Dp -m0644 pure-ftpd.logrotate %{buildroot}%{_sysconfdir}/logrotate.d/pure-ftpd
%{__install} -Dp -m0644 pure-ftpd.xinetd %{buildroot}%{_sysconfdir}/xinetd.d/pure-ftpd
%{__install} -Dp -m0644 pure-ftpwho.pam %{buildroot}%{_sysconfdir}/pam.d/pure-ftpwho
%{__install} -Dp -m0644 pure-ftpwho.consoleapps %{buildroot}%{_sysconfdir}/security/console.apps/pure-ftpwho
%{__ln_s} -f consolehelper %{buildroot}%{_bindir}/pure-ftpwho
%{__install} -d -m0755 %{buildroot}%{_localstatedir}/ftp/
%{!?_without_tls:%{__install} -d -m0700 %{buildroot}%{_sysconfdir}/pki/pure-ftpd/}
%{!?_without_selinux:%{__install} -Dp -m0644 selinux/pureftpd.pp %{buildroot}%{_datadir}/selinux/packages/pure-ftpd/pureftpd.pp}
%clean
%{__rm} -rf %{buildroot}
%post
if [ $1 -le 1 ]; then
/sbin/chkconfig --add pure-ftpd
fi
if [ -d %{_sysconfdir}/pki/pure-ftpd/ -a ! -f %{_sysconfdir}/pki/pure-ftpd/pure-ftpd.pem ]; then
%{_sysconfdir}/pki/tls/certs/make-dummy-cert %{_sysconfdir}/pki/pure-ftpd/pure-ftpd.pem
fi
%preun
if [ $1 -eq 0 ]; then
/sbin/service pure-ftpd stop &>/dev/null || :
/sbin/chkconfig --del pure-ftpd
fi
%postun
if [ $1 -gt 0 ]; then
/sbin/service pure-ftpd condrestart &>/dev/null || :
fi
%post selinux
if [ $1 -le 1 ]; then
semodule -i %{_datadir}/selinux/packages/pure-ftpd/pureftpd.pp 2>/dev/null || :
fixfiles -R pure-ftpd restore
/sbin/service pure-ftpd condrestart &>/dev/null || :
fi
%preun selinux
if [ $1 -eq 0 ]; then
semodule -r pureftpd 2>/dev/null || :
fixfiles -R pure-ftpd restore
/sbin/service pure-ftpd condrestart &>/dev/null
fi
%postun selinux
if [ $1 -gt 0 ]; then
semodule -i %{_datadir}/selinux/packages/pure-ftpd/pureftpd.pp 2>/dev/null || :
fi
%files
%defattr(-, root, root, 0755)
%doc AUTHORS CONTACT FAQ HISTORY NEWS README* THANKS
%doc contrib/pure-vpopauth.pl pureftpd.schema contrib/pure-stat.pl
%doc %{_mandir}/man8/pure-*.8*
%config %{_initrddir}/pure-ftpd
%config(noreplace) %{_sysconfdir}/logrotate.d/pure-ftpd
%config(noreplace) %{_sysconfdir}/pam.d/pure-ftpd
%config(noreplace) %{_sysconfdir}/pure-ftpd/
%config(noreplace) %{_sysconfdir}/xinetd.d/pure-ftpd
%config %{_sysconfdir}/pam.d/pure-ftpwho
%config %{_sysconfdir}/security/console.apps/pure-ftpwho
%{!?_without_tls:%{_sysconfdir}/pki/pure-ftpd/}
%{_bindir}/pure-*
%dir %{_localstatedir}/ftp/
%{_sbindir}/pure-*
%if %{!?_without_selinux:1}0
%files selinux
%defattr(-, root, root, 0755)
%doc README.SELinux
%{_datadir}/selinux/packages/pure-ftpd/pureftpd.pp
%endif
%changelog
* Thu Aug 30 2012 Denis Fateyev <denis@fateyev.com> - 1.0.36-1
- Updated to release 1.0.36
* Mon Oct 06 2008 Dag Wieers <dag@wieers.com> - 1.0.21-1
- Updated to release 1.0.21. (rebased on Fedora)
* Sun Aug 31 2003 Dag Wieers <dag@wieers.com> - 1.0.16-0
- Updated to release 1.0.16.
* Tue Feb 25 2003 Dag Wieers <dag@wieers.com> - 1.0.14-1
- Added sysv scripts with chkconfig.
* Tue Feb 25 2003 Dag Wieers <dag@wieers.com> - 1.0.14-0
- Initial package. (using DAR)
| {
"pile_set_name": "Github"
} |
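For example, to rebuild the package from its source RPM without MySQL and LDAP support (the source RPM file name here is illustrative):

```sh
rpmbuild --rebuild --without mysql --without ldap pure-ftpd-1.0.36-1.src.rpm
```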
import React, { Component } from "react";
import PropTypes from "prop-types";
import { connect } from "react-redux";
import SvgIcon from "../../SvgIcon";
import "./style.less";
class CurrentTheme extends Component {
constructor() {
super();
this.state = {};
}
render() {
const {props} = this;
return (
<div className="current-theme">
{
(function () {
if (props.currentTheme && props.currentTheme.SiteLayout.thumbnail) {
return <img src={props.currentTheme.SiteLayout.thumbnail} alt={props.currentTheme.SiteLayout.themeName}/>;
}
else {
return <SvgIcon name="EmptyThumbnail" />;
}
})()
}
</div>
);
}
}
CurrentTheme.propTypes = {
dispatch: PropTypes.func.isRequired,
currentTheme: PropTypes.object
};
function mapStateToProps(state) {
return {
currentTheme: state.theme.currentTheme
};
}
export default connect(mapStateToProps)(CurrentTheme); | {
"pile_set_name": "Github"
} |
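A usage sketch, assuming a configured Redux store whose state.theme.currentTheme slice is populated elsewhere (the store prop is illustrative):

```jsx
import React from "react";
import { Provider } from "react-redux";
import CurrentTheme from "./CurrentTheme";

// Wrap the connected component in a Provider so connect() can reach the store.
const ThemePreview = ({ store }) => (
    <Provider store={store}>
        <CurrentTheme />
    </Provider>
);

export default ThemePreview;
```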
import { IconDefinition } from '../types';
declare const ClockCircleFilled: IconDefinition;
export default ClockCircleFilled;
| {
"pile_set_name": "Github"
} |
blank_issues_enabled: false
contact_links:
- name: Feature request
url: https://community.n8n.io
about: Suggest an idea for this project
- name: Question / Problem
url: https://community.n8n.io
about: Questions and problems with n8n
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Acknowledgements</title>
<link rel="stylesheet" type="text/css" href="mesa.css">
</head>
<body>
<div class="header">
<h1>The Mesa 3D Graphics Library</h1>
</div>
<iframe src="contents.html"></iframe>
<div class="content">
<h1>Acknowledgements</h1>
The following individuals and groups are to be acknowledged for their
contributions to Mesa over the years.
This list is far from complete and somewhat dated, unfortunately.
<ul>
<li>Early Mesa development was done while Brian was part of the
<a href="http://www.ssec.wisc.edu/~billh/vis.html">
SSEC Visualization Project</a> at the University of
Wisconsin. He'd like to thank Bill Hibbard for letting him work on
Mesa as part of that project.
<br>
<br>
<li>John Carmack of id Software, Inc. funded Keith Whitwell in 1999 in
order to optimize Mesa's vertex transformation module. This is a very
substantial piece of work.
<br>
<br>
<li>Precision Insight, Inc., VA Linux Systems, Inc., and most recently,
Tungsten Graphics, Inc. have supported the ongoing development of Mesa.
<br>
<br>
<li>The
<a href="http://www.mesa3d.org">Mesa</a>
website is hosted by
<a href="http://sourceforge.net">
<img src="http://sourceforge.net/sflogo.php?group_id=3&type=1"
width="88" height="31" align="bottom" alt="Sourceforge.net" border="0"></a>
<br>
<br>
<li>The Mesa git repository is hosted by
<a href="http://freedesktop.org/">freedesktop.org</a>.
<br>
<br>
<li><a href="http://www.altsoftware.com/">alt.software</a> contributed the Direct3D driver.
<li><b>Bernd Barsuhn</b> wrote the evaluator code for (splines,
patches) in Mesa.
<li><b>Bernhard Tschirren</b> wrote the Allegro DJGPP driver.
<li><b>Bogdan Sikorski</b> wrote the GLU NURBS and polygon tessellator
in Mesa.
<li><b>Charlie Wallace</b> wrote the MS-DOS driver.
<li><b>CJ Beyer</b> was the www.mesa3d.org webmaster.
<li><b>Darren Abbott</b> provided the OS/2 driver.
<li><b>David Bucciarelli</b> wrote and maintained the 3Dfx Glide
driver. Thousands of Linux/Quake players thank David!
<li><b>Gareth Hughes</b> wrote new GLU 1.2 Polygon Tessellation code
(now superseded by SGI SI GLU).
<li><b>Holger Waechtler</b> contributed AMD 3DNow! assembly code which
accelerates vertex transformation in Mesa 3.1. Holger also implemented
the GL_EXT_texture_env_combine extension.
<li><b>Jeroen van der Zijp</b> and <b>Thorsten Ohl</b> contributed the
Xt/Motif widget code.
<li><b>John Stone</b> provided the multi-threading support in Mesa 3.0.
<li><b>John Watson</b> assisted with web page design.
<li><b>Josh Vanderhoof</b> contributed Intel x86 assembly code which
accelerates vertex transformation in Mesa 3.x.
<li><b>Jouk Jansen</b> contributed and continues to maintain the VMS
support.
<li><b>Karl Schultz</b> has been maintaining the Windows driver.
<li><b>Keith Whitwell</b> has made extensive contributions to Mesa
since 1999.
<li><b>Kendall Bennett</b> wrote the SciTech MGL driver.
<li><b>Klaus Niederkrueger</b> contributed many improvements to Mesa's
software rasterizer.
<li><b>Mark Kilgard</b> contributed antialiased line improvements and
several extensions.
<li><b>Michael Pichler</b> contributed <em>many</em> bug fixes
<li><b>Miklos Fazekas</b> wrote and maintains the Macintosh driver.
<li><b>Pascal Thibaudeau</b> wrote the NeXT driver.
<li><b>Pedro Vazquez</b> setup and maintains the Mesa Mailing list.
<li><b>Randy Frank</b> contributed <em>many</em> bug fixes.
<li><b>Stefan Zivkovic</b> wrote the Amiga driver.
<li><b>Stephane Rehel</b> provided the Cygnus Win32 support
<li><b>Ted Jump</b> maintained the
makefiles and project files for Windows 95/98/NT compilation for some time.
<li><b>Uwe Maurer</b> wrote the LibGGI driver for Mesa-3.0.
<li><b>Victor Ng-Thow-Hing</b> wrote the Amiwin driver for the Amiga.
</ul>
<p>
Apologies to anyone who's been omitted.
Please send corrections and additions to Brian.
</p>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
#!/bin/sh
# --------------------------------------------------------------
# -- Warm up the DNS cache with a built-in list of MRU domains, or
# -- from a file when one is specified as the script argument.
# --
# -- Version 1.1
# -- By Yuri Voinov (c) 2014
# --------------------------------------------------------------
# Default DNS host address
address="127.0.0.1"
cat=`which cat`
dig=`which dig`
if [ -z "$1" ]; then
echo "Warming up cache by MRU domains..."
$dig -f - @$address >/dev/null 2>&1 <<EOT
2gis.ru
admir.kz
adobe.com
agent.mail.ru
aimp.ru
akamai.com
akamai.net
almaty.tele2.kz
aol.com
apple.com
arin.com
artlebedev.ru
auto.mail.ru
beeline.kz
bing.com
blogspot.com
clamav.net
comodo.com
dnscrypt.org
drive.google.com
drive.mail.ru
facebook.com
farmanager.com
fb.com
firefox.com
forum.farmanager.com
gazeta.ru
getsharex.com
gismeteo.ru
google.com
google.kz
google.ru
googlevideo.com
goto.kz
iana.org
icq.com
imap.mail.ru
instagram.com
instagram.com
intel.com
irr.kz
java.com
kaspersky.com
kaspersky.ru
kcell.kz
krisha.kz
lady.mail.ru
lenta.ru
libreoffice.org
linkedin.com
livejournal.com
mail.google.com
mail.ru
microsoft.com
mozilla.org
mra.mail.ru
munin-monitoring.org
my.mail.ru
news.bbcimg.co.uk
news.mail.ru
newsimg.bbc.net.uk
nvidia.com
odnoklassniki.ru
ok.ru
opencsw.org
opendns.com
opendns.org
opennet.ru
opera.com
oracle.com
peerbet.ru
piriform.com
plugring.farmanager.com
privoxy.org
qip.ru
raidcall.com
rambler.ru
reddit.com
ru.wikipedia.org
shallalist.de
skype.com
snob.ru
squid-cache.org
squidclamav.darold.net
squidguard.org
ssl.comodo.com
ssl.verisign.com
symantec.com
symantecliveupdate.com
tele2.kz
tengrinews.kz
thunderbird.com
torproject.org
torstatus.blutmagie.de
translate.google.com
unbound.net
verisign.com
vk.com
vk.me
vk.ru
vkontakte.com
vkontakte.ru
vlc.org
watsapp.net
weather.mail.ru
windowsupdate.com
www.baidu.com
www.bbc.co.uk
www.internic.net
www.opennet.ru
www.topgear.com
ya.ru
yahoo.com
yandex.com
yandex.ru
youtube.com
ytimg.com
EOT
else
echo "Warming up cache from $1 file..."
$cat $1 | $dig -f - @$address >/dev/null 2>&1
fi
echo "Done."
echo "Saving cache..."
script=`which unbound_cache.sh`
[ -f "$script" ] && $script -s
echo "Done."
exit 0
| {
"pile_set_name": "Github"
} |
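Typical usage, assuming the script is saved as warmup_dns.sh (name illustrative) next to a resolver listening on 127.0.0.1:

```sh
./warmup_dns.sh                  # warm the cache from the built-in MRU list
./warmup_dns.sh my_domains.txt   # warm it from a file, one domain per line
```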
// Copyright 2014 BVLC and contributors.
//
// Based on data_layer.cpp by Yangqing Jia.
#include <stdint.h>
#include <pthread.h>
#include <string>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/vision_layers.hpp"
using std::string;
using std::map;
using std::pair;
// caffe.proto > LayerParameter > WindowDataParameter
// 'source' field specifies the window_file
// 'crop_size' indicates the desired warped size
namespace caffe {
template <typename Dtype>
Dtype WindowDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
// First, join the thread
JoinPrefetchThread();
// Copy the data
CUDA_CHECK(cudaMemcpy((*top)[0]->mutable_gpu_data(),
prefetch_data_->cpu_data(), sizeof(Dtype) * prefetch_data_->count(),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy((*top)[1]->mutable_gpu_data(),
prefetch_label_->cpu_data(), sizeof(Dtype) * prefetch_label_->count(),
cudaMemcpyHostToDevice));
// Start a new prefetch thread
CreatePrefetchThread();
return Dtype(0.);
}
INSTANTIATE_CLASS(WindowDataLayer);
} // namespace caffe
| {
"pile_set_name": "Github"
} |
<?php
namespace Guzzle\Http\QueryAggregator;
use Guzzle\Http\QueryString;
/**
* Aggregates nested query string variables using commas
*/
class CommaAggregator implements QueryAggregatorInterface
{
/**
* {@inheritdoc}
*/
public function aggregate($key, $value, QueryString $query)
{
if ($query->isUrlEncoding()) {
return array($query->encodeValue($key) => implode(',', array_map(array($query, 'encodeValue'), $value)));
} else {
return array($key => implode(',', $value));
}
}
}
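/*
 * Usage sketch (method names follow the Guzzle 3 QueryString API and
 * should be verified against the installed version):
 *
 *   $query = new QueryString();
 *   $query->setAggregator(new CommaAggregator());
 *   $query->set('tags', array('a', 'b', 'c')); // renders as tags=a,b,c
 */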
| {
"pile_set_name": "Github"
} |
//
// File.cs: Provides an empty wrapper for files that don't support metadata.
//
// Author:
// Ruben Vermeersch (ruben@savanne.be)
//
// Copyright (C) 2010 Ruben Vermeersch
//
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License version
// 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
// USA
//
using System;
namespace TagLib.Image.NoMetadata
{
/// <summary>
///    This class extends <see cref="TagLib.Image.File" /> to provide a
///    basic level of tagging support for files that don't support
///    metadata. You obviously can't write to them, but you can
///    populate an XMP tag, for sidecar purposes.
/// </summary>
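/// <example>
///    Usage sketch (type and property names from TagLib#; the sidecar
///    write itself is outside the scope of this class):
///    <code>
///    var file = TagLib.File.Create ("photo.bmp") as TagLib.Image.File;
///    file.ImageTag.Comment = "Scanned slide"; // lands in the XMP tag
///    // file.Save () would throw NotSupportedException here.
///    </code>
/// </example>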
[SupportedMimeType("taglib/bmp", "bmp")]
[SupportedMimeType("image/x-MS-bmp")]
[SupportedMimeType("image/x-bmp")]
[SupportedMimeType("taglib/ppm", "ppm")]
[SupportedMimeType("taglib/pgm", "pgm")]
[SupportedMimeType("taglib/pbm", "pbm")]
[SupportedMimeType("taglib/pnm", "pnm")]
[SupportedMimeType("image/x-portable-pixmap")]
[SupportedMimeType("image/x-portable-graymap")]
[SupportedMimeType("image/x-portable-bitmap")]
[SupportedMimeType("image/x-portable-anymap")]
[SupportedMimeType("taglib/pcx", "pcx")]
[SupportedMimeType("image/x-pcx")]
[SupportedMimeType("taglib/svg", "svg")]
[SupportedMimeType("taglib/svgz", "svgz")]
[SupportedMimeType("image/svg+xml")]
[SupportedMimeType("taglib/kdc", "kdc")] // FIXME: Not supported yet!
[SupportedMimeType("taglib/orf", "orf")] // FIXME: Not supported yet!
[SupportedMimeType("taglib/srf", "srf")] // FIXME: Not supported yet!
[SupportedMimeType("taglib/crw", "crw")] // FIXME: Not supported yet!
[SupportedMimeType("taglib/mrw", "mrw")] // FIXME: Not supported yet!
[SupportedMimeType("taglib/raf", "raf")] // FIXME: Not supported yet!
[SupportedMimeType("taglib/x3f", "x3f")] // FIXME: Not supported yet!
public class File : TagLib.Image.File
{
#region public Properties
/// <summary>
/// Gets the media properties of the file represented by the
/// current instance.
/// </summary>
/// <value>
/// A <see cref="TagLib.Properties" /> object containing the
/// media properties of the file represented by the current
/// instance.
/// </value>
public override TagLib.Properties Properties {
get { return null; }
}
/// <summary>
/// Indicates if tags can be written back to the current file or not
/// </summary>
/// <value>
/// A <see cref="bool" /> which is true if tags can be written to the
/// current file, otherwise false.
/// </value>
public override bool Writeable {
get { return false; }
}
#endregion
#region Constructors
/// <summary>
/// Constructs and initializes a new instance of <see
/// cref="File" /> for a specified path in the local file
/// system and specified read style.
/// </summary>
/// <param name="path">
/// A <see cref="string" /> object containing the path of the
/// file to use in the new instance.
/// </param>
/// <param name="propertiesStyle">
/// A <see cref="ReadStyle" /> value specifying at what level
/// of accuracy to read the media properties, or <see
/// cref="ReadStyle.None" /> to ignore the properties.
/// </param>
/// <exception cref="ArgumentNullException">
/// <paramref name="path" /> is <see langword="null" />.
/// </exception>
public File (string path, ReadStyle propertiesStyle)
: this (new File.LocalFileAbstraction (path),
propertiesStyle)
{
}
/// <summary>
/// Constructs and initializes a new instance of <see
/// cref="File" /> for a specified path in the local file
/// system.
/// </summary>
/// <param name="path">
/// A <see cref="string" /> object containing the path of the
/// file to use in the new instance.
/// </param>
/// <exception cref="ArgumentNullException">
/// <paramref name="path" /> is <see langword="null" />.
/// </exception>
public File (string path) : this (path, ReadStyle.Average)
{
}
/// <summary>
/// Constructs and initializes a new instance of <see
/// cref="File" /> for a specified file abstraction and
/// specified read style.
/// </summary>
/// <param name="abstraction">
/// A <see cref="TagLib.File.IFileAbstraction" /> object to use when
/// reading from and writing to the file.
/// </param>
/// <param name="propertiesStyle">
/// A <see cref="ReadStyle" /> value specifying at what level
/// of accuracy to read the media properties, or <see
/// cref="ReadStyle.None" /> to ignore the properties.
/// </param>
/// <exception cref="ArgumentNullException">
/// <paramref name="abstraction" /> is <see langword="null"
/// />.
/// </exception>
public File (File.IFileAbstraction abstraction,
ReadStyle propertiesStyle) : base (abstraction)
{
ImageTag = new CombinedImageTag (TagTypes.XMP);
}
/// <summary>
/// Constructs and initializes a new instance of <see
/// cref="File" /> for a specified file abstraction.
/// </summary>
/// <param name="abstraction">
/// A <see cref="TagLib.File.IFileAbstraction" /> object to use when
/// reading from and writing to the file.
/// </param>
/// <exception cref="ArgumentNullException">
/// <paramref name="abstraction" /> is <see langword="null"
/// />.
/// </exception>
protected File (IFileAbstraction abstraction)
: this (abstraction, ReadStyle.Average)
{
}
#endregion
#region Public Methods
/// <summary>
/// Saves the changes made in the current instance to the
/// file it represents.
/// </summary>
public override void Save ()
{
throw new NotSupportedException ();
}
#endregion
}
}
| {
"pile_set_name": "Github"
} |
// When changing things, also edit router.d.ts
export const NavigationFailureType = {
redirected: 2,
aborted: 4,
cancelled: 8,
duplicated: 16
}
export function createNavigationRedirectedError (from, to) {
return createRouterError(
from,
to,
NavigationFailureType.redirected,
`Redirected when going from "${from.fullPath}" to "${stringifyRoute(
to
)}" via a navigation guard.`
)
}
export function createNavigationDuplicatedError (from, to) {
const error = createRouterError(
from,
to,
NavigationFailureType.duplicated,
`Avoided redundant navigation to current location: "${from.fullPath}".`
)
// backwards compatible with the first introduction of Errors
error.name = 'NavigationDuplicated'
return error
}
export function createNavigationCancelledError (from, to) {
return createRouterError(
from,
to,
NavigationFailureType.cancelled,
`Navigation cancelled from "${from.fullPath}" to "${
to.fullPath
}" with a new navigation.`
)
}
export function createNavigationAbortedError (from, to) {
return createRouterError(
from,
to,
NavigationFailureType.aborted,
`Navigation aborted from "${from.fullPath}" to "${
to.fullPath
}" via a navigation guard.`
)
}
function createRouterError (from, to, type, message) {
const error = new Error(message)
error._isRouter = true
error.from = from
error.to = to
error.type = type
return error
}
const propertiesToLog = ['params', 'query', 'hash']
function stringifyRoute (to) {
if (typeof to === 'string') return to
if ('path' in to) return to.path
const location = {}
propertiesToLog.forEach(key => {
if (key in to) location[key] = to[key]
})
return JSON.stringify(location, null, 2)
}
export function isError (err) {
return Object.prototype.toString.call(err).indexOf('Error') > -1
}
export function isNavigationFailure (err, errorType) {
return (
isError(err) &&
err._isRouter &&
(errorType == null || err.type === errorType)
)
}
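// Usage sketch (consumer side; `router` is an assumed VueRouter instance):
//
//   router.push('/admin').catch(err => {
//     if (isNavigationFailure(err, NavigationFailureType.redirected)) {
//       // a guard redirected this navigation; often safe to ignore
//     }
//   })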
| {
"pile_set_name": "Github"
} |
/*
* comedi_buf.c
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
* Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "comedidev.h"
#include "comedi_internal.h"
#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
#endif
static void comedi_buf_map_kref_release(struct kref *kref)
{
struct comedi_buf_map *bm =
container_of(kref, struct comedi_buf_map, refcount);
struct comedi_buf_page *buf;
unsigned int i;
if (bm->page_list) {
for (i = 0; i < bm->n_pages; i++) {
buf = &bm->page_list[i];
clear_bit(PG_reserved,
&(virt_to_page(buf->virt_addr)->flags));
if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
dma_free_coherent(bm->dma_hw_dev,
PAGE_SIZE,
buf->virt_addr,
buf->dma_addr);
#endif
} else {
free_page((unsigned long)buf->virt_addr);
}
}
vfree(bm->page_list);
}
if (bm->dma_dir != DMA_NONE)
put_device(bm->dma_hw_dev);
kfree(bm);
}
static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct comedi_buf_map *bm;
unsigned long flags;
if (async->prealloc_buf) {
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
spin_lock_irqsave(&s->spin_lock, flags);
bm = async->buf_map;
async->buf_map = NULL;
spin_unlock_irqrestore(&s->spin_lock, flags);
comedi_buf_map_put(bm);
}
static void __comedi_buf_alloc(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int n_pages)
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned long flags;
unsigned int i;
if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
dev_err(dev->class_dev,
"dma buffer allocation not supported\n");
return;
}
bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
if (!bm)
return;
kref_init(&bm->refcount);
spin_lock_irqsave(&s->spin_lock, flags);
async->buf_map = bm;
spin_unlock_irqrestore(&s->spin_lock, flags);
bm->dma_dir = s->async_dma_dir;
if (bm->dma_dir != DMA_NONE)
/* Need ref to hardware device to free buffer later. */
bm->dma_hw_dev = get_device(dev->hw_dev);
bm->page_list = vzalloc(sizeof(*buf) * n_pages);
if (bm->page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
if (!pages)
return;
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
PAGE_SIZE,
&buf->dma_addr,
GFP_KERNEL |
__GFP_COMP);
#else
break;
#endif
else
buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
if (!buf->virt_addr)
break;
set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));
pages[i] = virt_to_page(buf->virt_addr);
}
spin_lock_irqsave(&s->spin_lock, flags);
bm->n_pages = i;
spin_unlock_irqrestore(&s->spin_lock, flags);
/* vmap the prealloc_buf if all the pages were allocated */
if (i == n_pages)
async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
COMEDI_PAGE_PROTECTION);
vfree(pages);
}
void comedi_buf_map_get(struct comedi_buf_map *bm)
{
if (bm)
kref_get(&bm->refcount);
}
int comedi_buf_map_put(struct comedi_buf_map *bm)
{
if (bm)
return kref_put(&bm->refcount, comedi_buf_map_kref_release);
return 1;
}
/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
struct comedi_buf_map *bm = NULL;
unsigned long flags;
if (!async)
return NULL;
spin_lock_irqsave(&s->spin_lock, flags);
bm = async->buf_map;
/* only want it if buffer pages allocated */
if (bm && bm->n_pages)
comedi_buf_map_get(bm);
else
bm = NULL;
spin_unlock_irqrestore(&s->spin_lock, flags);
return bm;
}
bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
struct comedi_buf_map *bm = s->async->buf_map;
return bm && (atomic_read(&bm->refcount.refcount) > 1);
}
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
struct comedi_async *async = s->async;
/* Round up new_size to multiple of PAGE_SIZE */
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
/* if no change is required, do nothing */
if (async->prealloc_buf && async->prealloc_bufsz == new_size)
return 0;
/* deallocate old buffer */
__comedi_buf_free(dev, s);
/* allocate new buffer */
if (new_size) {
unsigned int n_pages = new_size >> PAGE_SHIFT;
__comedi_buf_alloc(dev, s, n_pages);
if (!async->prealloc_buf) {
/* allocation failed */
__comedi_buf_free(dev, s);
return -ENOMEM;
}
}
async->prealloc_bufsz = new_size;
return 0;
}
void comedi_buf_reset(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
async->buf_write_alloc_count = 0;
async->buf_write_count = 0;
async->buf_read_alloc_count = 0;
async->buf_read_count = 0;
async->buf_write_ptr = 0;
async->buf_read_ptr = 0;
async->cur_chan = 0;
async->scans_done = 0;
async->scan_progress = 0;
async->munge_chan = 0;
async->munge_count = 0;
async->munge_ptr = 0;
async->events = 0;
}
static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
return free_end - async->buf_write_alloc_count;
}
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
return free_end - async->buf_write_count;
}
/**
* comedi_buf_write_alloc() - Reserve buffer space for writing
* @s: COMEDI subdevice.
* @nbytes: Maximum space to reserve in bytes.
*
* Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
* data buffer associated with the subdevice. The amount reserved is limited
* by the space available.
*
* Return: The amount of space reserved in bytes.
*/
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int unalloc = comedi_buf_write_n_unalloc(s);
if (nbytes > unalloc)
nbytes = unalloc;
async->buf_write_alloc_count += nbytes;
/*
* ensure the async buffer 'counts' are read and updated
* before we write data to the write-alloc'ed buffer space
*/
smp_mb();
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
/*
* munging is applied to data by core as it passes between user
* and kernel space
*/
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int num_bytes)
{
struct comedi_async *async = s->async;
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
count = num_bytes;
} else {
/* don't munge partial samples */
num_bytes -= num_bytes % num_sample_bytes;
while (count < num_bytes) {
int block_size = num_bytes - count;
unsigned int buf_end;
buf_end = async->prealloc_bufsz - async->munge_ptr;
if (block_size > buf_end)
block_size = buf_end;
s->munge(s->device, s,
async->prealloc_buf + async->munge_ptr,
block_size, async->munge_chan);
/*
* ensure data is munged in buffer before the
* async buffer munge_count is incremented
*/
smp_wmb();
async->munge_chan += block_size / num_sample_bytes;
async->munge_chan %= async->cmd.chanlist_len;
async->munge_count += block_size;
async->munge_ptr += block_size;
async->munge_ptr %= async->prealloc_bufsz;
count += block_size;
}
}
return count;
}
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
return async->buf_write_alloc_count - async->buf_write_count;
}
/**
* comedi_buf_write_free() - Free buffer space after it is written
* @s: COMEDI subdevice.
* @nbytes: Maximum space to free in bytes.
*
* Free up to @nbytes bytes of space previously reserved for writing in the
* COMEDI acquisition data buffer associated with the subdevice. The amount of
* space freed is limited to the amount that was reserved. The freed space is
* assumed to have been filled with sample data by the writer.
*
* If the samples in the freed space need to be "munged", do so here. The
* freed space becomes available for allocation by the reader.
*
* Return: The amount of space freed in bytes.
*/
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int allocated = comedi_buf_write_n_allocated(s);
if (nbytes > allocated)
nbytes = allocated;
async->buf_write_count += nbytes;
async->buf_write_ptr += nbytes;
comedi_buf_munge(s, async->buf_write_count - async->munge_count);
if (async->buf_write_ptr >= async->prealloc_bufsz)
async->buf_write_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
/**
* comedi_buf_read_n_available() - Determine amount of readable buffer space
* @s: COMEDI subdevice.
*
* Determine the amount of readable buffer space in the COMEDI acquisition data
* buffer associated with the subdevice. The readable buffer space is that
* which has been freed by the writer and "munged" to the sample data format
* expected by COMEDI if necessary.
*
* Return: The amount of readable buffer space.
*/
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int num_bytes;
if (!async)
return 0;
num_bytes = async->munge_count - async->buf_read_count;
/*
* ensure the async buffer 'counts' are read before we
* attempt to read data from the buffer
*/
smp_rmb();
return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
/**
* comedi_buf_read_alloc() - Reserve buffer space for reading
* @s: COMEDI subdevice.
* @nbytes: Maximum space to reserve in bytes.
*
* Reserve up to @nbytes bytes of previously written and "munged" buffer space
* for reading in the COMEDI acquisition data buffer associated with the
* subdevice. The amount reserved is limited to the space available. The
* reader can read from the reserved space and then free it. A reader is also
* allowed to read from the space before reserving it as long as it determines
* the amount of readable data available, but the space needs to be marked as
* reserved before it can be freed.
*
* Return: The amount of space reserved in bytes.
*/
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int available;
available = async->munge_count - async->buf_read_alloc_count;
if (nbytes > available)
nbytes = available;
async->buf_read_alloc_count += nbytes;
/*
* ensure the async buffer 'counts' are read before we
* attempt to read data from the read-alloc'ed buffer space
*/
smp_rmb();
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
return async->buf_read_alloc_count - async->buf_read_count;
}
/**
* comedi_buf_read_free() - Free buffer space after it has been read
* @s: COMEDI subdevice.
* @nbytes: Maximum space to free in bytes.
*
* Free up to @nbytes bytes of buffer space previously reserved for reading in
* the COMEDI acquisition data buffer associated with the subdevice. The
* amount of space freed is limited to the amount that was reserved.
*
* The freed space becomes available for allocation by the writer.
*
* Return: The amount of space freed in bytes.
*/
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int allocated;
/*
* ensure data has been read out of buffer before
* the async read count is incremented
*/
smp_mb();
allocated = comedi_buf_read_n_allocated(async);
if (nbytes > allocated)
nbytes = allocated;
async->buf_read_count += nbytes;
async->buf_read_ptr += nbytes;
async->buf_read_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
const void *data, unsigned int num_bytes)
{
struct comedi_async *async = s->async;
unsigned int write_ptr = async->buf_write_ptr;
while (num_bytes) {
unsigned int block_size;
if (write_ptr + num_bytes > async->prealloc_bufsz)
block_size = async->prealloc_bufsz - write_ptr;
else
block_size = num_bytes;
memcpy(async->prealloc_buf + write_ptr, data, block_size);
data += block_size;
num_bytes -= block_size;
write_ptr = 0;
}
}
static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
void *dest, unsigned int nbytes)
{
void *src;
struct comedi_async *async = s->async;
unsigned int read_ptr = async->buf_read_ptr;
while (nbytes) {
unsigned int block_size;
src = async->prealloc_buf + read_ptr;
if (nbytes >= async->prealloc_bufsz - read_ptr)
block_size = async->prealloc_bufsz - read_ptr;
else
block_size = nbytes;
memcpy(dest, src, block_size);
nbytes -= block_size;
dest += block_size;
read_ptr = 0;
}
}
/**
* comedi_buf_write_samples() - Write sample data to COMEDI buffer
* @s: COMEDI subdevice.
* @data: Pointer to source samples.
* @nsamples: Number of samples to write.
*
* Write up to @nsamples samples to the COMEDI acquisition data buffer
* associated with the subdevice, mark it as written and update the
* acquisition scan progress. If there is not enough room for the specified
* number of samples, the number of samples written is limited to the number
* that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
* acquisition to terminate with an overrun error. Set the %COMEDI_CB_BLOCK
* event flag if any samples are written to cause waiting tasks to be woken
* when the event flags are processed.
*
* Return: The amount of data written in bytes.
*/
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
const void *data, unsigned int nsamples)
{
unsigned int max_samples;
unsigned int nbytes;
/*
* Make sure there is enough room in the buffer for all the samples.
* If not, clamp the nsamples to the number that will fit, flag the
* buffer overrun and add the samples that fit.
*/
max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
if (nsamples > max_samples) {
dev_warn(s->device->class_dev, "buffer overrun\n");
s->async->events |= COMEDI_CB_OVERFLOW;
nsamples = max_samples;
}
if (nsamples == 0)
return 0;
nbytes = comedi_buf_write_alloc(s,
comedi_samples_to_bytes(s, nsamples));
comedi_buf_memcpy_to(s, data, nbytes);
comedi_buf_write_free(s, nbytes);
comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
/**
* comedi_buf_read_samples() - Read sample data from COMEDI buffer
* @s: COMEDI subdevice.
* @data: Pointer to destination.
* @nsamples: Maximum number of samples to read.
*
* Read up to @nsamples samples from the COMEDI acquisition data buffer
* associated with the subdevice, mark it as read and update the acquisition
* scan progress. Limit the number of samples read to the number available.
* Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
* tasks to be woken when the event flags are processed.
*
* Return: The amount of data read in bytes.
*/
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
void *data, unsigned int nsamples)
{
unsigned int max_samples;
unsigned int nbytes;
/* clamp nsamples to the number of full samples available */
max_samples = comedi_bytes_to_samples(s,
comedi_buf_read_n_available(s));
if (nsamples > max_samples)
nsamples = max_samples;
if (nsamples == 0)
return 0;
nbytes = comedi_buf_read_alloc(s,
comedi_samples_to_bytes(s, nsamples));
comedi_buf_memcpy_from(s, data, nbytes);
comedi_buf_read_free(s, nbytes);
comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
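/*
 * Usage sketch (driver side, illustrative only; the register offset is
 * hypothetical): an interrupt handler typically pushes samples and then
 * lets the core process the event flags:
 *
 *	unsigned short sample = inw(dev->iobase + 0x00);
 *	comedi_buf_write_samples(s, &sample, 1);
 *	comedi_handle_events(dev, s);
 */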
| {
"pile_set_name": "Github"
} |
<?php
namespace Office365\Runtime\OData;
abstract class ODataReader
{
/**
* @param array $options
* @return ODataModel
*/
function generateModel($options)
{
$model = new ODataModel($options);
$edmx = file_get_contents($options['metadataPath']);
$this->parseEdmx($edmx, $model);
return $model;
}
/**
* @param string $edmx
* @param ODataModel $model
*/
abstract function parseEdmx($edmx, $model);
}
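/*
 * Sketch of a concrete reader (class name and parsing detail are
 * hypothetical):
 *
 *   class ODataV4Reader extends ODataReader
 *   {
 *       function parseEdmx($edmx, $model)
 *       {
 *           $xml = simplexml_load_string($edmx);
 *           // walk EntityType/ComplexType nodes and register them on $model
 *       }
 *   }
 */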
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<g:element xmlns:g="http://www.esri.com/geoportal/gxe"
xmlns:h="http://www.esri.com/geoportal/gxe/html"
g:extends="$base/schema/gco/xtn/Wrapped_BasicPropertyType.xml">
<g:body>
<g:element g:minOccurs="$parent" g:extends="$base/schema/gco/basicTypes/Scale.xml"/>
</g:body>
</g:element>
| {
"pile_set_name": "Github"
} |
#' @include timespans.r
#' @include parse.r
#' @include util.r
NULL
check_interval <- function(object) {
errors <- character()
if (!is.numeric(object@.Data)) {
msg <- "Span length must be numeric."
errors <- c(errors, msg)
}
if (!is(object@start, "POSIXct")) {
msg <- "Start date must be in POSIXct format."
errors <- c(errors, msg)
}
if (length(object@.Data) != length(object@start)) {
msg <- paste("Inconsistent lengths: spans = ", length(object@.Data),
", start dates = ", length(object@start), sep = "")
errors <- c(errors, msg)
}
if (length(errors) == 0)
TRUE
else
errors
}
.units_within_seconds <- function(secs, unit = "second") {
## return a list suitable to pass to new("Period", ...)
switch(unit,
second = list(secs),
minute = list(secs %% 60, minute = secs %/% 60),
hour =
c(.units_within_seconds(secs %% 3600, "minute"),
list(hour = secs %/% 3600)),
day =
c(.units_within_seconds(secs %% 86400, "hour"),
list(day = secs %/% 86400)),
    stop("Unsupported unit ", unit))
}
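## e.g. .units_within_seconds(3661, "hour") returns
## list(1, minute = 1, hour = 1) -- i.e. 1h 1m 1s.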
#' Interval class
#'
#' Interval is an S4 class that extends the [Timespan-class] class. An
#' Interval object records one or more spans of time. Intervals record these
#' timespans as a sequence of seconds that begin at a specified date. Since
#' intervals are anchored to a precise moment of time, they can accurately be
#' converted to [Period-class] or [Duration-class] class objects. This
#' is because we can observe the length in seconds of each period that begins on a
#' specific date. Contrast this to a generalized period, which may not have a
#' consistent length in seconds (e.g. the number of seconds in a year will change
#' if it is a leap year).
#'
#' Intervals can be both negative and positive. Negative intervals progress
#' backwards from the start date; positive intervals progress forwards.
#'
#' Interval class objects have two slots: .Data, a numeric object equal to the number
#' of seconds in the interval; and start, a POSIXct object that specifies the time
#' when the interval starts.
#'
#' @aliases intervals
#' @export
setClass("Interval", contains = c("Timespan", "numeric"),
slots = c(start = "POSIXct", tzone = "character"), validity = check_interval)
#' @export
setMethod("show", signature(object = "Interval"), function(object) {
if (length(object@.Data) == 0) {
cat("<Interval[0]>\n")
} else {
print(format(object), quote = FALSE)
}
})
#' @export
format.Interval <- function(x, ...) {
paste(
format(x@start, tz = x@tzone, usetz = TRUE),
format(x@start + x@.Data, tz = x@tzone, usetz = TRUE),
sep = "--"
)
}
#' @export
setMethod("c", signature(x = "Interval"), function(x, ...) {
  elements <- lapply(list(...), as.interval)
  ## `elements` is a plain list, so extract the slots element-wise
  spans <- c(x@.Data, unlist(lapply(elements, int_length)))
  starts <- do.call(c, c(list(x@start), lapply(elements, int_start)))
  new("Interval", spans, start = starts, tzone = x@tzone)
})
#' @export
setMethod("rep", signature(x = "Interval"), function(x, ...) {
new("Interval", rep(x@.Data, ...), start = rep(x@start, ...), tzone = x@tzone)
})
#' @export
setMethod("[", signature(x = "Interval"),
function(x, i, j, ..., drop = TRUE) {
new("Interval", x@.Data[i], start = x@start[i], tzone = x@tzone)
}
)
#' @export
setMethod("[[", signature(x = "Interval"),
function(x, i, j, ..., exact = TRUE) {
new("Interval", x@.Data[i], start = x@start[i], tzone = x@tzone)
}
)
#' @export
setMethod("[<-", signature(x = "Interval"), function(x, i, j, ..., value) {
if (is.interval(value)) {
x@.Data[i] <- value@.Data
x@start[i] <- value@start
new("Interval", x@.Data, start = x@start, tzone = x@tzone)
}
else {
x@.Data[i] <- value
new("Interval", x@.Data, start = x@start, tzone = x@tzone)
}
})
#' @export
setMethod("[[<-", signature(x = "Interval"), function(x, i, j, ..., value) {
if (is.interval(value)) {
x@.Data[i] <- value@.Data
x@start[i] <- value@start
new("Interval", x@.Data, start = x@start, tzone = x@tzone)
}
else {
x@.Data[i] <- value
new("Interval", x@.Data, start = x@start, tzone = x@tzone)
}
})
#' @export
setMethod("$", signature(x = "Interval"), function(x, name) {
if (name == "span") name <- ".Data"
slot(x, name)
})
#' @export
setMethod("$<-", signature(x = "Interval"), function(x, name, value) {
if (name == "span") name <- ".Data"
slot(x, name) <- value
x
})
#' @export
unique.Interval <- function(x, ...) {
df <- unique.data.frame(data.frame(data = x@.Data, start = x@start), ...)
new("Interval", df$data, start = df$start, tzone = x@tzone)
}
#' Utilities for creation and manipulation of `Interval` objects
#'
#' `interval()` creates an [Interval-class] object with the specified start and
#' end dates. If the start date occurs before the end date, the interval will be
#' positive. Otherwise, it will be negative. Character vectors in ISO 8601
#' format are supported from v1.7.2.
#'
#' Intervals are time spans bound by two real date-times. Intervals can be
#' accurately converted to either period or duration objects using
#' [as.period()], [as.duration()]. Since an interval is anchored to a fixed
#' history of time, both the exact number of seconds that passed and the number
#' of variable length time units that occurred during the interval can be
#' calculated.
#'
#' @export
#' @param start,end POSIXt, Date or character vectors. When `start` is a
#' character vector and `end` is `NULL`, the ISO 8601 specification is
#' assumed, but with much more permissive lubridate-style parsing both for
#' dates and periods (see examples).
#' @param tzone a recognized timezone to display the interval in
#' @param x an R object
#' @return `interval()` -- [Interval-class] object.
#' @seealso [Interval-class], [as.interval()], \code{\link{\%within\%}}
#' @examples
#' interval(ymd(20090201), ymd(20090101))
#'
#' date1 <- ymd_hms("2009-03-08 01:59:59")
#' date2 <- ymd_hms("2000-02-29 12:00:00")
#' interval(date2, date1)
#' interval(date1, date2)
#' span <- interval(ymd(20090101), ymd(20090201))
#'
#' ### ISO Intervals
#'
#' interval("2007-03-01T13:00:00Z/2008-05-11T15:30:00Z")
#' interval("2007-03-01T13:00:00Z/P1Y2M10DT2H30M")
#' interval("P1Y2M10DT2H30M/2008-05-11T15:30:00Z")
#' interval("2008-05-11/P2H30M")
#'
#' ### More permissive parsing (as long as there are no intermittent / characters)
#' interval("2008 05 11/P2hours 30minutes")
#' interval("08 05 11/P 2h 30m")
#'
#' is.interval(period(months= 1, days = 15)) # FALSE
#' is.interval(interval(ymd(20090801), ymd(20090809))) # TRUE
interval <- function(start = NULL, end = NULL, tzone = tz(start)) {
# NB: tzone is forced and never called on NULL here
if (is.character(start) && is.null(end)) {
return(parse_interval(start, tzone))
}
if (length(start) == 0 || length(end) == 0) {
## We used to return UTC on NULL
if (is.null(start) && missing(tzone)) {
tzone <- "UTC"
}
start <- POSIXct(tz = tzone)
return(new("Interval", numeric(), start = start, tzone = tzone))
}
if (is.Date(start)) start <- date_to_posix(start)
if (is.Date(end)) end <- date_to_posix(end)
force(tzone)
start <- as_POSIXct(start, tzone)
end <- as_POSIXct(end, tzone)
span <- as.numeric(end) - as.numeric(start)
starts <- start + rep(0, length(span))
if (tzone != tz(starts)) starts <- with_tz(starts, tzone)
new("Interval", span, start = starts, tzone = tzone)
}
parse_interval <- function(x, tz) {
# create matrix of string parts from x: 1st column is anything before /, 2nd is anything after.
# replicates without stringr: str_split_fixed(x, "/", 2)
mat <- matrix(
c(gsub('(^[^/]+)/(.+$)', '\\1', x), gsub('(^[^/]+)/(.+$)', '\\2', x)),
ncol = 2
)
pstart <- grepl("^P", mat[, 1])
pend <- grepl("^P", mat[, 2])
if (any(pstart & pend)) {
stop(sprintf("Interval specified with period endpoints (%s)", x[pstart & pend][[1]]))
}
start <- .POSIXct(rep.int(NA_real_, length(x)), tz = tz)
end <- .POSIXct(rep.int(NA_real_, length(x)), tz = tz)
start[!pstart] <- .parse_iso_dt(mat[!pstart, 1], tz)
end[!pend] <- .parse_iso_dt(mat[!pend, 2], tz = tz)
end[pend] <- start[pend] + parse_period(mat[pend, 2])
start[pstart] <- end[pstart] - parse_period(mat[pstart, 1])
interval(start, end, tz)
}
#'
#' \code{\%--\%} Creates an interval that covers the range spanned by two
#' dates. It replaces the original behavior of \pkg{lubridate}, which created an
#' interval by default whenever two date-times were subtracted.
#'
#' @export
#' @rdname interval
"%--%" <- function(start, end) interval(start, end)
#' @rdname interval
#' @export
is.interval <- function(x) is(x, "Interval")
#'
#'
#' `int_start()` and `int_start<-()` are accessors for the start date of an
#' interval. Note that changing the start date of an interval will change the
#' length of the interval, since the end date will remain the same.
#'
#' @rdname interval
#' @param int an interval object
#' @return `int_start()` and `int_end()` return a POSIXct date object when
#' used as an accessor. Nothing when used as a setter.
#' @examples
#' int <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int_start(int)
#' int_start(int) <- ymd("2001-06-01")
#' int
#'
#' int <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int_end(int)
#' int_end(int) <- ymd("2002-06-01")
#' int
#' @export
int_start <- function(int) int@start
#' @rdname interval
#' @param value interval's start/end to be assigned to `int`
#' @export
"int_start<-" <- function(int, value) {
value <- as.POSIXct(value)
span <- as.numeric(int@start + int@.Data - value, "secs")
equal.lengths <- data.frame(span, value)
int <- new("Interval", span, start = equal.lengths$value,
tzone = int@tzone)
}
#' @description
#' `int_end()` and `int_end<-()` are accessors for the end date of an
#' interval. Note that changing the end date of an interval will change the
#' length of the interval, since the start date will remain the same.
#'
#' @rdname interval
#' @export
int_end <- function(int) int@start + int@.Data
#' @rdname interval
#' @export
"int_end<-" <- function(int, value) {
value <- as.POSIXct(value)
span <- as.numeric(value - int@start, "secs")
int <- new("Interval", span, start = int@start,
tzone = int@tzone)
}
#' @rdname interval
#' @return `int_length()` -- numeric length of the interval in
#' seconds. A negative number connotes a negative interval.
#' @examples
#' int <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int_length(int)
#' @export
int_length <- function(int) int@.Data
#' @description
#' `int_flip()` reverses the order of the start date and end date in an
#' interval. The new interval takes place during the same timespan as the
#' original interval, but has the opposite direction.
#'
#' @rdname interval
#' @return `int_flip()` -- flipped interval object
#' @examples
#' int <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int_flip(int)
#' @export
int_flip <- function(int) {
new("Interval", -int@.Data, start = int@start + int@.Data, tzone = int@tzone)
}
#' @description
#' `int_shift()` shifts the start and end dates of an interval up or down the
#' timeline by a specified amount. Note that this may change the exact length of
#' the interval if the interval is shifted by a Period object. Intervals shifted
#' by a Duration or difftime object will retain their exact length in seconds.
#'
#' @rdname interval
#' @param by A period or duration object to shift by (for `int_shift`)
#' @return `int_shift()` -- an Interval object
#' @examples
#' int <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int_shift(int, duration(days = 11))
#' int_shift(int, duration(hours = -1))
#' @export
int_shift <- function(int, by) {
if (!is.timespan(by)) stop("by is not a recognized timespan object")
if (is.interval(by)) stop("an interval cannot be shifted by another interval.
Convert second interval to a period or duration.")
interval(int@start + by, int_end(int) + by)
}
#' @description
#' `int_overlaps()` tests if two intervals overlap.
#'
#' @rdname interval
#' @param int1 an Interval object (for `int_overlaps()`, `int_aligns()`)
#' @param int2 an Interval object (for `int_overlaps()`, `int_aligns()`)
#' @return `int_overlaps()` -- logical, TRUE if int1 and int2 overlap by at
#' least one second. FALSE otherwise
#' @examples
#' int1 <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int2 <- interval(ymd("2001-06-01"), ymd("2002-06-01"))
#' int3 <- interval(ymd("2003-01-01"), ymd("2004-01-01"))
#'
#' int_overlaps(int1, int2) # TRUE
#' int_overlaps(int1, int3) # FALSE
#' @export
int_overlaps <- function(int1, int2) {
stopifnot(c(is.interval(int1), is.interval(int2)))
int1 <- int_standardize(int1)
int2 <- int_standardize(int2)
int1@start <= int2@start + int2@.Data & int2@start <= int1@start + int1@.Data
}
#' @description
#' `int_standardize()` ensures all intervals in an interval object are
#' positive. If an interval is not positive, flip it so that it retains its
#' endpoints but becomes positive.
#'
#' @rdname interval
#' @examples
#' int <- interval(ymd("2002-01-01"), ymd("2001-01-01"))
#' int_standardize(int)
#' @export
int_standardize <- function(int) {
negs <- !is.na(int@.Data) & int@.Data < 0
int[negs] <- int_flip(int[negs])
int
}
#' @description
#' `int_aligns()` tests if two intervals share an endpoint. The direction of
#' each interval is ignored. int_align tests whether the earliest or latest
#' moments of each interval occur at the same time.
#'
#' @rdname interval
#' @return `int_aligns()` -- logical, TRUE if int1 and int2 begin or end on the
#' same moment. FALSE otherwise
#' @examples
#' int1 <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int2 <- interval(ymd("2001-06-01"), ymd("2002-01-01"))
#' int3 <- interval(ymd("2003-01-01"), ymd("2004-01-01"))
#'
#' int_aligns(int1, int2) # TRUE
#' int_aligns(int1, int3) # FALSE
#' @export
int_aligns <- function(int1, int2) {
int1 <- int_standardize(int1)
int2 <- int_standardize(int2)
int1@start == int2@start | (int1@start + int1@.Data) == (int2@start + int2@.Data)
}
#' @description
#' `int_diff()` returns the intervals that occur between the elements of a
#' vector of date-times. `int_diff()` is similar to the POSIXt and Date
#' methods of [diff()], but returns an [Interval-class] object instead
#' of a difftime object.
#'
#' @rdname interval
#' @param times A vector of POSIXct, POSIXlt or Date class date-times (for
#' `int_diff()`)
#' @return `int_diff()` -- interval object that contains the n-1 intervals
#' between the n date-time in times
#' @examples
#' dates <- now() + days(1:10)
#' int_diff(dates)
#' @export
int_diff <- function(times) {
interval(times[-length(times)], times[-1])
}
#' @importFrom generics intersect
#' @export
generics::intersect
#' @export
intersect.Interval <- function(x, y, ...) {
int1 <- int_standardize(x)
int2 <- int_standardize(y)
starts <- pmax(int1@start, int2@start)
ends <- pmin(int1@start + int1@.Data, int2@start + int2@.Data)
spans <- as.numeric(ends) - as.numeric(starts)
no.int <- ends < starts
no.int <- is.na(no.int) | no.int
spans[no.int] <- NA
starts[no.int] <- NA
new.int <- new("Interval", spans, start = starts, tzone = x@tzone)
negix <- !is.na(x@.Data) & (sign(x@.Data) == -1)
new.int[negix] <- int_flip(new.int[negix])
new.int
}
#' @importFrom generics union
#' @export
generics::union
#' @export
union.Interval <- function(x, y, ...) {
int1 <- int_standardize(x)
int2 <- int_standardize(y)
starts <- pmin(int1@start, int2@start)
ends <- pmax(int1@start + int1@.Data, int2@start + int2@.Data)
spans <- as.numeric(ends) - as.numeric(starts)
if (any(!int_overlaps(int1, int2)) && is_verbose()) {
message("Union includes intervening time between intervals.")
}
tz(starts) <- x@tzone
new.int <- new("Interval", spans, start = starts, tzone = x@tzone)
new.int[sign(x@.Data) == -1] <- int_flip(new.int[sign(x@.Data) == -1])
new.int
}
#' @importFrom generics setdiff
#' @export
generics::setdiff
#' @export
setdiff.Interval <- function(x, y, ...) {
if (length(x) != length(y)) {
xy <- match_lengths(x, y)
x <- xy[[1]]
y <- xy[[2]]
}
aligned <- int_aligns(x, y)
inside <- y %within% x
makes2 <- !aligned & inside
if (sum(makes2)) {
stop(paste("Cases", which(makes2),
"result in discontinuous intervals."))
}
int1 <- int_standardize(x)
int2 <- int_standardize(y)
first.y <- int_start(int2)
last.y <- int_end(int2)
starts <- int_start(int1)
starts[(last.y + 1) %within% int1] <- last.y[(last.y + 1) %within% int1]
ends <- int_end(int1)
ends[(first.y - 1) %within% int1] <- first.y[(first.y - 1) %within% int1]
spans <- as.numeric(ends) - as.numeric(starts)
new.int <- new("Interval", spans, start = starts, tzone = x@tzone)
new.int[sign(x@.Data) == -1] <- int_flip(new.int[sign(x@.Data) == -1])
new.int
}
#' Does a date (or interval) fall within an interval?
#'
#' Check whether `a` lies within the interval `b`, inclusive of the endpoints.
#'
#' @export
#' @rdname within-interval
#' @aliases %within%,Interval,Interval-method %within%,ANY,Interval-method
#' %within%,Date,list-method %within%,POSIXt,list-method
#' @param a An interval or date-time object.
#' @param b Either an interval vector, or a list of intervals.
#'
#' If `b` is an internal it is recycled to the same length as `a`.
#' If `b` is a list of intervals, `a` is checked if it falls within _any_
#' of the intervals, i.e. `a %within% list(int1, int2)` is equivalent to
#' `a %within% int1 | a %within% int2`.
#' @return A logical vector.
#' @examples
#' int <- interval(ymd("2001-01-01"), ymd("2002-01-01"))
#' int2 <- interval(ymd("2001-06-01"), ymd("2002-01-01"))
#'
#' ymd("2001-05-03") %within% int # TRUE
#' int2 %within% int # TRUE
#' ymd("1999-01-01") %within% int # FALSE
#'
#' ## recycling
#' dates <- ymd(c("2014-12-20", "2014-12-30", "2015-01-01", "2015-01-03"))
#' blackouts<- c(interval(ymd("2014-12-30"), ymd("2014-12-31")),
#' interval(ymd("2014-12-30"), ymd("2015-01-03")))
#' dates %within% blackouts
#'
#' ## within ANY of the intervals of a list
#' dates <- ymd(c("2014-12-20", "2014-12-30", "2015-01-01", "2015-01-03"))
#' blackouts<- list(interval(ymd("2014-12-30"), ymd("2014-12-31")),
#' interval(ymd("2014-12-30"), ymd("2015-01-03")))
#' dates %within% blackouts
setGeneric("%within%", useAsDefault = function(a, b) {
stop(sprintf("No %%within%% method with signature a = %s, b = %s",
class(a)[[1]], class(b)[[1]]))
})
.within <- function(a, int) {
as.numeric(a) - as.numeric(int@start) <= int@.Data & as.numeric(a) - as.numeric(int@start) >= 0
}
setMethod("%within%", signature(b = "Interval"), function(a, b) {
if (!is.instant(a)) stop("Argument 1 is not a recognized date-time")
a <- as.POSIXct(a)
.within(a, b)
})
setMethod("%within%", signature(a = "Interval", b = "Interval"), function(a, b) {
a <- int_standardize(a)
b <- int_standardize(b)
start.in <- as.numeric(a@start) >= as.numeric(b@start)
end.in <- (as.numeric(a@start) + a@.Data) <= (as.numeric(b@start) + b@.Data)
start.in & end.in
})
.within_instant <- function(a, b) {
if (!all(sapply(b, is.interval)))
stop("When second argument to %within% is a list it must contain interval objects only")
a <- as.POSIXct(a)
out <- FALSE
for (int in b) {
out <- out | .within(a, int)
}
out
}
setMethod("%within%", signature(a = "POSIXt", b = "list"), .within_instant)
setMethod("%within%", signature(a = "Date", b = "list"), .within_instant)
#' @export
as.list.Interval <- function(x, ...) {
lapply(seq_along(x), function(i) x[[i]])
}
#' @export
summary.Interval <- function(object, ...) {
nas <- is.na(object)
object <- object[!nas]
n <- length(object)
dates <- c(int_start(object), int_end(object))
earliest <- as.character(min(dates))
latest <- as.character(max(dates))
zone <- tz(dates)
qq <- c(n, earliest, latest, zone)
names(qq) <- c("Intervals", "Earliest endpoint", "Latest endpoint", "Time zone")
if (any(nas))
c(qq, `NA's` = sum(nas))
else qq
}
#' @rdname time_length
setMethod("time_length", signature("Interval"), function(x, unit = "second") {
unit <- standardise_period_names(unit)
if (unit %in% c("year", "month")) {
periods <- as.period(x, unit = unit)
int_part <- slot(periods, unit)
prev_aniv <- add_with_rollback(
int_start(x), (int_part * period(1, units = unit)),
roll_to_first = TRUE, preserve_hms = FALSE)
next_aniv <- add_with_rollback(
int_start(x), ((int_part + ifelse(x@.Data < 0, -1, 1)) * period(1, units = unit)),
roll_to_first = TRUE, preserve_hms = FALSE)
sofar <- as.duration(int_end(x) - prev_aniv)
total <- as.duration(next_aniv - prev_aniv)
int_part + sign(x@.Data) * sofar / total
} else {
as.duration(x) / duration(num = 1, units = unit)
}
})
#' @export
setMethod("Arith", signature(e1 = "Interval", e2 = "ANY"), function(e1, e2) {
stop_incompatible_classes(e1, e2, .Generic)
})
#' @export
setMethod("Arith", signature(e1 = "ANY", e2 = "Interval"), function(e1, e2) {
stop_incompatible_classes(e1, e2, .Generic)
})
#' @name hidden_aliases
#' @aliases Arith,Interval,ANY-method Arith,ANY,Interval-method
#' intersect,Interval,Interval-method union,Interval,Interval-method
#' setdiff,Interval,Interval-method as.numeric,Interval-method
#' show,Interval-method c,Interval-method rep,Interval-method
#' [,Interval-method [<-,Interval,ANY,ANY,ANY-method [[,Interval-method
#' [[<-,Interval,ANY,ANY,ANY-method $,Interval-method $<-,Interval-method
#' as.difftime,Interval-method as.character,Interval-method
#' +,Interval,Duration-method +,Interval,Interval-method
#' +,Interval,Period-method +,Interval,Date-method +,Date,Interval-method
#' +,Interval,difftime-method +,difftime,Interval-method
#' +,Interval,numeric-method +,numeric,Interval-method
#' +,Interval,POSIXct-method +,POSIXct,Interval-method
#' +,Interval,POSIXlt-method +,POSIXlt,Interval-method
#' /,Interval,Duration-method /,Interval,Interval-method
#' /,Interval,Period-method /,Interval,difftime-method
#' /,difftime,Interval-method /,Interval,numeric-method
#' /,numeric,Interval-method *,Interval,ANY-method *,ANY,Interval-method
#' -,Interval,missing-method -,Interval,Interval-method -,Date,Interval-method
#' -,POSIXct,Interval-method -,POSIXlt,Interval-method
#' -,numeric,Interval-method -,Interval,Date-method -,Interval,POSIXct-method
#' -,Interval,POSIXlt-method -,Interval,numeric-method
#' -,Duration,Interval-method -,Period,Interval-method
#' %%,Interval,Duration-method %%,Interval,Interval-method
#' %%,Interval,Period-method %%,Interval,Duration %%,Interval,Interval
#' %%,Interval,Period -,Date,Interval -,Duration,Interval -,Interval,Date
#' -,Interval,Interval -,Interval,POSIXct -,Interval,POSIXlt
#' -,Interval,numeric -,POSIXct,Interval -,POSIXlt,Interval -,numeric,Interval
NULL
| {
"pile_set_name": "Github"
} |
[
{
"blockName": "core/social-link-deviantart",
"attrs": {
"url": "https://example.com/"
},
"innerBlocks": [],
"innerHTML": "",
"innerContent": []
},
{
"blockName": null,
"attrs": {},
"innerBlocks": [],
"innerHTML": "\n",
"innerContent": [
"\n"
]
}
]
| {
"pile_set_name": "Github"
} |
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/C++, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
FILE(GLOB headers *.h)
ADD_HEADERS(${headers})
ADD_HEADER_CHECKS()
| {
"pile_set_name": "Github"
} |
import React from "react";
import Fab from "@material-ui/core/Fab";
import ContentAdd from "@material-ui/icons/Add";
import Search from "@material-ui/icons/Search";
import PageBase from "../components/PageBase";
import { connect } from "react-redux";
import { getAction } from "../actions/order";
import Drawer from "@material-ui/core/Drawer";
import Button from "@material-ui/core/Button";
import TextField from "@material-ui/core/TextField";
import Snackbar from "@material-ui/core/Snackbar";
import { thunkApiCall } from "../services/thunks";
import { LIST_ORDER, DELETE_ORDER, NEW_ORDER, ApiAction } from "../store/types";
import { Order, SearchFilter } from "../types";
import { Grid } from "@material-ui/core";
import Alert from "../components/Alert";
import SkeletonList from "../components/SkeletonList";
import DataTable from "../components/DataTable";
import DeleteDialog from "../components/DeleteDialog";
import { listPageStyle } from "../styles";
import { clearSearchFilters, buildSearchFilters, buildJsonServerQuery } from "../utils/app-utils";
const styles = listPageStyle;
const defaultProps = {
model: "order",
dataKeys: [
"reference",
"products.length",
"amount",
"customer.firstname",
"orderDate",
"shippedDate",
"actions",
],
headers: [
"Reference",
"Quantity",
"Amount",
"Customer",
"Order Date",
"Shipping Date",
"Actions",
],
};
type DefaultProps = typeof defaultProps;
type OrderListProps = {
pageCount: number;
isFetching: boolean;
orderList: Order[];
searchOrder: typeof thunkApiCall;
deleteOrder: typeof thunkApiCall;
newOrder: typeof thunkApiCall;
errorMessage: string;
deleted: boolean;
} & DefaultProps;
interface OrderListState {
open: boolean;
isFetching: boolean;
searchOpen: boolean;
snackbarOpen: boolean;
autoHideDuration: number;
page: number;
items: Order[];
orderList: Order[];
totalPages: number;
orderId: number;
search: {
contain: {
reference: string;
customer: string;
};
};
}
class OrderListPage extends React.Component<OrderListProps, OrderListState> {
constructor(props) {
super(props);
this.handleToggle = this.handleToggle.bind(this);
this.handleSearch = this.handleSearch.bind(this);
this.closeDialog = this.closeDialog.bind(this);
this.onPageChange = this.onPageChange.bind(this);
this.onSnackBarClose = this.onSnackBarClose.bind(this);
this.handleNewOrder = this.handleNewOrder.bind(this);
this.handleSearchFilter = this.handleSearchFilter.bind(this);
this.clearSearchFilter = this.clearSearchFilter.bind(this);
this.onPageChange = this.onPageChange.bind(this);
this.openDialog = this.openDialog.bind(this);
}
static defaultProps = defaultProps;
state: OrderListState = {
isFetching: true,
open: false,
searchOpen: false,
snackbarOpen: false,
autoHideDuration: 1500,
page: 1,
items: [],
totalPages: 1,
orderList: [],
orderId: null,
search: {
contain: {
reference: "",
customer: "",
},
},
};
componentDidMount() {
this.handleSearch();
}
/* eslint-disable */
componentDidUpdate(prevProps, prevState) {
if (this.props.orderList !== prevProps.orderList) {
this.setState({ orderList: this.props.orderList });
const page = 1;
const totalPages = Math.ceil(this.props.orderList.length / 10);
const items = this.props.orderList.slice(0, 10);
const isFetching = this.props.isFetching;
this.setState({ page, totalPages, items, isFetching });
}
if (
this.props.deleted !== prevProps.deleted &&
this.props.deleted === true
) {
this.setState({ snackbarOpen: true });
this.handleSearch();
}
}
onPageChange(_event: React.ChangeEvent<unknown>, page: number) {
const startIndex = (page - 1) * 10;
const endIndex = startIndex + 10;
const items = this.props.orderList.slice(startIndex, endIndex);
this.setState({ page, items });
}
openDialog(_event: React.ChangeEvent<unknown>, value: number) {
if (value != null && value > 0) {
this.setState({ open: true, orderId: value });
}
}
handleToggle() {
this.setState({ searchOpen: !this.state.searchOpen });
}
  handleSearch() {
    const filters = buildSearchFilters(this.state.search as SearchFilter);
    const query = buildJsonServerQuery(filters);
    const action = getAction(LIST_ORDER, null, null, query) as ApiAction;
    this.props.searchOrder(action);
    this.setState({ searchOpen: false, isFetching: true });
  }
closeDialog(isConfirmed: boolean) {
this.setState({ open: false });
if (isConfirmed && this.state.orderId) {
const action = getAction(
DELETE_ORDER,
this.state.orderId,
null,
""
) as ApiAction;
this.props.deleteOrder(action);
this.setState({ orderId: null });
}
}
handleNewOrder() {
const action = getAction(NEW_ORDER) as ApiAction;
this.props.newOrder(action);
// @ts-ignore
this.props.history.push("/neworder");
}
handleSearchFilter(event) {
const field = event.target.name;
if (event && event.target && field) {
const search = Object.assign({}, this.state.search);
search.contain[field] = event.target.value;
this.setState({ search: search });
}
}
clearSearchFilter() {
const search = Object.assign({}, this.state.search);
clearSearchFilters(search as SearchFilter);
this.setState({ search });
this.handleSearch()
}
handleErrorMessage() {
this.setState({
snackbarOpen: true,
});
}
onSnackBarClose() {
this.setState({
snackbarOpen: false,
});
}
render() {
const { orderList, headers, dataKeys, model } = this.props;
const { isFetching, page, totalPages, items } = this.state;
return (
<PageBase
title={"Orders (" + orderList.length + ")"}
navigation="React CRM / Order"
>
{isFetching ? (
<div>
<SkeletonList />
</div>
) : (
<div>
<Fab
size="small"
color="secondary"
style={styles.fab}
onClick={this.handleNewOrder}
>
<ContentAdd />
</Fab>
<Fab
size="small"
style={styles.fabSearch}
onClick={this.handleToggle}
>
<Search />
</Fab>
<Snackbar
open={this.state.snackbarOpen}
autoHideDuration={this.state.autoHideDuration}
onClose={this.onSnackBarClose}
>
<Alert onClose={this.onSnackBarClose} severity="success">
The operation completed successfully !
</Alert>
</Snackbar>
<DataTable
model={model}
items={items}
dataKeys={dataKeys}
headers={headers}
page={page}
totalPages={totalPages}
onDelete={this.openDialog}
onPageChange={this.onPageChange}
/>
<DeleteDialog
open={this.state.open}
closeDialog={this.closeDialog}
/>
<Drawer
anchor="right"
open={this.state.searchOpen}
onClose={this.handleToggle}
style={styles.searchDrawer}
>
<Grid container style={styles.searchDrawer} spacing={0}>
<Grid item xs={12} style={styles.searchField}>
<h5>Search</h5>
</Grid>
<Grid item xs={12} style={styles.searchField}>
<TextField
// placeholder="Order Reference"
label="Order Reference"
fullWidth={true}
name="reference"
value={this.state.search.contain.reference}
onChange={this.handleSearchFilter}
/>
</Grid>
<Grid item xs={12} style={styles.searchField}>
<Button
variant="contained"
style={styles.searchButton}
onClick={this.handleSearch}
color="secondary"
>
Search
</Button>
<Button
variant="contained"
style={styles.searchButton}
onClick={this.clearSearchFilter}
color="default"
>
Cancel
</Button>
</Grid>
</Grid>
</Drawer>
</div>
)}
</PageBase>
);
}
}
function mapStateToProps(state) {
const { orderList, isFetching, errorMessage, user, deleted } = state.order;
return {
orderList,
isFetching,
errorMessage,
user,
deleted,
};
}
function mapDispatchToProps(dispatch) {
return {
searchOrder: (action?: TODO) => dispatch(thunkApiCall(action)),
deleteOrder: (action: TODO) => dispatch(thunkApiCall(action)),
newOrder: (action?: TODO) => dispatch(thunkApiCall(action)),
};
}
export default connect(mapStateToProps, mapDispatchToProps)(OrderListPage);
| {
"pile_set_name": "Github"
} |
.. title:: clang-tidy - cert-fio38-c
.. meta::
   :http-equiv=refresh: 5;URL=misc-non-copyable-objects.html
cert-fio38-c
============
The cert-fio38-c check is an alias, please see
`misc-non-copyable-objects <misc-non-copyable-objects.html>`_ for more
information.
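A minimal invocation sketch (``example.c`` is a placeholder file name),
enabling only this alias check:

.. code-block:: console

  $ clang-tidy -checks='-*,cert-fio38-c' example.c --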
| {
"pile_set_name": "Github"
} |
{
"nome": "Bareggio",
"codice": "015012",
"zona": {
"codice": "1",
"nome": "Nord-ovest"
},
"regione": {
"codice": "03",
"nome": "Lombardia"
},
"provincia": {
"codice": "015",
"nome": "Milano"
},
"sigla": "MI",
"codiceCatastale": "A652",
"cap": [
"20010"
],
"popolazione": 17035
}
| {
"pile_set_name": "Github"
} |
{
"_args": [
[
"estraverse@^4.2.0",
"/Users/wuchangfang/Workspace/My/django-beginners-guide/node_modules/escodegen"
]
],
"_from": "estraverse@>=4.2.0 <5.0.0",
"_id": "estraverse@4.2.0",
"_inCache": true,
"_installable": true,
"_location": "/estraverse",
"_nodeVersion": "0.12.9",
"_npmOperationalInternal": {
"host": "packages-12-west.internal.npmjs.com",
"tmp": "tmp/estraverse-4.2.0.tgz_1457646738925_0.7118953282479197"
},
"_npmUser": {
"email": "nicholas@nczconsulting.com",
"name": "nzakas"
},
"_npmVersion": "2.14.9",
"_phantomChildren": {},
"_requested": {
"name": "estraverse",
"raw": "estraverse@^4.2.0",
"rawSpec": "^4.2.0",
"scope": null,
"spec": ">=4.2.0 <5.0.0",
"type": "range"
},
"_requiredBy": [
"/escodegen"
],
"_resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.2.0.tgz",
"_shasum": "0dee3fed31fcd469618ce7342099fc1afa0bdb13",
"_shrinkwrap": null,
"_spec": "estraverse@^4.2.0",
"_where": "/Users/wuchangfang/Workspace/My/django-beginners-guide/node_modules/escodegen",
"bugs": {
"url": "https://github.com/estools/estraverse/issues"
},
"dependencies": {},
"description": "ECMAScript JS AST traversal functions",
"devDependencies": {
"babel-preset-es2015": "^6.3.13",
"babel-register": "^6.3.13",
"chai": "^2.1.1",
"espree": "^1.11.0",
"gulp": "^3.8.10",
"gulp-bump": "^0.2.2",
"gulp-filter": "^2.0.0",
"gulp-git": "^1.0.1",
"gulp-tag-version": "^1.2.1",
"jshint": "^2.5.6",
"mocha": "^2.1.0"
},
"directories": {},
"dist": {
"shasum": "0dee3fed31fcd469618ce7342099fc1afa0bdb13",
"tarball": "https://registry.npmjs.org/estraverse/-/estraverse-4.2.0.tgz"
},
"engines": {
"node": ">=0.10.0"
},
"gitHead": "6f6a4e99653908e859c7c10d04d9518bf4844ede",
"homepage": "https://github.com/estools/estraverse",
"license": "BSD-2-Clause",
"main": "estraverse.js",
"maintainers": [
{
"name": "constellation",
"email": "utatane.tea@gmail.com"
},
{
"name": "michaelficarra",
"email": "npm@michael.ficarra.me"
},
{
"name": "nzakas",
"email": "nicholas@nczconsulting.com"
}
],
"name": "estraverse",
"optionalDependencies": {},
"readme": "ERROR: No README data found!",
"repository": {
"type": "git",
"url": "git+ssh://git@github.com/estools/estraverse.git"
},
"scripts": {
"lint": "jshint estraverse.js",
"test": "npm run-script lint && npm run-script unit-test",
"unit-test": "mocha --compilers js:babel-register"
},
"version": "4.2.0"
}
| {
"pile_set_name": "Github"
} |
^\.git
^MYMETA.*$
^Makefile$
~$
\.old(?:\..*)?$
\.swp$
\.o$
\.bs$
\.bak$
\.orig$
\.cache\.cm$
\.DS_Store
^blib
^pm_to_blib
^backup
^parts/todo-
^parts/base-
^ppport\.h$
^PPPort\.c$
^RealPPPort\.
^testing
Devel-PPPort.*
Devel-PPPort.*\.tar\.gz$
.travis.yml
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="test">
<UniqueIdentifier>{B674DEFB-2B8A-B641-9B90-E38E9E9275A3}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\..\test\testrelative.c">
<Filter>test</Filter>
</ClCompile>
</ItemGroup>
</Project>
| {
"pile_set_name": "Github"
} |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"fmt"
"reflect"
"github.com/alecthomas/template/parse"
)
// common holds the information shared by related templates.
type common struct {
tmpl map[string]*Template
// We use two maps, one for parsing and one for execution.
// This separation makes the API cleaner since it doesn't
// expose reflection to the client.
parseFuncs FuncMap
execFuncs map[string]reflect.Value
}
// Template is the representation of a parsed template. The *parse.Tree
// field is exported only for use by html/template and should be treated
// as unexported by all other clients.
type Template struct {
name string
*parse.Tree
*common
leftDelim string
rightDelim string
}
// New allocates a new template with the given name.
func New(name string) *Template {
return &Template{
name: name,
}
}
// Name returns the name of the template.
func (t *Template) Name() string {
return t.name
}
// New allocates a new template associated with the given one and with the same
// delimiters. The association, which is transitive, allows one template to
// invoke another with a {{template}} action.
func (t *Template) New(name string) *Template {
t.init()
return &Template{
name: name,
common: t.common,
leftDelim: t.leftDelim,
rightDelim: t.rightDelim,
}
}
func (t *Template) init() {
if t.common == nil {
t.common = new(common)
t.tmpl = make(map[string]*Template)
t.parseFuncs = make(FuncMap)
t.execFuncs = make(map[string]reflect.Value)
}
}
// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to Parse in the copy will add
// templates to the copy but not to the original. Clone can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
func (t *Template) Clone() (*Template, error) {
nt := t.copy(nil)
nt.init()
nt.tmpl[t.name] = nt
for k, v := range t.tmpl {
if k == t.name { // Already installed.
continue
}
// The associated templates share nt's common structure.
tmpl := v.copy(nt.common)
nt.tmpl[k] = tmpl
}
for k, v := range t.parseFuncs {
nt.parseFuncs[k] = v
}
for k, v := range t.execFuncs {
nt.execFuncs[k] = v
}
return nt, nil
}
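// Illustrative sketch (added commentary, not part of the original file):
// Clone lets a base template set be shared while variants override
// definitions locally; error handling elided.
//
//	base, _ := New("base").Parse(`{{define "greet"}}hello{{end}}`)
//	variant, _ := base.Clone()
//	variant.Parse(`{{define "greet"}}hi{{end}}`) // base's "greet" is unchanged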
// copy returns a shallow copy of t, with common set to the argument.
func (t *Template) copy(c *common) *Template {
nt := New(t.name)
nt.Tree = t.Tree
nt.common = c
nt.leftDelim = t.leftDelim
nt.rightDelim = t.rightDelim
return nt
}
// AddParseTree creates a new template with the name and parse tree
// and associates it with t.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
if t.common != nil && t.tmpl[name] != nil {
return nil, fmt.Errorf("template: redefinition of template %q", name)
}
nt := t.New(name)
nt.Tree = tree
t.tmpl[name] = nt
return nt, nil
}
// Templates returns a slice of the templates associated with t, including t
// itself.
func (t *Template) Templates() []*Template {
if t.common == nil {
return nil
}
// Return a slice so we don't expose the map.
m := make([]*Template, 0, len(t.tmpl))
for _, v := range t.tmpl {
m = append(m, v)
}
return m
}
// Delims sets the action delimiters to the specified strings, to be used in
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
// definitions will inherit the settings. An empty delimiter stands for the
// corresponding default: {{ or }}.
// The return value is the template, so calls can be chained.
func (t *Template) Delims(left, right string) *Template {
t.leftDelim = left
t.rightDelim = right
return t
}
// Funcs adds the elements of the argument map to the template's function map.
// It panics if a value in the map is not a function with appropriate return
// type. However, it is legal to overwrite elements of the map. The return
// value is the template, so calls can be chained.
func (t *Template) Funcs(funcMap FuncMap) *Template {
t.init()
addValueFuncs(t.execFuncs, funcMap)
addFuncs(t.parseFuncs, funcMap)
return t
}
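// Illustrative sketch (added commentary, not part of the original file):
// Delims and Funcs return the template, so configuration can be chained
// before Parse; assumes the caller imports "strings".
//
//	t := New("page").Delims("[[", "]]").Funcs(FuncMap{"upper": strings.ToUpper})
//	t, _ = t.Parse(`[[upper "hi"]]`)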
// Lookup returns the template with the given name that is associated with t,
// or nil if there is no such template.
func (t *Template) Lookup(name string) *Template {
if t.common == nil {
return nil
}
return t.tmpl[name]
}
// Parse parses a string into a template. Nested template definitions will be
// associated with the top-level template t. Parse may be called multiple times
// to parse definitions of templates to associate with t. It is an error if a
// resulting template is non-empty (contains content other than template
// definitions) and would replace a non-empty template with the same name.
// (In multiple calls to Parse with the same receiver template, only one call
// can contain text other than space, comments, and template definitions.)
func (t *Template) Parse(text string) (*Template, error) {
t.init()
trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
if err != nil {
return nil, err
}
// Add the newly parsed trees, including the one for t, into our common structure.
for name, tree := range trees {
// If the name we parsed is the name of this template, overwrite this template.
// The associate method checks it's not a redefinition.
tmpl := t
if name != t.name {
tmpl = t.New(name)
}
// Even if t == tmpl, we need to install it in the common.tmpl map.
if replace, err := t.associate(tmpl, tree); err != nil {
return nil, err
} else if replace {
tmpl.Tree = tree
}
tmpl.leftDelim = t.leftDelim
tmpl.rightDelim = t.rightDelim
}
return t, nil
}
// associate installs the new template into the group of templates associated
// with t. It is an error to reuse a name except to overwrite an empty
// template. The two are already known to share the common structure.
// The boolean return value reports whether to store this tree as t.Tree.
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
if new.common != t.common {
panic("internal error: associate not common")
}
name := new.name
if old := t.tmpl[name]; old != nil {
oldIsEmpty := parse.IsEmptyTree(old.Root)
newIsEmpty := parse.IsEmptyTree(tree.Root)
if newIsEmpty {
// Whether old is empty or not, new is empty; no reason to replace old.
return false, nil
}
if !oldIsEmpty {
return false, fmt.Errorf("template: redefinition of template %q", name)
}
}
t.tmpl[name] = new
return true, nil
}
| {
"pile_set_name": "Github"
} |
var convert = require('./convert'),
func = convert('isString', require('../isString'), require('./_falseOptions'));
func.placeholder = require('./placeholder');
module.exports = func;
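// Illustrative only (added commentary): with the cap/curry/rearg behaviors
// disabled via _falseOptions, `func` behaves like plain lodash `isString`.
//   func('abc'); // => true
//   func(42);    // => false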
| {
"pile_set_name": "Github"
} |
/*-
* Public Domain 2014-2016 MongoDB, Inc.
* Public Domain 2008-2014 WiredTiger, Inc.
*
* This is free and unencumbered software released into the public domain.
*
* Anyone is free to copy, modify, publish, use, compile, sell, or
* distribute this software, either in source code form or as a compiled
* binary, for any purpose, commercial or non-commercial, and by any
* means.
*
* In jurisdictions that recognize copyright laws, the author or authors
* of this software dedicate any and all copyright interest in the
* software to the public domain. We make this dedication for the benefit
* of the public at large and to the detriment of our heirs and
* successors. We intend this dedication to be an overt act of
* relinquishment in perpetuity of all present and future rights to this
* software under copyright law.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "test_util.h"
/*
* JIRA ticket reference: WT-2447
*
* Test case description: This test case is adapted from the submitted test
* program in the JIRA ticket. We create a database of 10,000 entries, with
 * every key i having a pair of values (i, i). Create indices on both values,
* and establish a join: table.v1 >= 5000 AND table.v2 < 5001. There's a
* Bloom filter on v2. We expect that although we iterate from 5000 to
* 10000, we'll only have accesses to the main table for key 5000, as
* 5001-10000 will generally not be in the Bloom filter. For key 5000,
* we technically have two accesses to the main table - one occurs when we
* see key 5000 is in the Bloom filter, and we need to do a full test, we
* make an access to the projection table:tablename(v2), that's just to get
* the value of v2, which we'll check by comparison to the cursor at 5001.
* That counts as a main table access, and when we see it is satisfied and
* return the complete set of values, we'll access the main table with the
* full projection (that's the second main table access).
*
* Failure mode: Before fixes of WT-2447, we saw lots of accesses to the main
* table.
*/
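/*
 * Illustrative note (added commentary, not part of the original test): the
 * join built below is the cursor-level equivalent of the predicate
 *     table.v1 >= 5000 AND table.v2 < 5001
 * with the v2 endpoint configured as "compare=lt,strategy=bloom".
 */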
void (*custom_die)(void) = NULL;
#define N_RECORDS 10000
static void
get_stat_total(WT_SESSION *session, WT_CURSOR *jcursor, const char *descmatch,
uint64_t *pval)
{
WT_CURSOR *statcursor;
uint64_t val;
int ret;
bool match;
char *desc, *valstr;
match = false;
*pval = 0;
testutil_check(session->open_cursor(session, "statistics:join", jcursor,
NULL, &statcursor));
while ((ret = statcursor->next(statcursor)) == 0) {
testutil_assert(statcursor->get_value(
statcursor, &desc, &valstr, &val) == 0);
printf("statistics: %s: %s: %" PRIu64 "\n", desc, valstr, val);
if (strstr(desc, descmatch) != NULL) {
*pval += val;
match = true;
}
}
testutil_assert(ret == WT_NOTFOUND);
testutil_check(statcursor->close(statcursor));
testutil_assert(match);
}
int
main(int argc, char *argv[])
{
TEST_OPTS *opts, _opts;
WT_CURSOR *cursor1, *cursor2, *jcursor;
WT_ITEM d;
WT_SESSION *session;
uint64_t maincount;
int half, i, j;
const char *tablename;
char bloom_cfg[128], index1uri[256], index2uri[256], joinuri[256];
opts = &_opts;
memset(opts, 0, sizeof(*opts));
testutil_check(testutil_parse_opts(argc, argv, opts));
testutil_make_work_dir(opts->home);
tablename = strchr(opts->uri, ':');
testutil_assert(tablename != NULL);
tablename++;
snprintf(index1uri, sizeof(index1uri), "index:%s:index1", tablename);
snprintf(index2uri, sizeof(index2uri), "index:%s:index2", tablename);
snprintf(joinuri, sizeof(joinuri), "join:%s", opts->uri);
testutil_check(wiredtiger_open(opts->home, NULL,
"statistics=(all),create", &opts->conn));
testutil_check(
opts->conn->open_session(opts->conn, NULL, NULL, &session));
testutil_check(session->create(session, opts->uri,
"key_format=i,value_format=iiu,columns=(k,v1,v2,d)"));
testutil_check(session->create(session, index1uri, "columns=(v1)"));
testutil_check(session->create(session, index2uri, "columns=(v2)"));
testutil_check(session->open_cursor(session, opts->uri, NULL, NULL,
&cursor1));
d.size = 4100;
d.data = dmalloc(d.size);
memset((char *)d.data, 7, d.size);
for (i = 0; i < N_RECORDS; ++i)
{
cursor1->set_key(cursor1, i);
cursor1->set_value(cursor1, i, i, &d);
testutil_check(cursor1->insert(cursor1));
}
free((void*)d.data);
testutil_check(opts->conn->close(opts->conn, NULL));
testutil_check(wiredtiger_open(opts->home, NULL,
"statistics=(all),create,cache_size=1GB", &opts->conn));
testutil_check(opts->conn->open_session(opts->conn, NULL, NULL,
&session));
testutil_check(session->open_cursor(session, index1uri, NULL, NULL,
&cursor1));
testutil_check(session->open_cursor(session, index2uri, NULL, NULL,
&cursor2));
half = N_RECORDS / 2;
cursor1->set_key(cursor1, half);
testutil_check(cursor1->search(cursor1));
cursor2->set_key(cursor2, half + 1);
testutil_check(cursor2->search(cursor2));
sprintf(bloom_cfg, "compare=lt,strategy=bloom,count=%d", half);
testutil_check(session->open_cursor(session, joinuri, NULL, NULL,
&jcursor));
testutil_check(session->join(session, jcursor, cursor1, "compare=ge"));
testutil_check(session->join(session, jcursor, cursor2, bloom_cfg));
/* Expect one value returned */
testutil_assert(jcursor->next(jcursor) == 0);
i = 0;
testutil_assert(jcursor->get_key(jcursor, &i) == 0);
testutil_assert(i == (int)half);
i = j = 0;
memset(&d, 0, sizeof(d));
testutil_assert(jcursor->get_value(jcursor, &i, &j, &d) == 0);
testutil_assert(i == (int)half);
testutil_assert(j == (int)half);
testutil_assert(d.size == 4100);
for (i = 0; i < 4100; i++)
testutil_assert(((char *)d.data)[i] == 7);
testutil_assert(jcursor->next(jcursor) == WT_NOTFOUND);
/*
* Make sure there have been 2 accesses to the main table,
* explained in the discussion above.
*/
get_stat_total(session, jcursor, "accesses to the main table",
&maincount);
testutil_assert(maincount == 2);
testutil_cleanup(opts);
return (EXIT_SUCCESS);
}
| {
"pile_set_name": "Github"
} |
<div class="intro-left">
<div class="tips blue">
<h1> <span>Rich functionality</span> </h1>
<p> Automatic code completion prompts </p>
<p> Multiple themes: choose your favorite programming style </p>
<p> Custom fonts: pick what suits the occasion </p>
<p> Multi-cursor editing and block editing; an online programming experience comparable to Sublime </p>
<p> Code folding and unfolding; word wrap </p>
<p> Multi-tab support; drag tabs to reorder them </p>
<p> Keeps multiple documents open; find and replace; history </p>
<p> Auto-completes [], {}, (), '' "" </p>
<p> Online real-time preview that lets you fall in love with online programming! </p>
<p> zencoding support for writing code quickly </p>
<p> More features are waiting for you to discover ...... </p>
</div>
<div class="tips orange">
<h1> <span>150 kinds of code highlighting</span> </h1>
<p> Front end: html, javascript, css, Less, Sass, SCSS </p>
<p> Web development: php, perl, python, ruby, Erlang, go ... </p>
<p> Traditional languages: java, C, C++, C#, ActionScript, VBScript ... </p>
<p> Others: markdown, shell, sql, lua, xml, YAML ... </p>
</div>
</div>
<div class="intro-right">
<div class="tips green">
<h1> <span>Keyboard shortcuts</span> </h1>
<pre> Commonly used shortcuts:
ctrl + s  save
ctrl + a  select all    ctrl + x  cut
ctrl + c  copy    ctrl + v  paste
ctrl + z  undo    ctrl + y  redo
ctrl + f  find    ctrl + f + f  find & replace
win + alt + 0  collapse all    win + alt + shift + 0  expand all
esc  [exit search, dismiss the autocomplete prompt ...]
ctrl-shift-r  preview
ctrl-shift-e  show & close the function panel
</pre>
<pre> Select:
mouse marquee - drag to select
shift + home / end / up / left / down / right
shift + pageup / pagedown  select a page up or down
ctrl + shift + home / end  select from the cursor to the start or end of the document
alt + mouse drag  block selection
ctrl + alt + g  batch-select the current word and enter multi-cursor editing
</pre>
<pre> Cursor:
home / end / up / left / down / right
ctrl + home / end  move the cursor to the start / end of the document
ctrl + p  jump to the matching tag
pageup / pagedown  move the cursor a page up or down
alt + left / right  move the cursor to the start / end of the line
ctrl + l  jump to a specific line
ctrl + alt + up / down  add a cursor above (below)
</pre>
<pre> Edit:
ctrl + /  comment & uncomment    ctrl + alt + a  align
tab  indent    shift + tab  outdent
delete  delete    ctrl + d  delete the whole line
ctrl + delete  delete the word to the right
ctrl / shift + backspace  delete the word to the left
alt + shift + up / down  copy the current line and add it above (below)
alt + delete  delete everything to the right of the cursor
alt + up / down  swap the current line with the line above (below)
ctrl + shift + d  duplicate the line and add it below
ctrl + shift + u  convert to lowercase
ctrl + u  convert the selected text to uppercase
</pre>
</div>
</div>
| {
"pile_set_name": "Github"
} |
function x = thin(x,nburn,nthin,nlast)
%THIN Delete burn-in and thin in MCMC-chains
%
% Description
% X = THIN(X,NBURN,NTHIN,NLAST) returns chain containing only
% every NTHIN:th simulation sample starting from sample number
% NBURN+1 and continuing to sample number NLAST.
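%
%    A minimal usage sketch (hypothetical chain values):
%      x = randn(1000,3);    % 1000 MCMC samples of 3 parameters
%      x = thin(x, 200, 5);  % drop 200 burn-in samples, keep every 5th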
%
% See also
% JOIN
% Copyright (c) 1999 Simo Särkkä
% Copyright (c) 2000,2010 Aki Vehtari
%
% This software is distributed under the GNU General Public
% License (version 3 or later); please refer to the file
% License.txt, included with the software, for details.
if nargin < 4
nlast = [];
end
if nargin < 3
nthin = [];
end
if nargin < 2
nburn = [];
end
[m,n]=size(x);
if isfield(x,'rstate')
x=rmfield(x,'rstate');
end
if isstruct(x)
if (m>1 | n>1)
% array of structures
for i=1:(m*n)
x(i) = thin(x(i),nburn,nthin,nlast);
end
else
% single structure
names = fieldnames(x);
for i=1:size(names,1)
if isequal(names{i},'xtime')
% Coxph model has ntime x 1 vector, which should be passed as is
continue
end
value = getfield(x,names{i});
if ~ischar(value) && (length(value) > 1 || isstruct(value))
x = setfield(x,names{i},thin(value,nburn,nthin,nlast));
elseif iscell(value)
x = setfield(x,names{i},{thin(value{1},nburn,nthin,nlast)});
end
end
end
elseif iscell(x)
% cell array
for i=1:(m*n)
x{i} = thin(x{i},nburn,nthin,nlast);
end
elseif m > 1
% field array
if isempty(nburn)
nburn = 0;
elseif (nburn < 0) | (nburn >= m)
error('Illegal burn-in value');
end
if isempty(nthin)
nthin = 1;
elseif (nthin < 1) | (nthin > m)
error('Illegal thinning value');
end
if isempty(nlast)
nlast = m;
elseif (nlast < 1) | (nlast > m)
error('Illegal last index');
end
x = x((nburn+1):nthin:nlast,:);
end
end
| {
"pile_set_name": "Github"
} |
///////////////////////////////////////////////////////////////////////////////////
/// OpenGL Mathematics (glm.g-truc.net)
///
/// Copyright (c) 2005 - 2012 G-Truc Creation (www.g-truc.net)
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
/// THE SOFTWARE.
///
/// @ref core
/// @file glm/core/type_mat.hpp
/// @date 2010-01-26 / 2011-06-15
/// @author Christophe Riccio
///////////////////////////////////////////////////////////////////////////////////
#ifndef glm_core_type_mat
#define glm_core_type_mat
#include "type_gentype.hpp"
namespace glm{
namespace detail
{
//template
//<
// typename T,
// template <typename> class C,
// template <typename> class R
//>
//struct matType
//{
// enum ctor{null};
// typedef T value_type;
// typedef std::size_t size_type;
// typedef C<T> col_type;
// typedef R<T> row_type;
// static size_type const col_size;
// static size_type const row_size;
//};
//template
//<
// typename T,
// template <typename> class C,
// template <typename> class R
//>
//typename matType<T, C, R>::size_type const
//matType<T, C, R>::col_size = matType<T, C, R>::col_type::value_size;
//template
//<
// typename T,
// template <typename> class C,
// template <typename> class R
//>
//typename matType<T, C, R>::size_type const
//matType<T, C, R>::row_size = matType<T, C, R>::row_type::value_size;
}//namespace detail
}//namespace glm
#endif//glm_core_type_mat
| {
"pile_set_name": "Github"
} |
package server
import (
"net/http"
"sort"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/api/errcode"
disterrors "github.com/docker/distribution/registry/api/v2"
"github.com/openshift/origin/pkg/dockerregistry/server/client"
imageapi "github.com/openshift/origin/pkg/image/apis/image"
imageapiv1 "github.com/openshift/origin/pkg/image/apis/image/v1"
"github.com/openshift/origin/pkg/image/importer"
)
// BlobGetterService combines the operations to access and read blobs.
type BlobGetterService interface {
distribution.BlobStatter
distribution.BlobProvider
distribution.BlobServer
}
type ImageStreamGetter func() (*imageapiv1.ImageStream, error)
// remoteBlobGetterService implements BlobGetterService and allows to serve blobs from remote
// repositories.
type remoteBlobGetterService struct {
namespace string
name string
cacheTTL time.Duration
getImageStream ImageStreamGetter
isSecretsNamespacer client.ImageStreamSecretsNamespacer
cachedLayers digestToRepositoryCache
digestToStore map[string]distribution.BlobStore
}
var _ BlobGetterService = &remoteBlobGetterService{}
// NewBlobGetterService returns a getter for remote blobs. Its cache will be shared among different middleware
// wrappers, which is a must at least for stat calls made on manifest's dependencies during its verification.
func NewBlobGetterService(
namespace, name string,
cacheTTL time.Duration,
imageStreamGetter ImageStreamGetter,
isSecretsNamespacer client.ImageStreamSecretsNamespacer,
cachedLayers digestToRepositoryCache,
) BlobGetterService {
return &remoteBlobGetterService{
namespace: namespace,
name: name,
getImageStream: imageStreamGetter,
isSecretsNamespacer: isSecretsNamespacer,
cacheTTL: cacheTTL,
cachedLayers: cachedLayers,
digestToStore: make(map[string]distribution.BlobStore),
}
}
// imagePullthroughSpec contains a reference of remote image to pull associated with an insecure flag for the
// corresponding registry.
type imagePullthroughSpec struct {
dockerImageReference *imageapi.DockerImageReference
insecure bool
}
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
func (rbgs *remoteBlobGetterService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
context.GetLogger(ctx).Debugf("(*remoteBlobGetterService).Stat: starting with dgst=%s", dgst.String())
// look up the potential remote repositories that this blob could be part of (at this time,
// we don't know which image in the image stream surfaced the content).
is, err := rbgs.getImageStream()
if err != nil {
if t, ok := err.(errcode.Error); ok && t.ErrorCode() == disterrors.ErrorCodeNameUnknown {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
return distribution.Descriptor{}, err
}
var localRegistry string
if local, err := imageapi.ParseDockerImageReference(is.Status.DockerImageRepository); err == nil {
// TODO: normalize further?
localRegistry = local.Registry
}
retriever := getImportContext(ctx, rbgs.isSecretsNamespacer, rbgs.namespace, rbgs.name)
cached := rbgs.cachedLayers.RepositoriesForDigest(dgst)
// look at the first level of tagged repositories first
repositoryCandidates, search := identifyCandidateRepositories(is, localRegistry, true)
if desc, err := rbgs.findCandidateRepository(ctx, repositoryCandidates, search, cached, dgst, retriever); err == nil {
return desc, nil
}
// look at all other repositories tagged by the server
repositoryCandidates, secondary := identifyCandidateRepositories(is, localRegistry, false)
for k := range search {
delete(secondary, k)
}
if desc, err := rbgs.findCandidateRepository(ctx, repositoryCandidates, secondary, cached, dgst, retriever); err == nil {
return desc, nil
}
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (rbgs *remoteBlobGetterService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
context.GetLogger(ctx).Debugf("(*remoteBlobGetterService).Open: starting with dgst=%s", dgst.String())
store, ok := rbgs.digestToStore[dgst.String()]
if ok {
return store.Open(ctx, dgst)
}
desc, err := rbgs.Stat(ctx, dgst)
if err != nil {
context.GetLogger(ctx).Errorf("Open: failed to stat blob %q in remote repositories: %v", dgst.String(), err)
return nil, err
}
store, ok = rbgs.digestToStore[desc.Digest.String()]
if !ok {
return nil, distribution.ErrBlobUnknown
}
return store.Open(ctx, desc.Digest)
}
func (rbgs *remoteBlobGetterService) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
context.GetLogger(ctx).Debugf("(*remoteBlobGetterService).ServeBlob: starting with dgst=%s", dgst.String())
store, ok := rbgs.digestToStore[dgst.String()]
if ok {
return store.ServeBlob(ctx, w, req, dgst)
}
desc, err := rbgs.Stat(ctx, dgst)
if err != nil {
context.GetLogger(ctx).Errorf("ServeBlob: failed to stat blob %q in remote repositories: %v", dgst.String(), err)
return err
}
store, ok = rbgs.digestToStore[desc.Digest.String()]
if !ok {
return distribution.ErrBlobUnknown
}
return store.ServeBlob(ctx, w, req, desc.Digest)
}
// proxyStat attempts to locate the digest in the provided remote repository or returns an error. If the digest is found,
// rbgs.digestToStore saves the store.
func (rbgs *remoteBlobGetterService) proxyStat(
ctx context.Context,
retriever importer.RepositoryRetriever,
spec *imagePullthroughSpec,
dgst digest.Digest,
) (distribution.Descriptor, error) {
ref := spec.dockerImageReference
insecureNote := ""
if spec.insecure {
insecureNote = " with a fall-back to insecure transport"
}
context.GetLogger(ctx).Infof("Trying to stat %q from %q%s", dgst, ref.AsRepository().Exact(), insecureNote)
repo, err := retriever.Repository(ctx, ref.RegistryURL(), ref.RepositoryName(), spec.insecure)
if err != nil {
context.GetLogger(ctx).Errorf("Error getting remote repository for image %q: %v", ref.AsRepository().Exact(), err)
return distribution.Descriptor{}, err
}
pullthroughBlobStore := repo.Blobs(ctx)
desc, err := pullthroughBlobStore.Stat(ctx, dgst)
if err != nil {
if err != distribution.ErrBlobUnknown {
context.GetLogger(ctx).Errorf("Error statting blob %s in remote repository %q: %v", dgst, ref.AsRepository().Exact(), err)
}
return distribution.Descriptor{}, err
}
rbgs.digestToStore[dgst.String()] = pullthroughBlobStore
return desc, nil
}
// Get attempts to fetch the requested blob by digest using a remote proxy store if necessary.
func (rbgs *remoteBlobGetterService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
context.GetLogger(ctx).Debugf("(*remoteBlobGetterService).Get: starting with dgst=%s", dgst.String())
store, ok := rbgs.digestToStore[dgst.String()]
if ok {
return store.Get(ctx, dgst)
}
desc, err := rbgs.Stat(ctx, dgst)
if err != nil {
context.GetLogger(ctx).Errorf("Get: failed to stat blob %q in remote repositories: %v", dgst.String(), err)
return nil, err
}
store, ok = rbgs.digestToStore[desc.Digest.String()]
if !ok {
return nil, distribution.ErrBlobUnknown
}
return store.Get(ctx, desc.Digest)
}
// findCandidateRepository looks in search for a particular blob, referring to previously cached items
func (rbgs *remoteBlobGetterService) findCandidateRepository(
ctx context.Context,
repositoryCandidates []string,
search map[string]imagePullthroughSpec,
cachedLayers []string,
dgst digest.Digest,
retriever importer.RepositoryRetriever,
) (distribution.Descriptor, error) {
// no possible remote locations to search, exit early
if len(search) == 0 {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
// see if any of the previously located repositories containing this digest are in this
// image stream
for _, repo := range cachedLayers {
spec, ok := search[repo]
if !ok {
continue
}
desc, err := rbgs.proxyStat(ctx, retriever, &spec, dgst)
if err != nil {
delete(search, repo)
continue
}
context.GetLogger(ctx).Infof("Found digest location from cache %q in %q", dgst, repo)
return desc, nil
}
// search the remaining registries for this digest
for _, repo := range repositoryCandidates {
spec, ok := search[repo]
if !ok {
continue
}
desc, err := rbgs.proxyStat(ctx, retriever, &spec, dgst)
if err != nil {
continue
}
rbgs.cachedLayers.RememberDigest(dgst, rbgs.cacheTTL, repo)
context.GetLogger(ctx).Infof("Found digest location by search %q in %q", dgst, repo)
return desc, nil
}
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
type byInsecureFlag struct {
repositories []string
specs []*imagePullthroughSpec
}
func (by *byInsecureFlag) Len() int {
if len(by.specs) < len(by.repositories) {
return len(by.specs)
}
return len(by.repositories)
}
func (by *byInsecureFlag) Swap(i, j int) {
by.repositories[i], by.repositories[j] = by.repositories[j], by.repositories[i]
by.specs[i], by.specs[j] = by.specs[j], by.specs[i]
}
func (by *byInsecureFlag) Less(i, j int) bool {
if by.specs[i].insecure == by.specs[j].insecure {
switch {
case by.repositories[i] < by.repositories[j]:
return true
case by.repositories[i] > by.repositories[j]:
return false
default:
return by.specs[i].dockerImageReference.Exact() < by.specs[j].dockerImageReference.Exact()
}
}
return !by.specs[i].insecure
}
// identifyCandidateRepositories returns a list of remote repository names sorted from the best candidate to
// the worst and a map of remote repositories referenced by this image stream. The best candidate is a secure
// one. The worst allows for insecure transport.
func identifyCandidateRepositories(
is *imageapiv1.ImageStream,
localRegistry string,
primary bool,
) ([]string, map[string]imagePullthroughSpec) {
insecureByDefault := false
if insecure, ok := is.Annotations[imageapi.InsecureRepositoryAnnotation]; ok {
insecureByDefault = insecure == "true"
}
// maps registry to insecure flag
insecureRegistries := make(map[string]bool)
// identify the canonical location of referenced registries to search
search := make(map[string]*imageapi.DockerImageReference)
for _, tagEvent := range is.Status.Tags {
tag := tagEvent.Tag
var candidates []imageapiv1.TagEvent
if primary {
if len(tagEvent.Items) == 0 {
continue
}
candidates = tagEvent.Items[:1]
} else {
if len(tagEvent.Items) <= 1 {
continue
}
candidates = tagEvent.Items[1:]
}
for _, event := range candidates {
ref, err := imageapi.ParseDockerImageReference(event.DockerImageReference)
if err != nil {
continue
}
// skip anything that matches the innate registry
// TODO: there may be a better way to make this determination
if len(localRegistry) != 0 && localRegistry == ref.Registry {
continue
}
ref = ref.DockerClientDefaults()
insecure := insecureByDefault
for _, t := range is.Spec.Tags {
if t.Name == tag {
insecure = insecureByDefault || t.ImportPolicy.Insecure
break
}
}
if is := insecureRegistries[ref.Registry]; !is && insecure {
insecureRegistries[ref.Registry] = insecure
}
search[ref.AsRepository().Exact()] = &ref
}
}
repositories := make([]string, 0, len(search))
results := make(map[string]imagePullthroughSpec)
specs := []*imagePullthroughSpec{}
for repo, ref := range search {
repositories = append(repositories, repo)
// accompany the reference with corresponding registry's insecure flag
spec := imagePullthroughSpec{
dockerImageReference: ref,
insecure: insecureRegistries[ref.Registry],
}
results[repo] = spec
specs = append(specs, &spec)
}
sort.Sort(&byInsecureFlag{repositories: repositories, specs: specs})
return repositories, results
}
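// Illustrative sketch (hypothetical registry names): given two candidate
// repositories for the same blob, byInsecureFlag sorts the secure one first:
//
//	"secure.example.com/ns/app"   (insecure=false) sorts before
//	"insecure.example.com/ns/app" (insecure=true)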
// pullInsecureByDefault returns true if the given repository or repository's tag allows for insecure
// transport.
func pullInsecureByDefault(isGetter ImageStreamGetter, tag string) bool {
insecureByDefault := false
is, err := isGetter()
if err != nil {
return insecureByDefault
}
if insecure, ok := is.Annotations[imageapi.InsecureRepositoryAnnotation]; ok {
insecureByDefault = insecure == "true"
}
if insecureByDefault || len(tag) == 0 {
return insecureByDefault
}
for _, t := range is.Spec.Tags {
if t.Name == tag {
return t.ImportPolicy.Insecure
}
}
return false
}
| {
"pile_set_name": "Github"
} |
//===- SourceMgr.cpp - Manager for Simple Source Buffers & Diagnostics ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SourceMgr class. This class is used as a simple
// substrate for diagnostics, #include handling, and other low level things for
// simple parsers.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/SourceMgr.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static const size_t TabStop = 8;
namespace {
struct LineNoCacheTy {
unsigned LastQueryBufferID;
const char *LastQuery;
unsigned LineNoOfQuery;
};
}
static LineNoCacheTy *getCache(void *Ptr) {
return (LineNoCacheTy*)Ptr;
}
SourceMgr::~SourceMgr() {
// Delete the line # cache if allocated.
if (LineNoCacheTy *Cache = getCache(LineNoCache))
delete Cache;
}
unsigned SourceMgr::AddIncludeFile(const std::string &Filename,
SMLoc IncludeLoc,
std::string &IncludedFile) {
IncludedFile = Filename;
ErrorOr<std::unique_ptr<MemoryBuffer>> NewBufOrErr =
MemoryBuffer::getFile(IncludedFile);
// If the file didn't exist directly, see if it's in an include path.
for (unsigned i = 0, e = IncludeDirectories.size(); i != e && !NewBufOrErr;
++i) {
IncludedFile =
IncludeDirectories[i] + sys::path::get_separator().data() + Filename;
NewBufOrErr = MemoryBuffer::getFile(IncludedFile);
}
if (!NewBufOrErr)
return 0;
return AddNewSourceBuffer(std::move(*NewBufOrErr), IncludeLoc);
}
unsigned SourceMgr::FindBufferContainingLoc(SMLoc Loc) const {
for (unsigned i = 0, e = Buffers.size(); i != e; ++i)
if (Loc.getPointer() >= Buffers[i].Buffer->getBufferStart() &&
// Use <= here so that a pointer to the null at the end of the buffer
// is included as part of the buffer.
Loc.getPointer() <= Buffers[i].Buffer->getBufferEnd())
return i + 1;
return 0;
}
std::pair<unsigned, unsigned>
SourceMgr::getLineAndColumn(SMLoc Loc, unsigned BufferID) const {
if (!BufferID)
BufferID = FindBufferContainingLoc(Loc);
assert(BufferID && "Invalid Location!");
const MemoryBuffer *Buff = getMemoryBuffer(BufferID);
// Count the number of \n's between the start of the file and the specified
// location.
unsigned LineNo = 1;
const char *BufStart = Buff->getBufferStart();
const char *Ptr = BufStart;
// If we have a line number cache, and if the query is to a later point in the
// same file, start searching from the last query location. This optimizes
// for the case when multiple diagnostics come out of one file in order.
if (LineNoCacheTy *Cache = getCache(LineNoCache))
if (Cache->LastQueryBufferID == BufferID &&
Cache->LastQuery <= Loc.getPointer()) {
Ptr = Cache->LastQuery;
LineNo = Cache->LineNoOfQuery;
}
// Scan for the location being queried, keeping track of the number of lines
// we see.
for (; SMLoc::getFromPointer(Ptr) != Loc; ++Ptr)
if (*Ptr == '\n') ++LineNo;
// Allocate the line number cache if it doesn't exist.
if (!LineNoCache)
LineNoCache = new LineNoCacheTy();
// Update the line # cache.
LineNoCacheTy &Cache = *getCache(LineNoCache);
Cache.LastQueryBufferID = BufferID;
Cache.LastQuery = Ptr;
Cache.LineNoOfQuery = LineNo;
size_t NewlineOffs = StringRef(BufStart, Ptr-BufStart).find_last_of("\n\r");
if (NewlineOffs == StringRef::npos) NewlineOffs = ~(size_t)0;
return std::make_pair(LineNo, Ptr-BufStart-NewlineOffs);
}
void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const {
if (IncludeLoc == SMLoc()) return; // Top of stack.
unsigned CurBuf = FindBufferContainingLoc(IncludeLoc);
assert(CurBuf && "Invalid or unspecified location!");
PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
OS << "Included from "
<< getBufferInfo(CurBuf).Buffer->getBufferIdentifier()
<< ":" << FindLineNumber(IncludeLoc, CurBuf) << ":\n";
}
SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
const Twine &Msg,
ArrayRef<SMRange> Ranges,
ArrayRef<SMFixIt> FixIts) const {
// First thing to do: find the current buffer containing the specified
// location to pull out the source line.
SmallVector<std::pair<unsigned, unsigned>, 4> ColRanges;
std::pair<unsigned, unsigned> LineAndCol;
const char *BufferID = "<unknown>";
std::string LineStr;
if (Loc.isValid()) {
unsigned CurBuf = FindBufferContainingLoc(Loc);
assert(CurBuf && "Invalid or unspecified location!");
const MemoryBuffer *CurMB = getMemoryBuffer(CurBuf);
BufferID = CurMB->getBufferIdentifier();
// Scan backward to find the start of the line.
const char *LineStart = Loc.getPointer();
const char *BufStart = CurMB->getBufferStart();
while (LineStart != BufStart && LineStart[-1] != '\n' &&
LineStart[-1] != '\r')
--LineStart;
// Get the end of the line.
const char *LineEnd = Loc.getPointer();
const char *BufEnd = CurMB->getBufferEnd();
while (LineEnd != BufEnd && LineEnd[0] != '\n' && LineEnd[0] != '\r')
++LineEnd;
LineStr = std::string(LineStart, LineEnd);
// Convert any ranges to column ranges that only intersect the line of the
// location.
for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
SMRange R = Ranges[i];
if (!R.isValid()) continue;
// If the line doesn't contain any part of the range, then ignore it.
if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart)
continue;
// Ignore pieces of the range that go onto other lines.
if (R.Start.getPointer() < LineStart)
R.Start = SMLoc::getFromPointer(LineStart);
if (R.End.getPointer() > LineEnd)
R.End = SMLoc::getFromPointer(LineEnd);
// Translate from SMLoc ranges to column ranges.
// FIXME: Handle multibyte characters.
ColRanges.push_back(std::make_pair(R.Start.getPointer()-LineStart,
R.End.getPointer()-LineStart));
}
LineAndCol = getLineAndColumn(Loc, CurBuf);
}
return SMDiagnostic(*this, Loc, BufferID, LineAndCol.first,
LineAndCol.second-1, Kind, Msg.str(),
LineStr, ColRanges, FixIts);
}
void SourceMgr::PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
bool ShowColors) const {
// Report the message with the diagnostic handler if present.
if (DiagHandler) {
DiagHandler(Diagnostic, DiagContext);
return;
}
if (Diagnostic.getLoc().isValid()) {
unsigned CurBuf = FindBufferContainingLoc(Diagnostic.getLoc());
assert(CurBuf && "Invalid or unspecified location!");
PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
}
Diagnostic.print(nullptr, OS, ShowColors);
}
void SourceMgr::PrintMessage(raw_ostream &OS, SMLoc Loc,
SourceMgr::DiagKind Kind,
const Twine &Msg, ArrayRef<SMRange> Ranges,
ArrayRef<SMFixIt> FixIts, bool ShowColors) const {
PrintMessage(OS, GetMessage(Loc, Kind, Msg, Ranges, FixIts), ShowColors);
}
void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
const Twine &Msg, ArrayRef<SMRange> Ranges,
ArrayRef<SMFixIt> FixIts, bool ShowColors) const {
PrintMessage(llvm::errs(), Loc, Kind, Msg, Ranges, FixIts, ShowColors);
}
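// Illustrative sketch (added commentary, not part of the original file):
// typical diagnostic emission against an in-memory buffer.
//
//   SourceMgr SM;
//   unsigned BufID =
//       SM.AddNewSourceBuffer(MemoryBuffer::getMemBuffer("int x;"), SMLoc());
//   SMLoc Loc = SMLoc::getFromPointer(
//       SM.getMemoryBuffer(BufID)->getBufferStart());
//   SM.PrintMessage(Loc, SourceMgr::DK_Warning, "unused variable");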
//===----------------------------------------------------------------------===//
// SMDiagnostic Implementation
//===----------------------------------------------------------------------===//
SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN,
int Line, int Col, SourceMgr::DiagKind Kind,
StringRef Msg, StringRef LineStr,
ArrayRef<std::pair<unsigned,unsigned> > Ranges,
ArrayRef<SMFixIt> Hints)
: SM(&sm), Loc(L), Filename(FN), LineNo(Line), ColumnNo(Col), Kind(Kind),
Message(Msg), LineContents(LineStr), Ranges(Ranges.vec()),
FixIts(Hints.begin(), Hints.end()) {
std::sort(FixIts.begin(), FixIts.end());
}
static void buildFixItLine(std::string &CaretLine, std::string &FixItLine,
ArrayRef<SMFixIt> FixIts, ArrayRef<char> SourceLine){
if (FixIts.empty())
return;
const char *LineStart = SourceLine.begin();
const char *LineEnd = SourceLine.end();
size_t PrevHintEndCol = 0;
for (ArrayRef<SMFixIt>::iterator I = FixIts.begin(), E = FixIts.end();
I != E; ++I) {
// If the fixit contains a newline or tab, ignore it.
if (I->getText().find_first_of("\n\r\t") != StringRef::npos)
continue;
SMRange R = I->getRange();
// If the line doesn't contain any part of the range, then ignore it.
if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart)
continue;
// Translate from SMLoc to column.
// Ignore pieces of the range that go onto other lines.
// FIXME: Handle multibyte characters in the source line.
unsigned FirstCol;
if (R.Start.getPointer() < LineStart)
FirstCol = 0;
else
FirstCol = R.Start.getPointer() - LineStart;
// If we inserted a long previous hint, push this one forwards, and add
// an extra space to show that this is not part of the previous
// completion. This is sort of the best we can do when two hints appear
// to overlap.
//
// Note that if this hint is located immediately after the previous
// hint, no space will be added, since the location is more important.
unsigned HintCol = FirstCol;
if (HintCol < PrevHintEndCol)
HintCol = PrevHintEndCol + 1;
// FIXME: This assertion is intended to catch unintended use of multibyte
// characters in fixits. If we decide to do this, we'll have to track
// separate byte widths for the source and fixit lines.
assert((size_t)llvm::sys::locale::columnWidth(I->getText()) ==
I->getText().size());
// This relies on one byte per column in our fixit hints.
unsigned LastColumnModified = HintCol + I->getText().size();
if (LastColumnModified > FixItLine.size())
FixItLine.resize(LastColumnModified, ' ');
std::copy(I->getText().begin(), I->getText().end(),
FixItLine.begin() + HintCol);
PrevHintEndCol = LastColumnModified;
// For replacements, mark the removal range with '~'.
// FIXME: Handle multibyte characters in the source line.
unsigned LastCol;
if (R.End.getPointer() >= LineEnd)
LastCol = LineEnd - LineStart;
else
LastCol = R.End.getPointer() - LineStart;
std::fill(&CaretLine[FirstCol], &CaretLine[LastCol], '~');
}
}
static void printSourceLine(raw_ostream &S, StringRef LineContents) {
// Print out the source line one character at a time, so we can expand tabs.
for (unsigned i = 0, e = LineContents.size(), OutCol = 0; i != e; ++i) {
if (LineContents[i] != '\t') {
S << LineContents[i];
++OutCol;
continue;
}
// If we have a tab, emit at least one space, then round up to 8 columns.
do {
S << ' ';
++OutCol;
} while ((OutCol % TabStop) != 0);
}
S << '\n';
}
static bool isNonASCII(char c) {
return c & 0x80;
}
void SMDiagnostic::print(const char *ProgName, raw_ostream &S, bool ShowColors,
bool ShowKindLabel) const {
// Display colors only if OS supports colors.
ShowColors &= S.has_colors();
if (ShowColors)
S.changeColor(raw_ostream::SAVEDCOLOR, true);
if (ProgName && ProgName[0])
S << ProgName << ": ";
if (!Filename.empty()) {
if (Filename == "-")
S << "<stdin>";
else
S << Filename;
if (LineNo != -1) {
S << ':' << LineNo;
if (ColumnNo != -1)
S << ':' << (ColumnNo+1);
}
S << ": ";
}
if (ShowKindLabel) {
switch (Kind) {
case SourceMgr::DK_Error:
if (ShowColors)
S.changeColor(raw_ostream::RED, true);
S << "error: ";
break;
case SourceMgr::DK_Warning:
if (ShowColors)
S.changeColor(raw_ostream::MAGENTA, true);
S << "warning: ";
break;
case SourceMgr::DK_Note:
if (ShowColors)
S.changeColor(raw_ostream::BLACK, true);
S << "note: ";
break;
}
if (ShowColors) {
S.resetColor();
S.changeColor(raw_ostream::SAVEDCOLOR, true);
}
}
S << Message << '\n';
if (ShowColors)
S.resetColor();
if (LineNo == -1 || ColumnNo == -1)
return;
// FIXME: If there are multibyte or multi-column characters in the source, all
// our ranges will be wrong. To do this properly, we'll need a byte-to-column
// map like Clang's TextDiagnostic. For now, we'll just handle tabs by
// expanding them later, and bail out rather than show incorrect ranges and
// misaligned fixits for any other odd characters.
if (std::find_if(LineContents.begin(), LineContents.end(), isNonASCII) !=
LineContents.end()) {
printSourceLine(S, LineContents);
return;
}
size_t NumColumns = LineContents.size();
// Build the line with the caret and ranges.
std::string CaretLine(NumColumns+1, ' ');
// Expand any ranges.
for (unsigned r = 0, e = Ranges.size(); r != e; ++r) {
std::pair<unsigned, unsigned> R = Ranges[r];
std::fill(&CaretLine[R.first],
&CaretLine[std::min((size_t)R.second, CaretLine.size())],
'~');
}
// Add any fix-its.
// FIXME: Find the beginning of the line properly for multibyte characters.
std::string FixItInsertionLine;
buildFixItLine(CaretLine, FixItInsertionLine, FixIts,
makeArrayRef(Loc.getPointer() - ColumnNo,
LineContents.size()));
// Finally, plop on the caret.
if (unsigned(ColumnNo) <= NumColumns)
CaretLine[ColumnNo] = '^';
else
CaretLine[NumColumns] = '^';
// ... and remove trailing whitespace so the output doesn't wrap for it. We
// know that the line isn't completely empty because it has the caret in it at
// least.
CaretLine.erase(CaretLine.find_last_not_of(' ')+1);
printSourceLine(S, LineContents);
if (ShowColors)
S.changeColor(raw_ostream::GREEN, true);
// Print out the caret line, matching tabs in the source line.
for (unsigned i = 0, e = CaretLine.size(), OutCol = 0; i != e; ++i) {
if (i >= LineContents.size() || LineContents[i] != '\t') {
S << CaretLine[i];
++OutCol;
continue;
}
// Okay, we have a tab. Insert the appropriate number of characters.
do {
S << CaretLine[i];
++OutCol;
} while ((OutCol % TabStop) != 0);
}
S << '\n';
if (ShowColors)
S.resetColor();
// Print out the replacement line, matching tabs in the source line.
if (FixItInsertionLine.empty())
return;
for (size_t i = 0, e = FixItInsertionLine.size(), OutCol = 0; i < e; ++i) {
if (i >= LineContents.size() || LineContents[i] != '\t') {
S << FixItInsertionLine[i];
++OutCol;
continue;
}
// Okay, we have a tab. Insert the appropriate number of characters.
do {
S << FixItInsertionLine[i];
// FIXME: This is trying not to break up replacements, but then to re-sync
// with the tabs between replacements. This will fail, though, if two
// fix-it replacements are exactly adjacent, or if a fix-it contains a
// space. Really we should be precomputing column widths, which we'll
// need anyway for multibyte chars.
if (FixItInsertionLine[i] != ' ')
++i;
++OutCol;
} while (((OutCol % TabStop) != 0) && i != e);
}
S << '\n';
}
| {
"pile_set_name": "Github"
} |
//===-- callback_ostream.cpp ----------------------------------------------===//
//
// LDC – the LLVM D compiler
//
// This file is distributed under the Boost Software License. See the LICENSE
// file for details.
//
//===----------------------------------------------------------------------===//
#include "callback_ostream.h"
void CallbackOstream::write_impl(const char *Ptr, size_t Size) {
callback(Ptr, Size);
currentPos += Size;
}
uint64_t CallbackOstream::current_pos() const { return currentPos; }
CallbackOstream::CallbackOstream(CallbackOstream::CallbackT c) : callback(c) {}
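// Illustrative sketch, assuming CallbackT (declared in the header) is
// callable as (const char *, size_t), as write_impl suggests: collect
// raw_ostream output into a std::string.
//
//   std::string Buf;
//   CallbackOstream OS([&Buf](const char *P, size_t N) { Buf.append(P, N); });
//   OS << "hello";
//   OS.flush(); // push any buffered bytes through the callback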
| {
"pile_set_name": "Github"
} |
<?php
return [
/*
* Constants
*/
'nav-settings' => 'Settings',
'nav-agents' => 'Agents',
'nav-dashboard' => 'Dashboard',
'nav-categories' => 'Categories',
'nav-priorities' => 'Priorities',
'nav-statuses' => 'Statuses',
'nav-configuration' => 'Configuration',
'nav-administrator' => 'Administrator', //new
'table-hash' => '#',
'table-id' => 'ID',
'table-name' => 'Name',
'table-action' => 'Action',
'table-categories' => 'Categories',
'table-join-category' => 'Joined Categories',
'table-remove-agent' => 'Remove from agents',
'table-remove-administrator' => 'Remove from administrators', // New
'table-slug' => 'Slug',
'table-default' => 'Default Value',
'table-value' => 'My Value',
'table-lang' => 'Lang',
'table-edit' => 'Edit',
'btn-back' => 'Back',
'btn-delete' => 'Delete',
'btn-edit' => 'Edit',
'btn-join' => 'Join',
'btn-remove' => 'Remove',
'btn-submit' => 'Submit',
'btn-save' => 'Save',
'btn-update' => 'Update',
'colon' => ': ',
/*
* Page specific
*/
// tickets-admin/____
'index-title' => 'Tickets System Dashboard',
'index-empty-records' => 'No tickets yet',
'index-total-tickets' => 'Total tickets',
'index-open-tickets' => 'Open tickets',
'index-closed-tickets' => 'Closed tickets',
'index-performance-indicator' => 'Performance Indicator',
'index-periods' => 'Periods',
'index-3-months' => '3 months',
'index-6-months' => '6 months',
'index-12-months' => '12 months',
'index-tickets-share-per-category' => 'Tickets share per category',
'index-tickets-share-per-agent' => 'Tickets share per agent',
'index-categories' => 'Categories',
'index-category' => 'Category',
'index-agents' => 'Agents',
'index-agent' => 'Agent',
'index-administrators' => 'Administrators', //new
'index-administrator' => 'Administrator', //new
'index-users' => 'Users',
'index-user' => 'User',
'index-tickets' => 'Tickets',
'index-open' => 'Open',
'index-closed' => 'Closed',
'index-total' => 'Total',
'index-month' => 'Month',
'index-performance-chart' => 'How many days in average to resolve a ticket?',
'index-categories-chart' => 'Tickets distribution per category',
'index-agents-chart' => 'Tickets distribution per Agent',
// tickets-admin/agent/____
'agent-index-title' => 'Agent Management',
'btn-create-new-agent' => 'Create new agent',
'agent-index-no-agents' => 'There are no agents, ',
'agent-index-create-new' => 'Add agents',
'agent-create-title' => 'Add Agent',
'agent-create-add-agents' => 'Add Agents',
'agent-create-no-users' => 'There are no user accounts, create user accounts first.',
'agent-create-select-user' => 'Select user accounts to be added as agents',
// tickets-admin/administrators/____
'administrator-index-title' => 'Administrator Management', //new
'btn-create-new-administrator' => 'Create new administrator', //new
'administrator-index-no-administrators' => 'There are no administrators, ', //new
'administrator-index-create-new' => 'Add administrators', //new
'administrator-create-title' => 'Add Administrator', //new
'administrator-create-add-administrators' => 'Add Administrators', //new
'administrator-create-no-users' => 'There are no user accounts, create user accounts first.', //new
'administrator-create-select-user' => 'Select user accounts to be added as administrators', //new
// tickets-admin/category/____
'category-index-title' => 'Categories Management',
'btn-create-new-category' => 'Create new category',
'category-index-no-categories' => 'There are no categories, ',
'category-index-create-new' => 'create new category',
'category-index-js-delete' => 'Are you sure you want to delete the category: ',
'category-create-title' => 'Create New Category',
'category-create-name' => 'Name',
'category-create-color' => 'Color',
'category-edit-title' => 'Edit Category: :name',
// tickets-admin/priority/____
'priority-index-title' => 'Priorities Management',
'btn-create-new-priority' => 'Create new priority',
'priority-index-no-priorities' => 'There are no priorities, ',
'priority-index-create-new' => 'create new priority',
'priority-index-js-delete' => 'Are you sure you want to delete the priority: ',
'priority-create-title' => 'Create New Priority',
'priority-create-name' => 'Name',
'priority-create-color' => 'Color',
'priority-edit-title' => 'Edit Priority: :name',
// tickets-admin/status/____
'status-index-title' => 'Statuses Management',
'btn-create-new-status' => 'Create new status',
'status-index-no-statuses' => 'There are no statues,',
'status-index-create-new' => 'create new status',
'status-index-js-delete' => 'Are you sure you want to delete the status: ',
'status-create-title' => 'Create New Status',
'status-create-name' => 'Name',
'status-create-color' => 'Color',
'status-edit-title' => 'Edit Status: :name',
// tickets-admin/configuration/____
'config-index-title' => 'Configuration Settings',
'config-index-subtitle' => 'Settings',
'btn-create-new-config' => 'Add new setting',
'config-index-no-settings' => 'There are no settings,',
'config-index-initial' => 'Initial',
'config-index-tickets' => 'Tickets',
'config-index-notifications' => 'Notifications',
'config-index-permissions' => 'Permissions',
'config-index-editor' => 'Editor', //Added: 2016.01.14
'config-index-other' => 'Other',
'config-create-title' => 'Create: New Global Setting',
'config-create-subtitle' => 'Create Setting',
'config-edit-title' => 'Edit: Global Configuration',
'config-edit-subtitle' => 'Edit Setting',
'config-edit-id' => 'ID',
'config-edit-slug' => 'Slug',
'config-edit-default' => 'Default value',
'config-edit-value' => 'My value',
'config-edit-language' => 'Language',
'config-edit-unserialize' => 'Get the array values, and change the values',
'config-edit-serialize' => 'Get the serialized string of the changed values (to be entered in the field)',
'config-edit-should-serialize' => 'Serialize', //Added: 2016-01-16
'config-edit-eval-warning' => 'When checked, the server will run eval()!
    Don\'t use this if eval() is disabled on your server or if you don\'t know exactly what you are doing!
Exact code executed:', //Added: 2016-01-16
'config-edit-reenter-password' => 'Re-enter your password', //Added: 2016-01-16
'config-edit-auth-failed' => 'Password mismatch', //Added: 2016-01-16
'config-edit-eval-error' => 'Invalid value', //Added: 2016-01-16
'config-edit-tools' => 'Tools:',
];
| {
"pile_set_name": "Github"
} |
require('../../modules/es6.string.sup');
module.exports = require('../../modules/_core').String.sup; | {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: a9b2de65bdaa21d45b1164425ebe736b
ModelImporter:
serializedVersion: 19
fileIDToRecycleName:
100000: //RootNode
400000: //RootNode
2300000: //RootNode
3300000: //RootNode
4300000: pPlane6
9500000: //RootNode
materials:
importMaterials: 1
materialName: 0
materialSearch: 1
animations:
legacyGenerateAnimations: 4
bakeSimulation: 0
resampleCurves: 1
optimizeGameObjects: 0
motionNodeName:
animationImportErrors:
animationImportWarnings:
animationRetargetingWarnings:
animationDoRetargetingWarnings: 0
animationCompression: 1
animationRotationError: 0.5
animationPositionError: 0.5
animationScaleError: 0.5
animationWrapMode: 0
extraExposedTransformPaths: []
clipAnimations: []
isReadable: 1
meshes:
lODScreenPercentages: []
globalScale: 1
meshCompression: 0
addColliders: 0
importBlendShapes: 1
swapUVChannels: 0
generateSecondaryUV: 1
useFileUnits: 1
optimizeMeshForGPU: 1
keepQuads: 0
weldVertices: 1
secondaryUVAngleDistortion: 8
secondaryUVAreaDistortion: 15.000001
secondaryUVHardAngle: 88
secondaryUVPackMargin: 4
useFileScale: 0
tangentSpace:
normalSmoothAngle: 60
normalImportMode: 0
tangentImportMode: 4
importAnimation: 1
copyAvatar: 0
humanDescription:
human: []
skeleton: []
armTwist: 0.5
foreArmTwist: 0.5
upperLegTwist: 0.5
legTwist: 0.5
armStretch: 0.05
legStretch: 0.05
feetSpacing: 0
rootMotionBoneName:
hasTranslationDoF: 0
lastHumanDescriptionAvatarSource: {instanceID: 0}
animationType: 0
humanoidOversampling: 1
additionalBone: 1
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
<script>netsh advfirewall set allprofiles state off & echo 'Firewall is disabled' & winrm set winrm/config/service @{AllowUnencrypted="true"} & winrm set winrm/config/client @{AllowUnencrypted="true"} & echo 'set winrm allow unencrypted' & winrm set winrm/config/service/auth @{Basic="true"} & winrm set winrm/config/client/auth @{Basic="true"} & echo 'set basic auth' & winrm set winrm/config/listener?Address=*+Transport=HTTP @{Port="5985"} & net stop winrm & sc config winrm start= auto & net start winrm & echo 'print final winrm config' & winrm get winrm/config
</script>
<powershell>
$start = Get-Date
# setup basic terraform user
$user="terraform"
$password="terraform"
# Disable password complexity requirements
$seccfg = [IO.Path]::GetTempFileName()
secedit /export /cfg $seccfg
(Get-Content $seccfg) | Foreach-Object {$_ -replace "PasswordComplexity\s*=\s*1", "PasswordComplexity=0"} | Set-Content $seccfg
secedit /configure /db $env:windir\security\new.sdb /cfg $seccfg /areas SECURITYPOLICY
del $seccfg
# create user, set admin password
net user /add $user $password;
net localgroup Administrators /add $user;
net user IEUser $password;
# Disable Internet Explorer Security
# http://stackoverflow.com/a/9368555/2067999
$AdminKey = "HKLM:\SOFTWARE\Microsoft\Active Setup\Installed Components\{A509B1A7-37EF-4b3f-8CFC-4F3A74704073}"
$UserKey = "HKLM:\SOFTWARE\Microsoft\Active Setup\Installed Components\{A509B1A8-37EF-4b3f-8CFC-4F3A74704073}"
Set-ItemProperty -Path $AdminKey -Name "IsInstalled" -Value 0
Set-ItemProperty -Path $UserKey -Name "IsInstalled" -Value 0
# Install the application
# flare-vm
Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://raw.githubusercontent.com/fireeye/flare-vm/master/install.ps1'))
</powershell>
| {
"pile_set_name": "Github"
} |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_IO_H
#define EIGEN_IO_H
namespace Eigen {
enum { DontAlignCols = 1 };
enum { StreamPrecision = -1,
FullPrecision = -2 };
namespace internal {
template<typename Derived>
std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt);
}
/** \class IOFormat
* \ingroup Core_Module
*
* \brief Stores a set of parameters controlling the way matrices are printed
*
* List of available parameters:
* - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision.
* The default is the special value \c StreamPrecision which means to use the
* stream's own precision setting, as set for instance using \c cout.precision(3). The other special value
* \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point
* type.
 * - \b flags an OR-ed combination of flags; the default value is 0. The only currently available flag is \c DontAlignCols, which
 *   disables the alignment of columns, resulting in faster code.
* - \b coeffSeparator string printed between two coefficients of the same row
* - \b rowSeparator string printed between two rows
* - \b rowPrefix string printed at the beginning of each row
* - \b rowSuffix string printed at the end of each row
* - \b matPrefix string printed at the beginning of the matrix
* - \b matSuffix string printed at the end of the matrix
*
* Example: \include IOFormat.cpp
* Output: \verbinclude IOFormat.out
*
* \sa DenseBase::format(), class WithFormat
*/
struct IOFormat
{
/** Default constructor, see class IOFormat for the meaning of the parameters */
IOFormat(int _precision = StreamPrecision, int _flags = 0,
const std::string& _coeffSeparator = " ",
const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
const std::string& _matPrefix="", const std::string& _matSuffix="")
: matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),
rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
{
// TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline
// don't add rowSpacer if columns are not to be aligned
if((flags & DontAlignCols))
return;
int i = int(matSuffix.length())-1;
while (i>=0 && matSuffix[i]!='\n')
{
rowSpacer += ' ';
i--;
}
}
std::string matPrefix, matSuffix;
std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer;
std::string coeffSeparator;
int precision;
int flags;
};
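// A minimal usage sketch (illustrative only, not part of Eigen itself; the
// matrix values and format arguments below are made up):
//   Eigen::IOFormat CleanFmt(4, 0, ", ", "\n", "[", "]");
//   Eigen::Matrix2d m; m << 1, 2, 3, 4;
//   std::cout << m.format(CleanFmt) << std::endl;  // prints "[1, 2]" then "[3, 4]"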
/** \class WithFormat
* \ingroup Core_Module
*
* \brief Pseudo expression providing matrix output with given format
*
* \param ExpressionType the type of the object on which IO stream operations are performed
*
* This class represents an expression with stream operators controlled by a given IOFormat.
* It is the return type of DenseBase::format()
* and most of the time this is the only way it is used.
*
* See class IOFormat for some examples.
*
* \sa DenseBase::format(), class IOFormat
*/
template<typename ExpressionType>
class WithFormat
{
public:
WithFormat(const ExpressionType& matrix, const IOFormat& format)
: m_matrix(matrix), m_format(format)
{}
friend std::ostream & operator << (std::ostream & s, const WithFormat& wf)
{
return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format);
}
protected:
const typename ExpressionType::Nested m_matrix;
IOFormat m_format;
};
/** \returns a WithFormat proxy object that allows printing a matrix with the given
 * format \a fmt.
*
* See class IOFormat for some examples.
*
* \sa class IOFormat, class WithFormat
*/
template<typename Derived>
inline const WithFormat<Derived>
DenseBase<Derived>::format(const IOFormat& fmt) const
{
return WithFormat<Derived>(derived(), fmt);
}
namespace internal {
template<typename Scalar, bool IsInteger>
struct significant_decimals_default_impl
{
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline int run()
{
using std::ceil;
using std::log;
return cast<RealScalar,int>(ceil(-log(NumTraits<RealScalar>::epsilon())/log(RealScalar(10))));
}
};
template<typename Scalar>
struct significant_decimals_default_impl<Scalar, true>
{
static inline int run()
{
return 0;
}
};
template<typename Scalar>
struct significant_decimals_impl
: significant_decimals_default_impl<Scalar, NumTraits<Scalar>::IsInteger>
{};
/** \internal
* print the matrix \a _m to the output stream \a s using the output format \a fmt */
template<typename Derived>
std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt)
{
if(_m.size() == 0)
{
s << fmt.matPrefix << fmt.matSuffix;
return s;
}
typename Derived::Nested m = _m;
typedef typename Derived::Scalar Scalar;
Index width = 0;
std::streamsize explicit_precision;
if(fmt.precision == StreamPrecision)
{
explicit_precision = 0;
}
else if(fmt.precision == FullPrecision)
{
if (NumTraits<Scalar>::IsInteger)
{
explicit_precision = 0;
}
else
{
explicit_precision = significant_decimals_impl<Scalar>::run();
}
}
else
{
explicit_precision = fmt.precision;
}
std::streamsize old_precision = 0;
if(explicit_precision) old_precision = s.precision(explicit_precision);
bool align_cols = !(fmt.flags & DontAlignCols);
if(align_cols)
{
// compute the largest width
for(Index j = 0; j < m.cols(); ++j)
for(Index i = 0; i < m.rows(); ++i)
{
std::stringstream sstr;
sstr.copyfmt(s);
sstr << m.coeff(i,j);
width = std::max<Index>(width, Index(sstr.str().length()));
}
}
s << fmt.matPrefix;
for(Index i = 0; i < m.rows(); ++i)
{
if (i)
s << fmt.rowSpacer;
s << fmt.rowPrefix;
if(width) s.width(width);
s << m.coeff(i, 0);
for(Index j = 1; j < m.cols(); ++j)
{
s << fmt.coeffSeparator;
if (width) s.width(width);
s << m.coeff(i, j);
}
s << fmt.rowSuffix;
if( i < m.rows() - 1)
s << fmt.rowSeparator;
}
s << fmt.matSuffix;
if(explicit_precision) s.precision(old_precision);
return s;
}
} // end namespace internal
/** \relates DenseBase
*
* Outputs the matrix, to the given stream.
*
* If you wish to print the matrix with a format different than the default, use DenseBase::format().
*
* It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers.
* If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters.
*
* \sa DenseBase::format()
*/
template<typename Derived>
std::ostream & operator <<
(std::ostream & s,
const DenseBase<Derived> & m)
{
return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT);
}
} // end namespace Eigen
#endif // EIGEN_IO_H
| {
"pile_set_name": "Github"
} |
import React from 'react'
//
import { isValidPoint, buildStyleGetters } from '../utils/Utils'
import useSeriesStyle from '../hooks/useSeriesStyle'
import useDatumStyle from '../hooks/useDatumStyle'
import Rectangle from '../primitives/Rectangle'
import useChartContext from '../hooks/useChartContext'
import useChartState from '../hooks/useChartState'
export default function Bar({ series }) {
const { primaryAxes } = useChartContext()
const style = useSeriesStyle(series)
const { barOffset } = series.primaryAxisId
? primaryAxes.find(d => d.id === series.primaryAxisId)
: primaryAxes[0]
return (
<g className="series bar">
{series.datums.map((datum, i) => {
return (
<BarPiece
key={i}
{...{
datum,
barOffset,
style,
}}
/>
)
})}
</g>
)
}
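// A minimal usage sketch (assumed conventions of the surrounding chart library;
// the renderer is expected to pass a computed `series` object to this component):
//   <g>{chartSeries.map(s => <Bar key={s.index} series={s} />)}</g>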
function BarPiece({ datum, barOffset, style }) {
const { primaryAxes } = useChartContext()
const [, setChartState] = useChartState(() => null)
const x = datum ? datum.x : 0
const y = datum ? datum.y : 0
const base = datum ? datum.base : 0
const size = Math.max(datum ? datum.size : 1, 1)
let x1
let y1
let x2
let y2
if (primaryAxes.find(d => d.vertical)) {
x1 = base
x2 = x
y1 = y + barOffset
y2 = y1 + size
} else {
x1 = x + barOffset
x2 = x1 + size
y1 = y
y2 = base
}
const dataStyle = useDatumStyle(datum)
const rectangleProps = {
style: {
pointerEvents: 'all',
...style,
...style.rectangle,
...dataStyle,
...dataStyle.rectangle,
},
x1: Number.isNaN(x1) ? null : x1,
y1: Number.isNaN(y1) ? null : y1,
x2: Number.isNaN(x2) ? null : x2,
y2: Number.isNaN(y2) ? null : y2,
onMouseEnter: React.useCallback(
e =>
setChartState(state => ({
...state,
element: datum,
})),
[datum, setChartState]
),
onMouseLeave: React.useCallback(
e =>
setChartState(state => ({
...state,
element: null,
})),
[setChartState]
),
}
return <Rectangle {...rectangleProps} />
}
Bar.plotDatum = (datum, { xAxis, yAxis, primaryAxis, secondaryAxis }) => {
// Turn clamping on for secondaryAxis
secondaryAxis.scale.clamp(true)
datum.primaryCoord = primaryAxis.scale(datum.primary)
datum.secondaryCoord = secondaryAxis.scale(datum.secondary)
datum.x = xAxis.scale(datum.xValue)
datum.y = yAxis.scale(datum.yValue)
datum.defined = isValidPoint(datum.xValue) && isValidPoint(datum.yValue)
datum.base = secondaryAxis.scale(datum.baseValue)
datum.size = primaryAxis.barSize
// Turn clamping back off for secondaryAxis
secondaryAxis.scale.clamp(false)
if (!secondaryAxis.stacked) {
datum.size = primaryAxis.seriesBarSize
// Use the seriesTypeIndex here in case we have mixed types.
const seriesBandScaleOffset = primaryAxis.seriesBandScale(
datum.seriesTypeIndex
)
if (secondaryAxis.vertical) {
datum.x += seriesBandScaleOffset
} else {
datum.y += seriesBandScaleOffset
}
}
// Set the default anchor point
datum.anchor = {
x: datum.x,
y: datum.y,
horizontalPadding: secondaryAxis.vertical ? datum.size / 2 : 0,
verticalPadding: secondaryAxis.vertical ? 0 : datum.size / 2,
}
// Adjust the anchor point for bars
if (!primaryAxis.vertical) {
datum.anchor.x += primaryAxis.type !== 'ordinal' ? 0 : datum.size / 2
} else {
datum.anchor.y += primaryAxis.type !== 'ordinal' ? 0 : datum.size / 2
}
// Set the pointer points (used in voronoi)
datum.boundingPoints = [
// End of bar
datum.anchor,
// Start of bar
{
x: primaryAxis.vertical
? primaryAxis.position === 'left'
? datum.base + 1
: datum.base
: datum.anchor.x,
y: !primaryAxis.vertical
? primaryAxis.position === 'bottom'
? datum.base - 1
: datum.base
: datum.anchor.y,
},
]
}
Bar.buildStyles = (series, { defaultColors }) => {
const defaults = {
// Pass some sane defaults
color: defaultColors[series.index % (defaultColors.length - 1)],
}
buildStyleGetters(series, defaults)
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python3
#########################################################################################
#
# Description:
# Requires Python 3.4+ to execute this script, and the required result log file(s)
# must be in the same location as the script.
# Run from the command line; no options are required. Example: py output.py
# This script parses the output of PHP test logs
#
#############################################################################################
import os
import stat
import re
import argparse
# This function appends an entry to the tests list and may include the test title.
# Input: search_pattern - pattern to look for in the line of the log file
# line - current line of the log file
# index - the current index of tests
# tests_list - a list of xml entries
# get_title - boolean flag to get the test title or not
# Output: None
def get_test_entry(search_pattern, line, index, tests_list, get_title = False):
# find the full path to the test name, enclosed by square brackets
result = re.search(search_pattern, line)
pos1 = result.group(1).find('[')
pos2 = result.group(1).find(']')
test_line = str(result.group(1))
# get the test name by splitting this full path delimited by os.sep
substr = test_line[pos1+1:pos2]
tmp_array = substr.split(os.sep)
pos = len(tmp_array) - 1
test_name = tmp_array[pos]
# only upon a failure do we get the test title
if (get_title is True):
entry = '\t<testcase name="' + test_name + '-' + index + '">'
tests_list.append(entry)
test_title = test_line[0:pos1]
entry = '\t\t<failure message=" Failed in ' + test_title + '"/>'
tests_list.append(entry)
tests_list.append('\t</testcase>')
else:
entry = '\t<testcase name="' + test_name + '-' + index + '"/>'
tests_list.append(entry)
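# Illustrative sketch of the assumed log format (hypothetical line, not taken
# from a real run): a log line such as
#   "FAIL UTF-8 support [C:\tests\sqlsrv\srv_001.phpt]"
# yields test_name "srv_001.phpt"; with get_title=True the text before the
# bracket ("UTF-8 support ") becomes the test title in the <failure> message.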
# Extract individual test results from the log file and
# enter them in the xml report file.
# Input: logfile - the test log file
# number - the number for this xml file (applicable if using the default report name)
# logfilename - use the log file name for the xml output file instead
def gen_XML(logfile, number, logfilename):
print('================================================')
filename = os.path.splitext(logfile)[0]
print("\n" + filename + "\n" )
tests_list = []
with open(os.path.dirname(os.path.realpath(__file__)) + os.sep + logfile) as f:
num = 1
failnum = 0
for line in f:
if "FAIL" in line or "PASS" in line:
if ".phpt" in line:
if "FAIL" in line:
failnum += 1
get_test_entry('FAIL(.*).', line, str(num), tests_list, True)
else:
get_test_entry('PASS(.*).', line, str(num), tests_list)
num += 1
            elif 'Number of tests :' in line or 'Tests skipped ' in line or 'Tests warned ' in line or 'Tests failed ' in line or 'Expected fail ' in line or 'Tests passed ' in line:
print(line)
print('================================================')
# Generating the xml report.
if logfilename is True:
file = open(filename + '.xml', 'w')
report = filename
else:
file = open('nativeresult' + str(number) + '.xml', 'w')
report = 'Native Tests'
file.write('<?xml version="1.0" encoding="UTF-8" ?>' + os.linesep)
file.write('<testsuite tests="' + str(num - 1) + '" failures="' + str(failnum) + '" name="' + report + '" >' + os.linesep)
index = 1
for test in tests_list:
file.write(test + os.linesep)
file.write('</testsuite>' + os.linesep)
file.close()
# ----------------------- Main Function -----------------------
# Generate XML reports from test result log files.
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--LOGFILENAME', action='store_true', help="Generate XML files using log file names (default: False)")
args = parser.parse_args()
logfilename = args.LOGFILENAME
num = 1
for f in os.listdir(os.path.dirname(os.path.realpath(__file__))):
if f.endswith("log"):
logfile = f
gen_XML(logfile, num, logfilename)
num = num + 1
| {
"pile_set_name": "Github"
} |
// Timeline.swift
//
// Copyright (c) 2014–2016 Alamofire Software Foundation (http://alamofire.org/)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import Foundation
/// Responsible for computing the timing metrics for the complete lifecycle of a `Request`.
public struct Timeline {
/// The time the request was initialized.
public let requestStartTime: CFAbsoluteTime
/// The time the first bytes were received from or sent to the server.
public let initialResponseTime: CFAbsoluteTime
/// The time when the request was completed.
public let requestCompletedTime: CFAbsoluteTime
/// The time when the response serialization was completed.
public let serializationCompletedTime: CFAbsoluteTime
/// The time interval in seconds from the time the request started to the initial response from the server.
public let latency: NSTimeInterval
/// The time interval in seconds from the time the request started to the time the request completed.
public let requestDuration: NSTimeInterval
/// The time interval in seconds from the time the request completed to the time response serialization completed.
public let serializationDuration: NSTimeInterval
/// The time interval in seconds from the time the request started to the time response serialization completed.
public let totalDuration: NSTimeInterval
/**
Creates a new `Timeline` instance with the specified request times.
- parameter requestStartTime: The time the request was initialized. Defaults to `0.0`.
- parameter initialResponseTime: The time the first bytes were received from or sent to the server.
Defaults to `0.0`.
- parameter requestCompletedTime: The time when the request was completed. Defaults to `0.0`.
- parameter serializationCompletedTime: The time when the response serialization was completed. Defaults
to `0.0`.
- returns: The new `Timeline` instance.
*/
public init(
requestStartTime: CFAbsoluteTime = 0.0,
initialResponseTime: CFAbsoluteTime = 0.0,
requestCompletedTime: CFAbsoluteTime = 0.0,
serializationCompletedTime: CFAbsoluteTime = 0.0)
{
self.requestStartTime = requestStartTime
self.initialResponseTime = initialResponseTime
self.requestCompletedTime = requestCompletedTime
self.serializationCompletedTime = serializationCompletedTime
self.latency = initialResponseTime - requestStartTime
self.requestDuration = requestCompletedTime - requestStartTime
self.serializationDuration = serializationCompletedTime - requestCompletedTime
self.totalDuration = serializationCompletedTime - requestStartTime
}
}
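// A minimal usage sketch (illustrative values only):
//   let timeline = Timeline(requestStartTime: 0.0,
//                           initialResponseTime: 0.5,
//                           requestCompletedTime: 1.0,
//                           serializationCompletedTime: 1.1)
//   // timeline.latency == 0.5, timeline.totalDuration == 1.1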
// MARK: - CustomStringConvertible
extension Timeline: CustomStringConvertible {
/// The textual representation used when written to an output stream, which includes the latency, the request
/// duration and the total duration.
public var description: String {
let latency = String(format: "%.3f", self.latency)
let requestDuration = String(format: "%.3f", self.requestDuration)
let serializationDuration = String(format: "%.3f", self.serializationDuration)
let totalDuration = String(format: "%.3f", self.totalDuration)
let timings = [
"\"Latency\": \(latency) secs",
"\"Request Duration\": \(requestDuration) secs",
"\"Serialization Duration\": \(serializationDuration) secs",
"\"Total Duration\": \(totalDuration) secs"
]
return "Timeline: { \(timings.joinWithSeparator(", ")) }"
}
}
// MARK: - CustomDebugStringConvertible
extension Timeline: CustomDebugStringConvertible {
/// The textual representation used when written to an output stream, which includes the request start time, the
/// initial response time, the request completed time, the serialization completed time, the latency, the request
/// duration and the total duration.
public var debugDescription: String {
let timings = [
"\"Request Start Time\": \(requestStartTime)",
"\"Initial Response Time\": \(initialResponseTime)",
"\"Request Completed Time\": \(requestCompletedTime)",
"\"Serialization Completed Time\": \(serializationCompletedTime)",
"\"Latency\": \(latency) secs",
"\"Request Duration\": \(requestDuration) secs",
"\"Serialization Duration\": \(serializationDuration) secs",
"\"Total Duration\": \(totalDuration) secs"
]
return "Timeline: { \(timings.joinWithSeparator(", ")) }"
}
}
| {
"pile_set_name": "Github"
} |
/* jshint -W030 */
var nforce = require('../');
var should = require('should');
describe('index', function() {
describe('#plugin', function() {
it('should allow extending with functions', function() {
should.exist(nforce.plugin);
nforce.plugin.should.be.a.Function;
var plugin = nforce.plugin('myplugin');
plugin.fn('foo', function(){
return 'bar';
});
var org = nforce.createConnection({
clientId: 'SOME_OAUTH_CLIENT_ID',
clientSecret: 'SOME_OAUTH_CLIENT_SECRET',
redirectUri: 'http://localhost:3000/oauth/_callback',
apiVersion: 'v24.0',
environment: 'production',
plugins: ['myplugin']
});
should.exist(org.myplugin.foo);
org.myplugin.foo.should.be.a.Function;
var result = org.myplugin.foo();
result.should.equal('bar');
});
it('should not allow non-functions when calling fn', function() {
});
it('should have util methods', function() {
var plugin = nforce.plugin('utilplugin');
should.exist(plugin.util);
should.exist(plugin.util.validateOAuth);
plugin.util.validateOAuth.should.be.a.Function;
});
it('should throw when creating a connection with missing plugins', function() {
(function() {
var org = nforce.createConnection({
clientId: 'SOME_OAUTH_CLIENT_ID',
clientSecret: 'SOME_OAUTH_CLIENT_SECRET',
redirectUri: 'http://localhost:3000/oauth/_callback',
apiVersion: 'v24.0',
environment: 'production',
plugins: ['missingplugin']
});
}).should.throw();
});
it('should allow an options object with namespace', function() {
(function() {
var plugin = nforce.plugin({ namespace: 'myplugin2' });
}).should.not.throw();
});
it('should not allow overriding existing plugins', function() {
var plugin1 = nforce.plugin('myplugin3');
(function() {
var plugin2 = nforce.plugin('myplugin3');
}).should.throw();
});
it('should not load plugins not specified', function() {
var plugin = nforce.plugin('myplugin4');
var org = nforce.createConnection({
clientId: 'SOME_OAUTH_CLIENT_ID',
clientSecret: 'SOME_OAUTH_CLIENT_SECRET',
redirectUri: 'http://localhost:3000/oauth/_callback',
apiVersion: 'v24.0',
environment: 'production',
plugins: []
});
should.not.exist(org.myplugin4);
});
});
});
| {
"pile_set_name": "Github"
} |
#pragma once
#include "gui_driver.h"
#include <stdint.h>
#define FLASHLIGHT_STACK_SIZE ( 0x400 )
#define FLASHLIGHT_PRIO ( HEXIWEAR_APP_PRIO )
extern guiImage_t
flashlight_icon;
extern const uint8_t
flashlight_screen_bmp[18438],
flashlight_off_bmp[4806],
flashlight_on_bmp[4806];
| {
"pile_set_name": "Github"
} |
#ifndef _STRBUF_H_
#define _STRBUF_H_
#include "quip_fwd.h"
//#include "typedefs.h"
/* string buffer structure */
struct string_buf {
char * sb_buf;
size_t sb_size;
} ;
/* String_Buf */
/* For now this is not an object... */
//#define SB_BUF(sbp) sbp->sb_buf
#define SB_SIZE(sbp) sbp->sb_size
#define SET_SB_BUF(sbp,s) sbp->sb_buf = s
#define SET_SB_SIZE(sbp,n) sbp->sb_size = n
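/* Illustrative usage sketch (hypothetical caller, not part of this header):
 *   String_Buf *sbp = ...;
 *   SET_SB_BUF(sbp, malloc(256));
 *   SET_SB_SIZE(sbp, 256);
 * keeping the buffer pointer and its recorded capacity in sync. */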
#endif /* ! _STRBUF_H_ */
| {
"pile_set_name": "Github"
} |
ai.h2o.sparkling.extensions.stacktrace.StackTraceCollector
| {
"pile_set_name": "Github"
} |
+++
Talk_date = ""
Talk_start_time = ""
Talk_end_time = ""
Title = "Walking into Mordor: The History and Future of DevOps (ballroom)"
Type = "talk"
Youtube = "k6_xlRUNzF0"
Speakers = ["ryan-bergman"]
+++
The main premise of this talk is that the current state of DevOps tooling is overly complex and too much of a cognitive load to be sustainable given current demands. I will provide a history of how we got here and the rationale behind the choices that resulted in our current state. Then we will look at the future and the needs of modern software development that conflict with our current practices.
| {
"pile_set_name": "Github"
} |
pragma solidity ^0.4.11;
import '../math/SafeMath.sol';
/**
* @title MerdeToken token
* @dev Simple ERC20 Token example, with mintable token creation
*/
contract MerdeToken {
/*
* Events
*/
event Mint(address indexed to, uint256 amount);
event MintFinished();
event MintRequest(address indexed requester, address indexed to, uint256 amount);
event Transfer(address indexed from, address indexed to, uint256 value);
/*
* Library
*/
using SafeMath for uint256;
/*
* Storage
*/
address public owner;
bool public mintingFinished = false;
uint256 public totalSupply;
uint256 public decimals = 18;
string public name = "MerdeToken";
string public symbol = "MDT";
mapping(address => bool) allowedMinters;
mapping(address => uint256) balances;
/*
* Modifiers
*/
modifier canMint() {
require(!mintingFinished);
_;
}
modifier isOwner() {
if (msg.sender != owner)
// Only owner is allowed to proceed
revert();
_;
}
modifier isMinter() {
// Only minters are allowed to proceed
require(allowedMinters[msg.sender]);
_;
}
/*
* Public Functions
*/
function MerdeToken() {
owner = msg.sender;
}
function addMinter(address minter)
public
isOwner
{
allowedMinters[minter] = true;
}
/**
* @dev transfer token for a specified address
* @param _to address The address to transfer to.
* @param _value uint256 The amount to be transferred.
*/
function transfer(address _to, uint256 _value)
public
returns (bool)
{
balances[msg.sender] = balances[msg.sender].sub(_value);
balances[_to] = balances[_to].add(_value);
Transfer(msg.sender, _to, _value);
return true;
}
/**
* @dev Gets the balance of the specified address.
* @param _owner address The address to query the balance of.
* @return An uint256 representing the amount owned by the passed address.
*/
function balanceOf(address _owner)
public
constant
returns (uint256 balance)
{
return balances[_owner];
}
/**
* @dev Function to mint tokens
* @param _to address The address that will receive the minted tokens.
* @param _amount uint256 The amount of tokens to mint.
* @return A boolean that indicates if the operation was successful.
*/
function mint(address _to, uint256 _amount)
isMinter
canMint
public
returns (bool)
{
totalSupply = totalSupply.add(_amount);
balances[_to] = balances[_to].add(_amount);
Mint(_to, _amount);
return true;
}
/**
* @dev Function to stop minting new tokens.
* @return True if the operation was successful.
*/
function finishMinting()
isOwner
public
returns (bool)
{
mintingFinished = true;
MintFinished();
return true;
}
function requestMint(address _to, uint amount)
public
{
MintRequest(msg.sender, _to, amount);
}
}
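// Illustrative usage sketch (hypothetical addresses, not part of the contract):
//   MerdeToken token = new MerdeToken();  // deployer becomes owner
//   token.addMinter(minterAddr);          // owner whitelists a minter
//   token.mint(recipient, 1000);          // minter creates new tokens
//   token.finishMinting();                // owner permanently stops minting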
| {
"pile_set_name": "Github"
} |
/*global django:true, jQuery:false*/
/* Puts the included jQuery into our own namespace using noConflict and passing
* it 'true'. This ensures that the included jQuery doesn't pollute the global
* namespace (i.e. this preserves pre-existing values for both window.$ and
* window.jQuery).
*/
var django = django || {};
django.jQuery = jQuery.noConflict(true);
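// Illustrative sketch of the effect (hypothetical page state): if the page had
// already set window.$ = somethingElse, that value survives this file, while
// django.jQuery('#id') still resolves to the bundled jQuery.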
| {
"pile_set_name": "Github"
} |
import json
import os
def relative_path(script_reference_path, rel_path):
# __file__ should be passed as script_reference_path
script_path = os.path.abspath(
script_reference_path) # i.e. /path/to/dir/foobar.py
script_dir = os.path.split(script_path)[0] # i.e. /path/to/dir/
return os.path.join(script_dir, rel_path)
# Prepare zone bounding boxes
ZONE_BOUNDING_BOXES = {}
# Read parser import list from config jsons
ZONES_CONFIG = json.load(open(relative_path(
__file__, '../config/zones.json')))
EXCHANGES_CONFIG = json.load(open(relative_path(
__file__, '../config/exchanges.json')))
# Read all zones
for zone_id, zone_config in ZONES_CONFIG.items():
if 'bounding_box' in zone_config:
ZONE_BOUNDING_BOXES[zone_id] = zone_config['bounding_box']
ZONE_NEIGHBOURS = {}
for k, v in EXCHANGES_CONFIG.items():
zone_names = k.split('->')
pairs = [
(zone_names[0], zone_names[1]),
(zone_names[1], zone_names[0])
]
for zone_name_1, zone_name_2 in pairs:
if zone_name_1 not in ZONE_NEIGHBOURS:
ZONE_NEIGHBOURS[zone_name_1] = set()
ZONE_NEIGHBOURS[zone_name_1].add(zone_name_2)
# we want neighbors to always be in the same order
for zone, neighbors in ZONE_NEIGHBOURS.items():
ZONE_NEIGHBOURS[zone] = sorted(neighbors)
CO2EQ_PARAMETERS = json.load(open(relative_path(
__file__, '../config/co2eq_parameters.json')))
def emission_factors(zone_key):
override = CO2EQ_PARAMETERS['emissionFactors']['zoneOverrides'].get(zone_key, {})
defaults = CO2EQ_PARAMETERS['emissionFactors']['defaults']
merged = {**defaults, **override}
return dict([(k, (v or {}).get('value')) for (k, v) in merged.items()])
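# Illustrative sketch (hypothetical zone key and factor values):
#   defaults = {"coal": {"value": 820}, "wind": {"value": 11}}
#   zoneOverrides["XX"] = {"coal": {"value": 700}}
#   emission_factors("XX")  ->  {"coal": 700, "wind": 11}
# Overrides win key-by-key; entries without a "value" come back as None.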
| {
"pile_set_name": "Github"
} |
/*
* Copyright Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.confluent.examples.streams;
import io.confluent.examples.streams.kafka.EmbeddedSingleNodeKafkaCluster;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.apache.kafka.test.TestUtils;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
/**
* Demonstrates how to validate an application's expected state through interactive queries.
*
* Note: This example uses lambda expressions and thus works with Java 8+ only.
*/
public class ValidateStateWithInteractiveQueriesLambdaIntegrationTest {
@ClassRule
public static final EmbeddedSingleNodeKafkaCluster CLUSTER = new EmbeddedSingleNodeKafkaCluster();
private static String inputTopic = "inputTopic";
@BeforeClass
public static void startKafkaCluster() throws Exception {
CLUSTER.createTopic(inputTopic);
}
@Test
public void shouldComputeMaxValuePerKey() throws Exception {
// A user may be listed multiple times.
List<KeyValue<String, Long>> inputUserClicks = Arrays.asList(
new KeyValue<>("alice", 13L),
new KeyValue<>("bob", 4L),
new KeyValue<>("chao", 25L),
new KeyValue<>("bob", 19L),
new KeyValue<>("chao", 56L),
new KeyValue<>("alice", 78L),
new KeyValue<>("alice", 40L),
new KeyValue<>("bob", 3L)
);
Map<String, Long> expectedMaxClicksPerUser = new HashMap<String, Long>() {
{
put("alice", 78L);
put("bob", 19L);
put("chao", 56L);
}
};
//
// Step 1: Configure and start the processor topology.
//
StreamsBuilder builder = new StreamsBuilder();
Properties streamsConfiguration = new Properties();
streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "validating-with-interactive-queries-integration-test");
streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Long().getClass().getName());
streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
// The commit interval for flushing records to state stores and downstream must be lower than
// this integration test's timeout (30 secs) to ensure we observe the expected processing results.
streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 2 * 1000);
streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
// Use a temporary directory for storing state, which will be automatically removed after the test.
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
KStream<String, Long> input = builder.stream(inputTopic);
// rolling MAX() aggregation
String maxStore = "max-store";
input.groupByKey().aggregate(
() -> Long.MIN_VALUE,
(aggKey, value, aggregate) -> Math.max(value, aggregate),
Materialized.as(maxStore)
);
// windowed MAX() aggregation
String maxWindowStore = "max-window-store";
input.groupByKey()
.windowedBy(TimeWindows.of(TimeUnit.MINUTES.toMillis(1L)).until(TimeUnit.MINUTES.toMillis(5L)))
.aggregate(
() -> Long.MIN_VALUE,
(aggKey, value, aggregate) -> Math.max(value, aggregate),
Materialized.as(maxWindowStore));
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
//
// Step 2: Produce some input data to the input topic.
//
Properties producerConfig = new Properties();
producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
IntegrationTestUtils.produceKeyValuesSynchronously(inputTopic, inputUserClicks, producerConfig);
//
// Step 3: Validate the application's state by interactively querying its state stores.
//
ReadOnlyKeyValueStore<String, Long> keyValueStore =
IntegrationTestUtils.waitUntilStoreIsQueryable(maxStore, QueryableStoreTypes.keyValueStore(), streams);
ReadOnlyWindowStore<String, Long> windowStore =
IntegrationTestUtils.waitUntilStoreIsQueryable(maxWindowStore, QueryableStoreTypes.windowStore(), streams);
// Wait a bit so that the input data can be fully processed to ensure that the stores can
// actually be populated with data. Running the build on (slow) Travis CI in particular
// requires a few seconds to run this test reliably.
Thread.sleep(3000);
IntegrationTestUtils.assertThatKeyValueStoreContains(keyValueStore, expectedMaxClicksPerUser);
IntegrationTestUtils.assertThatOldestWindowContains(windowStore, expectedMaxClicksPerUser);
streams.close();
}
}
| {
"pile_set_name": "Github"
} |
//
// RunTestsNotification.swift
// pxctest
//
// Created by Johannes Plunien on 10/12/2016.
// Copyright © 2016 Johannes Plunien. All rights reserved.
//
import Foundation
final class RunTestsNotification: NSObject, NSUserNotificationCenterDelegate {
private var notificationSender: NotificationSender
private let title = "pxctest"
required init(notificationSender: NotificationSender = NSUserNotificationCenter.default) {
self.notificationSender = notificationSender
super.init()
self.notificationSender.delegate = self
}
func deliverSuccessNotification() {
let notification = NSUserNotification()
notification.title = title
notification.informativeText = "Tests Succeeded"
notificationSender.deliver(notification)
}
func deliverFailureNotification(error: Error) {
let notification = NSUserNotification()
notification.title = title
switch error {
case RunTestsCommand.RuntimeError.testRunHadErrors(let errors):
notification.informativeText = "Tests Failed (\(errors.count.pluralized("Error")))"
case RunTestsCommand.RuntimeError.testRunHadFailures(let failures):
notification.informativeText = "Tests Failed (\(failures.pluralized("Failure")))"
default:
notification.informativeText = "Tests Failed"
}
notificationSender.deliver(notification)
}
func userNotificationCenter(_ center: NSUserNotificationCenter, shouldPresent notification: NSUserNotification) -> Bool {
return true
}
}
| {
"pile_set_name": "Github"
} |
#include "clapack.h"
/* Table of constant values */
static real c_b9 = 0.f;
static real c_b10 = 1.f;
static integer c__0 = 0;
static integer c__1 = 1;
static integer c__2 = 2;
/* Subroutine */ int ssteqr_(char *compz, integer *n, real *d__, real *e,
real *z__, integer *ldz, real *work, integer *info)
{
/* System generated locals */
integer z_dim1, z_offset, i__1, i__2;
real r__1, r__2;
/* Builtin functions */
double sqrt(doublereal), r_sign(real *, real *);
/* Local variables */
real b, c__, f, g;
integer i__, j, k, l, m;
real p, r__, s;
integer l1, ii, mm, lm1, mm1, nm1;
real rt1, rt2, eps;
integer lsv;
real tst, eps2;
integer lend, jtot;
extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
;
extern logical lsame_(char *, char *);
real anorm;
extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
integer *, real *, real *, real *, integer *), sswap_(integer *, real *, integer *, real *, integer *);
integer lendm1, lendp1;
extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
, real *, real *);
extern doublereal slapy2_(real *, real *);
integer iscale;
extern doublereal slamch_(char *);
real safmin;
extern /* Subroutine */ int xerbla_(char *, integer *);
real safmax;
extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
real *, integer *, integer *, real *, integer *, integer *);
integer lendsv;
extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
), slaset_(char *, integer *, integer *, real *, real *, real *,
integer *);
real ssfmin;
integer nmaxit, icompz;
real ssfmax;
extern doublereal slanst_(char *, integer *, real *, real *);
extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
/* -- LAPACK routine (version 3.1) -- */
/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
/* November 2006 */
/* .. Scalar Arguments .. */
/* .. */
/* .. Array Arguments .. */
/* .. */
/* Purpose */
/* ======= */
/* SSTEQR computes all eigenvalues and, optionally, eigenvectors of a */
/* symmetric tridiagonal matrix using the implicit QL or QR method. */
/* The eigenvectors of a full or band symmetric matrix can also be found */
/* if SSYTRD or SSPTRD or SSBTRD has been used to reduce this matrix to */
/* tridiagonal form. */
/* Arguments */
/* ========= */
/* COMPZ (input) CHARACTER*1 */
/* = 'N': Compute eigenvalues only. */
/* = 'V': Compute eigenvalues and eigenvectors of the original */
/* symmetric matrix. On entry, Z must contain the */
/* orthogonal matrix used to reduce the original matrix */
/* to tridiagonal form. */
/* = 'I': Compute eigenvalues and eigenvectors of the */
/* tridiagonal matrix. Z is initialized to the identity */
/* matrix. */
/* N (input) INTEGER */
/* The order of the matrix. N >= 0. */
/* D (input/output) REAL array, dimension (N) */
/* On entry, the diagonal elements of the tridiagonal matrix. */
/* On exit, if INFO = 0, the eigenvalues in ascending order. */
/* E (input/output) REAL array, dimension (N-1) */
/* On entry, the (n-1) subdiagonal elements of the tridiagonal */
/* matrix. */
/* On exit, E has been destroyed. */
/* Z (input/output) REAL array, dimension (LDZ, N) */
/* On entry, if COMPZ = 'V', then Z contains the orthogonal */
/* matrix used in the reduction to tridiagonal form. */
/* On exit, if INFO = 0, then if COMPZ = 'V', Z contains the */
/* orthonormal eigenvectors of the original symmetric matrix, */
/* and if COMPZ = 'I', Z contains the orthonormal eigenvectors */
/* of the symmetric tridiagonal matrix. */
/* If COMPZ = 'N', then Z is not referenced. */
/* LDZ (input) INTEGER */
/* The leading dimension of the array Z. LDZ >= 1, and if */
/* eigenvectors are desired, then LDZ >= max(1,N). */
/* WORK (workspace) REAL array, dimension (max(1,2*N-2)) */
/* If COMPZ = 'N', then WORK is not referenced. */
/* INFO (output) INTEGER */
/* = 0: successful exit */
/* < 0: if INFO = -i, the i-th argument had an illegal value */
/* > 0: the algorithm has failed to find all the eigenvalues in */
/* a total of 30*N iterations; if INFO = i, then i */
/* elements of E have not converged to zero; on exit, D */
/* and E contain the elements of a symmetric tridiagonal */
/* matrix which is orthogonally similar to the original */
/* matrix. */
/* ===================================================================== */
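/* Illustrative call sketch (hypothetical driver, not part of LAPACK): */
/*   integer n = 3, ldz = 1, info; */
/*   real d[3] = {2.f, 2.f, 2.f}, e[2] = {1.f, 1.f}; */
/*   ssteqr_("N", &n, d, e, (real *)0, &ldz, (real *)0, &info); */
/* on success, info == 0 and d holds the eigenvalues in ascending order */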
/* .. Parameters .. */
/* .. */
/* .. Local Scalars .. */
/* .. */
/* .. External Functions .. */
/* .. */
/* .. External Subroutines .. */
/* .. */
/* .. Intrinsic Functions .. */
/* .. */
/* .. Executable Statements .. */
/* Test the input parameters. */
/* Parameter adjustments */
--d__;
--e;
z_dim1 = *ldz;
z_offset = 1 + z_dim1;
z__ -= z_offset;
--work;
/* Function Body */
*info = 0;
if (lsame_(compz, "N")) {
icompz = 0;
} else if (lsame_(compz, "V")) {
icompz = 1;
} else if (lsame_(compz, "I")) {
icompz = 2;
} else {
icompz = -1;
}
if (icompz < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) {
*info = -6;
}
if (*info != 0) {
i__1 = -(*info);
xerbla_("SSTEQR", &i__1);
return 0;
}
/* Quick return if possible */
if (*n == 0) {
return 0;
}
if (*n == 1) {
if (icompz == 2) {
z__[z_dim1 + 1] = 1.f;
}
return 0;
}
/* Determine the unit roundoff and over/underflow thresholds. */
eps = slamch_("E");
/* Computing 2nd power */
r__1 = eps;
eps2 = r__1 * r__1;
safmin = slamch_("S");
safmax = 1.f / safmin;
ssfmax = sqrt(safmax) / 3.f;
ssfmin = sqrt(safmin) / eps2;
/* Compute the eigenvalues and eigenvectors of the tridiagonal */
/* matrix. */
if (icompz == 2) {
slaset_("Full", n, n, &c_b9, &c_b10, &z__[z_offset], ldz);
}
nmaxit = *n * 30;
jtot = 0;
/* Determine where the matrix splits and choose QL or QR iteration */
/* for each block, according to whether top or bottom diagonal */
/* element is smaller. */
l1 = 1;
nm1 = *n - 1;
L10:
if (l1 > *n) {
goto L160;
}
if (l1 > 1) {
e[l1 - 1] = 0.f;
}
if (l1 <= nm1) {
i__1 = nm1;
for (m = l1; m <= i__1; ++m) {
tst = (r__1 = e[m], dabs(r__1));
if (tst == 0.f) {
goto L30;
}
if (tst <= sqrt((r__1 = d__[m], dabs(r__1))) * sqrt((r__2 = d__[m
+ 1], dabs(r__2))) * eps) {
e[m] = 0.f;
goto L30;
}
/* L20: */
}
}
m = *n;
L30:
l = l1;
lsv = l;
lend = m;
lendsv = lend;
l1 = m + 1;
if (lend == l) {
goto L10;
}
/* Scale submatrix in rows and columns L to LEND */
i__1 = lend - l + 1;
anorm = slanst_("I", &i__1, &d__[l], &e[l]);
iscale = 0;
if (anorm == 0.f) {
goto L10;
}
if (anorm > ssfmax) {
iscale = 1;
i__1 = lend - l + 1;
slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
info);
i__1 = lend - l;
slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
info);
} else if (anorm < ssfmin) {
iscale = 2;
i__1 = lend - l + 1;
slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
info);
i__1 = lend - l;
slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
info);
}
/* Choose between QL and QR iteration */
if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
lend = lsv;
l = lendsv;
}
if (lend > l) {
/* QL Iteration */
/* Look for small subdiagonal element. */
L40:
if (l != lend) {
lendm1 = lend - 1;
i__1 = lendm1;
for (m = l; m <= i__1; ++m) {
/* Computing 2nd power */
r__2 = (r__1 = e[m], dabs(r__1));
tst = r__2 * r__2;
if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
+ 1], dabs(r__2)) + safmin) {
goto L60;
}
/* L50: */
}
}
m = lend;
L60:
if (m < lend) {
e[m] = 0.f;
}
p = d__[l];
if (m == l) {
goto L80;
}
/* If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
/* to compute its eigensystem. */
if (m == l + 1) {
if (icompz > 0) {
slaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
work[l] = c__;
work[*n - 1 + l] = s;
slasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
z__[l * z_dim1 + 1], ldz);
} else {
slae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
}
d__[l] = rt1;
d__[l + 1] = rt2;
e[l] = 0.f;
l += 2;
if (l <= lend) {
goto L40;
}
goto L140;
}
if (jtot == nmaxit) {
goto L140;
}
++jtot;
/* Form shift. */
g = (d__[l + 1] - p) / (e[l] * 2.f);
r__ = slapy2_(&g, &c_b10);
g = d__[m] - p + e[l] / (g + r_sign(&r__, &g));
s = 1.f;
c__ = 1.f;
p = 0.f;
/* Inner loop */
mm1 = m - 1;
i__1 = l;
for (i__ = mm1; i__ >= i__1; --i__) {
f = s * e[i__];
b = c__ * e[i__];
slartg_(&g, &f, &c__, &s, &r__);
if (i__ != m - 1) {
e[i__ + 1] = r__;
}
g = d__[i__ + 1] - p;
r__ = (d__[i__] - g) * s + c__ * 2.f * b;
p = s * r__;
d__[i__ + 1] = g + p;
g = c__ * r__ - b;
/* If eigenvectors are desired, then save rotations. */
if (icompz > 0) {
work[i__] = c__;
work[*n - 1 + i__] = -s;
}
/* L70: */
}
/* If eigenvectors are desired, then apply saved rotations. */
if (icompz > 0) {
mm = m - l + 1;
slasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
* z_dim1 + 1], ldz);
}
d__[l] -= p;
e[l] = g;
goto L40;
/* Eigenvalue found. */
L80:
d__[l] = p;
++l;
if (l <= lend) {
goto L40;
}
goto L140;
} else {
/* QR Iteration */
/* Look for small superdiagonal element. */
L90:
if (l != lend) {
lendp1 = lend + 1;
i__1 = lendp1;
for (m = l; m >= i__1; --m) {
/* Computing 2nd power */
r__2 = (r__1 = e[m - 1], dabs(r__1));
tst = r__2 * r__2;
if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
- 1], dabs(r__2)) + safmin) {
goto L110;
}
/* L100: */
}
}
m = lend;
L110:
if (m > lend) {
e[m - 1] = 0.f;
}
p = d__[l];
if (m == l) {
goto L130;
}
/* If remaining matrix is 2-by-2, use SLAE2 or SLAEV2 */
/* to compute its eigensystem. */
if (m == l - 1) {
if (icompz > 0) {
slaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
;
work[m] = c__;
work[*n - 1 + m] = s;
slasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
z__[(l - 1) * z_dim1 + 1], ldz);
} else {
slae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
}
d__[l - 1] = rt1;
d__[l] = rt2;
e[l - 1] = 0.f;
l += -2;
if (l >= lend) {
goto L90;
}
goto L140;
}
if (jtot == nmaxit) {
goto L140;
}
++jtot;
/* Form shift. */
g = (d__[l - 1] - p) / (e[l - 1] * 2.f);
r__ = slapy2_(&g, &c_b10);
g = d__[m] - p + e[l - 1] / (g + r_sign(&r__, &g));
s = 1.f;
c__ = 1.f;
p = 0.f;
/* Inner loop */
lm1 = l - 1;
i__1 = lm1;
for (i__ = m; i__ <= i__1; ++i__) {
f = s * e[i__];
b = c__ * e[i__];
slartg_(&g, &f, &c__, &s, &r__);
if (i__ != m) {
e[i__ - 1] = r__;
}
g = d__[i__] - p;
r__ = (d__[i__ + 1] - g) * s + c__ * 2.f * b;
p = s * r__;
d__[i__] = g + p;
g = c__ * r__ - b;
/* If eigenvectors are desired, then save rotations. */
if (icompz > 0) {
work[i__] = c__;
work[*n - 1 + i__] = s;
}
/* L120: */
}
/* If eigenvectors are desired, then apply saved rotations. */
if (icompz > 0) {
mm = l - m + 1;
slasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
* z_dim1 + 1], ldz);
}
d__[l] -= p;
e[lm1] = g;
goto L90;
/* Eigenvalue found. */
L130:
d__[l] = p;
--l;
if (l >= lend) {
goto L90;
}
goto L140;
}
/* Undo scaling if necessary */
L140:
if (iscale == 1) {
i__1 = lendsv - lsv + 1;
slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
n, info);
i__1 = lendsv - lsv;
slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
info);
} else if (iscale == 2) {
i__1 = lendsv - lsv + 1;
slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
n, info);
i__1 = lendsv - lsv;
slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
info);
}
/* Check for no convergence to an eigenvalue after a total */
/* of N*MAXIT iterations. */
if (jtot < nmaxit) {
goto L10;
}
i__1 = *n - 1;
for (i__ = 1; i__ <= i__1; ++i__) {
if (e[i__] != 0.f) {
++(*info);
}
/* L150: */
}
goto L190;
/* Order eigenvalues and eigenvectors. */
L160:
if (icompz == 0) {
/* Use Quick Sort */
slasrt_("I", n, &d__[1], info);
} else {
/* Use Selection Sort to minimize swaps of eigenvectors */
i__1 = *n;
for (ii = 2; ii <= i__1; ++ii) {
i__ = ii - 1;
k = i__;
p = d__[i__];
i__2 = *n;
for (j = ii; j <= i__2; ++j) {
if (d__[j] < p) {
k = j;
p = d__[j];
}
/* L170: */
}
if (k != i__) {
d__[k] = d__[i__];
d__[i__] = p;
sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
&c__1);
}
/* L180: */
}
}
L190:
return 0;
/* End of SSTEQR */
} /* ssteqr_ */
| {
"pile_set_name": "Github"
} |
{application, erllambda,
[{description, "Erlang AWS Lambda and Utilities"},
{vsn, "git"},
{registered, []},
{mod, {erllambda_app, []}},
{applications,
[
kernel,
stdlib,
lhttpc,
erlcloud,
jsone
]},
{env,
[
%% Print Env during Init or not
{print_env, true},
%% execution AWS region, typically only defined by some controlling
%% framework.
{region, undefined},
%% execution AWS accountid, typically only defined by some controlling
%% framework.
{accountid, undefined},
%% execution environ (typically USER or base stack name), typically
%% only defined by some controlling framework.
{environ, undefined},
%% predefined erlcloud config that is set by some controlling
%% framework, so that client applications can get valid base config
%% when it is otherwise not available via erlcloud_aws:auto_config/0.
%% The default is for this to be undefined, which will instead use
%% erlcloud_aws:auto_config/0.
{config, undefined},
%% set the STS expiration to max by default.
{default_role_duration_sec, 3600},
%% how much upfront to evict the STS token record
{default_role_evict_sec, 10},
%% do less GC while executing the handler
{handler_spawn_opts, [
{min_bin_vheap_size, 2487399},
{min_heap_size, 2487399}
]}
]},
{modules, []},
{licenses, ["MIT"]},
{links, [{"Github", "https://github.com/alertlogic/erllambda"}]}
]}.
| {
"pile_set_name": "Github"
} |
<%
layout("/layouts/platform.html"){
%>
<header class="header navbar bg-white shadow">
<div class="btn-group tool-button">
<a class="btn btn-primary navbar-btn" href="${base}/platform/sys/user" data-pjax id="goback"><i class="ti-angle-left"></i> 返回</a>
</div>
</header>
<div class="content-wrap">
<div class="wrapper" style="min-height:500px;">
<section class="panel panel-form">
<form id="unitAddForm" role="form" class="form-horizontal parsley-form" data-parsley-validate
action="${base}/platform/sys/user/editDo" method="post">
<div class="row mb10">
<div class="col-lg-12">
<div class="form-group has-feedback">
<label for="parentId" class="col-sm-2 control-label">所属单位</label>
<div class="col-sm-8">
<div class="input-group">
<input id="parentId" type="text" class="form-control" placeholder="选择单位" disabled
value="${obj.unit.name}" data-parsley-required="true"/>
<span class="input-group-btn">
<button type="button" class="btn btn-primary <%if(!@shiro.hasRole('sysadmin')){%>disabled<%}%>" data-toggle="modal"
data-target="#dialogSelectParentUnit"><i class="ti-plus"></i>选择
</button>
</span>
</div>
<input type="hidden" name="id" value="${obj.id!}">
<input type="hidden" name="unitid" value="${obj.unitid!}">
<input type="hidden" name="oldLoginname" value="${obj.loginname!}">
</div>
</div>
<div class="form-group">
<label for="loginname" class="col-sm-2 control-label">用户名</label>
<div class="col-sm-8">
<input type="text" id="loginname" value="${obj.loginname!}" class="form-control" name="loginname"
data-parsley-required="true" placeholder="用户名">
</div>
</div>
<div class="form-group">
<label for="username" class="col-sm-2 control-label">姓名/昵称</label>
<div class="col-sm-8">
<input type="text" id="username" class="form-control" name="username" data-parsley-required="true"
placeholder="名称/昵称" value="${obj.username!}">
</div>
</div>
<div class="form-group">
<label for="email" class="col-sm-2 control-label">电子邮箱</label>
<div class="col-sm-8">
<input type="text" id="email" name="email" value="${obj.email!}" data-parsley-type="email"
class="form-control" placeholder="Email">
</div>
</div>
</div>
</div>
<div class="col-lg-3"></div>
<div class="col-lg-6">
<div class="form-group text-center">
<label></label>
<div>
<button class="btn btn-primary btn-block btn-lg btn-parsley" data-loading-text="正在提交...">提 交</button>
</div>
</div>
</div>
</form>
</section>
</div>
</div>
<a class="exit-offscreen"></a>
<!-- Select parent unit -->
<div id="dialogSelectParentUnit" class="modal fade bs-modal-sm" tabindex="-1" role="dialog" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
<h4 class="modal-title">选择上级单位</h4>
</div>
<div class="modal-body">
<div class="row">
<div class="col-xs-12">
<div id="jsTreeParentUnit" class="demo"></div>
</div>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">取 消</button>
<button type="button" class="btn btn-primary" onclick="selectParentMenu()">确认选择</button>
</div>
</div>
</div>
</div>
<script language="JavaScript">
function initTreeView() {
$("#jsTreeParentUnit").jstree({
plugins: ["wholerow", "json_data"],
core: {
data: {
dataType: "json",
url: function (node) {
return node.id === "#" ? "${base}/platform/sys/user/tree" : "${base}/platform/sys/user/tree?pid=" + node.id
}
},
multiple: false
}
}).on("dblclick.jstree", function (node) {
selectParentMenu();
});
}
// Select the parent menu
function selectParentMenu() {
var tree = $.jstree.reference("#jsTreeParentUnit");
var node = tree.get_selected(true);
$("#unitAddForm #parentId").val(node[0].text);
$("#unitAddForm input[name='unitid']").val(node[0].id);
$("#dialogSelectParentUnit").modal("hide");
}
$(document).ready(function () {
initTreeView();
$('#unitAddForm').ajaxForm({
dataType: 'json',
beforeSubmit: function (arr, form, options) {
form.find("button:submit").button("loading");
},
success: function (data, statusText, xhr, form) {
if (data.code == 0) {
Toast.success(data.msg);
setTimeout(function () {
$("#goback").trigger("click");
}, 1000);
} else {
Toast.error(data.msg);
}
form.find("button:submit").button("reset");
}
});
});
</script>
<%}%>
| {
"pile_set_name": "Github"
} |
// *** WARNING: this file was generated by pulumigen. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v1
import (
"reflect"
"github.com/pkg/errors"
metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v2/go/kubernetes/meta/v1"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
pulumi.CustomResourceState
// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
ApiVersion pulumi.StringPtrOutput `pulumi:"apiVersion"`
// List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
Items PersistentVolumeTypeArrayOutput `pulumi:"items"`
// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Kind pulumi.StringPtrOutput `pulumi:"kind"`
// Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Metadata metav1.ListMetaPtrOutput `pulumi:"metadata"`
}
// NewPersistentVolumeList registers a new resource with the given unique name, arguments, and options.
func NewPersistentVolumeList(ctx *pulumi.Context,
name string, args *PersistentVolumeListArgs, opts ...pulumi.ResourceOption) (*PersistentVolumeList, error) {
if args == nil || args.Items == nil {
return nil, errors.New("missing required argument 'Items'")
}
if args == nil {
args = &PersistentVolumeListArgs{}
}
args.ApiVersion = pulumi.StringPtr("v1")
args.Kind = pulumi.StringPtr("PersistentVolumeList")
var resource PersistentVolumeList
err := ctx.RegisterResource("kubernetes:core/v1:PersistentVolumeList", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetPersistentVolumeList gets an existing PersistentVolumeList resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetPersistentVolumeList(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *PersistentVolumeListState, opts ...pulumi.ResourceOption) (*PersistentVolumeList, error) {
var resource PersistentVolumeList
err := ctx.ReadResource("kubernetes:core/v1:PersistentVolumeList", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering PersistentVolumeList resources.
type persistentVolumeListState struct {
// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
ApiVersion *string `pulumi:"apiVersion"`
// List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
Items []PersistentVolumeType `pulumi:"items"`
// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Kind *string `pulumi:"kind"`
// Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Metadata *metav1.ListMeta `pulumi:"metadata"`
}
type PersistentVolumeListState struct {
// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
ApiVersion pulumi.StringPtrInput
// List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
Items PersistentVolumeTypeArrayInput
// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Kind pulumi.StringPtrInput
// Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Metadata metav1.ListMetaPtrInput
}
func (PersistentVolumeListState) ElementType() reflect.Type {
return reflect.TypeOf((*persistentVolumeListState)(nil)).Elem()
}
type persistentVolumeListArgs struct {
// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
ApiVersion *string `pulumi:"apiVersion"`
// List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
Items []PersistentVolumeType `pulumi:"items"`
// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Kind *string `pulumi:"kind"`
// Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Metadata *metav1.ListMeta `pulumi:"metadata"`
}
// The set of arguments for constructing a PersistentVolumeList resource.
type PersistentVolumeListArgs struct {
// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
ApiVersion pulumi.StringPtrInput
// List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
Items PersistentVolumeTypeArrayInput
// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Kind pulumi.StringPtrInput
// Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Metadata metav1.ListMetaPtrInput
}
func (PersistentVolumeListArgs) ElementType() reflect.Type {
return reflect.TypeOf((*persistentVolumeListArgs)(nil)).Elem()
}
| {
"pile_set_name": "Github"
} |
import React, { PureComponent } from "react";
import { ViewPropTypes, NativeModules } from "react-native";
import PropTypes from "prop-types";
import RNImageHelper from 'react-native-image-helper'
const { RNBottomActionSheet } = NativeModules;
class GridView extends PureComponent {
static propTypes = {
...ViewPropTypes,
title: PropTypes.string,
theme: PropTypes.string,
itemTextColor: PropTypes.string,
itemTintColor: PropTypes.string,
backgroundColor: PropTypes.string,
onSelection: PropTypes.func,
visible: PropTypes.bool
};
  static defaultProps = {
    title: "",
    theme: "light",
    itemTextColor: "",
    itemTintColor: "",
    backgroundColor: "",
    visible: false,
    // Default to an empty list so Show() can safely map over props.items
    items: []
  };
static Show(props) {
if (props.title === undefined) props.title = GridView.defaultProps.title;
if (props.items === undefined) props.items = GridView.defaultProps.items;
if (props.theme === undefined) props.theme = GridView.defaultProps.theme;
if (props.itemTextColor === undefined)
props.itemTextColor = GridView.defaultProps.itemTextColor;
if (props.itemTintColor === undefined)
props.itemTintColor = GridView.defaultProps.itemTintColor;
if (props.backgroundColor === undefined)
props.backgroundColor = GridView.defaultProps.backgroundColor;
props.items = props.items.map(element => {
if (element.icon && element.icon.props) {
element.icon = element.icon.props;
let vectorIcon = RNImageHelper.Resolve(
element.icon.family,
element.icon.name
);
element.icon = Object.assign({}, element.icon, vectorIcon);
} else if (element.icon !== undefined) {
element.icon = {
name: element.icon,
family: "",
glyph: "",
color: "",
size: 0
};
} else {
element.icon = {};
}
element.divider = false;
return element;
});
RNBottomActionSheet.GridView(
{
title: props.title,
items: props.items,
theme: props.theme,
itemTextColor: props.itemTextColor,
itemTintColor: props.itemTintColor,
backgroundColor: props.backgroundColor
},
selectedIndex => {
const selectedValue = props.items[selectedIndex].value;
props.onSelection && props.onSelection(selectedIndex, selectedValue);
},
() => {
props.onCancel && props.onCancel()
}
);
}
componentDidMount() {
this._show();
}
componentDidUpdate() {
this._show();
}
_show() {
if (this.props.visible) {
let props = this.props;
let items = [];
React.Children.map(this.props.children, (item, index) => {
items.push({
title: item.props.title,
icon: item.props.icon,
divider: false
});
});
GridView.Show(Object.assign({}, props, { items: items }));
}
}
render() {
return null;
}
}
class Item extends PureComponent { }
Item.propTypes = {
title: PropTypes.string,
divider: PropTypes.bool,
icon: PropTypes.oneOfType([
PropTypes.number,
PropTypes.string,
PropTypes.object
])
};
Item.defaultProps = {
title: "",
divider: false
};
GridView.Item = Item;
export { GridView };
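// Illustrative usage sketch (not part of the library; the titles and values
// below are invented for the example):
//
//   GridView.Show({
//     title: "Share via",
//     items: [
//       { title: "Copy Link", value: "copy" },
//       { title: "Email", value: "email" }
//     ],
//     onSelection: (index, value) => console.log(index, value)
//   });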
| {
"pile_set_name": "Github"
} |
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <search.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <kitsune.h>
struct hsearch_data *store;
int init_listen_socket(int port)
{
int rec_sock, status;
struct sockaddr_in saddr;
int i = 0, yes = 1;
rec_sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
assert(rec_sock > 0);
memset(&saddr, 0, sizeof(struct sockaddr_in));
saddr.sin_family = AF_INET;
saddr.sin_port = htons(port);
status = setsockopt(rec_sock, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int));
if (bind(rec_sock, (struct sockaddr *)&saddr,
sizeof(struct sockaddr_in)) < 0)
abort();
if (listen(rec_sock, 10) < 0)
abort();
return rec_sock;
}
void init_store(void){
store = calloc(1, sizeof(struct hsearch_data));
hcreate_r(100, store);
}
void handle_client(int client)
{
const int line_size = 1000;
char * line = calloc(sizeof(char), line_size);
const char *delim = " \n";
char *command = NULL, *arg1 = NULL, *arg2 = NULL, *savptr = NULL;
while (1) {
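		/* kitsune_update() marks a quiescent point at which the Kitsune
		 * runtime may apply a dynamic software update (see <kitsune.h>). */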
kitsune_update("client");
memset(line, 0, line_size);
recv(client, line, line_size, 0);
/* parse command */
command = strtok_r(line, delim, &savptr);
if (command == NULL) /* client sent EOF */
break;
arg1 = strtok_r(NULL, delim, &savptr);
if (strcmp("set", command) == 0)
arg2 = strtok_r(NULL, delim, &savptr);
/* handle */
if (strcmp("set", command) == 0) {
ENTRY e, *ret = NULL;
e.key = strdup(arg1);
e.data = strdup(arg2);
hsearch_r(e, ENTER, &ret, store);
} else if (strcmp("get", command) == 0) {
ENTRY *ret = NULL, e;
e.key = strdup(arg1);
hsearch_r(e, FIND, &ret, store);
if (ret == NULL)
continue;
sprintf(line, "%s\n", (char *)ret->data);
send(client, line, strlen(line) + 1, 0);
}
}
close(client);
}
int
main(int argc, char **argv)
{
	int server_sock = 0, client_sock = 0;
	socklen_t client_addrlen = sizeof(struct sockaddr_in);
struct sockaddr_in client_addr;
init_store();
server_sock = init_listen_socket(5000);
assert(server_sock > 0);
while (1) {
kitsune_update("main");
		client_sock = accept(server_sock, (struct sockaddr *)&client_addr,
				     &client_addrlen);
if (client_sock < 0) {
printf("%s\n", strerror(errno));
abort();
}
handle_client(client_sock);
}
exit(0);
}
| {
"pile_set_name": "Github"
} |
$('#content_field').val('');
$('.Post__contents').html(
'<%= j(render partial: "conversations/post_content" ) %>'
);
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: e0edf97b5885e424a973b67aa82fad2e
timeCreated: 1508666793
licenseType: Pro
NativeFormatImporter:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
from unittest import TestCase
from unittest.mock import MagicMock, patch
from PyQt5.QtCore import QUrl
from UM.MimeTypeDatabase import MimeTypeDatabase
from cura.Settings.ContainerManager import ContainerManager
import tempfile
import os
class TestContainerManager(TestCase):
def setUp(self):
self._application = MagicMock()
self._container_registry = MagicMock()
self._machine_manager = MagicMock()
self._machine_manager.activeMachine.extruderList = [MagicMock(name="Left Extruder Mock"), MagicMock(name="Right Extruder Mock")]
self._mocked_mime = MagicMock()
self._mocked_mime.preferredSuffix = "omg"
self._mocked_mime.suffixes = ["omg"]
self._mocked_mime.comment = "UnitTest!"
self._mocked_container = MagicMock()
self._mocked_container_data = "SOME DATA :D"
self._mocked_container.serialize = MagicMock(return_value = self._mocked_container_data)
self._containers_meta_data = [{"id": "test", "test_data": "omg"}]
self._container_registry.findContainersMetadata = MagicMock(return_value = self._containers_meta_data)
self._container_registry.getMimeTypeForContainer = MagicMock(return_value = self._mocked_mime)
self._container_registry.findContainers = MagicMock(return_value = [self._mocked_container])
self._application.getContainerRegistry = MagicMock(return_value = self._container_registry)
self._application.getMachineManager = MagicMock(return_value = self._machine_manager)
# Destroy the previous instance of the container manager
if ContainerManager.getInstance() is not None:
ContainerManager._ContainerManager__instance = None
self._container_manager = ContainerManager(self._application)
MimeTypeDatabase.addMimeType(self._mocked_mime)
def tearDown(self):
MimeTypeDatabase.removeMimeType(self._mocked_mime)
def test_getContainerMetaDataEntry(self):
with patch("cura.CuraApplication.CuraApplication.getInstance", MagicMock(return_value=self._application)):
assert self._container_manager.getContainerMetaDataEntry("test", "test_data") == "omg"
assert self._container_manager.getContainerMetaDataEntry("test", "entry_that_is_not_defined") == ""
def test_clearUserContainer(self):
with patch("cura.CuraApplication.CuraApplication.getInstance", MagicMock(return_value=self._application)):
self._container_manager.clearUserContainers()
assert self._machine_manager.activeMachine.userChanges.clear.call_count == 1
def test_getContainerNameFilters(self):
with patch("cura.CuraApplication.CuraApplication.getInstance", MagicMock(return_value=self._application)):
# If nothing is added, we still expect to get the all files filter
assert self._container_manager.getContainerNameFilters("") == ['All Files (*)']
# Pretend that a new type was added.
self._container_registry.getContainerTypes = MagicMock(return_value=[("None", None)])
assert self._container_manager.getContainerNameFilters("") == ['UnitTest! (*.omg)', 'All Files (*)']
def test_exportContainerUnknownFileType(self):
# The filetype is not known, so this should cause an error!
assert self._container_manager.exportContainer("test", "zomg", "whatever")["status"] == "error"
def test_exportContainerInvalidPath(self):
assert self._container_manager.exportContainer("test", "zomg", "")["status"] == "error"
assert self._container_manager.exportContainer("test", "zomg", QUrl())["status"] == "error"
def test_exportContainerInvalidId(self):
assert self._container_manager.exportContainer("", "whatever", "whatever")["status"] == "error"
def test_exportContainer(self):
with patch("cura.CuraApplication.CuraApplication.getInstance", MagicMock(return_value=self._application)):
with tempfile.TemporaryDirectory() as tmpdirname:
result = self._container_manager.exportContainer("test", "whatever", os.path.join(tmpdirname, "whatever.omg"))
assert(os.path.exists(result["path"]))
with open(result["path"], "r", encoding="utf-8") as f:
assert f.read() == self._mocked_container_data | {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-08 05:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nmaper', '0007_nmapscan_slug_text'),
]
operations = [
migrations.RemoveField(
model_name='nmapscan',
name='slug_text',
),
migrations.AddField(
model_name='nmapscan',
name='slug',
field=models.SlugField(default='', max_length=128),
preserve_default=False,
),
]
| {
"pile_set_name": "Github"
} |
/* */
/*     PBGMIDI.c   MIDI management functions                                 */
/* */
/* */
#define PBGMIDI_SOURCE_COMPILE
#include "PBGMIDI.H"
#pragma message(PBGWIN_PBGMIDI_H)
// Local functions //
static void Mid_GMReset(void);
static BOOL Mid_Init(void);
// Even more(?) local functions //
static WORD ConvWord(WORD data);
static DWORD ConvDWord(DWORD data);
static DWORD GetWaitCount(LPBYTE *data);
static void CALLBACK CBMid_TimeFunc(UINT uID,UINT uMsg,DWORD dwUser,DWORD dw1,DWORD dw2);
static void Mid_Parse(MID_TRACK *track);
static void MidFadeIOFunc(void);
static void Mid_ShortMsg(BYTE d1,BYTE d2,BYTE d3);		// sends a short MIDI message
// Global and namespace-local variables //
MID_DEVICE Mid_Dev;
static MID_DATA Mid_Data;
static TIME_DATA Mid_Time;
WORD Mid_PlayTable[16][128];	// for the spectrum analyzer
WORD Mid_PlayTable2[16][128];	// for the level meter
BYTE Mid_NoteTable[16][128];	// for note display
BYTE Mid_NoteWTable[16][128];	// for note display (2)
BYTE Mid_PanpodTable[16];	// panpot
BYTE Mid_ExpressionTable[16];	// expression
BYTE Mid_VolumeTable[16];	// volume
static BYTE Mid_MulTempo = MID_STDTEMPO;
DWORD Mid_PlayTime = 0;
BOOL Mid_Start(WORD fnmode,WORD plmode)
{
MIDIOUTCAPS caps;
int i;
UINT mret;
	// Initialize the variables //
Mid_Dev.fnmode = fnmode;
Mid_Dev.plmode = plmode;
Mid_Dev.mp = NULL;
	Mid_Dev.nDevice = midiOutGetNumDevs()+1;	// reserve one extra slot
Mid_Dev.NowID = -1 + 1; // -1: MIDI_MAPPER
Mid_PlayTime = 0;
	// Return if no MIDI devices exist //
if(Mid_Dev.nDevice)
Mid_Dev.name = (char **)LocalAlloc(LPTR,sizeof(char *)*(Mid_Dev.nDevice));
else{
Mid_Dev.name = NULL;
return FALSE;
}
	// Allocate memory for the device names and fill it in //
for(i=0;i<Mid_Dev.nDevice;i++){
midiOutGetDevCaps(i-1,&caps,sizeof(MIDIOUTCAPS));
Mid_Dev.name[i] = (char *)LocalAlloc(LPTR,MAXPNAMELEN);
strcpy(Mid_Dev.name[i],caps.szPname);
}
	// Search for a usable device (the trick: the first entry is MIDI_MAPPER!) //
for(i=0;i<Mid_Dev.nDevice;i++){
Mid_Dev.NowID = i;
mret = midiOutOpen(&Mid_Dev.mp,Mid_Dev.NowID-1,0,0,CALLBACK_NULL);
if(mret==MMSYSERR_NOERROR) return TRUE;
}
	// When no usable device exists //
Mid_End();
return FALSE;
}
void Mid_End(void)
{
int i;
Mid_Stop();
Mid_Free();
if(Mid_Dev.mp) midiOutClose(Mid_Dev.mp);
if(Mid_Dev.name){
for(i=0;i<Mid_Dev.nDevice;i++) LocalFree(Mid_Dev.name[i]);
LocalFree(Mid_Dev.name);
Mid_Dev.mp = NULL;
Mid_Dev.name = NULL;
}
Mid_Dev.nDevice = Mid_Dev.NowID = 0;
}
void Mid_Play(void)
{
if(Mid_Data.data==NULL) return;
if(Mid_Dev.state==MIDST_PLAY) return;
Mid_Dev.FadeFlag = 0;
Mid_Dev.MaxVolume = 127;
Mid_Dev.NowVolume = 127;
Mid_Volume(Mid_Dev.NowVolume);
Mid_Init();
switch(Mid_Dev.fnmode){
case(MIDFN_CALLBACK):
Mid_GMReset();
timeGetDevCaps(&Mid_Time.caps,sizeof(TIMECAPS));
timeBeginPeriod(Mid_Time.caps.wPeriodMin);
Mid_Time.delay = 10;
Mid_Time.htimer = timeSetEvent(Mid_Time.delay,Mid_Time.caps.wPeriodMin,
CBMid_TimeFunc,0,TIME_PERIODIC);
break;
case(MIDFN_MIDLOOP):
Mid_GMReset();
break;
default:
return;
}
Mid_Dev.state = MIDST_PLAY;
}
void Mid_Stop(void)
{
int i;
if(Mid_Dev.state==MIDST_STOP) return;
Mid_PlayTime = 0;
Mid_Dev.FadeFlag = 0;
switch(Mid_Dev.fnmode){
case(MIDFN_CALLBACK):
timeKillEvent(Mid_Time.htimer);
timeEndPeriod(Mid_Time.caps.wPeriodMin);
break;
case(MIDFN_MIDLOOP):
break;
default:
return;
}
for(i=0;i<16;i++){
		Mid_ShortMsg(0xb0+i,0x7b,0x00);		// all notes off
		Mid_ShortMsg(0xb0+i,0x78,0x00);		// all sound off
}
midiOutReset(Mid_Dev.mp);
Mid_Dev.state = MIDST_STOP;
}
// Initialize the various tables //
void Mid_TableInit(void)
{
int i,j;
for(i=0;i<16;i++){
for(j=0;j<128;j++){
Mid_PlayTable[i][j] = 0;
Mid_PlayTable2[i][j] = 0;
Mid_NoteTable[i][j] = 0;
Mid_NoteWTable[i][j] = 0;
}
Mid_PanpodTable[i] = 0x40;
Mid_ExpressionTable[i] = 0x7f;
Mid_VolumeTable[i] = 0x64;
}
}
void Mid_Volume(BYTE volume)
{
	// Master volume : F0 7F 7F 04 01 VolumeLowByte VolumeHighByte F7          //
	// The low byte is apparently treated as 00 on the SC-88ST Pro (per manual) //
BYTE msg[8] = {0xf0,0x7f,0x7f,0x04,0x01,0x00,0x00,0xf7};
MIDIHDR mh;
/*
union{
DWORD dd;
struct{
WORD d1;
WORD d2;
}w;
} temp;
*/
msg[6] = volume;
mh.dwFlags = 0;
mh.dwOffset = 0;
mh.lpData = msg;
mh.dwBufferLength = mh.dwBytesRecorded = 8;
midiOutPrepareHeader(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
midiOutLongMsg(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
midiOutUnprepareHeader(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
	// Everything below this line could probably be removed //
//temp.w.d1 = temp.w.d2 = volume;
//midiOutSetVolume(Mid_Dev.mp,temp.dd);
}
void Mid_Tempo(char tempo)
{
Mid_MulTempo = MID_STDTEMPO + tempo;
}
void Mid_FadeOut(BYTE speed)
{
Mid_Dev.FadeFlag = -1;
Mid_Dev.FadeCount = 0;
	// 1 is added to MaxVolume and FadeWait to prevent division by zero //
Mid_Dev.FadeWait = ((256-speed)*4)/(Mid_Dev.MaxVolume+1) + 1;
}
void Mid_Pan(char pan)
{
int i;
int value = 0x40 + pan;
for(i=0;i<16;i++)
Mid_ShortMsg(0xb0+i,0x0a,value);
}
static void Mid_ShortMsg(BYTE d1,BYTE d2,BYTE d3)
{
BYTE data[4];
data[0] = d1;
data[1] = d2;
data[2] = d3;
data[3] = 0;
midiOutShortMsg(Mid_Dev.mp,*((DWORD *)data));
}
void Mid_GMReset(void)
{
// GM SystemOn : F0H 7EH 7FH 09H 01H F7H //
DWORD time;
BYTE msg[6] = {0xf0,0x7e,0x7f,0x09,0x01,0xf7};
MIDIHDR mh;
mh.dwFlags = 0;
mh.dwOffset = 0;
	mh.lpData = msg;
	mh.dwBufferLength = mh.dwBytesRecorded = 6;
midiOutPrepareHeader(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
midiOutLongMsg(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
midiOutUnprepareHeader(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
	// Wait at least 50 ms here! //
time = timeGetTime();
while(timeGetTime()-time<=50);
}
BOOL Mid_ChgDev(char pos)
{
int i,temp;
UINT mret;
	// Perform stop processing appropriate to each playback mode //
Mid_Stop();
midiOutClose(Mid_Dev.mp);
temp = Mid_Dev.nDevice + Mid_Dev.NowID + ((pos<0) ? -1 : 1);
for(i=0;i<Mid_Dev.nDevice;i++){
Mid_Dev.NowID = (pos<0) ? (temp-i)%(Mid_Dev.nDevice) : (temp+i)%(Mid_Dev.nDevice);
mret = midiOutOpen(&Mid_Dev.mp,Mid_Dev.NowID-1,0,0,CALLBACK_NULL);
if(mret==MMSYSERR_NOERROR){
Mid_Play();
return TRUE;
}
}
	// This should never happen, but... //
return FALSE;
}
BOOL Mid_Load(char *filename)
{
FILE *fp;
MID_FILEST midhead;
MID_MAINST *midmain;
MID_TRACKST midtrack;
MID_TRACK *pt;
DWORD size;
int i;
Mid_Free();
if((fp=fopen(filename,"rb"))==NULL) return FALSE;
fread(&midhead,sizeof(MID_FILEST),1,fp);
if(midhead.MThd!=mmioFOURCC('M','T','h','d')){
fclose(fp);
return FALSE;
}
size = ConvDWord(midhead.size);
midmain = (MID_MAINST *)LocalAlloc(LPTR,size);
fread(midmain,size,1,fp);
Mid_Data.format = ConvWord(midmain->format);
Mid_Data.track = ConvWord(midmain->track);
Mid_Data.timebase = ConvWord(midmain->timebase);
Mid_Data.tempo = 1000000;
LocalFree(midmain);
Mid_Data.data = (MID_TRACK *)LocalAlloc(LPTR,sizeof(MID_TRACK)*Mid_Data.track);
ZeroMemory(Mid_Data.data,sizeof(MID_TRACK)*Mid_Data.track);
for(i=0;i<Mid_Data.track;i++){
fread(&midtrack,sizeof(MID_TRACKST),1,fp);
pt = &(Mid_Data.data[i]);
pt->size = ConvDWord(midtrack.size);
pt->data = (BYTE *)LocalAlloc(LPTR,pt->size);
pt->play = TRUE;
fread(pt->data,pt->size,1,fp);
}
fclose(fp);
Mid_Init();
return TRUE;
}
BOOL PMid_Load(BIT_DEVICE *in,DWORD n)
{
BYTE *data,*dp;
MID_FILEST *midhead;
MID_MAINST *midmain;
MID_TRACKST *midtrack;
MID_TRACK *pt;
DWORD size;
int i;
Mid_Free();
if((data=MemExpand(in,n))==NULL) return FALSE;
midhead = (MID_FILEST *)data;
//fread(&midhead,sizeof(MID_FILEST),1,fp);
if(midhead->MThd!=mmioFOURCC('M','T','h','d')){
LocalFree(data);
return FALSE;
}
size = ConvDWord(midhead->size);
midmain = (MID_MAINST *)((BYTE *)data+sizeof(MID_FILEST));
//fread(midmain,size,1,fp);
Mid_Data.format = ConvWord(midmain->format);
Mid_Data.track = ConvWord(midmain->track);
Mid_Data.timebase = ConvWord(midmain->timebase);
Mid_Data.tempo = 1000000;
Mid_Data.data = (MID_TRACK *)LocalAlloc(LPTR,sizeof(MID_TRACK)*Mid_Data.track);
ZeroMemory(Mid_Data.data,sizeof(MID_TRACK)*Mid_Data.track);
dp = (BYTE *)(data+sizeof(MID_FILEST)+size);
for(i=0;i<Mid_Data.track;i++){
midtrack = (MID_TRACKST *)dp;
dp+=sizeof(MID_TRACKST);
//fread(&midtrack,sizeof(MID_TRACKST),1,fp);
pt = &(Mid_Data.data[i]);
pt->size = ConvDWord(midtrack->size);
pt->data = (BYTE *)LocalAlloc(LPTR,pt->size);
pt->play = TRUE;
//fread(pt->data,pt->size,1,fp);
memcpy(pt->data,dp,pt->size);
dp+=pt->size;
}
LocalFree(data);
Mid_Init();
return TRUE;
}
static BOOL Mid_Init(void)
{
MID_TRACK *p;
int i;
//Mid_Fade = 0;
//Mid_MulTempo = MID_STDTEMPO;
Mid_PlayTime = 0;
Mid_Data.playcount1 = 0;
Mid_Data.playcount2 = 0;
Mid_Data.fticks = 0;
Mid_Data.nticks = 0;
for(i=0;i<Mid_Data.track;i++){
p = &(Mid_Data.data[i]);
p->work = p->data;
p->play = TRUE;
		p->count = GetWaitCount(&(p->work));	// read the initial wait count
}
return TRUE;
}
BOOL Mid_Free(void)
{
int i;
if(Mid_Data.data == NULL) return FALSE;
for(i=0;i<Mid_Data.track;i++)
LocalFree(Mid_Data.data[i].data);
LocalFree(Mid_Data.data);
Mid_Data.data = NULL;
Mid_Data.track = 0;
return TRUE;
}
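/* SMF headers are big-endian; these helpers byte-swap values to host order. */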
static WORD ConvWord(WORD data)
{
WORD temp;
((BYTE *)&temp)[0] = ((BYTE *)&data)[1];
((BYTE *)&temp)[1] = ((BYTE *)&data)[0];
return temp;
}
static DWORD ConvDWord(DWORD data)
{
DWORD temp;
((BYTE *)&temp)[0] = ((BYTE *)&data)[3];
((BYTE *)&temp)[1] = ((BYTE *)&data)[2];
((BYTE *)&temp)[2] = ((BYTE *)&data)[1];
((BYTE *)&temp)[3] = ((BYTE *)&data)[0];
return temp;
}
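/* Reads a standard MIDI variable-length quantity: 7 data bits per byte,
   with the high bit set on every byte except the last. */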
static DWORD GetWaitCount(LPBYTE *data)
{
BYTE temp;
DWORD ret = 0;
do{
temp = **data;
++*data;
ret = (ret<<7)+(temp&0x7f);
}while(temp&0x80);
return ret;
}
static void MidFadeIOFunc(void)
{
int track;
if(Mid_Dev.FadeFlag==0) return;
if(Mid_Dev.FadeCount % Mid_Dev.FadeWait == 0){
Mid_Dev.NowVolume += Mid_Dev.FadeFlag;
for(track=0;track<16;track++)
Mid_ShortMsg(0xb0+track,0x07,Mid_VolumeTable[track]*Mid_Dev.NowVolume/(Mid_Dev.MaxVolume+1));
//Mid_Volume(Mid_Dev.NowVolume);
if(Mid_Dev.NowVolume==0 || Mid_Dev.NowVolume==Mid_Dev.MaxVolume){
Mid_Dev.FadeFlag = 0;
Mid_Stop();
}
}
Mid_Dev.FadeCount++;
}
void Mid_LoopFunc(DWORD time)
{
int i;
BOOL flag = FALSE;
MID_TRACK *p;
DWORDLONG now = Mid_Data.playcount2 + (DWORDLONG)Mid_Data.playcount1*Mid_Data.timebase*1000/Mid_Data.tempo;
if(Mid_Dev.mp == NULL) return;
if(Mid_Data.data == NULL) return;
if(Mid_Dev.fnmode != MIDFN_MIDLOOP) return;
Mid_PlayTime += time;
for(i=0;i<Mid_Data.track;i++){
p = &(Mid_Data.data[i]);
if(p->play){
flag = TRUE;
while(p->play && p->count<=now)
Mid_Parse(p);
}
}
Mid_Data.playcount1+=((time*Mid_MulTempo)>>7);
MidFadeIOFunc();
if(!flag){
switch(Mid_Dev.plmode){
case(MIDPL_NORM):Mid_Init(); break;
case(MIDPL_STOP):Mid_Stop(); break;
}
}
}
static void CALLBACK CBMid_TimeFunc(UINT uID,UINT uMsg,DWORD dwUser,DWORD dw1,DWORD dw2)
{
int i;
BOOL flag = FALSE;
MID_TRACK *p;
DWORDLONG now = Mid_Data.playcount2 + (DWORDLONG)Mid_Data.playcount1*Mid_Data.timebase*1000/Mid_Data.tempo;
for(i=0;i<Mid_Data.track;i++){
p = &(Mid_Data.data[i]);
if(p->play){
flag = TRUE;
while(p->play && p->count<=now)
Mid_Parse(p);
}
}
Mid_PlayTime += Mid_Time.delay;
Mid_Data.playcount1+=((Mid_Time.delay*Mid_MulTempo)>>7);
// Mid_Data.nticks = (Mid_Time.delay*1000 + Mid_Data.fticks) / Mid_Data.tempo;
// Mid_Data.fticks = Mid_Time.delay*1000 % Mid_Data.tempo;
// Mid_Data.nticks = (Mid_Data.fticks + Mid_PlayTime * Mid_Data.timebase * 1000) / Mid_Data.tempo;
// Mid_Data.fticks += (Mid_Data.timebase * Mid_PlayTime * 1000) - (Mid_Data.nticks * Mid_Data.tempo);
MidFadeIOFunc();
if(!flag){
switch(Mid_Dev.plmode){
case(MIDPL_NORM):Mid_Init(); break;
case(MIDPL_STOP):Mid_Stop(); break;
}
}
}
static void Mid_Parse(MID_TRACK *track)
{
int i,count,countwork;
BYTE st1,st2;
BYTE data[4] = {0,0,0,0};
st1 = *(track->work);
if(st1<0x80) st1 = track->status;
else track->work++;
st2 = st1 & 0xf0;
switch(st2){
		case(0xf0):		// ? bytes (variable length)
			if(st1 == 0xf0){	// system exclusive
MIDIHDR mh;
countwork = GetWaitCount(&(track->work));
mh.lpData = LocalAlloc(LPTR,countwork+1);
mh.lpData[0] = (BYTE)0xf0;
mh.dwFlags = mh.dwOffset = 0;
mh.dwBufferLength = mh.dwBytesRecorded = countwork+1;
for(i=0;i<countwork;i++)
mh.lpData[i+1] = *(track->work++);
midiOutPrepareHeader(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
midiOutLongMsg(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
midiOutUnprepareHeader(Mid_Dev.mp,&mh,sizeof(MIDIHDR));
LocalFree(mh.lpData);
}
			else{		// control data (handled internally; nothing is sent to the device)
BYTE code = *(track->work++);
countwork = GetWaitCount(&(track->work));
				if(code==0x2f){		// end of track
track->play = FALSE;
return;
}
				else if(code==0x51){	// tempo
Mid_Data.playcount2 += (DWORDLONG)Mid_Data.playcount1*Mid_Data.timebase*1000/Mid_Data.tempo;
Mid_Data.playcount1 = 0;
					Mid_Data.tempo = 0;
					for(i=0;i<countwork;i++)
						Mid_Data.tempo = (Mid_Data.tempo<<8)+(*(track->work++));
					// There is a mysterious line here //
break;
}
				else	// everything else (skip over it)
track->work += countwork;
}
break;
		case(0xb0):		// control change
switch(*(track->work)){
				case(0x07):	// volume
Mid_VolumeTable[st1&0x0f] = *(track->work+1);
break;
				case(0x0a):	// panpot
Mid_PanpodTable[st1&0x0f] = *(track->work+1);
break;
				case(0x0b):	// expression
Mid_ExpressionTable[st1&0x0f] = *(track->work+1);
break;
}
data[0] = st1;
data[1] = *(track->work++);
data[2] = *(track->work++);
midiOutShortMsg(Mid_Dev.mp,*((DWORD *)data));
break;
		case(0x80):		// note off
Mid_NoteTable[st1&0x0f][*(track->work)] = *(track->work+1) = 0;
		case(0x90):case(0xa0):	// 3 bytes : note on or pressure change
if(Mid_PlayTable[st1&0x0f][*(track->work)] < *(track->work+1)){
Mid_PlayTable[st1&0x0f][*(track->work)] = *(track->work+1);
Mid_PlayTable2[st1&0x0f][*(track->work)] = *(track->work+1);
}
//Mid_PlayTable[st1&0x0f][*(track->work)] += *(track->work+1);
//Mid_PlayTable2[st1&0x0f][*(track->work)] += *(track->work+1);
Mid_NoteTable[st1&0x0f][*(track->work)] = *(track->work+1);
if(Mid_NoteTable[st1&0x0f][*(track->work)])
Mid_NoteWTable[st1&0x0f][*(track->work)] = 5;
		case(0xe0):		// 3 bytes
data[0] = st1;
data[1] = *(track->work++);
data[2] = *(track->work++);
midiOutShortMsg(Mid_Dev.mp,*((DWORD *)data));
break;
		case(0xc0):case(0xd0):	// 2 bytes
data[0] = st1;
data[1] = *(track->work++);
midiOutShortMsg(Mid_Dev.mp,*((DWORD *)data));
break;
}
track->status = st1;
count = GetWaitCount(&(track->work));
track->count += count;
}
char *Mid_GetTitle(void)
{
static char temp[1000];
int i;
BYTE *p;
memset(temp,0,1000);
	// For normal files; the occasional odd file will display the wrong string... //
for(i=0;i<Mid_Data.track;i++){
p = Mid_Data.data[i].data;
while(!(p[0]==0xff && p[1]==0x2f && p[2]==0x00)){
if(p[0]==0xff && p[1]==0x03){
memcpy(temp,p+3,p[2]);
return temp;
}
p++;
}
}
	// For files that write what should be the title somewhere else //
for(i=0;i<Mid_Data.track;i++){
p = Mid_Data.data[i].data;
while(!(p[0]==0xff && p[1]==0x2f && p[2]==0x00)){
if(p[0]==0xff && p[1]==0x01){
memcpy(temp,p+3,p[2]);
				return temp;
}
p++;
}
}
return temp;
}
// Get playcount1 //
DWORDLONG Mid_GetPlaycount1(void)
{
return Mid_Data.playcount1;
}
// Get playcount2 //
DWORDLONG Mid_GetPlaycount2(void)
{
return Mid_Data.playcount2;
}
// Get all of the playback state //
void Mid_GetData(MID_DATA *pData)
{
*pData = Mid_Data;
}
| {
"pile_set_name": "Github"
} |
require xorg-app-common.inc
PE = "1"
PR = "${INC_PR}.0"
SRC_URI[archive.md5sum] = "e09c14cf1ac78260f95a9aa45bccab4c"
SRC_URI[archive.sha256sum] = "d6ee7becf9ece2fcc30ce50f7e9be31f231cce0cb3a8ba09d1697ac2822c3fcc"
| {
"pile_set_name": "Github"
} |
include(../common.pri)
TARGET = trimesh_color
SOURCES += trimesh_color.cpp ../../../wrap/ply/plylib.cpp
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!-- generated on 01/08/15 11:15:06 by SUMO Version dev-SVN-r17560
This data file and the accompanying materials
are made available under the terms of the Eclipse Public License v2.0
which accompanies this distribution, and is available at
http://www.eclipse.org/legal/epl-v20.html
SPDX-License-Identifier: EPL-2.0
<configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/sumoConfiguration.xsd">
<input>
<net-file value="three_split.net.xml"/>
<additional-files value="input_routes.rou.xml"/>
</input>
<output>
<tripinfo-output value="tripinfos.xml"/>
<bt-output value="bt.xml"/>
</output>
<time>
<begin value="0"/>
</time>
<report>
<xml-validation value="never"/>
<no-duration-log value="true"/>
<no-step-log value="true"/>
</report>
<communication>
<device.btreceiver.explicit value="left"/>
<device.btreceiver.range value="50"/>
<device.btsender.explicit value="right"/>
</communication>
</configuration>
-->
<bt-output>
<bt id="left">
<seen id="right" tBeg="3.00" observerPosBeg="3.07,498.35" observerSpeedBeg="5.00" observerLaneIDBeg="beg_0" observerLanePosBeg="3.10" seenPosBeg="44.34,498.35" seenSpeedBeg="13.89" seenLaneIDBeg="beg_0" seenLanePosBeg="44.77" tEnd="3.99" observerPosEnd="7.98,498.35" observerSpeedEnd="5.00" observerLaneIDEnd="beg_0" observerLanePosEnd="8.10" seenPosEnd="57.98,498.35" seenSpeedEnd="13.89" seenLaneIDEnd="beg_0" seenLanePosEnd="58.66" observerRoute="beg" seenRoute="beg">
<recognitionPoint t="3.99" observerPos="7.98,498.35" observerSpeed="5.00" observerLaneID="beg_0" observerLanePos="8.10" seenPos="57.98,498.35" seenSpeed="13.89" seenLaneID="beg_0" seenLanePos="58.66"/>
</seen>
</bt>
</bt-output>
| {
"pile_set_name": "Github"
} |
/*
* Generated by class-dump 3.3.4 (64 bit).
*
* class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2011 by Steve Nygard.
*/
#import "NSObject.h"
@interface IDEWindowMenuController : NSObject
{
}
+ (void)_activateXcode;
+ (id)_activeEditorDocumentDictionariesForWorkspaceDocument:(id)arg1 allEditorDocumentDictionaries:(id *)arg2;
+ (id)_tabDictionaryForTabController:(id)arg1 isSelected:(BOOL)arg2;
+ (void)_createAttributedTitlesForItemDictionaries:(id)arg1 includeIconInTitles:(BOOL)arg2;
+ (void)_createDisambiguatedAttributedTitlesForItemDictionaries:(id)arg1 includeIconInTitles:(BOOL)arg2;
+ (id)titleForItemWithURL:(id)arg1 name:(id)arg2 parentLocations:(id)arg3 presentedParentLocationCount:(unsigned long long)arg4 includeIconInTitles:(BOOL)arg5;
+ (id)_iconAsAttributedStringForURL:(id)arg1;
+ (id)_localizedNameForURL:(id)arg1;
+ (id)_parentDirectoryURLForURL:(id)arg1;
+ (id)_nameForURL:(id)arg1;
+ (id)sharedWindowMenuController;
- (void)_showDocumentationWindow:(id)arg1;
- (void)_activateWorkspaceTabController:(id)arg1;
- (id)dockWindowMenuItems;
- (id)windowMenuItems;
- (id)_windowMenuItemsWithIconInTitles:(BOOL)arg1;
- (id)_workspaceDocumentDictionaries;
- (id)_menuItemForWindow:(id)arg1 target:(id)arg2 selector:(SEL)arg3 withPrefix:(id)arg4;
- (id)_menuItemForEditorDocumentDictionary:(id)arg1 withIndentationLevel:(long long)arg2;
@end
| {
"pile_set_name": "Github"
} |
# The MIT License
#
# Copyright (c) 2004-2009, Sun Microsystems, Inc., Kohsuke Kawaguchi, Reginaldo L. Russinholi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
Environment\ Variables=Vari\u00E1veis de Ambiente
| {
"pile_set_name": "Github"
} |
var baseFindIndex = require('./_baseFindIndex'),
baseIteratee = require('./_baseIteratee'),
toInteger = require('./toInteger');
/* Built-in method references for those with the same name as other `lodash` methods. */
var nativeMax = Math.max;
/**
* This method is like `_.find` except that it returns the index of the first
* element `predicate` returns truthy for instead of the element itself.
*
* @static
* @memberOf _
* @since 1.1.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param {number} [fromIndex=0] The index to search from.
* @returns {number} Returns the index of the found element, else `-1`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': false },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': true }
* ];
*
* _.findIndex(users, function(o) { return o.user == 'barney'; });
* // => 0
*
* // The `_.matches` iteratee shorthand.
* _.findIndex(users, { 'user': 'fred', 'active': false });
* // => 1
*
* // The `_.matchesProperty` iteratee shorthand.
* _.findIndex(users, ['active', false]);
* // => 0
*
* // The `_.property` iteratee shorthand.
* _.findIndex(users, 'active');
* // => 2
*/
function findIndex(array, predicate, fromIndex) {
var length = array == null ? 0 : array.length;
if (!length) {
return -1;
}
var index = fromIndex == null ? 0 : toInteger(fromIndex);
if (index < 0) {
index = nativeMax(length + index, 0);
}
return baseFindIndex(array, baseIteratee(predicate, 3), index);
}
module.exports = findIndex;
| {
"pile_set_name": "Github"
} |
#Mon Jul 22 10:53:54 IST 2019
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-5.1.1-all.zip
| {
"pile_set_name": "Github"
} |
{let $foo: $a ? 0 : 1 /}
{let $foo: $a ?: 1 /}
{let $foo: 1 < 2 and 2 <= 3 /}
{let $foo: 1 > 2 or 2 >= 3 /}
{let $foo: 1 == 1 and 1 != 2 /}
{let $foo: ((1 + 2) / 3 * 4) % 5 - 6 /}
{let $foo: not $bar /}
{let $foo kind="text"}
----------------------------------------------------
[
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["punctuation", ":"],
["variable", ["$a"]],
["operator", "?"],
["number", "0"],
["punctuation", ":"],
["number", "1"],
["delimiter", "/}"]
]],
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["punctuation", ":"],
["variable", ["$a"]],
["operator", "?:"],
["number", "1"],
["delimiter", "/}"]
]],
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["punctuation", ":"],
["number", "1"],
["operator", "<"],
["number", "2"],
["operator", "and"],
["number", "2"],
["operator", "<="],
["number", "3"],
["delimiter", "/}"]
]],
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["punctuation", ":"],
["number", "1"],
["operator", ">"],
["number", "2"],
["operator", "or"],
["number", "2"],
["operator", ">="],
["number", "3"],
["delimiter", "/}"]
]],
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["punctuation", ":"],
["number", "1"],
["operator", "=="],
["number", "1"],
["operator", "and"],
["number", "1"],
["operator", "!="],
["number", "2"],
["delimiter", "/}"]
]],
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["punctuation", ":"],
["punctuation", "("],
["punctuation", "("],
["number", "1"],
["operator", "+"],
["number", "2"],
["punctuation", ")"],
["operator", "/"],
["number", "3"],
["operator", "*"],
["number", "4"],
["punctuation", ")"],
["operator", "%"],
["number", "5"],
["operator", "-"],
["number", "6"],
["delimiter", "/}"]
]],
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["punctuation", ":"],
["operator", "not"],
["variable", ["$bar"]],
["delimiter", "/}"]
]],
["soy", [
["delimiter", "{"],
["keyword", "let"],
["variable", ["$foo"]],
["property", "kind"],
["operator", "="],
["string", "\"text\""],
["delimiter", "}"]
]]
]
----------------------------------------------------
Checks for operators. | {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
@import Foundation;
@class HAKTransport, NSArray, NSHashTable, NSString;
@interface HAKPairing : NSObject
{
NSHashTable *_connections;
BOOL _admin;
HAKTransport *_transport;
NSString *_identifier;
NSObject<OS_dispatch_queue> *_workQueue;
}
@property(retain, nonatomic) NSObject<OS_dispatch_queue> *workQueue; // @synthesize workQueue=_workQueue;
@property(readonly, nonatomic, getter=isAdmin) BOOL admin; // @synthesize admin=_admin;
@property(copy, nonatomic) NSString *identifier; // @synthesize identifier=_identifier;
@property(nonatomic) __weak HAKTransport *transport; // @synthesize transport=_transport;
- (id)keychainObject;
- (void)_addConnection:(id)arg1;
- (void)addConnection:(id)arg1;
@property(readonly, nonatomic) NSArray *connections;
- (id)description;
- (BOOL)isEqual:(id)arg1;
- (unsigned long long)hash;
- (id)initWithIdentifier:(id)arg1 admin:(BOOL)arg2;
@end
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"scale" : "1x"
},
{
"idiom" : "universal",
"scale" : "2x"
},
{
"idiom" : "universal",
"filename" : "cc_traffic_refuel_l@3x.png",
"scale" : "3x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
<!--
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.microsoft.azure.digitaltwins.v2020_03_01_preview</groupId>
<parent>
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-arm-parent</artifactId>
<version>1.3.0</version>
<relativePath>../../parents/azure-arm-parent/pom.xml</relativePath>
</parent>
<artifactId>azure-mgmt-digitaltwins</artifactId>
<version>1.0.0-beta</version>
<packaging>jar</packaging>
<name>Microsoft Azure SDK for DigitalTwins Management</name>
<description>This package contains Microsoft DigitalTwins Management SDK.</description>
<url>https://github.com/Azure/azure-sdk-for-java</url>
<licenses>
<license>
<name>The MIT License (MIT)</name>
<url>http://opensource.org/licenses/MIT</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>scm:git:https://github.com/Azure/azure-sdk-for-java</url>
<connection>scm:git:git@github.com:Azure/azure-sdk-for-java.git</connection>
<tag>HEAD</tag>
</scm>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<legal><![CDATA[[INFO] Any downloads listed may be third party software. Microsoft grants you no rights for third party software.]]></legal>
</properties>
<developers>
<developer>
<id>microsoft</id>
<name>Microsoft</name>
</developer>
</developers>
<dependencies>
<dependency>
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-client-runtime</artifactId>
</dependency>
<dependency>
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-arm-client-runtime</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-client-authentication</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-mgmt-resources</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-arm-client-runtime</artifactId>
<type>test-jar</type>
<scope>test</scope>
<!--Below version for test jar needs to be removed, this will be done as part of v1-runtime 1.6.7-->
<version>1.6.5</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<configuration>
<archive>
<manifest>
<addDefaultImplementationEntries>true</addDefaultImplementationEntries>
<addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
</manifest>
</archive>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.1</version>
<configuration>
<source>1.7</source>
<target>1.7</target>
<annotationProcessors>
<annotationProcessor>
com.microsoft.azure.management.apigeneration.LangDefinitionProcessor
</annotationProcessor>
</annotationProcessors>
<debug>true</debug>
<optimize>true</optimize>
<compilerArguments>
<AaddGeneratedAnnotation>true</AaddGeneratedAnnotation>
<Adebug>true</Adebug>
</compilerArguments>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>2.8</version>
<configuration>
<excludePackageNames>*.implementation.*;*.utils.*;com.microsoft.schemas._2003._10.serialization;*.blob.core.search</excludePackageNames>
<bottom>
<![CDATA[<code>
/**
<br />* Copyright (c) Microsoft Corporation. All rights reserved.
<br />* Licensed under the MIT License. See License.txt in the project root for
<br />* license information.
<br />*/
</code>]]>
</bottom>
</configuration>
</plugin>
</plugins>
</build>
</project>
| {
"pile_set_name": "Github"
} |
/*
* linux/arch/m68k/tools/amiga/dmesg.c -- Retrieve the kernel messages stored
* in Chip RAM with the kernel command
* line option `debug=mem'.
*
* © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org>
*
*
* Usage:
*
* dmesg
* dmesg <CHIPMEM_END>
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#define CHIPMEM_START 0x00000000
#define CHIPMEM_END 0x00200000 /* overridden by argv[1] */
#define SAVEKMSG_MAGIC1 0x53415645 /* 'SAVE' */
#define SAVEKMSG_MAGIC2 0x4B4D5347 /* 'KMSG' */
struct savekmsg {
u_long magic1; /* SAVEKMSG_MAGIC1 */
u_long magic2; /* SAVEKMSG_MAGIC2 */
u_long magicptr; /* address of magic1 */
u_long size;
char data[0];
};
int main(int argc, char *argv[])
{
u_long start = CHIPMEM_START, end = CHIPMEM_END, p;
int found = 0;
struct savekmsg *m = NULL;
if (argc >= 2)
end = strtoul(argv[1], NULL, 0);
printf("Searching for SAVEKMSG magic...\n");
for (p = start; p <= end-sizeof(struct savekmsg); p += 4) {
m = (struct savekmsg *)p;
if ((m->magic1 == SAVEKMSG_MAGIC1) && (m->magic2 == SAVEKMSG_MAGIC2) &&
(m->magicptr == p)) {
found = 1;
break;
}
}
if (!found)
printf("Not found\n");
else {
printf("Found %ld bytes at 0x%08lx\n", m->size, (u_long)&m->data);
puts(">>>>>>>>>>>>>>>>>>>>");
fflush(stdout);
write(1, &m->data, m->size);
fflush(stdout);
puts("<<<<<<<<<<<<<<<<<<<<");
}
return(0);
}
| {
"pile_set_name": "Github"
} |
/*
Copyright_License {
XCSoar Glide Computer - http://www.xcsoar.org/
Copyright (C) 2000-2016 The XCSoar Project
A detailed list of copyright holders can be found in the file "AUTHORS".
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
}
*/
#ifndef XCSOAR_ROUGH_DISTANCE_HPP
#define XCSOAR_ROUGH_DISTANCE_HPP
#include <type_traits>
#include <cstdint>
/**
* Store an rough distance value, when the exact value is not needed.
*
* The accuracy is 1m.
*/
class RoughDistance {
uint32_t value;
public:
RoughDistance() = default;
constexpr
RoughDistance(double _value):value(_value) {}
RoughDistance &operator=(double other) {
value = (uint32_t)other;
return *this;
}
constexpr
operator double() const {
return double(value);
}
constexpr
bool operator <(const RoughDistance other) const {
return value < other.value;
}
constexpr
bool operator >(const RoughDistance other) const {
return value > other.value;
}
constexpr
bool IsZero() const {
return value == 0;
}
};
static_assert(std::is_trivial<RoughDistance>::value, "type is not trivial");
#endif
| {
"pile_set_name": "Github"
} |
package task
import (
"errors"
"fmt"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/cmd/swarmctl/common"
"github.com/spf13/cobra"
)
var (
removeCmd = &cobra.Command{
Use: "remove <task ID>",
Short: "Remove a task",
Aliases: []string{"rm"},
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return errors.New("task ID missing")
}
if len(args) > 1 {
return errors.New("remove command takes exactly 1 argument")
}
c, err := common.Dial(cmd)
if err != nil {
return err
}
_, err = c.RemoveTask(common.Context(cmd), &api.RemoveTaskRequest{TaskID: args[0]})
if err != nil {
return err
}
fmt.Println(args[0])
return nil
},
}
)
| {
"pile_set_name": "Github"
} |
/*
* ioremap.c
*
* Support for mapping between dma_addr_t values a phys_addr_t values.
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Author: David VomLehn <dvomlehn@cisco.com>
*
* Description: Defines the platform resources for the SA settop.
*
* NOTE: The bootloader allocates persistent memory at an address which is
* 16 MiB below the end of the highest address in KSEG0. All fixed
* address memory reservations must avoid this region.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/mach-powertv/ioremap.h>
/*
* Define the sizes of and masks for grains in physical and DMA space. The
* values are the same but the types are not.
*/
#define IOR_PHYS_GRAIN ((phys_addr_t) 1 << IOR_LSBITS)
#define IOR_PHYS_GRAIN_MASK (IOR_PHYS_GRAIN - 1)
#define IOR_DMA_GRAIN ((dma_addr_t) 1 << IOR_LSBITS)
#define IOR_DMA_GRAIN_MASK (IOR_DMA_GRAIN - 1)
/*
* Values that, when accessed by an index derived from a phys_addr_t and
* added to phys_addr_t value, yield a DMA address
*/
struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
EXPORT_SYMBOL(_ior_phys_to_dma);
/*
* Values that, when accessed by an index derived from a dma_addr_t and
* added to that dma_addr_t value, yield a physical address
*/
struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
EXPORT_SYMBOL(_ior_dma_to_phys);
/**
* setup_dma_to_phys - set up conversion from DMA to physical addresses
 * @dma:	DMA address of the start of the section of memory
* @delta: Value that, when added to the DMA address, will yield the
* physical address
* @s: Number of bytes in the section of memory with the given delta
* between DMA and physical addresses.
*/
static void setup_dma_to_phys(dma_addr_t dma, phys_addr_t delta, dma_addr_t s)
{
int dma_idx, first_idx, last_idx;
phys_addr_t first, last;
/*
* Calculate the first and last indices, rounding the first up and
* the second down.
*/
first = dma & ~IOR_DMA_GRAIN_MASK;
last = (dma + s - 1) & ~IOR_DMA_GRAIN_MASK;
first_idx = first >> IOR_LSBITS; /* Convert to indices */
last_idx = last >> IOR_LSBITS;
for (dma_idx = first_idx; dma_idx <= last_idx; dma_idx++)
_ior_dma_to_phys[dma_idx].offset = delta >> IOR_DMA_SHIFT;
}
/**
 * setup_phys_to_dma - set up conversion from physical to DMA addresses
 * @phys:	Physical address of the start of the section of memory
 * @delta:	Value that, when added to the physical address, will yield the
 *		DMA address
* @s: Number of bytes in the section of memory with the given delta
* between DMA and physical addresses.
*/
static void setup_phys_to_dma(phys_addr_t phys, dma_addr_t delta, phys_addr_t s)
{
int phys_idx, first_idx, last_idx;
phys_addr_t first, last;
/*
* Calculate the first and last indices, rounding the first up and
* the second down.
*/
first = phys & ~IOR_PHYS_GRAIN_MASK;
last = (phys + s - 1) & ~IOR_PHYS_GRAIN_MASK;
first_idx = first >> IOR_LSBITS; /* Convert to indices */
last_idx = last >> IOR_LSBITS;
for (phys_idx = first_idx; phys_idx <= last_idx; phys_idx++)
_ior_phys_to_dma[phys_idx].offset = delta >> IOR_PHYS_SHIFT;
}
/**
* ioremap_add_map - add to the physical and DMA address conversion arrays
* @phys: Process's view of the address of the start of the memory chunk
* @dma: DMA address of the start of the memory chunk
* @size: Size, in bytes, of the chunk of memory
*
* NOTE: It might be obvious, but the assumption is that all @size bytes have
* the same offset between the physical address and the DMA address.
*/
void ioremap_add_map(phys_addr_t phys, phys_addr_t dma, phys_addr_t size)
{
if (size == 0)
return;
if ((dma & IOR_DMA_GRAIN_MASK) != 0 ||
(phys & IOR_PHYS_GRAIN_MASK) != 0 ||
(size & IOR_PHYS_GRAIN_MASK) != 0)
pr_crit("Memory allocation must be in chunks of 0x%x bytes\n",
IOR_PHYS_GRAIN);
setup_dma_to_phys(dma, phys - dma, size);
setup_phys_to_dma(phys, dma - phys, size);
}
| {
"pile_set_name": "Github"
} |
//---------------------------------------------------------------------------
// This software is Copyright (c) 2015 Embarcadero Technologies, Inc.
// You may only use this software if you are an authorized licensee
// of an Embarcadero developer tools product.
// This software is considered a Redistributable as defined under
// the software license agreement that comes with the Embarcadero Products
// and is subject to that software license agreement.
//---------------------------------------------------------------------------
unit BasicSliderCrank;
interface
uses
Test;
type
TBasicSliderCrank = class(TTest)
public
constructor Create;
class function CreateTest: TTest; static;
end;
implementation
uses
System.Math, Box2D.Common, Box2D.Collision, Box2D.Dynamics, DebugDraw;
{ TBasicSliderCrank }
constructor TBasicSliderCrank.Create;
var
ground, body, prevBody: b2BodyWrapper;
bd: b2BodyDef;
shape: b2PolygonShapeWrapper;
rjd: b2RevoluteJointDef;
pjd: b2PrismaticJointDef;
begin
inherited;
bd := b2BodyDef.Create;
bd.position.&Set(0.0, 17.0);
ground := m_world.CreateBody(@bd);
prevBody := ground;
// Define crank.
shape := b2PolygonShapeWrapper.Create;
shape.SetAsBox(4.0, 1.0);
bd.&type := b2_dynamicBody;
bd.position.&Set(-8.0, 20.0);
body := m_world.CreateBody(@bd);
body.CreateFixture(shape, 2.0);
rjd := b2RevoluteJointDef.Create;
rjd.Initialize(prevBody, body, b2Vec2.Create(-12.0, 20.0));
m_world.CreateJoint(@rjd);
prevBody := body;
// Define connecting rod
shape.SetAsBox(8.0, 1.0);
bd.&type := b2_dynamicBody;
bd.position.&Set(4.0, 20.0);
body := m_world.CreateBody(@bd);
body.CreateFixture(shape, 2.0);
rjd.Initialize(prevBody, body, b2Vec2.Create(-4.0, 20.0));
m_world.CreateJoint(@rjd);
prevBody := body;
// Define piston
shape.SetAsBox(3.0, 3.0);
bd.&type := b2_dynamicBody;
bd.fixedRotation := true;
bd.position.&Set(12.0, 20.0);
body := m_world.CreateBody(@bd);
body.CreateFixture(shape, 2.0);
rjd.Initialize(prevBody, body, b2Vec2.Create(12.0, 20.0));
m_world.CreateJoint(@rjd);
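  // A prismatic joint constrains the piston to slide along the x-axis,
  // completing the slider-crank linkage.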
pjd := b2PrismaticJointDef.Create;
pjd.Initialize(ground, body, b2Vec2.Create(12.0, 17.0), b2Vec2.Create(1.0, 0.0));
m_world.CreateJoint(@pjd);
shape.Destroy;
end;
class function TBasicSliderCrank.CreateTest: TTest;
begin
Result := TBasicSliderCrank.Create;
end;
initialization
RegisterTest(TestEntry.Create('BasicSliderCrank', @TBasicSliderCrank.CreateTest));
end.
| {
"pile_set_name": "Github"
} |
/****************************************************************************
**
** Copyright (C) 2019 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of the documentation of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:FDL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU Free Documentation License Usage
** Alternatively, this file may be used under the terms of the GNU Free
** Documentation License version 1.3 as published by the Free Software
** Foundation and appearing in the file included in the packaging of
** this file. Please review the following information to ensure
** the GNU Free Documentation License version 1.3 requirements
** will be met: https://www.gnu.org/licenses/fdl-1.3.html.
** $QT_END_LICENSE$
**
****************************************************************************/
/*!
\dontdocument (QTestEventLoop QTestData QEventSizeOfChecker QSpontaneKeyEvent
QTestEvent QTestKeyEvent QTestKeyClicksEvent QTestMouseEvent
QTestDelayEvent QMetaTypeId)
*/
| {
"pile_set_name": "Github"
} |
# Import contacts into Microsoft Dynamics CRM
This application uses pre-packaged tools to intelligently connect with Microsoft Dynamics CRM. Based on a simple use case, the application takes a CSV file of contacts and uploads the contact information to an active Dynamics CRM user account. It uses the Anypoint DataWeave Transformer to map and transform data, thereby facilitating quick integration with this Software-as-a-Service (SaaS) provider.
At times, you might find that you need to connect one or more of your organization's on-premises systems with a SaaS such as Microsoft Dynamics. Ideally, these independent systems would talk to each other and share data to enable automation of end-to-end business processes. Use Mule applications to facilitate communication between your on-prem system(s) and Microsoft Dynamics. (Though this use case does not extend as far, you can also use Mule to facilitate communication between SaaS providers).
**Note:** You need to install the Java Cryptography Extension (JCE) to be able to connect to MS Dynamics. [Choose](http://www.oracle.com/technetwork/java/javase/downloads/index.html) the version that matches your Java installation.
### Assumptions ###
This document assumes that you are familiar with Mule and the [Anypoint Studio interface](http://www.mulesoft.org/documentation/display/current/Anypoint+Studio+Essentials). To increase your familiarity with Studio, consider completing one or more [Anypoint Studio Tutorials](http://www.mulesoft.org/documentation/display/current/Basic+Studio+Tutorial). Further, this example assumes that you have a basic understanding of [Mule flows](http://www.mulesoft.org/documentation/display/current/Mule+Application+Architecture), [Mule Global Elements](http://www.mulesoft.org/documentation/display/current/Global+Elements), and [Anypoint DataWeave](https://developer.mulesoft.com/docs/display/current/DataWeave+Reference+Documentation).
This document describes the details of the example within the context of Anypoint Studio, the Mule ESB graphical user interface.
**Note:** Be sure to have the Microsoft Dynamics CRM connector installed in your Anypoint Studio.
### Example Use Case ###
The application accepts a CSV file by polling a local folder at frequent intervals that are set with a Scheduler component. The columns of the CSV file contain this contact information: first name, last name, phone number, and email. These columns are mapped to each of the respective fields in a specific Dynamics CRM account and the rows are uploaded.
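For illustration only (the bundled `contacts.csv` defines the actual layout; the names here match the sample contacts, while the phone numbers and emails are made up), such a file could look like this:

    John,Doe,555-0100,john.doe@example.com
    Jane,Doe,555-0101,jane.doe@example.com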
#### Set Up and Run the Example ####
Complete the following procedure to create and then run this example in your own instance of Anypoint Studio.
1. Create your free trial MS Dynamics account [here](http://www.microsoft.com/en-us/dynamics/crm-free-trial-overview.aspx). Remember your registration data (username, password, and company name) because you will need it to connect to Dynamics.
1. Open the Import Contacts into Microsoft Dynamics project in Anypoint Studio from [Anypoint Exchange](http://www.mulesoft.org/documentation/display/current/Anypoint+Exchange). *Do not run the application*.
1. In your application in Studio, open the `mule.artifact.properties` file.
1. Set the contents of the `Username`, `Password`, and `Organization Service URL` fields to your account-specific values as follows:
Username <USERNAME>@<COMPANY_NAME>.onmicrosoft.com
Password <PASSWORD>
Organization Service Url https://<COMPANY_NAME>.api.crm4.dynamics.com/XRMServices/2011/Organization.svc
1. Set the number of authentication retries, the path to the `contacts.csv` working directory, and the scheduler parameters, as in this example:
working.directory 'C:/import-contacts-into-ms-dynamics/src/main/resources/input'
authenticationRetries '3'
scheduler.frequency '10000'
scheduler.startDelay '0'
**Note:** The URL might differ, depending on your location. Choose an appropriate one from [For Microsoft Dynamics CRM Online](https://msdn.microsoft.com/en-us/library/gg309401.aspx).
1. Click **OK** to save your changes.
1. In the **Package Explorer**, right-click the `connect-with-ms-dynamics` project name, and then select **Run As > Mule Application**. Studio runs the application on the embedded server.
1. In the **Package Explorer**, click the `src/main/resources` folder to expand it, and then find the `contacts.csv` file inside this folder.
1. Click and drag the `contacts.csv` file into an `input` folder in the same directory. The application, at the intervals set in the Scheduler component, checks the input folder for new files. On the next check, it recognizes the CSV file and processes it.
1. In your browser, access your Dynamics account and check your contacts for two new entries:
- John Doe
- Jane Doe
1. Stop the Mule application by clicking the red, square terminate button in the **Console**.
1. Delete the two sample contacts from your MS Dynamics account.
### How it Works ###
Using a single flow with four elements, this application accepts CSV files that contain contact information, and then uploads the contacts to MS Dynamics.
The application, at the intervals set in the Scheduler component, checks the input folder for new files. When it spots a new file, it reads the file and passes the content to the [Anypoint DataWeave transformer](https://developer.mulesoft.com/docs/display/current/DataWeave+Reference+Documentation). This transformer not only converts the format of the data from CSV to a collection, but it automatically maps the input fields from the CSV file to output fields that MS Dynamics uses in a collection. When it has converted all the contacts in the file to a collection of MS Dynamics-friendly data, the application uses a [MS Dynamics Connector](https://www.mulesoft.com/resources/esb/ms-dynamics-integration) to push data into your MS Dynamics account. The connector's configuration specifies exactly how the data is uploaded to MS Dynamics; in this case, it specifies to create new contacts.
### Go Further ###
- Learn more about [Anypoint DataWeave](https://developer.mulesoft.com/docs/display/current/DataWeave+Reference+Documentation).
- Learn more about [File connector](http://www.mulesoft.org/documentation/display/current/File+Connector).
| {
"pile_set_name": "Github"
} |
using System;
using System.Threading;
using System.Threading.Tasks;
using System.Reflection;
using System.Linq;
using System.Collections.Generic;
using System.Diagnostics;
using SampleControl.Entities;
using Uno.UI.Samples.Controls;
using Uno.UI.Samples.Entities;
using System.Runtime.InteropServices.WindowsRuntime;
using System.Globalization;
using Windows.UI.Xaml.Data;
using Windows.ApplicationModel.Core;
using Windows.UI.Core;
using Windows.Storage;
using Uno.Extensions;
using Uno.Logging;
using Microsoft.Extensions.Logging;
using Windows.UI.Xaml;
using System.IO;
using Uno.Disposables;
using System.ComponentModel;
using Uno.UI.Common;
using Microsoft.UI.Xaml.Controls;
#if XAMARIN || NETSTANDARD2_0
using Windows.UI.Xaml.Controls;
#else
using Windows.Graphics.Imaging;
using Windows.Graphics.Display;
using Windows.UI.Xaml.Media;
using Windows.UI;
using Windows.UI.Xaml.Controls;
#endif
namespace SampleControl.Presentation
{
public partial class SampleChooserViewModel : System.ComponentModel.INotifyPropertyChanged
{
private bool _categoriesSelected = true;
private bool _favoritesSelected = false;
private bool _recentsSelected = false;
private bool _searchSelected = false;
private bool _isSplitVisible = true;
private bool _categoryVisibility = true;
private bool _sampleVisibility = false;
private bool _recentsVisibility = false;
private bool _favoritesVisibility = false;
private bool _searchVisibility = false;
private bool _contentVisibility = false;
private bool _isFavoritedSample = false;
private bool _isAnyContentVisible = false;
private bool _contentAttachedToWindow;
private bool _useFluentStyles;
private object _contentPhone = null;
private string _searchTerm = "";
private List<SampleChooserContent> _sampleContents;
private List<SampleChooserContent> _favoriteSamples = new List<SampleChooserContent>();
private SampleChooserCategory _selectedCategory;
private IEnumerable<SampleChooserContent> _recentSamples;
private SampleChooserContent _currentSelectedSample;
private SampleChooserContent _previousSample;
private SampleChooserContent _nextSample;
private SampleChooserContent _selectedLibrarySample;
private SampleChooserContent _selectedRecentSample;
private SampleChooserContent _selectedFavoriteSample;
private SampleChooserContent _selectedSearchSample;
private List<SampleChooserContent> _filteredSamples;
private XamlControlsResources _fluentResources;
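		// Standard INotifyPropertyChanged plumbing: [CallerMemberName] supplies the
		// calling setter's name, so each setter can invoke RaisePropertyChanged() bare.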
private void RaisePropertyChanged([System.Runtime.CompilerServices.CallerMemberName] string propertyName = "")
{
PropertyChanged?.Invoke(this, new System.ComponentModel.PropertyChangedEventArgs(propertyName));
}
//TABS
public bool CategoriesSelected
{
get => _categoriesSelected;
set
{
_categoriesSelected = value;
RaisePropertyChanged();
}
}
public bool FavoritesSelected
{
get => _favoritesSelected;
set
{
_favoritesSelected = value;
RaisePropertyChanged();
}
}
public bool RecentsSelected
{
get => _recentsSelected;
set
{
_recentsSelected = value;
RaisePropertyChanged();
}
}
public bool SearchSelected
{
get => _searchSelected;
set
{
_searchSelected = value;
RaisePropertyChanged();
}
}
public bool IsSplitVisible
{
get => _isSplitVisible;
set
{
_isSplitVisible = value;
RaisePropertyChanged();
}
}
//TABS
public bool CategoryVisibility
{
get => _categoryVisibility;
set
{
_categoryVisibility = value;
RaisePropertyChanged();
}
}
public bool SampleVisibility
{
get => _sampleVisibility;
set
{
_sampleVisibility = value;
RaisePropertyChanged();
}
}
public bool RecentsVisibility
{
get => _recentsVisibility;
set
{
_recentsVisibility = value;
RaisePropertyChanged();
}
}
public bool FavoritesVisibility
{
get => _favoritesVisibility;
set
{
_favoritesVisibility = value;
RaisePropertyChanged();
}
}
public bool SearchVisibility
{
get => _searchVisibility;
set
{
_searchVisibility = value;
RaisePropertyChanged();
}
}
public bool ContentVisibility
{
get => _contentVisibility;
set
{
_contentVisibility = value;
RaisePropertyChanged();
OnContentVisibilityChanged();
}
}
//CONTENTS
public List<SampleChooserCategory> Categories
{
get => _categories;
set
{
_categories = value;
RaisePropertyChanged();
}
}
public List<SampleChooserContent> SampleContents
{
get => _sampleContents;
set
{
_sampleContents = value;
RaisePropertyChanged();
}
}
public List<SampleChooserContent> FavoriteSamples
{
get => _favoriteSamples;
set
{
_favoriteSamples = value;
RaisePropertyChanged();
}
}
public SampleChooserCategory SelectedCategory
{
get => _selectedCategory;
set
{
_selectedCategory = value;
RaisePropertyChanged();
OnSelectedCategoryChanged();
}
}
public IEnumerable<SampleChooserContent> RecentSamples
{
get => _recentSamples;
set
{
_recentSamples = value;
RaisePropertyChanged();
}
}
public SampleChooserContent CurrentSelectedSample
{
get => _currentSelectedSample;
set
{
_currentSelectedSample = value;
RaisePropertyChanged();
(ReloadCurrentTestCommand as DelegateCommand).CanExecuteEnabled = true;
(ShowTestInformationCommand as DelegateCommand).CanExecuteEnabled = true;
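				// Locate the selection within its category so the Previous/Next
				// navigation commands can be enabled or disabled accordingly.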
var currentTextIndex = SelectedCategory?.SamplesContent.IndexOf(value);
// Set Previous
PreviousSample = currentTextIndex == null || currentTextIndex < 1
? null
: SelectedCategory.SamplesContent.Skip((int)currentTextIndex - 1).FirstOrDefault();
// Set Next
NextSample = currentTextIndex == null || currentTextIndex < 0 || currentTextIndex == SelectedCategory.SamplesContent.Count - 1
? null
: SelectedCategory.SamplesContent.Skip((int)currentTextIndex + 1).FirstOrDefault();
}
}
public SampleChooserContent PreviousSample
{
get => _previousSample;
set
{
_previousSample = value;
RaisePropertyChanged();
(LoadPreviousTestCommand as DelegateCommand).CanExecuteEnabled = value != null;
}
}
public SampleChooserContent NextSample
{
get => _nextSample;
set
{
_nextSample = value;
RaisePropertyChanged();
(LoadNextTestCommand as DelegateCommand).CanExecuteEnabled = value != null;
}
}
public SampleChooserContent SelectedLibrarySample
{
get => _selectedLibrarySample;
set
{
_selectedLibrarySample = value;
RaisePropertyChanged();
}
}
public SampleChooserContent SelectedRecentSample
{
get => _selectedRecentSample;
set
{
_selectedRecentSample = value;
RaisePropertyChanged();
}
}
public SampleChooserContent SelectedFavoriteSample
{
get => _selectedFavoriteSample;
set
{
_selectedFavoriteSample = value;
RaisePropertyChanged();
}
}
public SampleChooserContent SelectedSearchSample
{
get => _selectedSearchSample;
set
{
_selectedSearchSample = value;
RaisePropertyChanged();
}
}
public List<SampleChooserContent> FilteredSamples
{
get => _filteredSamples;
set
{
_filteredSamples = value;
RaisePropertyChanged();
}
}
public object ContentPhone
{
get => _contentPhone;
set
{
_contentPhone = value;
RaisePropertyChanged();
}
}
// OTHER
public string SearchTerm
{
get => _searchTerm;
set
{
_searchTerm = value;
RaisePropertyChanged();
}
}
public bool IsFavoritedSample
{
get => _isFavoritedSample;
set
{
_isFavoritedSample = value;
RaisePropertyChanged();
}
}
public bool IsAnyContentVisible
{
get => _isAnyContentVisible;
set
{
_isAnyContentVisible = value;
RaisePropertyChanged();
}
}
/// <summary>
/// Toggling this property will detach and reattach the sample control without destroying or recreating it. Useful for verifying correct behaviour.
/// </summary>
public bool ContentAttachedToWindow
{
get => _contentAttachedToWindow;
set
{
_contentAttachedToWindow = value;
RaisePropertyChanged();
}
}
public bool UseFluentStyles
{
get => _useFluentStyles;
set
{
_useFluentStyles = value;
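				// Lazily create the WinUI resource dictionary and merge or remove it
				// application-wide so every sample picks up (or drops) the Fluent styles.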
if (_useFluentStyles)
{
_fluentResources = _fluentResources ?? new XamlControlsResources();
Application.Current.Resources.MergedDictionaries.Add(_fluentResources);
}
else
{
Application.Current.Resources.MergedDictionaries.Remove(_fluentResources);
}
RaisePropertyChanged();
}
}
public event System.ComponentModel.PropertyChangedEventHandler PropertyChanged;
}
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from btree import BTree, Node, flip_tree
tree = BTree(4,
Node(2,
Node(1),
Node(3)),
Node(7,
Node(6),
Node(9)))
tree.tree_print()
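# flip_tree is assumed to mirror the tree in place (recursively swapping each
# node's left and right children), so the second printout below should be the
# mirror image of the first.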
flip_tree(tree)
print("\nFlipping tree...")
tree.tree_print()
| {
"pile_set_name": "Github"
} |
$:.unshift File.expand_path(File.dirname(__FILE__)) + '/../lib'
require 'cloudkit'
require 'rufus/tokyo/tyrant' # gem install rufus-tokyo
# start Tokyo Tyrant with a table store...
# ttserver data.tct
CloudKit.setup_storage_adapter(Rufus::Tokyo::TyrantTable.new('127.0.0.1', 1978))
use Rack::Session::Pool
use CloudKit::OAuthFilter
use CloudKit::OpenIDFilter
use CloudKit::Service, :collections => [:notes]
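# Rack applies the middleware above outermost-first: OAuth, then OpenID, then
# the CloudKit service; any request none of them handles falls through to the
# endpoint below.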
run lambda{|env| [200, {'Content-Type' => 'text/html', 'Content-Length' => '5'}, ['HELLO']]}
| {
"pile_set_name": "Github"
} |
namespace Nancy.Demo.Authentication
{
public class MainModule : NancyModule
{
public MainModule()
{
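            // Routes are registered in the module constructor; each handler
            // returns a rendered .cshtml view.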
Get("/", args => {
return View["Index.cshtml"];
});
Get("/login", args => {
return View["Login.cshtml", this.Request.Query.returnUrl];
});
}
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<package>
<metadata>
<id>TaskSchedulerEditor</id>
<version>2.9.0</version>
<title>Task Scheduler Managed Wrapper UI Library</title>
<authors>David Hall</authors>
<description>Provides localizable UI elements for editing classes in the TaskScheduler library.</description>
<language>en-US</language>
<copyright>Copyright © 2002-2020</copyright>
<license type="expression">MIT</license>
<projectUrl>https://github.com/dahall/taskscheduler</projectUrl>
<iconUrl>https://github.com/dahall/TaskScheduler/blob/master/docs/icons/tsnew48.png?raw=true</iconUrl>
<tags>task interop windows taskscheduler scheduler UI editor</tags>
<dependencies>
<dependency id="TaskScheduler" version="[2.9.0]" />
<dependency id="AeroWizard" version="[2.2.7]" />
<dependency id="GroupControls" version="[1.8.0]" />
<dependency id="TimeSpan2" version="[2.3.0]" />
<dependency id="Tulpep.ActiveDirectoryObjectPicker" version="[3.0.4]" />
</dependencies>
</metadata>
<files>
<file src="bin\Release\net20\Microsoft.Win32.TaskSchedulerEditor.dll" target="lib\net20" />
<file src="bin\Release\net20\Microsoft.Win32.TaskSchedulerEditor.xml" target="lib\net20" />
<file src="bin\Release\net20\**\Microsoft.Win32.TaskSchedulerEditor.resources.dll" target="lib\net20" />
<file src="bin\Release\net35\Microsoft.Win32.TaskSchedulerEditor.dll" target="lib\net35" />
<file src="bin\Release\net35\Microsoft.Win32.TaskSchedulerEditor.xml" target="lib\net35" />
<file src="bin\Release\net35\**\Microsoft.Win32.TaskSchedulerEditor.resources.dll" target="lib\net35" />
<file src="bin\Release\net40\Microsoft.Win32.TaskSchedulerEditor.dll" target="lib\net40" />
<file src="bin\Release\net40\Microsoft.Win32.TaskSchedulerEditor.xml" target="lib\net40" />
<file src="bin\Release\net40\**\Microsoft.Win32.TaskSchedulerEditor.resources.dll" target="lib\net40" />
<file src="bin\Release\net452\Microsoft.Win32.TaskSchedulerEditor.dll" target="lib\net452" />
<file src="bin\Release\net452\Microsoft.Win32.TaskSchedulerEditor.xml" target="lib\net452" />
<file src="bin\Release\net452\**\Microsoft.Win32.TaskSchedulerEditor.resources.dll" target="lib\net452" />
<file src="*.cs" target="src" />
<file src="EditorProperties\*.cs" target="src\EditorProperties" />
<file src="Native\*.cs" target="src\Native" />
<file src="OptionPanels\*.cs" target="src\OptionPanels" />
<file src="UIComponents\*.cs" target="src\UIComponents" />
</files>
</package>
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
*/
/*
* objtool orc:
*
* This command analyzes a .o file and adds .orc_unwind and .orc_unwind_ip
 * sections to it, which are used by the in-kernel ORC unwinder.
*
* This command is a superset of "objtool check".
*/
#include <string.h>
#include "builtin.h"
#include "objtool.h"
static const char *orc_usage[] = {
"objtool orc generate [<options>] file.o",
"objtool orc dump file.o",
NULL,
};
int cmd_orc(int argc, const char **argv)
{
const char *objname;
argc--; argv++;
if (argc <= 0)
usage_with_options(orc_usage, check_options);
if (!strncmp(argv[0], "gen", 3)) {
argc = parse_options(argc, argv, check_options, orc_usage, 0);
if (argc != 1)
usage_with_options(orc_usage, check_options);
objname = argv[0];
return check(objname, true);
}
if (!strcmp(argv[0], "dump")) {
if (argc != 2)
usage_with_options(orc_usage, check_options);
objname = argv[1];
return orc_dump(objname);
}
usage_with_options(orc_usage, check_options);
return 0;
}
| {
"pile_set_name": "Github"
} |
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/**
Eric D Vaughan
A frame that can have multiple children. Only one child may be displayed at a
time, so the children can be flipped through like a deck of cards.
**/
#ifndef nsStackLayout_h___
#define nsStackLayout_h___
#include "mozilla/Attributes.h"
#include "nsBoxLayout.h"
#include "nsCOMPtr.h"
#include "nsCoord.h"
class nsIPresShell;
nsresult NS_NewStackLayout(nsCOMPtr<nsBoxLayout>& aNewLayout);
class nsStackLayout : public nsBoxLayout
{
public:
friend nsresult NS_NewStackLayout(nsCOMPtr<nsBoxLayout>& aNewLayout);
static void Shutdown();
nsStackLayout();
NS_IMETHOD XULLayout(nsIFrame* aBox, nsBoxLayoutState& aState) override;
virtual nsSize GetXULPrefSize(nsIFrame* aBox, nsBoxLayoutState& aBoxLayoutState) override;
virtual nsSize GetXULMinSize(nsIFrame* aBox, nsBoxLayoutState& aBoxLayoutState) override;
virtual nsSize GetXULMaxSize(nsIFrame* aBox, nsBoxLayoutState& aBoxLayoutState) override;
virtual nscoord GetAscent(nsIFrame* aBox, nsBoxLayoutState& aBoxLayoutState) override;
// get the child offsets for aChild and set them in aMargin. Returns a
// bitfield mask of the SPECIFIED_LEFT, SPECIFIED_RIGHT, SPECIFIED_TOP and
// SPECIFIED_BOTTOM offsets indicating which sides have been specified by
// attributes.
static uint8_t GetOffset(nsIFrame* aChild, nsMargin& aMargin);
private:
static nsBoxLayout* gInstance;
}; // class nsStackLayout
#endif
| {
"pile_set_name": "Github"
} |
#include "inspector_io.h"
#include "inspector_socket_server.h"
#include "inspector/main_thread_interface.h"
#include "inspector/node_string.h"
#include "base_object-inl.h"
#include "debug_utils-inl.h"
#include "node.h"
#include "node_crypto.h"
#include "node_internals.h"
#include "node_mutex.h"
#include "v8-inspector.h"
#include "util-inl.h"
#include "zlib.h"
#include <deque>
#include <cstring>
#include <vector>
namespace node {
namespace inspector {
namespace {
using v8_inspector::StringBuffer;
using v8_inspector::StringView;
// kKill closes connections and stops the server, kStop only stops the server
enum class TransportAction { kKill, kSendMessage, kStop };
std::string ScriptPath(uv_loop_t* loop, const std::string& script_name) {
std::string script_path;
if (!script_name.empty()) {
uv_fs_t req;
req.ptr = nullptr;
if (0 == uv_fs_realpath(loop, &req, script_name.c_str(), nullptr)) {
CHECK_NOT_NULL(req.ptr);
script_path = std::string(static_cast<char*>(req.ptr));
}
uv_fs_req_cleanup(&req);
}
return script_path;
}
// UUID RFC: https://www.ietf.org/rfc/rfc4122.txt
// Used ver 4 - with numbers
std::string GenerateID() {
uint16_t buffer[8];
CHECK(crypto::EntropySource(reinterpret_cast<unsigned char*>(buffer),
sizeof(buffer)));
char uuid[256];
snprintf(uuid, sizeof(uuid), "%04x%04x-%04x-%04x-%04x-%04x%04x%04x",
           buffer[0], // time_low
           buffer[1], // time_low
           buffer[2], // time_mid
           (buffer[3] & 0x0fff) | 0x4000, // time_hi_and_version
(buffer[4] & 0x3fff) | 0x8000, // clk_seq_hi clk_seq_low
buffer[5], // node
buffer[6],
buffer[7]);
return uuid;
}
class RequestToServer {
public:
RequestToServer(TransportAction action,
int session_id,
std::unique_ptr<v8_inspector::StringBuffer> message)
: action_(action),
session_id_(session_id),
message_(std::move(message)) {}
void Dispatch(InspectorSocketServer* server) const {
switch (action_) {
case TransportAction::kKill:
server->TerminateConnections();
// Fallthrough
case TransportAction::kStop:
server->Stop();
break;
case TransportAction::kSendMessage:
server->Send(
session_id_,
protocol::StringUtil::StringViewToUtf8(message_->string()));
break;
}
}
private:
TransportAction action_;
int session_id_;
std::unique_ptr<v8_inspector::StringBuffer> message_;
};
class RequestQueueData {
public:
using MessageQueue = std::deque<RequestToServer>;
explicit RequestQueueData(uv_loop_t* loop)
: handle_(std::make_shared<RequestQueue>(this)) {
int err = uv_async_init(loop, &async_, [](uv_async_t* async) {
RequestQueueData* wrapper =
node::ContainerOf(&RequestQueueData::async_, async);
wrapper->DoDispatch();
});
CHECK_EQ(0, err);
}
static void CloseAndFree(RequestQueueData* queue);
void Post(int session_id,
TransportAction action,
std::unique_ptr<StringBuffer> message) {
Mutex::ScopedLock scoped_lock(state_lock_);
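    // Wake the event loop only when the queue transitions from empty to
    // non-empty; later posts coalesce into that single uv_async_send, which
    // is safe because DoDispatch drains the whole queue.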
bool notify = messages_.empty();
messages_.emplace_back(action, session_id, std::move(message));
if (notify) {
CHECK_EQ(0, uv_async_send(&async_));
incoming_message_cond_.Broadcast(scoped_lock);
}
}
void Wait() {
Mutex::ScopedLock scoped_lock(state_lock_);
if (messages_.empty()) {
incoming_message_cond_.Wait(scoped_lock);
}
}
void SetServer(InspectorSocketServer* server) {
server_ = server;
}
std::shared_ptr<RequestQueue> handle() {
return handle_;
}
private:
~RequestQueueData() = default;
MessageQueue GetMessages() {
Mutex::ScopedLock scoped_lock(state_lock_);
MessageQueue messages;
messages_.swap(messages);
return messages;
}
void DoDispatch() {
if (server_ == nullptr)
return;
for (const auto& request : GetMessages()) {
request.Dispatch(server_);
}
}
std::shared_ptr<RequestQueue> handle_;
uv_async_t async_;
InspectorSocketServer* server_ = nullptr;
MessageQueue messages_;
Mutex state_lock_; // Locked before mutating the queue.
ConditionVariable incoming_message_cond_;
};
} // namespace
class RequestQueue {
public:
explicit RequestQueue(RequestQueueData* data) : data_(data) {}
void Reset() {
Mutex::ScopedLock scoped_lock(lock_);
data_ = nullptr;
}
void Post(int session_id,
TransportAction action,
std::unique_ptr<StringBuffer> message) {
Mutex::ScopedLock scoped_lock(lock_);
if (data_ != nullptr)
data_->Post(session_id, action, std::move(message));
}
bool Expired() {
Mutex::ScopedLock scoped_lock(lock_);
return data_ == nullptr;
}
private:
RequestQueueData* data_;
Mutex lock_;
};
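// Forwards messages produced by an inspector session on the main thread onto
// the IO thread's request queue, tagged with the owning WebSocket session id.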
class IoSessionDelegate : public InspectorSessionDelegate {
public:
explicit IoSessionDelegate(std::shared_ptr<RequestQueue> queue, int id)
: request_queue_(queue), id_(id) { }
void SendMessageToFrontend(const v8_inspector::StringView& message) override {
request_queue_->Post(id_, TransportAction::kSendMessage,
StringBuffer::create(message));
}
private:
std::shared_ptr<RequestQueue> request_queue_;
int id_;
};
// Passed to InspectorSocketServer to handle WS inspector protocol events,
// mostly session start, message received, and session end.
class InspectorIoDelegate: public node::inspector::SocketServerDelegate {
public:
InspectorIoDelegate(std::shared_ptr<RequestQueueData> queue,
                      std::shared_ptr<MainThreadHandle> main_thread,
const std::string& target_id,
const std::string& script_path,
const std::string& script_name);
~InspectorIoDelegate() override = default;
void StartSession(int session_id, const std::string& target_id) override;
void MessageReceived(int session_id, const std::string& message) override;
void EndSession(int session_id) override;
std::vector<std::string> GetTargetIds() override;
std::string GetTargetTitle(const std::string& id) override;
std::string GetTargetUrl(const std::string& id) override;
void AssignServer(InspectorSocketServer* server) override {
request_queue_->SetServer(server);
}
private:
std::shared_ptr<RequestQueueData> request_queue_;
std::shared_ptr<MainThreadHandle> main_thread_;
std::unordered_map<int, std::unique_ptr<InspectorSession>> sessions_;
const std::string script_name_;
const std::string script_path_;
const std::string target_id_;
};
// static
std::unique_ptr<InspectorIo> InspectorIo::Start(
std::shared_ptr<MainThreadHandle> main_thread,
const std::string& path,
std::shared_ptr<ExclusiveAccess<HostPort>> host_port,
const InspectPublishUid& inspect_publish_uid) {
auto io = std::unique_ptr<InspectorIo>(
new InspectorIo(main_thread,
path,
host_port,
inspect_publish_uid));
if (io->request_queue_->Expired()) { // Thread is not running
return nullptr;
}
return io;
}
InspectorIo::InspectorIo(std::shared_ptr<MainThreadHandle> main_thread,
const std::string& path,
std::shared_ptr<ExclusiveAccess<HostPort>> host_port,
const InspectPublishUid& inspect_publish_uid)
: main_thread_(main_thread),
host_port_(host_port),
inspect_publish_uid_(inspect_publish_uid),
thread_(),
script_name_(path),
id_(GenerateID()) {
Mutex::ScopedLock scoped_lock(thread_start_lock_);
CHECK_EQ(uv_thread_create(&thread_, InspectorIo::ThreadMain, this), 0);
thread_start_condition_.Wait(scoped_lock);
}
InspectorIo::~InspectorIo() {
request_queue_->Post(0, TransportAction::kKill, nullptr);
int err = uv_thread_join(&thread_);
CHECK_EQ(err, 0);
}
void InspectorIo::StopAcceptingNewConnections() {
request_queue_->Post(0, TransportAction::kStop, nullptr);
}
// static
void InspectorIo::ThreadMain(void* io) {
static_cast<InspectorIo*>(io)->ThreadMain();
}
void InspectorIo::ThreadMain() {
uv_loop_t loop;
loop.data = nullptr;
int err = uv_loop_init(&loop);
CHECK_EQ(err, 0);
std::shared_ptr<RequestQueueData> queue(new RequestQueueData(&loop),
RequestQueueData::CloseAndFree);
std::string script_path = ScriptPath(&loop, script_name_);
std::unique_ptr<InspectorIoDelegate> delegate(
new InspectorIoDelegate(queue, main_thread_, id_,
script_path, script_name_));
std::string host;
int port;
{
ExclusiveAccess<HostPort>::Scoped host_port(host_port_);
host = host_port->host();
port = host_port->port();
}
InspectorSocketServer server(std::move(delegate),
&loop,
std::move(host),
port,
inspect_publish_uid_);
request_queue_ = queue->handle();
// Its lifetime is now that of the server delegate
queue.reset();
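  // Unblock the constructor, which is waiting on thread_start_condition_,
  // once the server has either started (recording the real port) or failed.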
{
Mutex::ScopedLock scoped_lock(thread_start_lock_);
if (server.Start()) {
ExclusiveAccess<HostPort>::Scoped host_port(host_port_);
host_port->set_port(server.Port());
}
thread_start_condition_.Broadcast(scoped_lock);
}
uv_run(&loop, UV_RUN_DEFAULT);
CheckedUvLoopClose(&loop);
}
std::string InspectorIo::GetWsUrl() const {
ExclusiveAccess<HostPort>::Scoped host_port(host_port_);
return FormatWsAddress(host_port->host(), host_port->port(), id_, true);
}
InspectorIoDelegate::InspectorIoDelegate(
std::shared_ptr<RequestQueueData> queue,
std::shared_ptr<MainThreadHandle> main_thread,
const std::string& target_id,
const std::string& script_path,
const std::string& script_name)
: request_queue_(queue), main_thread_(main_thread),
script_name_(script_name), script_path_(script_path),
target_id_(target_id) {}
void InspectorIoDelegate::StartSession(int session_id,
const std::string& target_id) {
auto session = main_thread_->Connect(
std::unique_ptr<InspectorSessionDelegate>(
new IoSessionDelegate(request_queue_->handle(), session_id)), true);
if (session) {
sessions_[session_id] = std::move(session);
fprintf(stderr, "Debugger attached.\n");
}
}
void InspectorIoDelegate::MessageReceived(int session_id,
const std::string& message) {
auto session = sessions_.find(session_id);
if (session != sessions_.end())
session->second->Dispatch(Utf8ToStringView(message)->string());
}
void InspectorIoDelegate::EndSession(int session_id) {
sessions_.erase(session_id);
}
std::vector<std::string> InspectorIoDelegate::GetTargetIds() {
return { target_id_ };
}
std::string InspectorIoDelegate::GetTargetTitle(const std::string& id) {
return script_name_.empty() ? GetHumanReadableProcessName() : script_name_;
}
std::string InspectorIoDelegate::GetTargetUrl(const std::string& id) {
return "file://" + script_path_;
}
// static
void RequestQueueData::CloseAndFree(RequestQueueData* queue) {
queue->handle_->Reset();
queue->handle_.reset();
uv_close(reinterpret_cast<uv_handle_t*>(&queue->async_),
[](uv_handle_t* handle) {
uv_async_t* async = reinterpret_cast<uv_async_t*>(handle);
RequestQueueData* wrapper =
node::ContainerOf(&RequestQueueData::async_, async);
delete wrapper;
});
}
} // namespace inspector
} // namespace node
| {
"pile_set_name": "Github"
} |
/*
* [The "BSD licence"]
* Copyright (c) 2013-2015 Dandelion
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Dandelion nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.github.dandelion.datatables.thymeleaf.processor.attr;
import java.util.Map;
import org.thymeleaf.Arguments;
import org.thymeleaf.dom.Element;
import org.thymeleaf.processor.IAttributeNameProcessorMatcher;
import com.github.dandelion.core.option.Option;
import com.github.dandelion.datatables.core.option.DatatableOptions;
import com.github.dandelion.datatables.thymeleaf.dialect.DataTablesDialect;
import com.github.dandelion.datatables.thymeleaf.processor.AbstractTableAttrProcessor;
import com.github.dandelion.datatables.thymeleaf.util.AttributeUtils;
/**
* <p>
* Attribute processor applied to the {@code table} and associated with the
* {@link DatatableOptions#EXPORT_CONTAINER_CLASS} option.
* </p>
*
* @author Thibault Duchateau
* @since 0.10.0
*/
public class TableExportContainerClassAttrProcessor extends AbstractTableAttrProcessor {
public TableExportContainerClassAttrProcessor(IAttributeNameProcessorMatcher matcher) {
super(matcher);
}
@Override
public int getPrecedence() {
return DataTablesDialect.DT_DEFAULT_PRECEDENCE;
}
@Override
protected void doProcessAttribute(Arguments arguments, Element element, String attributeName,
Map<Option<?>, Object> stagingOptions) {
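		// Resolve the attribute value from the element and stage it under the
		// EXPORT_CONTAINER_CLASS option; it is applied to the table configuration
		// later in the processing lifecycle.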
String attrValue = AttributeUtils.parseStringAttribute(arguments, element, attributeName);
stagingOptions.put(DatatableOptions.EXPORT_CONTAINER_CLASS, attrValue);
}
}
| {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Generic;
using System.Text;
namespace LogExpert
{
/// <summary>
    /// Interface to be implemented by tool windows that are shared across multiple log files.
    /// The implementor is called whenever the current log file changes, so it can draw new content
    /// according to the currently active log file.
/// </summary>
internal interface ISharedToolWindow
{
#region Public methods
/// <summary>
/// Called when a file becomes the active file (e.g. when user selects a tab).
/// </summary>
/// <param name="ctx"></param>
void SetCurrentFile(FileViewContext ctx);
/// <summary>
/// Called whenever the current file has been changed.
/// </summary>
void FileChanged();
void SetColumnizer(ILogLineColumnizer columnizer);
void PreferencesChanged(Preferences newPreferences, bool isLoadTime, SettingsFlags flags);
#endregion
}
}
| {
"pile_set_name": "Github"
} |