text
stringlengths 2
99.9k
| meta
dict |
---|---|
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/hfsplus/xattr.c
*
* Vyacheslav Dubeyko <slava@dubeyko.com>
*
* Logic of processing extended attributes
*/
#include "hfsplus_fs.h"
#include <linux/posix_acl_xattr.h>
#include <linux/nls.h>
#include "xattr.h"
#include "acl.h"
static int hfsplus_removexattr(struct inode *inode, const char *name);
/*
 * Table of xattr namespace handlers exported to the VFS.
 *
 * NOTE(review): the "osx" handler is listed first so that unprefixed
 * on-disk attribute names are matched before the standard namespaces;
 * the order looks intentional — confirm before reordering.
 */
const struct xattr_handler *hfsplus_xattr_handlers[] = {
	&hfsplus_xattr_osx_handler,
	&hfsplus_xattr_user_handler,
	&hfsplus_xattr_trusted_handler,
#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&hfsplus_xattr_security_handler,
	NULL
};
/*
 * Compare @name against the reserved Finder-info xattr name.
 *
 * Returns 0 on an exact match (strncmp() semantics, the terminating NUL
 * is included in the comparison); non-zero otherwise.  A NULL name is
 * treated as "no match" and yields -1.
 */
static int strcmp_xattr_finder_info(const char *name)
{
	if (!name)
		return -1;

	return strncmp(name, HFSPLUS_XATTR_FINDER_INFO_NAME,
		       sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME));
}
/*
 * Compare @name against the on-disk ACL xattr name.
 *
 * Returns 0 on an exact match (NUL included via sizeof), non-zero on a
 * mismatch, and -1 for a NULL name.
 */
static int strcmp_xattr_acl(const char *name)
{
	if (!name)
		return -1;

	return strncmp(name, HFSPLUS_XATTR_ACL_NAME,
		       sizeof(HFSPLUS_XATTR_ACL_NAME));
}
/*
 * Return true when @name starts with one of the standard xattr
 * namespace prefixes (system., user., security., trusted.).
 * Anything else is treated as an unprefixed OS X style name.
 */
static bool is_known_namespace(const char *name)
{
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return true;
	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
		return true;
	if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		return true;
	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
		return true;

	return false;
}
/*
 * hfsplus_init_header_node - build the header node of a fresh attributes
 * B-tree in @buf.
 *
 * @attr_file:  inode of the (already extended) attributes file; its size
 *              determines the total node count.
 * @clump_size: clump size to record in the header record.
 * @buf:        caller-supplied zeroed buffer of @node_size bytes.
 * @node_size:  B-tree node size in bytes.
 *
 * Lays out the node descriptor, the header record, the reserved user-data
 * record and the allocation-bitmap record.  Record offsets are written at
 * the end of the node, growing downwards.
 */
static void hfsplus_init_header_node(struct inode *attr_file,
					u32 clump_size,
					char *buf, u16 node_size)
{
	struct hfs_bnode_desc *desc;
	struct hfs_btree_header_rec *head;
	u16 offset;
	__be16 *rec_offsets;
	u32 hdr_node_map_rec_bits;
	char *bmp;
	u32 used_nodes;
	u32 used_bmp_bytes;
	u64 tmp;

	hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %u\n",
		clump_size, node_size);

	/* The end of the node contains the list of record offsets. */
	rec_offsets = (__be16 *)(buf + node_size);

	desc = (struct hfs_bnode_desc *)buf;
	desc->type = HFS_NODE_HEADER;
	desc->num_recs = cpu_to_be16(HFSPLUS_BTREE_HDR_NODE_RECS_COUNT);
	offset = sizeof(struct hfs_bnode_desc);
	*--rec_offsets = cpu_to_be16(offset);

	/* Record 1: the B-tree header record itself. */
	head = (struct hfs_btree_header_rec *)(buf + offset);
	head->node_size = cpu_to_be16(node_size);
	tmp = i_size_read(attr_file);
	do_div(tmp, node_size);
	head->node_count = cpu_to_be32(tmp);
	/* One node — this header node — is always in use. */
	head->free_nodes = cpu_to_be32(be32_to_cpu(head->node_count) - 1);
	head->clump_size = cpu_to_be32(clump_size);
	head->attributes |= cpu_to_be32(HFS_TREE_BIGKEYS | HFS_TREE_VARIDXKEYS);
	head->max_key_len = cpu_to_be16(HFSPLUS_ATTR_KEYLEN - sizeof(u16));
	offset += sizeof(struct hfs_btree_header_rec);
	*--rec_offsets = cpu_to_be16(offset);

	/* Record 2: reserved user-data area. */
	offset += HFSPLUS_BTREE_HDR_USER_BYTES;
	*--rec_offsets = cpu_to_be16(offset);

	/* Record 3: allocation bitmap — it gets the space left in the node. */
	hdr_node_map_rec_bits = 8 * (node_size - offset - (4 * sizeof(u16)));
	if (be32_to_cpu(head->node_count) > hdr_node_map_rec_bits) {
		u32 map_node_bits;
		u32 map_nodes;

		/*
		 * The bitmap does not fit into the header node: chain
		 * additional map nodes and account them as used.
		 */
		desc->next = cpu_to_be32(be32_to_cpu(head->leaf_tail) + 1);
		map_node_bits = 8 * (node_size - sizeof(struct hfs_bnode_desc) -
					(2 * sizeof(u16)) - 2);
		map_nodes = (be32_to_cpu(head->node_count) -
				hdr_node_map_rec_bits +
				(map_node_bits - 1)) / map_node_bits;
		be32_add_cpu(&head->free_nodes, 0 - map_nodes);
	}

	/* Mark used nodes in the bitmap: full bytes first, then the tail. */
	bmp = buf + offset;
	used_nodes =
		be32_to_cpu(head->node_count) - be32_to_cpu(head->free_nodes);
	used_bmp_bytes = used_nodes / 8;
	if (used_bmp_bytes) {
		memset(bmp, 0xFF, used_bmp_bytes);
		bmp += used_bmp_bytes;
		used_nodes %= 8;
	}
	/* Partial last byte: high (used_nodes) bits set. */
	*bmp = ~(0xFF >> used_nodes);
	offset += hdr_node_map_rec_bits / 8;
	*--rec_offsets = cpu_to_be16(offset);
}
/*
 * hfsplus_create_attributes_file - lazily create the volume's attributes
 * B-tree file on the first setxattr.
 *
 * The per-superblock attr_tree_state atomic serializes concurrent
 * creators: exactly one thread moves EMPTY -> CREATING and does the work;
 * the final state (VALID / EMPTY-on-ENOSPC / FAILED) is published at the
 * end.  Returns 0 on success or a negative errno.
 */
static int hfsplus_create_attributes_file(struct super_block *sb)
{
	int err = 0;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *attr_file;
	struct hfsplus_inode_info *hip;
	u32 clump_size;
	u16 node_size = HFSPLUS_ATTR_TREE_NODE_SIZE;
	char *buf;
	int index, written;
	struct address_space *mapping;
	struct page *page;
	int old_state = HFSPLUS_EMPTY_ATTR_TREE;

	hfs_dbg(ATTR_MOD, "create_attr_file: ino %d\n", HFSPLUS_ATTR_CNID);

check_attr_tree_state_again:
	switch (atomic_read(&sbi->attr_tree_state)) {
	case HFSPLUS_EMPTY_ATTR_TREE:
		/* Race to become the creator; losers re-read the state. */
		if (old_state != atomic_cmpxchg(&sbi->attr_tree_state,
						old_state,
						HFSPLUS_CREATING_ATTR_TREE))
			goto check_attr_tree_state_again;
		break;
	case HFSPLUS_CREATING_ATTR_TREE:
		/*
		 * Another thread is creating the AttributesFile right now.
		 * In practice this should not be observed: __setxattr()
		 * first calls hfs_find_init(), which takes the CatalogFile
		 * B-tree mutex, so concurrent setters serialize there.
		 * If the code ever changes, -EAGAIN is returned here: the
		 * first setxattr attempt fails, but a retry will succeed.
		 */
		return -EAGAIN;
	case HFSPLUS_VALID_ATTR_TREE:
		return 0;
	case HFSPLUS_FAILED_ATTR_TREE:
		return -EOPNOTSUPP;
	default:
		BUG();
	}

	attr_file = hfsplus_iget(sb, HFSPLUS_ATTR_CNID);
	if (IS_ERR(attr_file)) {
		pr_err("failed to load attributes file\n");
		return PTR_ERR(attr_file);
	}
	BUG_ON(i_size_read(attr_file) != 0);

	hip = HFSPLUS_I(attr_file);

	clump_size = hfsplus_calc_btree_clump_size(sb->s_blocksize,
						   node_size,
						   sbi->sect_count,
						   HFSPLUS_ATTR_CNID);

	mutex_lock(&hip->extents_lock);
	hip->clump_blocks = clump_size >> sbi->alloc_blksz_shift;
	mutex_unlock(&hip->extents_lock);

	/* Keep some headroom: require twice the clump in free blocks. */
	if (sbi->free_blocks <= (hip->clump_blocks << 1)) {
		err = -ENOSPC;
		goto end_attr_file_creation;
	}

	/* Grow the file until the first clump is fully allocated. */
	while (hip->alloc_blocks < hip->clump_blocks) {
		err = hfsplus_file_extend(attr_file, false);
		if (unlikely(err)) {
			pr_err("failed to extend attributes file\n");
			goto end_attr_file_creation;
		}
		hip->phys_size = attr_file->i_size =
			(loff_t)hip->alloc_blocks << sbi->alloc_blksz_shift;
		hip->fs_blocks = hip->alloc_blocks << sbi->fs_shift;
		inode_set_bytes(attr_file, attr_file->i_size);
	}

	buf = kzalloc(node_size, GFP_NOFS);
	if (!buf) {
		pr_err("failed to allocate memory for header node\n");
		err = -ENOMEM;
		goto end_attr_file_creation;
	}

	hfsplus_init_header_node(attr_file, clump_size, buf, node_size);

	/* Copy the header node into the attribute file's page cache. */
	mapping = attr_file->i_mapping;

	index = 0;
	written = 0;
	for (; written < node_size; index++, written += PAGE_SIZE) {
		void *kaddr;

		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed_header_node_init;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr, buf + written,
			min_t(size_t, PAGE_SIZE, node_size - written));
		kunmap_atomic(kaddr);

		set_page_dirty(page);
		put_page(page);
	}

	hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);

	/*
	 * NOTE(review): if hfs_btree_open() fails, err is still 0 here, so
	 * the state below is published as VALID while sbi->attr_tree stays
	 * NULL — callers do re-check attr_tree, but confirm this is the
	 * intended recovery behavior.
	 */
	sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
	if (!sbi->attr_tree)
		pr_err("failed to load attributes file\n");

failed_header_node_init:
	kfree(buf);

end_attr_file_creation:
	iput(attr_file);

	if (!err)
		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
	else if (err == -ENOSPC)
		atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
	else
		atomic_set(&sbi->attr_tree_state, HFSPLUS_FAILED_ATTR_TREE);

	return err;
}
/*
 * __hfsplus_setxattr - set an extended attribute by its full on-disk name.
 *
 * @inode: target inode; only regular files and directories are supported.
 * @name:  complete xattr name including any namespace prefix.
 * @value: attribute payload; NULL means "remove this attribute".
 * @size:  payload length in bytes.
 * @flags: XATTR_CREATE / XATTR_REPLACE semantics from the VFS.
 *
 * The reserved Finder-info attribute is stored inside the inode's catalog
 * record; all other attributes live in the attributes B-tree, which is
 * created lazily on first use.  On success the catalog record's
 * HFSPLUS_XATTR_EXISTS (and possibly HFSPLUS_ACL_EXISTS) flag is set.
 * Returns 0 or a negative errno.
 */
int __hfsplus_setxattr(struct inode *inode, const char *name,
			const void *value, size_t size, int flags)
{
	int err = 0;
	struct hfs_find_data cat_fd;
	hfsplus_cat_entry entry;
	u16 cat_entry_flags, cat_entry_type;
	u16 folder_finderinfo_len = sizeof(struct DInfo) +
					sizeof(struct DXInfo);
	u16 file_finderinfo_len = sizeof(struct FInfo) +
					sizeof(struct FXInfo);

	if ((!S_ISREG(inode->i_mode) &&
			!S_ISDIR(inode->i_mode)) ||
				HFSPLUS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	/* NULL value is the VFS convention for attribute removal. */
	if (value == NULL)
		return hfsplus_removexattr(inode, name);

	err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
	if (err) {
		pr_err("can't init xattr find struct\n");
		return err;
	}

	err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
	if (err) {
		pr_err("catalog searching failed\n");
		goto end_setxattr;
	}

	/*
	 * Finder info is written directly into the catalog record; the
	 * value size must match the record's finder-info area exactly.
	 */
	if (!strcmp_xattr_finder_info(name)) {
		if (flags & XATTR_CREATE) {
			pr_err("xattr exists yet\n");
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		hfs_bnode_read(cat_fd.bnode, &entry, cat_fd.entryoffset,
					sizeof(hfsplus_cat_entry));
		if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
			if (size == folder_finderinfo_len) {
				memcpy(&entry.folder.user_info, value,
						folder_finderinfo_len);
				hfs_bnode_write(cat_fd.bnode, &entry,
					cat_fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
				hfsplus_mark_inode_dirty(inode,
						HFSPLUS_I_CAT_DIRTY);
			} else {
				err = -ERANGE;
				goto end_setxattr;
			}
		} else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
			if (size == file_finderinfo_len) {
				memcpy(&entry.file.user_info, value,
						file_finderinfo_len);
				hfs_bnode_write(cat_fd.bnode, &entry,
					cat_fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
				hfsplus_mark_inode_dirty(inode,
						HFSPLUS_I_CAT_DIRTY);
			} else {
				err = -ERANGE;
				goto end_setxattr;
			}
		} else {
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		goto end_setxattr;
	}

	/* Regular xattrs need the attributes B-tree; create it on demand. */
	if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
		err = hfsplus_create_attributes_file(inode->i_sb);
		if (unlikely(err))
			goto end_setxattr;
	}

	if (hfsplus_attr_exists(inode, name)) {
		if (flags & XATTR_CREATE) {
			pr_err("xattr exists yet\n");
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		/* Replace = delete the old record, then create anew. */
		err = hfsplus_delete_attr(inode, name);
		if (err)
			goto end_setxattr;
		err = hfsplus_create_attr(inode, name, value, size);
		if (err)
			goto end_setxattr;
	} else {
		if (flags & XATTR_REPLACE) {
			pr_err("cannot replace xattr\n");
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		err = hfsplus_create_attr(inode, name, value, size);
		if (err)
			goto end_setxattr;
	}

	/* Reflect attribute existence in the catalog record's flags. */
	cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
	if (cat_entry_type == HFSPLUS_FOLDER) {
		cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
				cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags));
		cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
		if (!strcmp_xattr_acl(name))
			cat_entry_flags |= HFSPLUS_ACL_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags),
				cat_entry_flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else if (cat_entry_type == HFSPLUS_FILE) {
		cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
				cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags));
		cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
		if (!strcmp_xattr_acl(name))
			cat_entry_flags |= HFSPLUS_ACL_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags),
				cat_entry_flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else {
		pr_err("invalid catalog entry type\n");
		err = -EIO;
		goto end_setxattr;
	}

end_setxattr:
	hfs_find_exit(&cat_fd);
	return err;
}
/*
 * Length a listxattr entry for @xattr_name will occupy in the user
 * buffer: the name, a NUL terminator, and — for unprefixed on-disk
 * names — the synthetic "osx." namespace prefix.
 */
static int name_len(const char *xattr_name, int xattr_name_len)
{
	int prefix_len = is_known_namespace(xattr_name) ?
				0 : XATTR_MAC_OSX_PREFIX_LEN;

	return prefix_len + xattr_name_len + 1;
}
/*
 * Copy @xattr_name (of exactly @name_len bytes, not counting the NUL)
 * into @buffer, prepending the "osx." prefix for names that carry no
 * standard namespace prefix.  Returns the number of bytes written,
 * including the terminating NUL.  The caller guarantees @buffer is
 * large enough (see the name_len() helper).
 *
 * Uses memcpy() + an explicit NUL store instead of the former
 * strncpy()/memset(ptr, 0, 1) pair: strncpy() with an exact length
 * neither guarantees termination nor communicates intent, and trips
 * -Wstringop-truncation; behavior here is identical since @name_len is
 * always the exact name length.
 */
static int copy_name(char *buffer, const char *xattr_name, int name_len)
{
	int len = name_len;
	int offset = 0;

	if (!is_known_namespace(xattr_name)) {
		memcpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
		offset += XATTR_MAC_OSX_PREFIX_LEN;
		len += XATTR_MAC_OSX_PREFIX_LEN;
	}

	memcpy(buffer + offset, xattr_name, name_len);
	buffer[offset + name_len] = '\0';
	len += 1;

	return len;
}
/*
 * hfsplus_setxattr - handler entry point: glue @prefix and @name back
 * into the full on-disk attribute name and hand off to
 * __hfsplus_setxattr().  @prefixlen must equal strlen(@prefix).
 */
int hfsplus_setxattr(struct inode *inode, const char *name,
		     const void *value, size_t size, int flags,
		     const char *prefix, size_t prefixlen)
{
	char *full_name;
	int err;

	/* Worst case: maximum attribute name after NLS expansion. */
	full_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
			    GFP_KERNEL);
	if (!full_name)
		return -ENOMEM;

	strcpy(full_name, prefix);
	strcpy(full_name + prefixlen, name);

	err = __hfsplus_setxattr(inode, full_name, value, size, flags);

	kfree(full_name);
	return err;
}
/*
 * hfsplus_getxattr_finder_info - read the Finder-info "xattr" straight
 * from the inode's catalog record.
 *
 * With @size == 0 the required buffer length is returned (getxattr
 * probing convention); a non-zero but too small @size yields -ERANGE.
 *
 * Note: @fd is only initialized on the "size >= record_len" path, which
 * is why hfs_find_exit() at the end is guarded by the same condition.
 */
static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
						void *value, size_t size)
{
	ssize_t res = 0;
	struct hfs_find_data fd;
	u16 entry_type;
	u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
	u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
	u16 record_len = max(folder_rec_len, file_rec_len);
	u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
	u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];

	if (size >= record_len) {
		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
		if (res) {
			pr_err("can't init xattr find struct\n");
			return res;
		}
		res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
		if (res)
			goto end_getxattr_finder_info;
		entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);

		if (entry_type == HFSPLUS_FOLDER) {
			hfs_bnode_read(fd.bnode, folder_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, user_info),
				folder_rec_len);
			memcpy(value, folder_finder_info, folder_rec_len);
			res = folder_rec_len;
		} else if (entry_type == HFSPLUS_FILE) {
			hfs_bnode_read(fd.bnode, file_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_file, user_info),
				file_rec_len);
			memcpy(value, file_finder_info, file_rec_len);
			res = file_rec_len;
		} else {
			res = -EOPNOTSUPP;
			goto end_getxattr_finder_info;
		}
	} else
		res = size ? -ERANGE : record_len;

end_getxattr_finder_info:
	if (size >= record_len)
		hfs_find_exit(&fd);
	return res;
}
/*
 * __hfsplus_getxattr - read an extended attribute by its full on-disk
 * name.
 *
 * With @size == 0 only the attribute length is returned.  Only
 * inline-data attribute records are supported; fork-data and extents
 * records yield -EOPNOTSUPP.  Returns the attribute length on success,
 * -ENODATA when the attribute does not exist, or another negative errno.
 */
ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
			   void *value, size_t size)
{
	struct hfs_find_data fd;
	hfsplus_attr_entry *entry;
	__be32 xattr_record_type;
	u32 record_type;
	u16 record_length = 0;
	ssize_t res = 0;

	if ((!S_ISREG(inode->i_mode) &&
			!S_ISDIR(inode->i_mode)) ||
				HFSPLUS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	/* Finder info lives in the catalog record, not the attr tree. */
	if (!strcmp_xattr_finder_info(name))
		return hfsplus_getxattr_finder_info(inode, value, size);

	if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
		return -EOPNOTSUPP;

	entry = hfsplus_alloc_attr_entry();
	if (!entry) {
		pr_err("can't allocate xattr entry\n");
		return -ENOMEM;
	}

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
	if (res) {
		pr_err("can't init xattr find struct\n");
		goto failed_getxattr_init;
	}

	res = hfsplus_find_attr(inode->i_sb, inode->i_ino, name, &fd);
	if (res) {
		if (res == -ENOENT)
			res = -ENODATA;
		else
			pr_err("xattr searching failed\n");
		goto out;
	}

	/* Peek at the record type before trusting the rest of the record. */
	hfs_bnode_read(fd.bnode, &xattr_record_type,
			fd.entryoffset, sizeof(xattr_record_type));
	record_type = be32_to_cpu(xattr_record_type);
	if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
		record_length = hfs_bnode_read_u16(fd.bnode,
				fd.entryoffset +
				offsetof(struct hfsplus_attr_inline_data,
				length));
		if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) {
			pr_err("invalid xattr record size\n");
			res = -EIO;
			goto out;
		}
	} else if (record_type == HFSPLUS_ATTR_FORK_DATA ||
			record_type == HFSPLUS_ATTR_EXTENTS) {
		pr_err("only inline data xattr are supported\n");
		res = -EOPNOTSUPP;
		goto out;
	} else {
		pr_err("invalid xattr record\n");
		res = -EIO;
		goto out;
	}

	/* size == 0 is the "how big is it?" probe — skip the data read. */
	if (size) {
		hfs_bnode_read(fd.bnode, entry, fd.entryoffset,
				offsetof(struct hfsplus_attr_inline_data,
					raw_bytes) + record_length);
	}

	if (size >= record_length) {
		memcpy(value, entry->inline_data.raw_bytes, record_length);
		res = record_length;
	} else
		res = size ? -ERANGE : record_length;

out:
	hfs_find_exit(&fd);

failed_getxattr_init:
	hfsplus_destroy_attr_entry(entry);
	return res;
}
/*
 * hfsplus_getxattr - handler entry point: rebuild the full on-disk
 * attribute name from @prefix + @name and delegate to
 * __hfsplus_getxattr().  @prefixlen must equal strlen(@prefix).
 */
ssize_t hfsplus_getxattr(struct inode *inode, const char *name,
			 void *value, size_t size,
			 const char *prefix, size_t prefixlen)
{
	char *full_name;
	int err;

	/* Worst case: maximum attribute name after NLS expansion. */
	full_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + 1,
			    GFP_KERNEL);
	if (!full_name)
		return -ENOMEM;

	strcpy(full_name, prefix);
	strcpy(full_name + prefixlen, name);

	err = __hfsplus_getxattr(inode, full_name, value, size);

	kfree(full_name);
	return err;
}
/*
 * Decide whether @xattr_name may appear in a listxattr result for the
 * current task.  trusted.* entries are visible only with CAP_SYS_ADMIN;
 * everything else (and only a NULL name is refused outright) is listed.
 */
static inline int can_list(const char *xattr_name)
{
	if (!xattr_name)
		return 0;

	if (strncmp(xattr_name, XATTR_TRUSTED_PREFIX,
		    XATTR_TRUSTED_PREFIX_LEN))
		return 1;

	return capable(CAP_SYS_ADMIN);
}
/*
 * hfsplus_listxattr_finder_info - contribute the synthetic Finder-info
 * entry to a listxattr result.
 *
 * The entry is reported only when the catalog record's finder info
 * contains at least one non-zero bit.  Returns the number of bytes
 * (that would be) consumed in @buffer, 0 when the finder info is all
 * zero, or a negative errno.
 */
static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
						char *buffer, size_t size)
{
	ssize_t res = 0;
	struct inode *inode = d_inode(dentry);
	struct hfs_find_data fd;
	u16 entry_type;
	u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
	u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
	unsigned long len, found_bit;
	int xattr_name_len, symbols_count;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
	if (res) {
		pr_err("can't init xattr find struct\n");
		return res;
	}

	res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
	if (res)
		goto end_listxattr_finder_info;

	entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
	if (entry_type == HFSPLUS_FOLDER) {
		len = sizeof(struct DInfo) + sizeof(struct DXInfo);
		hfs_bnode_read(fd.bnode, folder_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, user_info),
				len);
		found_bit = find_first_bit((void *)folder_finder_info, len*8);
	} else if (entry_type == HFSPLUS_FILE) {
		len = sizeof(struct FInfo) + sizeof(struct FXInfo);
		hfs_bnode_read(fd.bnode, file_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_file, user_info),
				len);
		found_bit = find_first_bit((void *)file_finder_info, len*8);
	} else {
		res = -EOPNOTSUPP;
		goto end_listxattr_finder_info;
	}

	/* All-zero finder info: nothing to report. */
	if (found_bit >= (len*8))
		res = 0;
	else {
		symbols_count = sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME) - 1;
		xattr_name_len =
			name_len(HFSPLUS_XATTR_FINDER_INFO_NAME, symbols_count);
		if (!buffer || !size) {
			/* Probe mode: report the length only. */
			if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME))
				res = xattr_name_len;
		} else if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) {
			if (size < xattr_name_len)
				res = -ERANGE;
			else {
				res = copy_name(buffer,
						HFSPLUS_XATTR_FINDER_INFO_NAME,
						symbols_count);
			}
		}
	}

end_listxattr_finder_info:
	hfs_find_exit(&fd);
	return res;
}
/*
 * hfsplus_listxattr - enumerate the xattr names of @dentry's inode.
 *
 * First accounts for the synthetic Finder-info entry, then walks the
 * attributes B-tree records that belong to this inode, converting each
 * key from unicode and filtering through can_list().  With a NULL or
 * zero-size @buffer only the total required length is computed.
 * Returns the byte count or a negative errno.
 */
ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	ssize_t err;
	ssize_t res = 0;
	struct inode *inode = d_inode(dentry);
	struct hfs_find_data fd;
	u16 key_len = 0;
	struct hfsplus_attr_key attr_key;
	char *strbuf;
	int xattr_name_len;

	if ((!S_ISREG(inode->i_mode) &&
			!S_ISDIR(inode->i_mode)) ||
				HFSPLUS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	res = hfsplus_listxattr_finder_info(dentry, buffer, size);
	if (res < 0)
		return res;
	else if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
		/* No attr tree: Finder info may still have been listed. */
		return (res == 0) ? -EOPNOTSUPP : res;

	err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
	if (err) {
		pr_err("can't init xattr find struct\n");
		return err;
	}

	/* Room for the converted name plus a possible "osx." prefix. */
	strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
			XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
	if (!strbuf) {
		res = -ENOMEM;
		goto out;
	}

	/* Position fd at the first attribute record of this inode. */
	err = hfsplus_find_attr(inode->i_sb, inode->i_ino, NULL, &fd);
	if (err) {
		if (err == -ENOENT) {
			if (res == 0)
				res = -ENODATA;
			goto end_listxattr;
		} else {
			res = err;
			goto end_listxattr;
		}
	}

	for (;;) {
		key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
		if (key_len == 0 || key_len > fd.tree->max_key_len) {
			pr_err("invalid xattr key length: %d\n", key_len);
			res = -EIO;
			goto end_listxattr;
		}

		hfs_bnode_read(fd.bnode, &attr_key,
				fd.keyoffset, key_len + sizeof(key_len));

		/* Stop once the records belong to a different inode. */
		if (be32_to_cpu(attr_key.cnid) != inode->i_ino)
			goto end_listxattr;

		xattr_name_len = NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN;
		if (hfsplus_uni2asc(inode->i_sb,
			(const struct hfsplus_unistr *)&fd.key->attr.key_name,
					strbuf, &xattr_name_len)) {
			pr_err("unicode conversion failed\n");
			res = -EIO;
			goto end_listxattr;
		}

		if (!buffer || !size) {
			/* Probe mode: accumulate lengths only. */
			if (can_list(strbuf))
				res += name_len(strbuf, xattr_name_len);
		} else if (can_list(strbuf)) {
			if (size < (res + name_len(strbuf, xattr_name_len))) {
				res = -ERANGE;
				goto end_listxattr;
			} else
				res += copy_name(buffer + res,
						strbuf, xattr_name_len);
		}

		if (hfs_brec_goto(&fd, 1))
			goto end_listxattr;
	}

end_listxattr:
	kfree(strbuf);
out:
	hfs_find_exit(&fd);
	return res;
}
/*
 * hfsplus_removexattr - delete attribute @name from the attributes
 * B-tree and clear the catalog record's HFSPLUS_ACL_EXISTS /
 * HFSPLUS_XATTR_EXISTS flag bits when the ACL or the last attribute
 * is gone.  Finder info cannot be removed (-EOPNOTSUPP).
 */
static int hfsplus_removexattr(struct inode *inode, const char *name)
{
	int err = 0;
	struct hfs_find_data cat_fd;
	u16 flags;
	u16 cat_entry_type;
	int is_xattr_acl_deleted = 0;
	int is_all_xattrs_deleted = 0;

	if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
		return -EOPNOTSUPP;

	if (!strcmp_xattr_finder_info(name))
		return -EOPNOTSUPP;

	err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
	if (err) {
		pr_err("can't init xattr find struct\n");
		return err;
	}

	err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
	if (err) {
		pr_err("catalog searching failed\n");
		goto end_removexattr;
	}

	err = hfsplus_delete_attr(inode, name);
	if (err)
		goto end_removexattr;

	is_xattr_acl_deleted = !strcmp_xattr_acl(name);
	is_all_xattrs_deleted = !hfsplus_attr_exists(inode, NULL);

	/* Only touch the catalog record when a flag bit must change. */
	if (!is_xattr_acl_deleted && !is_all_xattrs_deleted)
		goto end_removexattr;

	cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);

	if (cat_entry_type == HFSPLUS_FOLDER) {
		flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags));
		if (is_xattr_acl_deleted)
			flags &= ~HFSPLUS_ACL_EXISTS;
		if (is_all_xattrs_deleted)
			flags &= ~HFSPLUS_XATTR_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags),
				flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else if (cat_entry_type == HFSPLUS_FILE) {
		flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags));
		if (is_xattr_acl_deleted)
			flags &= ~HFSPLUS_ACL_EXISTS;
		if (is_all_xattrs_deleted)
			flags &= ~HFSPLUS_XATTR_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags),
				flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else {
		pr_err("invalid catalog entry type\n");
		err = -EIO;
		goto end_removexattr;
	}

end_removexattr:
	hfs_find_exit(&cat_fd);
	return err;
}
/*
 * Get-handler for the "osx." namespace.
 *
 * "osx" is the namespace used to expose attributes stored with no
 * prefix on disk (as OS X creates them), so the name is passed through
 * unmodified — but only after verifying it does not itself begin with a
 * standard namespace, so "osx.user.foo" cannot alias "user.foo".
 */
static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
				struct dentry *unused, struct inode *inode,
				const char *name, void *buffer, size_t size)
{
	if (is_known_namespace(name))
		return -EOPNOTSUPP;

	return __hfsplus_getxattr(inode, name, buffer, size);
}
/*
 * Set-handler for the "osx." namespace.
 *
 * Mirrors hfsplus_osx_getxattr(): the name is stored unprefixed on
 * disk, so it is forwarded as-is after rejecting names that already
 * carry a standard namespace prefix (prevents "osx.user.foo" from
 * shadowing "user.foo").
 */
static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
				struct dentry *unused, struct inode *inode,
				const char *name, const void *buffer,
				size_t size, int flags)
{
	if (is_known_namespace(name))
		return -EOPNOTSUPP;

	return __hfsplus_setxattr(inode, name, buffer, size, flags);
}
/*
 * Handler object for the "osx." namespace: surfaces unprefixed on-disk
 * attributes (as created by OS X) to Linux under the "osx." prefix.
 */
const struct xattr_handler hfsplus_xattr_osx_handler = {
	.prefix	= XATTR_MAC_OSX_PREFIX,
	.get	= hfsplus_osx_getxattr,
	.set	= hfsplus_osx_setxattr,
};
| {
"pile_set_name": "Github"
} |
// (C) Copyright John Maddock 2007.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// This file is machine generated, do not edit by hand
// Polynomial evaluation using second order Horners rule
#ifndef BOOST_MATH_TOOLS_RAT_EVAL_18_HPP
#define BOOST_MATH_TOOLS_RAT_EVAL_18_HPP
namespace boost{ namespace math{ namespace tools{ namespace detail{
// Degree-0 rational: no coefficients, the value is identically zero.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T*, const U*, const V&, const boost::integral_constant<int, 0>*) BOOST_MATH_NOEXCEPT(V)
{
   return static_cast<V>(0);
}
// Degree-1 (constant) rational: the ratio of the two leading
// coefficients; x is unused.  Each operand is cast to V before the
// division, exactly as in the generated form.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V&, const boost::integral_constant<int, 1>*) BOOST_MATH_NOEXCEPT(V)
{
   V num = static_cast<V>(a[0]);
   V den = static_cast<V>(b[0]);
   return num / den;
}
// Degree-2 rational: first-order Horner on numerator and denominator;
// the division happens in the promoted type, then is cast to V.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 2>*) BOOST_MATH_NOEXCEPT(V)
{
   return static_cast<V>((a[1] * x + a[0]) / (b[1] * x + b[0]));
}
// Degree-3 rational: first-order Horner on both polynomials.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 3>*) BOOST_MATH_NOEXCEPT(V)
{
   return static_cast<V>(((a[2] * x + a[1]) * x + a[0]) / ((b[2] * x + b[1]) * x + b[0]));
}
// Degree-4 rational: first-order Horner on both polynomials.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 4>*) BOOST_MATH_NOEXCEPT(V)
{
   return static_cast<V>((((a[3] * x + a[2]) * x + a[1]) * x + a[0]) / (((b[3] * x + b[2]) * x + b[1]) * x + b[0]));
}
// Degree-5 rational in second-order Horner form: even/odd coefficients
// are accumulated separately in x^2 to expose instruction-level
// parallelism.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 5>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      // Ascending evaluation is well conditioned for |x| <= 1.
      V x2 = x * x;
      return static_cast<V>(((a[4] * x2 + a[2]) * x2 + a[0] + (a[3] * x2 + a[1]) * x) / ((b[4] * x2 + b[2]) * x2 + b[0] + (b[3] * x2 + b[1]) * x));
   }
   else
   {
      // For x > 1 evaluate in z = 1/x (coefficients reversed) so the
      // intermediate terms cannot overflow for large x.
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>(((a[0] * z2 + a[2]) * z2 + a[4] + (a[1] * z2 + a[3]) * z) / ((b[0] * z2 + b[2]) * z2 + b[4] + (b[1] * z2 + b[3]) * z));
   }
}
// Degree-6 rational, second-order Horner (see the degree-5 overload for
// the scheme); x <= 1 evaluates ascending, otherwise in z = 1/x.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 6>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>((((a[5] * x2 + a[3]) * x2 + a[1]) * x + (a[4] * x2 + a[2]) * x2 + a[0]) / (((b[5] * x2 + b[3]) * x2 + b[1]) * x + (b[4] * x2 + b[2]) * x2 + b[0]));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>((((a[0] * z2 + a[2]) * z2 + a[4]) * z + (a[1] * z2 + a[3]) * z2 + a[5]) / (((b[0] * z2 + b[2]) * z2 + b[4]) * z + (b[1] * z2 + b[3]) * z2 + b[5]));
   }
}
// Degree-7 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 7>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>((((a[6] * x2 + a[4]) * x2 + a[2]) * x2 + a[0] + ((a[5] * x2 + a[3]) * x2 + a[1]) * x) / (((b[6] * x2 + b[4]) * x2 + b[2]) * x2 + b[0] + ((b[5] * x2 + b[3]) * x2 + b[1]) * x));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6] + ((a[1] * z2 + a[3]) * z2 + a[5]) * z) / (((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6] + ((b[1] * z2 + b[3]) * z2 + b[5]) * z));
   }
}
// Degree-8 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 8>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>(((((a[7] * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x + ((a[6] * x2 + a[4]) * x2 + a[2]) * x2 + a[0]) / ((((b[7] * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x + ((b[6] * x2 + b[4]) * x2 + b[2]) * x2 + b[0]));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>(((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z + ((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) / ((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z + ((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]));
   }
}
// Degree-9 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 9>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>(((((a[8] * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0] + (((a[7] * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x) / ((((b[8] * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0] + (((b[7] * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>(((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8] + (((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z) / ((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8] + (((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z));
   }
}
// Degree-10 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 10>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>((((((a[9] * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x + (((a[8] * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0]) / (((((b[9] * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x + (((b[8] * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0]));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z + (((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) / (((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z + (((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]));
   }
}
// Degree-11 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 11>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>((((((a[10] * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0] + ((((a[9] * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x) / (((((b[10] * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0] + ((((b[9] * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10] + ((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z) / (((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10] + ((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z));
   }
}
// Degree-12 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 12>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>(((((((a[11] * x2 + a[9]) * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x + ((((a[10] * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0]) / ((((((b[11] * x2 + b[9]) * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x + ((((b[10] * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0]));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>(((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10]) * z + ((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z2 + a[11]) / ((((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10]) * z + ((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z2 + b[11]));
   }
}
// Degree-13 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 13>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>(((((((a[12] * x2 + a[10]) * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0] + (((((a[11] * x2 + a[9]) * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x) / ((((((b[12] * x2 + b[10]) * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0] + (((((b[11] * x2 + b[9]) * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>(((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10]) * z2 + a[12] + (((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z2 + a[11]) * z) / ((((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10]) * z2 + b[12] + (((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z2 + b[11]) * z));
   }
}
// Degree-14 rational, second-order Horner; x <= 1 evaluates ascending,
// otherwise in z = 1/x with reversed coefficients to avoid overflow.
template <class T, class U, class V>
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 14>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>((((((((a[13] * x2 + a[11]) * x2 + a[9]) * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x + (((((a[12] * x2 + a[10]) * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0]) / (((((((b[13] * x2 + b[11]) * x2 + b[9]) * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x + (((((b[12] * x2 + b[10]) * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0]));
   }
   else
   {
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>((((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10]) * z2 + a[12]) * z + (((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z2 + a[11]) * z2 + a[13]) / (((((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10]) * z2 + b[12]) * z + (((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z2 + b[11]) * z2 + b[13]));
   }
}
template <class T, class U, class V>
// Degree-14 rational approximation a(x)/b(x), 15 coefficients per polynomial.
// Second-order Horner scheme (even/odd coefficient split in x^2).
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 15>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>((((((((a[14] * x2 + a[12]) * x2 + a[10]) * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0] + ((((((a[13] * x2 + a[11]) * x2 + a[9]) * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x) / (((((((b[14] * x2 + b[12]) * x2 + b[10]) * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0] + ((((((b[13] * x2 + b[11]) * x2 + b[9]) * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x));
   }
   else
   {
      // x > 1: evaluate in powers of 1/x (reversed coefficients) to avoid overflow.
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>((((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10]) * z2 + a[12]) * z2 + a[14] + ((((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z2 + a[11]) * z2 + a[13]) * z) / (((((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10]) * z2 + b[12]) * z2 + b[14] + ((((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z2 + b[11]) * z2 + b[13]) * z));
   }
}
template <class T, class U, class V>
// Degree-15 rational approximation a(x)/b(x), 16 coefficients per polynomial.
// Second-order Horner scheme (even/odd coefficient split in x^2).
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 16>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>(((((((((a[15] * x2 + a[13]) * x2 + a[11]) * x2 + a[9]) * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x + ((((((a[14] * x2 + a[12]) * x2 + a[10]) * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0]) / ((((((((b[15] * x2 + b[13]) * x2 + b[11]) * x2 + b[9]) * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x + ((((((b[14] * x2 + b[12]) * x2 + b[10]) * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0]));
   }
   else
   {
      // x > 1: evaluate in powers of 1/x (reversed coefficients) to avoid overflow.
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>(((((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10]) * z2 + a[12]) * z2 + a[14]) * z + ((((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z2 + a[11]) * z2 + a[13]) * z2 + a[15]) / ((((((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10]) * z2 + b[12]) * z2 + b[14]) * z + ((((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z2 + b[11]) * z2 + b[13]) * z2 + b[15]));
   }
}
template <class T, class U, class V>
// Degree-16 rational approximation a(x)/b(x), 17 coefficients per polynomial.
// Second-order Horner scheme (even/odd coefficient split in x^2).
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 17>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>(((((((((a[16] * x2 + a[14]) * x2 + a[12]) * x2 + a[10]) * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0] + (((((((a[15] * x2 + a[13]) * x2 + a[11]) * x2 + a[9]) * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x) / ((((((((b[16] * x2 + b[14]) * x2 + b[12]) * x2 + b[10]) * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0] + (((((((b[15] * x2 + b[13]) * x2 + b[11]) * x2 + b[9]) * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x));
   }
   else
   {
      // x > 1: evaluate in powers of 1/x (reversed coefficients) to avoid overflow.
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>(((((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10]) * z2 + a[12]) * z2 + a[14]) * z2 + a[16] + (((((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z2 + a[11]) * z2 + a[13]) * z2 + a[15]) * z) / ((((((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10]) * z2 + b[12]) * z2 + b[14]) * z2 + b[16] + (((((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z2 + b[11]) * z2 + b[13]) * z2 + b[15]) * z));
   }
}
template <class T, class U, class V>
// Degree-17 rational approximation a(x)/b(x), 18 coefficients per polynomial.
// Second-order Horner scheme (even/odd coefficient split in x^2).
inline V evaluate_rational_c_imp(const T* a, const U* b, const V& x, const boost::integral_constant<int, 18>*) BOOST_MATH_NOEXCEPT(V)
{
   if(x <= 1)
   {
      V x2 = x * x;
      return static_cast<V>((((((((((a[17] * x2 + a[15]) * x2 + a[13]) * x2 + a[11]) * x2 + a[9]) * x2 + a[7]) * x2 + a[5]) * x2 + a[3]) * x2 + a[1]) * x + (((((((a[16] * x2 + a[14]) * x2 + a[12]) * x2 + a[10]) * x2 + a[8]) * x2 + a[6]) * x2 + a[4]) * x2 + a[2]) * x2 + a[0]) / (((((((((b[17] * x2 + b[15]) * x2 + b[13]) * x2 + b[11]) * x2 + b[9]) * x2 + b[7]) * x2 + b[5]) * x2 + b[3]) * x2 + b[1]) * x + (((((((b[16] * x2 + b[14]) * x2 + b[12]) * x2 + b[10]) * x2 + b[8]) * x2 + b[6]) * x2 + b[4]) * x2 + b[2]) * x2 + b[0]));
   }
   else
   {
      // x > 1: evaluate in powers of 1/x (reversed coefficients) to avoid overflow.
      V z = 1 / x;
      V z2 = 1 / (x * x);
      return static_cast<V>((((((((((a[0] * z2 + a[2]) * z2 + a[4]) * z2 + a[6]) * z2 + a[8]) * z2 + a[10]) * z2 + a[12]) * z2 + a[14]) * z2 + a[16]) * z + (((((((a[1] * z2 + a[3]) * z2 + a[5]) * z2 + a[7]) * z2 + a[9]) * z2 + a[11]) * z2 + a[13]) * z2 + a[15]) * z2 + a[17]) / (((((((((b[0] * z2 + b[2]) * z2 + b[4]) * z2 + b[6]) * z2 + b[8]) * z2 + b[10]) * z2 + b[12]) * z2 + b[14]) * z2 + b[16]) * z + (((((((b[1] * z2 + b[3]) * z2 + b[5]) * z2 + b[7]) * z2 + b[9]) * z2 + b[11]) * z2 + b[13]) * z2 + b[15]) * z2 + b[17]));
   }
}
}}}} // namespaces
#endif // include guard
| {
"pile_set_name": "Github"
} |
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run maketables.go -output tables.go
// Package display provides display names for languages, scripts and regions in
// a requested language.
//
// The data is based on CLDR's localeDisplayNames. It includes the names of the
// draft level "contributed" or "approved". The resulting tables are quite
// large. The display package is designed so that users can reduce the linked-in
// table sizes by cherry picking the languages one wishes to support. There is a
// Dictionary defined for a selected set of common languages for this purpose.
package display // import "golang.org/x/text/language/display"
import (
"fmt"
"strings"
"golang.org/x/text/internal/format"
"golang.org/x/text/language"
)
/*
TODO:
All fairly low priority at the moment:
- Include alternative and variants as an option (using func options).
- Option for returning the empty string for undefined values.
- Support variants, currencies, time zones, option names and other data
provided in CLDR.
- Do various optimizations:
- Reduce size of offset tables.
- Consider compressing infrequently used languages and decompress on demand.
*/
// A Formatter formats a tag in the current language. It is used in conjunction
// with the message package.
type Formatter struct {
	// lookup resolves the display name for x in the language identified by
	// the compact tag index.
	lookup func(tag int, x interface{}) string
	x      interface{}
}

// Format implements "golang.org/x/text/internal/format".Formatter.
func (f Formatter) Format(state format.State, verb rune) {
	// TODO: there are a lot of inefficiencies in this code. Fix it when
	// language.Tag has embedded compact tags.
	t := state.Language()
	_, index, _ := matcher.Match(t)
	str := f.lookup(index, f.x)
	if str == "" {
		// No translation in the current language: fall back to the untranslated
		// (Und) name, or render a placeholder if even that is missing.
		// TODO: use language-specific punctuation.
		// TODO: use codePattern instead of language?
		if unknown := f.lookup(index, language.Und); unknown != "" {
			fmt.Fprintf(state, "%v (%v)", unknown, f.x)
		} else {
			fmt.Fprintf(state, "[language: %v]", f.x)
		}
	} else {
		state.Write([]byte(str))
	}
}
// Language returns a Formatter that renders the name for lang in the
// current language. x may be a language.Base or a language.Tag.
// It renders lang in the default language if no translation for the current
// language is supported.
func Language(lang interface{}) Formatter {
	return Formatter{langFunc, lang}
}

// Region returns a Formatter that renders the name for region in the current
// language. region may be a language.Region or a language.Tag.
// It renders region in the default language if no translation for the current
// language is supported.
func Region(region interface{}) Formatter {
	return Formatter{regionFunc, region}
}

// Script returns a Formatter that renders the name for script in the current
// language. script may be a language.Script or a language.Tag.
// It renders script in the default language if no translation for the current
// language is supported.
func Script(script interface{}) Formatter {
	return Formatter{scriptFunc, script}
}

// Tag returns a Formatter that renders the name for tag in the current
// language. tag may be a language.Tag.
// It renders tag in the default language if no translation for the current
// language is supported.
func Tag(tag interface{}) Formatter {
	return Formatter{tagFunc, tag}
}
// A Namer is used to get the name for a given value, such as a Tag, Language,
// Script or Region.
type Namer interface {
	// Name returns a display string for the given value. A Namer returns an
	// empty string for values it does not support. A Namer may support naming
	// an unspecified value. For example, when getting the name for a region for
	// a tag that does not have a defined Region, it may return the name for an
	// unknown region. It is up to the user to filter calls to Name for values
	// for which one does not want to have a name string.
	Name(x interface{}) string
}

var (
	// Supported lists the languages for which names are defined.
	Supported language.Coverage
	// Values is the set of all possible values for which names are defined.
	// Note that not all Namer implementations will cover all the values of a
	// given type. A Namer will return the empty string for unsupported values.
	Values language.Coverage

	// matcher maps an arbitrary tag onto the closest supported display language.
	matcher language.Matcher
)
// init parses the generated, '|'-separated list of supported tags (see
// tables.go) and builds the shared matcher and coverage sets.
func init() {
	tags := make([]language.Tag, numSupported)
	s := supported
	for i := range tags {
		// Each entry is terminated by '|', including the last one.
		p := strings.IndexByte(s, '|')
		tags[i] = language.Raw.Make(s[:p])
		s = s[p+1:]
	}
	matcher = language.NewMatcher(tags)
	Supported = language.NewCoverage(tags)
	Values = language.NewCoverage(langTagSet.Tags, supportedScripts, supportedRegions)
}
// Languages returns a Namer for naming languages. It returns nil if there is no
// data for the given tag. The type passed to Name must be either language.Base
// or language.Tag. Note that the result may differ between passing a tag or its
// base language. For example, for English, passing "nl-BE" would return Flemish
// whereas passing "nl" returns "Dutch".
func Languages(t language.Tag) Namer {
	if _, index, conf := matcher.Match(t); conf != language.No {
		return languageNamer(index)
	}
	return nil
}

// languageNamer is an index into the generated langHeaders table.
type languageNamer int

// langFunc adapts languageNamer for use as a Formatter lookup function.
func langFunc(i int, x interface{}) string {
	return nameLanguage(languageNamer(i), x)
}

// name looks up entry i in the language table for this display language.
func (n languageNamer) name(i int) string {
	return lookup(langHeaders[:], int(n), i)
}

// Name implements the Namer interface for language names.
func (n languageNamer) Name(x interface{}) string {
	return nameLanguage(n, x)
}
// nonEmptyIndex walks up the parent chain until a non-empty header is found.
// It returns -1 if no index could be found.
func nonEmptyIndex(h []header, index int) int {
	i := index
	for i != -1 && h[i].data == "" {
		i = int(parents[i])
	}
	return i
}
// Scripts returns a Namer for naming scripts. It returns nil if there is no
// data for the given tag. The type passed to Name must be either a
// language.Script or a language.Tag. It will not attempt to infer a script for
// tags with an unspecified script.
func Scripts(t language.Tag) Namer {
	if _, index, conf := matcher.Match(t); conf != language.No {
		// Script tables are sparse: fall back to the closest ancestor with data.
		if index = nonEmptyIndex(scriptHeaders[:], index); index != -1 {
			return scriptNamer(index)
		}
	}
	return nil
}

// scriptNamer is an index into the generated scriptHeaders table.
type scriptNamer int

// scriptFunc adapts scriptNamer for use as a Formatter lookup function.
func scriptFunc(i int, x interface{}) string {
	return nameScript(scriptNamer(i), x)
}

// name looks up entry i in the script table for this display language.
func (n scriptNamer) name(i int) string {
	return lookup(scriptHeaders[:], int(n), i)
}

// Name implements the Namer interface for script names.
func (n scriptNamer) Name(x interface{}) string {
	return nameScript(n, x)
}
// Regions returns a Namer for naming regions. It returns nil if there is no
// data for the given tag. The type passed to Name must be either a
// language.Region or a language.Tag. It will not attempt to infer a region for
// tags with an unspecified region.
func Regions(t language.Tag) Namer {
	if _, index, conf := matcher.Match(t); conf != language.No {
		// Region tables are sparse: fall back to the closest ancestor with data.
		if index = nonEmptyIndex(regionHeaders[:], index); index != -1 {
			return regionNamer(index)
		}
	}
	return nil
}

// regionNamer is an index into the generated regionHeaders table.
type regionNamer int

// regionFunc adapts regionNamer for use as a Formatter lookup function.
func regionFunc(i int, x interface{}) string {
	return nameRegion(regionNamer(i), x)
}

// name looks up entry i in the region table for this display language.
func (n regionNamer) name(i int) string {
	return lookup(regionHeaders[:], int(n), i)
}

// Name implements the Namer interface for region names.
func (n regionNamer) Name(x interface{}) string {
	return nameRegion(n, x)
}
// Tags returns a Namer for giving a full description of a tag. The names of
// scripts and regions that are not already implied by the language name will
// be appended within parentheses. It returns nil if there is no data for the
// given tag. The type passed to Name must be a tag.
func Tags(t language.Tag) Namer {
	if _, index, conf := matcher.Match(t); conf != language.No {
		return tagNamer(index)
	}
	return nil
}

// tagNamer is a display-language index shared by the language, script and
// region tables.
type tagNamer int

// tagFunc adapts tagNamer for use as a Formatter lookup function.
func tagFunc(i int, x interface{}) string {
	return nameTag(languageNamer(i), scriptNamer(i), regionNamer(i), x)
}

// Name implements the Namer interface for tag names.
func (n tagNamer) Name(x interface{}) string {
	return nameTag(languageNamer(n), scriptNamer(n), regionNamer(n), x)
}
// lookup finds the name for an entry in a global table, traversing the
// inheritance hierarchy if needed.
func lookup(table []header, dict, want int) string {
	for i := dict; i != -1; i = int(parents[i]) {
		if s := table[i].name(want); s != "" {
			return s
		}
	}
	return ""
}
// A Dictionary holds a collection of Namers for a single language. One can
// reduce the amount of data linked in to a binary by only referencing
// Dictionaries for the languages one needs to support instead of using the
// generic Namer factories.
type Dictionary struct {
	parent *Dictionary // fallback chain for inherited names; nil at the root
	lang   header
	script header
	region header
}

// Tags returns a Namer for giving a full description of a tag. The names of
// scripts and regions that are not already implied by the language name will
// be appended within parentheses. It returns nil if there is no data for the
// given tag. The type passed to Name must be a tag.
func (d *Dictionary) Tags() Namer {
	return dictTags{d}
}

// dictTags combines the dictionary's language, script and region namers.
type dictTags struct {
	d *Dictionary
}

// Name implements the Namer interface for tag names.
func (n dictTags) Name(x interface{}) string {
	return nameTag(dictLanguages{n.d}, dictScripts{n.d}, dictRegions{n.d}, x)
}
// Languages returns a Namer for naming languages. It returns nil if there is no
// data for the given tag. The type passed to Name must be either language.Base
// or language.Tag. Note that the result may differ between passing a tag or its
// base language. For example, for English, passing "nl-BE" would return Flemish
// whereas passing "nl" returns "Dutch".
func (d *Dictionary) Languages() Namer {
	return dictLanguages{d}
}

// dictLanguages names languages from a Dictionary's data.
type dictLanguages struct {
	d *Dictionary
}

// name looks up entry i, walking the dictionary's parent chain until a
// non-empty name is found.
func (n dictLanguages) name(i int) string {
	for d := n.d; d != nil; d = d.parent {
		if s := d.lang.name(i); s != "" {
			return s
		}
	}
	return ""
}

// Name implements the Namer interface for language names.
func (n dictLanguages) Name(x interface{}) string {
	return nameLanguage(n, x)
}
// Scripts returns a Namer for naming scripts. It returns nil if there is no
// data for the given tag. The type passed to Name must be either a
// language.Script or a language.Tag. It will not attempt to infer a script for
// tags with an unspecified script.
func (d *Dictionary) Scripts() Namer {
	return dictScripts{d}
}

// dictScripts names scripts from a Dictionary's data.
type dictScripts struct {
	d *Dictionary
}

// name looks up entry i, walking the dictionary's parent chain until a
// non-empty name is found.
func (n dictScripts) name(i int) string {
	for d := n.d; d != nil; d = d.parent {
		if s := d.script.name(i); s != "" {
			return s
		}
	}
	return ""
}

// Name implements the Namer interface for script names.
func (n dictScripts) Name(x interface{}) string {
	return nameScript(n, x)
}
// Regions returns a Namer for naming regions. It returns nil if there is no
// data for the given tag. The type passed to Name must be either a
// language.Region or a language.Tag. It will not attempt to infer a region for
// tags with an unspecified region.
func (d *Dictionary) Regions() Namer {
	return dictRegions{d}
}

// dictRegions names regions from a Dictionary's data.
type dictRegions struct {
	d *Dictionary
}

// name looks up entry i, walking the dictionary's parent chain until a
// non-empty name is found.
func (n dictRegions) name(i int) string {
	for d := n.d; d != nil; d = d.parent {
		if s := d.region.name(i); s != "" {
			return s
		}
	}
	return ""
}

// Name implements the Namer interface for region names.
func (n dictRegions) Name(x interface{}) string {
	return nameRegion(n, x)
}
// A SelfNamer implements a Namer that returns the name of language in this same
// language. It provides a very compact mechanism to provide a comprehensive
// list of languages to users in their native language.
type SelfNamer struct {
	// Supported defines the values supported by this Namer.
	Supported language.Coverage
}

var (
	// Self is a shared instance of a SelfNamer.
	Self *SelfNamer = &self

	self = SelfNamer{language.NewCoverage(selfTagSet.Tags)}
)

// Name returns the name of a given language tag in the language identified by
// this tag. It supports both the language.Base and language.Tag types.
func (n SelfNamer) Name(x interface{}) string {
	t, _ := language.All.Compose(x)
	base, scr, reg := t.Raw()
	baseScript := language.Script{}
	if (scr == language.Script{} && reg != language.Region{}) {
		// For looking up in the self dictionary, we need to select the
		// maximized script. This is even the case if the script isn't
		// specified.
		s1, _ := t.Script()
		if baseScript = getScript(base); baseScript != s1 {
			scr = s1
		}
	}

	i, scr, reg := selfTagSet.index(base, scr, reg)
	if i == -1 {
		return ""
	}

	// Only return the display name if the script matches the expected script.
	if (scr != language.Script{}) {
		if (baseScript == language.Script{}) {
			baseScript = getScript(base)
		}
		if baseScript != scr {
			return ""
		}
	}

	return selfHeaders[0].name(i)
}

// getScript returns the maximized script for a base language.
func getScript(b language.Base) language.Script {
	tag, _ := language.Raw.Compose(b)
	scr, _ := tag.Script()
	return scr
}
| {
"pile_set_name": "Github"
} |
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Default target: regenerate the Go protobuf bindings for test.proto.
all: regenerate

# Requires protoc >= 3.0.0 and installs the gogo protoc-min-version wrapper.
regenerate:
	go install github.com/gogo/protobuf/protoc-min-version
	protoc-min-version --version="3.0.0" --gogo_out=. test.proto
| {
"pile_set_name": "Github"
} |
require 'models/helpers/relational_operators'
module VCAP::CloudController
  class BaseListFetcher
    class << self
      # Applies timestamp filters from +message+ (created_ats / updated_ats) to
      # +dataset+, qualifying each column with +klass+'s table name so joins
      # remain unambiguous. Returns the filtered dataset.
      #
      # Relational filters arrive as a Hash of operator => timestamp string;
      # equality filters arrive as an Array of timestamp strings (see below).
      def filter(message, dataset, klass)
        advanced_filters = {}
        advanced_filters['created_at'] = message.created_ats if message.requested?(:created_ats)
        advanced_filters['updated_at'] = message.updated_ats if message.requested?(:updated_ats)

        advanced_filters.each do |filter, values|
          if values.is_a?(Hash)
            # Iterated purely for its side effect on `dataset`, so use `each`
            # rather than `map`.
            #
            # Because databases store timestamps with sub-second precision
            # (PostgreSQL uses microseconds, for example), "<=" widens the
            # bound to the end of the given second and ">" starts just past it.
            values.each do |operator, given_timestamp|
              if operator == RelationalOperators::LESS_THAN_COMPARATOR
                normalized_timestamp = Time.parse(given_timestamp).utc
                dataset = dataset.where { Sequel.qualify(klass.table_name, filter) < normalized_timestamp }
              elsif operator == RelationalOperators::LESS_THAN_OR_EQUAL_COMPARATOR
                normalized_timestamp = (Time.parse(given_timestamp).utc + 0.999999).utc
                dataset = dataset.where { Sequel.qualify(klass.table_name, filter) <= normalized_timestamp }
              elsif operator == RelationalOperators::GREATER_THAN_COMPARATOR
                normalized_timestamp = (Time.parse(given_timestamp).utc + 0.999999).utc
                dataset = dataset.where { Sequel.qualify(klass.table_name, filter) > normalized_timestamp }
              elsif operator == RelationalOperators::GREATER_THAN_OR_EQUAL_COMPARATOR
                normalized_timestamp = Time.parse(given_timestamp).utc
                dataset = dataset.where { Sequel.qualify(klass.table_name, filter) >= normalized_timestamp }
              end
            end
          else
            # Gotcha: unlike the other relational operators, which are hashes such as
            # { lt: '2020-06-30T12:34:56Z' }, the equals operator is simply an array, e.g.
            # [ '2020-06-30T12:34:56Z' ].
            # Gotcha: the equals operator returns all resources occurring within
            # the span of the second (e.g. "12:34:56.00-12:34:56.999999"), because
            # databases store timestamps with sub-second accuracy (PostgreSQL
            # stores microseconds, for example).
            bounds_expressions = values.map do |timestamp|
              lower_bound = Time.parse(timestamp).utc
              upper_bound = Time.at(lower_bound + 0.999999).utc
              (Sequel.qualify(klass.table_name, filter) <= upper_bound) &
                (Sequel.qualify(klass.table_name, filter) >= lower_bound)
            end
            dataset = dataset.where(Sequel.|(*bounds_expressions))
          end
        end
        dataset
      end
    end
  end
end
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 1996, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.beans;
/**
 * A "PropertyChange" event gets fired whenever a bean changes a "bound"
 * property. You can register a PropertyChangeListener with a source
 * bean so as to be notified of any bound property updates.
 *
 * @see PropertyChangeEvent
 * @since 1.1
 */
public interface PropertyChangeListener extends java.util.EventListener {
    /**
     * This method gets called when a bound property is changed.
     *
     * @param evt A PropertyChangeEvent object describing the event source
     *            and the property that has changed.
     */
    void propertyChange(PropertyChangeEvent evt);
}
| {
"pile_set_name": "Github"
} |
<!-- Facet radio filter: one mat-radio-button per option, plus optional
     show-more / show-less paging controls driven by the options model. -->
<mat-radio-group
    [(ngModel)]="value"
    (change)="changeHandler($event)">
    <mat-radio-button [attr.data-automation-id]="'search-radio-'+(option.name | translate)"
                      *ngFor="let option of options"
                      [value]="option.value"
                      class="adf-facet-filter">
        <div
            matTooltip="{{ option.name | translate }}"
            matTooltipPosition="right"
            class="filter-label">
            {{ option.name | translate }}
        </div>
    </mat-radio-button>
</mat-radio-group>
<!-- Paging controls are hidden when all options fit on one page. -->
<div class="adf-facet-buttons" *ngIf="!options.fitsPage">
    <button mat-icon-button
            *ngIf="options.canShowLessItems"
            title="{{ 'SEARCH.FILTER.ACTIONS.SHOW-LESS' | translate }}"
            (click)="options.showLessItems()">
        <mat-icon>keyboard_arrow_up</mat-icon>
    </button>
    <button mat-icon-button
            *ngIf="options.canShowMoreItems"
            title="{{ 'SEARCH.FILTER.ACTIONS.SHOW-MORE' | translate }}"
            (click)="options.showMoreItems()">
        <mat-icon>keyboard_arrow_down</mat-icon>
    </button>
</div>
| {
"pile_set_name": "Github"
} |
#include "esp_common.h"
#include "testrunner.h"
#include <stdlib.h>
#include "spiffs_test_params.h"
/* Command IDs for the SSC console command table below. */
enum {
    CMD_SPIFFS,
    CMD_END,
};

/* Number of entries in sscCmdSet (all commands plus the terminator row). */
#define SSC_CMD_N (CMD_END + 1)

LOCAL void spiffs_test_init(void);

/* SSC console command table: "fs" runs the SPIFFS test suite synchronously;
 * the empty-name entry terminates the table. */
LOCAL ssc_cmd_t sscCmdSet[SSC_CMD_N] = {
    {"fs", CMD_T_SYNC, CMD_SPIFFS, spiffs_test_init, NULL},
    {"", CMD_T_ASYNC, CMD_END, NULL, NULL}
};
/*
 * Handler for the "fs" SSC command: copies the console parameter string into
 * a local buffer, tokenizes it into argv[] and hands the arguments to the
 * SPIFFS test runner.
 */
void spiffs_test_init(void)
{
    char *argv[10], pLine[128];
    int argc;

    /* Bounded copy: the length of ssc_param_str() is not under our control,
     * so a plain strcpy() could overflow pLine[128]. snprintf truncates and
     * always NUL-terminates. */
    snprintf(pLine, sizeof pLine, "%s", ssc_param_str());
    /* NOTE(review): assumes ssc_parse_param() never writes more than 10
     * argv slots — TODO confirm against the SSC API. */
    argc = ssc_parse_param(pLine, argv);
    run_tests(argc, argv);
}
void spiffs_test_help(void)
{
printf("\nhelp:\n");
printf("$ fs \n");
}
void spiffs_fs1_init(void)
{
struct esp_spiffs_config config;
config.phys_size = FS1_FLASH_SIZE;
config.phys_addr = FS1_FLASH_ADDR;
config.phys_erase_block = SECTOR_SIZE;
config.log_block_size = LOG_BLOCK;
config.log_page_size = LOG_PAGE;
config.fd_buf_size = FD_BUF_SIZE * 2;
config.cache_buf_size = CACHE_BUF_SIZE;
esp_spiffs_init(&config);
}
/* SDK entry point: mounts the test SPIFFS partition, attaches the SSC
 * console at 74880 baud and registers the test command set. */
void user_init(void)
{
    spiffs_fs1_init();
    ssc_attach(SSC_BR_74880);
    ssc_register(sscCmdSet, SSC_CMD_N, spiffs_test_help);
}
| {
"pile_set_name": "Github"
} |
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20161021125616) do
create_table "conversations", force: :cascade do |t|
t.integer "author_id"
t.integer "receiver_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.index ["author_id", "receiver_id"], name: "index_conversations_on_author_id_and_receiver_id", unique: true
t.index ["author_id"], name: "index_conversations_on_author_id"
t.index ["receiver_id"], name: "index_conversations_on_receiver_id"
end
create_table "personal_messages", force: :cascade do |t|
t.text "body"
t.integer "conversation_id"
t.integer "user_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.index ["conversation_id"], name: "index_personal_messages_on_conversation_id"
t.index ["user_id"], name: "index_personal_messages_on_user_id"
end
create_table "users", force: :cascade do |t|
t.string "email", default: "", null: false
t.string "encrypted_password", default: "", null: false
t.string "reset_password_token"
t.datetime "reset_password_sent_at"
t.datetime "remember_created_at"
t.integer "sign_in_count", default: 0, null: false
t.datetime "current_sign_in_at"
t.datetime "last_sign_in_at"
t.string "current_sign_in_ip"
t.string "last_sign_in_ip"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.index ["email"], name: "index_users_on_email", unique: true
t.index ["reset_password_token"], name: "index_users_on_reset_password_token", unique: true
end
end
| {
"pile_set_name": "Github"
} |
{% extends "layout.html" %}
{% block title %}
{{ gettext("Oops") }} - {{ super() }}
{% endblock %}
{% if not mobile %}
{% block extra_headers %}
{% assets filters='less', output='build/main.%(version)s.css',
depends=['**/*.less'], 'less/main.less' %}
<link rel="stylesheet" href="{{ASSET_URL}}">
{% endassets %}
{% endblock %}
{% endif %}
{% block content %}
<div class="alert alert-error">
{{ gettext("This is not the web page you are looking for.") }}
</div>
{% endblock %}
| {
"pile_set_name": "Github"
} |
/*
* /MathJax/jax/input/MathML/entities/u.js
*
* Copyright (c) 2012 Design Science, Inc.
*
* Part of the MathJax library.
* See http://www.mathjax.org for details.
*
* Licensed under the Apache License, Version 2.0;
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*/
(function(a){MathJax.Hub.Insert(a.Parse.Entity,{Uacute:"\u00DA",Uarr:"\u219F",Uarrocir:"\u2949",Ubrcy:"\u040E",Ubreve:"\u016C",Ucirc:"\u00DB",Ucy:"\u0423",Udblac:"\u0170",Ugrave:"\u00D9",Umacr:"\u016A",UnderBracket:"\u23B5",UnderParenthesis:"\u23DD",Uogon:"\u0172",UpArrowBar:"\u2912",UpArrowDownArrow:"\u21C5",UpEquilibrium:"\u296E",UpTeeArrow:"\u21A5",UpperLeftArrow:"\u2196",UpperRightArrow:"\u2197",Upsi:"\u03D2",Uring:"\u016E",Utilde:"\u0168",Uuml:"\u00DC",uArr:"\u21D1",uHar:"\u2963",uacute:"\u00FA",uarr:"\u2191",ubrcy:"\u045E",ubreve:"\u016D",ucirc:"\u00FB",ucy:"\u0443",udarr:"\u21C5",udblac:"\u0171",udhar:"\u296E",ufisht:"\u297E",ugrave:"\u00F9",uharl:"\u21BF",uharr:"\u21BE",uhblk:"\u2580",ulcorn:"\u231C",ulcorner:"\u231C",ulcrop:"\u230F",ultri:"\u25F8",umacr:"\u016B",uml:"\u00A8",uogon:"\u0173",uparrow:"\u2191",updownarrow:"\u2195",upharpoonleft:"\u21BF",upharpoonright:"\u21BE",uplus:"\u228E",upsih:"\u03D2",upsilon:"\u03C5",urcorn:"\u231D",urcorner:"\u231D",urcrop:"\u230E",uring:"\u016F",urtri:"\u25F9",utdot:"\u22F0",utilde:"\u0169",utri:"\u25B5",utrif:"\u25B4",uuarr:"\u21C8",uuml:"\u00FC",uwangle:"\u29A7"});MathJax.Ajax.loadComplete(a.entityDir+"/u.js")})(MathJax.InputJax.MathML);
| {
"pile_set_name": "Github"
} |
/*
* alc5632.h -- ALC5632 ALSA SoC Audio Codec
*
* Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
*
* Authors: Leon Romanovsky <leon@leon.nu>
* Andrey Danin <danindrey@mail.ru>
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
*
* Based on alc5623.h by Arnaud Patard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ALC5632_H
#define _ALC5632_H
#define ALC5632_RESET 0x00
/* speaker output vol 2 2 */
/* line output vol 4 2 */
/* HP output vol 4 0 4 */
#define ALC5632_SPK_OUT_VOL 0x02 /* spe out vol */
#define ALC5632_SPK_OUT_VOL_STEP 1.5
#define ALC5632_HP_OUT_VOL 0x04 /* hp out vol */
#define ALC5632_AUX_OUT_VOL 0x06 /* aux out vol */
#define ALC5632_PHONE_IN_VOL 0x08 /* phone in vol */
#define ALC5632_LINE_IN_VOL 0x0A /* line in vol */
#define ALC5632_STEREO_DAC_IN_VOL 0x0C /* stereo dac in vol */
#define ALC5632_MIC_VOL 0x0E /* mic in vol */
/* stero dac/mic routing */
#define ALC5632_MIC_ROUTING_CTRL 0x10
#define ALC5632_MIC_ROUTE_MONOMIX (1 << 0)
#define ALC5632_MIC_ROUTE_SPK (1 << 1)
#define ALC5632_MIC_ROUTE_HP (1 << 2)
#define ALC5632_ADC_REC_GAIN 0x12 /* rec gain */
#define ALC5632_ADC_REC_GAIN_RANGE 0x1F1F
#define ALC5632_ADC_REC_GAIN_BASE (-16.5)
#define ALC5632_ADC_REC_GAIN_STEP 1.5
#define ALC5632_ADC_REC_MIXER 0x14 /* mixer control */
#define ALC5632_ADC_REC_MIC1 (1 << 6)
#define ALC5632_ADC_REC_MIC2 (1 << 5)
#define ALC5632_ADC_REC_LINE_IN (1 << 4)
#define ALC5632_ADC_REC_AUX (1 << 3)
#define ALC5632_ADC_REC_HP (1 << 2)
#define ALC5632_ADC_REC_SPK (1 << 1)
#define ALC5632_ADC_REC_MONOMIX (1 << 0)
#define ALC5632_VOICE_DAC_VOL 0x18 /* voice dac vol */
#define ALC5632_I2S_OUT_CTL 0x1A /* undocumented reg. found in path scheme */
/* ALC5632_OUTPUT_MIXER_CTRL : */
/* same remark as for reg 2 line vs speaker */
#define ALC5632_OUTPUT_MIXER_CTRL 0x1C /* out mix ctrl */
#define ALC5632_OUTPUT_MIXER_RP (1 << 14)
#define ALC5632_OUTPUT_MIXER_WEEK (1 << 12)
#define ALC5632_OUTPUT_MIXER_HP (1 << 10)
#define ALC5632_OUTPUT_MIXER_AUX_SPK (2 << 6)
#define ALC5632_OUTPUT_MIXER_AUX_HP_LR (1 << 6)
#define ALC5632_OUTPUT_MIXER_HP_R (1 << 8)
#define ALC5632_OUTPUT_MIXER_HP_L (1 << 9)
#define ALC5632_MIC_CTRL 0x22 /* mic phone ctrl */
#define ALC5632_MIC_BOOST_BYPASS 0
#define ALC5632_MIC_BOOST_20DB 1
#define ALC5632_MIC_BOOST_30DB 2
#define ALC5632_MIC_BOOST_40DB 3
#define ALC5632_DIGI_BOOST_CTRL 0x24 /* digi mic / bost ctl */
#define ALC5632_MIC_BOOST_RANGE 7
#define ALC5632_MIC_BOOST_STEP 6
#define ALC5632_PWR_DOWN_CTRL_STATUS 0x26
#define ALC5632_PWR_DOWN_CTRL_STATUS_MASK 0xEF00
#define ALC5632_PWR_VREF_PR3 (1 << 11)
#define ALC5632_PWR_VREF_PR2 (1 << 10)
#define ALC5632_PWR_VREF_STATUS (1 << 3)
#define ALC5632_PWR_AMIX_STATUS (1 << 2)
#define ALC5632_PWR_DAC_STATUS (1 << 1)
#define ALC5632_PWR_ADC_STATUS (1 << 0)
/* stereo/voice DAC / stereo adc func ctrl */
#define ALC5632_DAC_FUNC_SELECT 0x2E
/* Main serial data port ctrl (i2s) */
#define ALC5632_DAI_CONTROL 0x34
#define ALC5632_DAI_SDP_MASTER_MODE (0 << 15)
#define ALC5632_DAI_SDP_SLAVE_MODE (1 << 15)
#define ALC5632_DAI_SADLRCK_MODE (1 << 14)
/* 0:voice, 1:main */
#define ALC5632_DAI_MAIN_I2S_SYSCLK_SEL (1 << 8)
#define ALC5632_DAI_MAIN_I2S_BCLK_POL_CTRL (1 << 7)
/* 0:normal, 1:invert */
#define ALC5632_DAI_MAIN_I2S_LRCK_INV (1 << 6)
#define ALC5632_DAI_I2S_DL_MASK (3 << 2)
#define ALC5632_DAI_I2S_DL_8 (3 << 2)
#define ALC5632_DAI_I2S_DL_24 (2 << 2)
#define ALC5632_DAI_I2S_DL_20 (1 << 2)
#define ALC5632_DAI_I2S_DL_16 (0 << 2)
#define ALC5632_DAI_I2S_DF_MASK (3 << 0)
#define ALC5632_DAI_I2S_DF_PCM_B (3 << 0)
#define ALC5632_DAI_I2S_DF_PCM_A (2 << 0)
#define ALC5632_DAI_I2S_DF_LEFT (1 << 0)
#define ALC5632_DAI_I2S_DF_I2S (0 << 0)
/* extend serial data port control (VoDAC_i2c/pcm) */
#define ALC5632_DAI_CONTROL2 0x36
/* 0:gpio func, 1:voice pcm */
#define ALC5632_DAI_VOICE_PCM_ENABLE (1 << 15)
/* 0:master, 1:slave */
#define ALC5632_DAI_VOICE_MODE_SEL (1 << 14)
/* 0:disable, 1:enable */
#define ALC5632_DAI_HPF_CLK_CTRL (1 << 13)
/* 0:main, 1:voice */
#define ALC5632_DAI_VOICE_I2S_SYSCLK_SEL (1 << 8)
/* 0:normal, 1:invert */
#define ALC5632_DAI_VOICE_VBCLK_SYSCLK_SEL (1 << 7)
/* 0:normal, 1:invert */
#define ALC5632_DAI_VOICE_I2S_LR_INV (1 << 6)
#define ALC5632_DAI_VOICE_DL_MASK (3 << 2)
#define ALC5632_DAI_VOICE_DL_16 (0 << 2)
#define ALC5632_DAI_VOICE_DL_20 (1 << 2)
#define ALC5632_DAI_VOICE_DL_24 (2 << 2)
#define ALC5632_DAI_VOICE_DL_8 (3 << 2)
#define ALC5632_DAI_VOICE_DF_MASK (3 << 0)
#define ALC5632_DAI_VOICE_DF_I2S (0 << 0)
#define ALC5632_DAI_VOICE_DF_LEFT (1 << 0)
#define ALC5632_DAI_VOICE_DF_PCM_A (2 << 0)
#define ALC5632_DAI_VOICE_DF_PCM_B (3 << 0)
#define ALC5632_PWR_MANAG_ADD1 0x3A
#define ALC5632_PWR_MANAG_ADD1_MASK 0xEFFF
#define ALC5632_PWR_ADD1_DAC_L_EN (1 << 15)
#define ALC5632_PWR_ADD1_DAC_R_EN (1 << 14)
#define ALC5632_PWR_ADD1_ZERO_CROSS (1 << 13)
#define ALC5632_PWR_ADD1_MAIN_I2S_EN (1 << 11)
#define ALC5632_PWR_ADD1_SPK_AMP_EN (1 << 10)
#define ALC5632_PWR_ADD1_HP_OUT_AMP (1 << 9)
#define ALC5632_PWR_ADD1_HP_OUT_ENH_AMP (1 << 8)
#define ALC5632_PWR_ADD1_VOICE_DAC_MIX (1 << 7)
#define ALC5632_PWR_ADD1_SOFTGEN_EN (1 << 6)
#define ALC5632_PWR_ADD1_MIC1_SHORT_CURR (1 << 5)
#define ALC5632_PWR_ADD1_MIC2_SHORT_CURR (1 << 4)
#define ALC5632_PWR_ADD1_MIC1_EN (1 << 3)
#define ALC5632_PWR_ADD1_MIC2_EN (1 << 2)
#define ALC5632_PWR_ADD1_MAIN_BIAS (1 << 1)
#define ALC5632_PWR_ADD1_DAC_REF (1 << 0)
#define ALC5632_PWR_MANAG_ADD2 0x3C
#define ALC5632_PWR_MANAG_ADD2_MASK 0x7FFF
#define ALC5632_PWR_ADD2_PLL1 (1 << 15)
#define ALC5632_PWR_ADD2_PLL2 (1 << 14)
#define ALC5632_PWR_ADD2_VREF (1 << 13)
#define ALC5632_PWR_ADD2_OVT_DET (1 << 12)
#define ALC5632_PWR_ADD2_VOICE_DAC (1 << 10)
#define ALC5632_PWR_ADD2_L_DAC_CLK (1 << 9)
#define ALC5632_PWR_ADD2_R_DAC_CLK (1 << 8)
#define ALC5632_PWR_ADD2_L_ADC_CLK_GAIN (1 << 7)
#define ALC5632_PWR_ADD2_R_ADC_CLK_GAIN (1 << 6)
#define ALC5632_PWR_ADD2_L_HP_MIXER (1 << 5)
#define ALC5632_PWR_ADD2_R_HP_MIXER (1 << 4)
#define ALC5632_PWR_ADD2_SPK_MIXER (1 << 3)
#define ALC5632_PWR_ADD2_MONO_MIXER (1 << 2)
#define ALC5632_PWR_ADD2_L_ADC_REC_MIXER (1 << 1)
#define ALC5632_PWR_ADD2_R_ADC_REC_MIXER (1 << 0)
#define ALC5632_PWR_MANAG_ADD3 0x3E
#define ALC5632_PWR_MANAG_ADD3_MASK 0x7CFF
#define ALC5632_PWR_ADD3_AUXOUT_VOL (1 << 14)
#define ALC5632_PWR_ADD3_SPK_L_OUT (1 << 13)
#define ALC5632_PWR_ADD3_SPK_R_OUT (1 << 12)
#define ALC5632_PWR_ADD3_HP_L_OUT_VOL (1 << 11)
#define ALC5632_PWR_ADD3_HP_R_OUT_VOL (1 << 10)
#define ALC5632_PWR_ADD3_LINEIN_L_VOL (1 << 7)
#define ALC5632_PWR_ADD3_LINEIN_R_VOL (1 << 6)
#define ALC5632_PWR_ADD3_AUXIN_VOL (1 << 5)
#define ALC5632_PWR_ADD3_AUXIN_MIX (1 << 4)
#define ALC5632_PWR_ADD3_MIC1_VOL (1 << 3)
#define ALC5632_PWR_ADD3_MIC2_VOL (1 << 2)
#define ALC5632_PWR_ADD3_MIC1_BOOST_AD (1 << 1)
#define ALC5632_PWR_ADD3_MIC2_BOOST_AD (1 << 0)
#define ALC5632_GPCR1 0x40
#define ALC5632_GPCR1_CLK_SYS_SRC_SEL_PLL1 (1 << 15)
#define ALC5632_GPCR1_CLK_SYS_SRC_SEL_MCLK (0 << 15)
#define ALC5632_GPCR1_DAC_HI_FLT_EN (1 << 10)
#define ALC5632_GPCR1_SPK_AMP_CTRL (7 << 1)
#define ALC5632_GPCR1_VDD_100 (5 << 1)
#define ALC5632_GPCR1_VDD_125 (4 << 1)
#define ALC5632_GPCR1_VDD_150 (3 << 1)
#define ALC5632_GPCR1_VDD_175 (2 << 1)
#define ALC5632_GPCR1_VDD_200 (1 << 1)
#define ALC5632_GPCR1_VDD_225 (0 << 1)
#define ALC5632_GPCR2 0x42
#define ALC5632_GPCR2_PLL1_SOUR_SEL (3 << 12)
#define ALC5632_PLL_FR_MCLK (0 << 12)
#define ALC5632_PLL_FR_BCLK (2 << 12)
#define ALC5632_PLL_FR_VBCLK (3 << 12)
#define ALC5632_GPCR2_CLK_PLL_PRE_DIV1 (0 << 0)
#define ALC5632_PLL1_CTRL 0x44
#define ALC5632_PLL1_CTRL_N_VAL(n) (((n) & 0x0f) << 8)
#define ALC5632_PLL1_M_BYPASS (1 << 7)
#define ALC5632_PLL1_CTRL_K_VAL(k) (((k) & 0x07) << 4)
#define ALC5632_PLL1_CTRL_M_VAL(m) (((m) & 0x0f) << 0)
#define ALC5632_PLL2_CTRL 0x46
#define ALC5632_PLL2_EN (1 << 15)
#define ALC5632_PLL2_RATIO (0 << 15)
#define ALC5632_GPIO_PIN_CONFIG 0x4C
#define ALC5632_GPIO_PIN_POLARITY 0x4E
#define ALC5632_GPIO_PIN_STICKY 0x50
#define ALC5632_GPIO_PIN_WAKEUP 0x52
#define ALC5632_GPIO_PIN_STATUS 0x54
#define ALC5632_GPIO_PIN_SHARING 0x56
#define ALC5632_OVER_CURR_STATUS 0x58
#define ALC5632_SOFTVOL_CTRL 0x5A
#define ALC5632_GPIO_OUPUT_PIN_CTRL 0x5C
#define ALC5632_MISC_CTRL 0x5E
#define ALC5632_MISC_DISABLE_FAST_VREG (1 << 15)
#define ALC5632_MISC_AVC_TRGT_SEL (3 << 12)
#define ALC5632_MISC_AVC_TRGT_RIGHT (1 << 12)
#define ALC5632_MISC_AVC_TRGT_LEFT (2 << 12)
#define ALC5632_MISC_AVC_TRGT_BOTH (3 << 12)
#define ALC5632_MISC_HP_DEPOP_MODE1_EN (1 << 9)
#define ALC5632_MISC_HP_DEPOP_MODE2_EN (1 << 8)
#define ALC5632_MISC_HP_DEPOP_MUTE_L (1 << 7)
#define ALC5632_MISC_HP_DEPOP_MUTE_R (1 << 6)
#define ALC5632_MISC_HP_DEPOP_MUTE (1 << 5)
#define ALC5632_MISC_GPIO_WAKEUP_CTRL (1 << 1)
#define ALC5632_MISC_IRQOUT_INV_CTRL (1 << 0)
#define ALC5632_DAC_CLK_CTRL1 0x60
#define ALC5632_DAC_CLK_CTRL2 0x62
#define ALC5632_DAC_CLK_CTRL2_DIV1_2 (1 << 0)
#define ALC5632_VOICE_DAC_PCM_CLK_CTRL1 0x64
#define ALC5632_PSEUDO_SPATIAL_CTRL 0x68
#define ALC5632_HID_CTRL_INDEX 0x6A
#define ALC5632_HID_CTRL_DATA 0x6C
#define ALC5632_EQ_CTRL 0x6E
/* undocumented */
#define ALC5632_VENDOR_ID1 0x7C
#define ALC5632_VENDOR_ID2 0x7E
#define ALC5632_MAX_REGISTER 0x7E
#endif
| {
"pile_set_name": "Github"
} |
---
title: 如何:创建用户设置的属性网格
ms.date: 07/20/2015
helpviewer_keywords:
- My.Settings object [Visual Basic], creating property grids for user settings
- user settings [Visual Basic], creating property grids
- property grids [Visual Basic], creating for user settings
- property grids
ms.assetid: b0bc737e-50d1-43d1-a6df-268db6e6f91c
ms.openlocfilehash: e93c62ad138be260422319e28a3ed85dd1871a1b
ms.sourcegitcommit: f8c270376ed905f6a8896ce0fe25b4f4b38ff498
ms.translationtype: HT
ms.contentlocale: zh-CN
ms.lasthandoff: 06/04/2020
ms.locfileid: "84410161"
---
# <a name="how-to-create-property-grids-for-user-settings-in-visual-basic"></a>如何:在 Visual Basic 中为用户设置创建属性网格
可通过使用 `My.Settings` 对象的用户设置属性填充 <xref:System.Windows.Forms.PropertyGrid> 控件,创建用户设置的属性网格。
> [!NOTE]
> 若要使此示例正确运行,应用程序必须配置用户设置。 有关详细信息,请参阅[管理应用程序设置 (.NET)](/visualstudio/ide/managing-application-settings-dotnet)。
`My.Settings` 对象将每个设置公开为一个属性。 属性名称就是设置的名称,属性类型就是设置类型。 设置的“范围”确定属性是否为只读;“应用程序”范围设置的属性为只读,而“用户”范围设置的属性为读写。 有关详细信息,请参阅 [My.Settings 对象](../../../language-reference/objects/my-settings-object.md)。
> [!NOTE]
> 不能在运行时更改或保存应用程序范围设置的值。 只有在创建应用程序(通过“项目设计器”)或编辑应用程序的配置文件时才能更改应用程序范围设置。 有关详细信息,请参阅[管理应用程序设置 (.NET)](/visualstudio/ide/managing-application-settings-dotnet)。
此示例使用 <xref:System.Windows.Forms.PropertyGrid> 控件访问 `My.Settings` 对象的用户设置属性。 默认情况下,<xref:System.Windows.Forms.PropertyGrid> 显示 `My.Settings` 对象的所有属性。 但是,用户设置属性具有 <xref:System.Configuration.UserScopedSettingAttribute> 特性。 此示例将 <xref:System.Windows.Forms.PropertyGrid> 的 <xref:System.Windows.Forms.PropertyGrid.BrowsableAttributes%2A> 属性设置为 <xref:System.Configuration.UserScopedSettingAttribute>,以仅显示用户设置属性。
### <a name="to-add-a-user-setting-property-grid"></a>添加用户设置属性网格
1. 将“PropertyGrid”控件从“工具箱”添加到应用程序的设计图面上(假定此处为 `Form1`)。
属性网格控件的默认名称为 `PropertyGrid1`。
2. 双击 `Form1` 的设计图面打开窗体加载事件处理程序的代码。
3. 将 `My.Settings` 对象设置为属性网格的选定对象。
[!code-vb[VbVbalrMyResources#11](~/samples/snippets/visualbasic/VS_Snippets_VBCSharp/VbVbalrMyResources/VB/Form1.vb#11)]
4. 将属性网格配置为只显示用户设置。
[!code-vb[VbVbalrMyResources#12](~/samples/snippets/visualbasic/VS_Snippets_VBCSharp/VbVbalrMyResources/VB/Form1.vb#12)]
> [!NOTE]
> 若要只显示应用程序范围设置,请使用 <xref:System.Configuration.ApplicationScopedSettingAttribute> 特性而不是 <xref:System.Configuration.UserScopedSettingAttribute>。
## <a name="robust-programming"></a>可靠编程
应用程序在关闭时会保存用户设置。 若要立即保存设置,请调用 `My.Settings.Save` 方法。 有关详细信息,请参阅[如何:在 Visual Basic 中暂留用户设置](how-to-persist-user-settings.md)。
## <a name="see-also"></a>请参阅
- [My.Settings 对象](../../../language-reference/objects/my-settings-object.md)
- [如何:在 Visual Basic 中读取应用程序设置](how-to-read-application-settings.md)
- [如何:在 Visual Basic 中更改用户设置](how-to-change-user-settings.md)
- [如何:在 Visual Basic 中暂留用户设置](how-to-persist-user-settings.md)
- [管理应用程序设置 (.NET)](/visualstudio/ide/managing-application-settings-dotnet)
| {
"pile_set_name": "Github"
} |
# ----------------------------------------------------------------------------
# ATMEL Microcontroller Software Support
# ----------------------------------------------------------------------------
# Copyright (c) 2010, Atmel Corporation
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following condition is met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the disclaimer below.
#
# Atmel's name may not be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
# DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# make inner variables
.DEFAULT_GOAL := all

# custom variables
BUILD_NUMBER :=
BUILDS :=
CLEANS :=

# Build macro: for chip $(1), define the phony targets
# cmsis_example_$(1)_build / cmsis_example_$(1)_clean and register them
# in the global BUILDS/CLEANS lists. NOTE: recipe lines MUST start with
# a literal tab, otherwise make rejects them with "missing separator".
define BUILD_SERIES
# output test number information
$(info Defining cmsis_example_$(1)_build and cmsis_example_$(1)_clean)
# add the incoming targets to global targets
BUILDS += cmsis_example_$(1)_build
CLEANS += cmsis_example_$(1)_clean
BUILD_NUMBER += x

.PHONY: cmsis_example_$(1)_build
cmsis_example_$(1)_build:
	@echo ---
	@echo ---
	@echo --- Making $(1)
	@echo ---
	@$(MAKE) CHIP=$(1) -f cmsis_example.mk

.PHONY: cmsis_example_$(1)_clean
cmsis_example_$(1)_clean:
	@echo ---
	@echo ---
	@echo --- Cleaning $(1)
	@echo ---
	@$(MAKE) CHIP=$(1) clean -f cmsis_example.mk
endef

# define SAM series (provides SAM_SERIES), then instantiate the macro
# once per chip via eval/call.
include sam_series.mk
$(foreach SERIES, $(SAM_SERIES), $(eval $(call BUILD_SERIES,$(SERIES))))

# output test number information
$(info Number of devices to be tested $(words $(BUILD_NUMBER)) / $(words $(SAM_SERIES)))

all: $(BUILDS)

clean: $(CLEANS)
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>examples-vaadin</artifactId>
        <groupId>io.thorntail.examples</groupId>
        <version>2.4.1.Final-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <artifactId>example-vaadin-14</artifactId>
    <name>Thorntail Examples: CDI and Vaadin 14 (LTS)</name>
    <description>Thorntail Examples: CDI and Vaadin 14</description>
    <packaging>war</packaging>
    <repositories>
        <!-- Repository used by many Vaadin add-ons -->
        <repository>
            <id>Vaadin Directory</id>
            <url>https://maven.vaadin.com/vaadin-addons</url>
        </repository>
        <!-- Repository needed for prerelease versions of Vaadin -->
        <repository>
            <id>Vaadin - Prereleases</id>
            <url>https://maven.vaadin.com/vaadin-prereleases</url>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </repository>
    </repositories>
    <pluginRepositories>
        <!-- Repository needed for prerelease versions of Vaadin -->
        <pluginRepository>
            <id>Vaadin - Prereleases</id>
            <url>https://maven.vaadin.com/vaadin-prereleases</url>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </pluginRepository>
    </pluginRepositories>
    <build>
        <plugins>
            <plugin>
                <artifactId>maven-war-plugin</artifactId>
                <configuration>
                    <!-- Servlet 3+ application: no web.xml is needed. -->
                    <failOnMissingWebXml>false</failOnMissingWebXml>
                </configuration>
            </plugin>
            <!-- Thorntail plugin: packages the runnable artifact and
                 starts/stops it (execution configuration inherited from
                 the parent pom). -->
            <plugin>
                <groupId>io.thorntail</groupId>
                <artifactId>thorntail-maven-plugin</artifactId>
                <executions>
                    <execution>
                        <id>package</id>
                    </execution>
                    <execution>
                        <id>start</id>
                    </execution>
                    <execution>
                        <id>stop</id>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
    <dependencyManagement>
        <dependencies>
            <!-- Pins the versions of all com.vaadin artifacts below.
                 14.0.0.rc1 is a release candidate; move to a final 14.x
                 release when available. -->
            <dependency>
                <groupId>com.vaadin</groupId>
                <artifactId>vaadin-bom</artifactId>
                <version>14.0.0.rc1</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
    <dependencies>
        <!-- Wildfly Swarm Fractions -->
        <dependency>
            <groupId>io.thorntail</groupId>
            <artifactId>jaxrs</artifactId>
        </dependency>
        <dependency>
            <groupId>io.thorntail</groupId>
            <artifactId>cdi</artifactId>
        </dependency>
        <dependency>
            <groupId>io.thorntail</groupId>
            <artifactId>undertow</artifactId>
        </dependency>
        <dependency>
            <groupId>io.thorntail.examples</groupId>
            <artifactId>examples-base</artifactId>
        </dependency>
        <!--Vaadin related -->
        <dependency>
            <groupId>com.vaadin</groupId>
            <artifactId>vaadin</artifactId>
        </dependency>
        <dependency>
            <groupId>javax.enterprise</groupId>
            <artifactId>cdi-api</artifactId>
            <version>2.0</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>
</project>
"pile_set_name": "Github"
} |
// Locale support (codecvt) -*- C++ -*-
// Copyright (C) 2000-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/codecvt.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{locale}
*/
//
// ISO C++ 14882: 22.2.1.5 Template class codecvt
//
// Written by Benjamin Kosnik <bkoz@redhat.com>
#ifndef _CODECVT_H
#define _CODECVT_H 1
#pragma GCC system_header
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
/// Empty base class for codecvt facet [22.2.1.5].
class codecvt_base
{
public:
enum result
{
ok,
partial,
error,
noconv
};
};
/**
* @brief Common base for codecvt functions.
*
* This template class provides implementations of the public functions
* that forward to the protected virtual functions.
*
* This template also provides abstract stubs for the protected virtual
* functions.
*/
  template<typename _InternT, typename _ExternT, typename _StateT>
    class __codecvt_abstract_base
    : public locale::facet, public codecvt_base
    {
    public:
      // Types:
      typedef codecvt_base::result result;
      typedef _InternT intern_type;
      typedef _ExternT extern_type;
      typedef _StateT state_type;

      // 22.2.1.5.1 codecvt members
      /**
       *  @brief  Convert from internal to external character set.
       *
       *  Converts input string of intern_type to output string of
       *  extern_type.  This is analogous to wcsrtombs.  It does this by
       *  calling codecvt::do_out.
       *
       *  The source and destination character sets are determined by the
       *  facet's locale, internal and external types.
       *
       *  The characters in [from,from_end) are converted and written to
       *  [to,to_end).  from_next and to_next are set to point to the
       *  character following the last successfully converted character,
       *  respectively.  If the result needed no conversion, from_next and
       *  to_next are not affected.
       *
       *  The @a state argument should be initialized if the input is at the
       *  beginning and carried from a previous call if continuing
       *  conversion.  There are no guarantees about how @a state is used.
       *
       *  The result returned is a member of codecvt_base::result.  If
       *  all the input is converted, returns codecvt_base::ok.  If no
       *  conversion is necessary, returns codecvt_base::noconv.  If
       *  the input ends early or there is insufficient space in the
       *  output, returns codecvt_base::partial.  Otherwise the
       *  conversion failed and codecvt_base::error is returned.
       *
       *  @param __state  Persistent conversion state data.
       *  @param __from  Start of input.
       *  @param __from_end  End of input.
       *  @param __from_next  Returns start of unconverted data.
       *  @param __to  Start of output buffer.
       *  @param __to_end  End of output buffer.
       *  @param __to_next  Returns start of unused output area.
       *  @return  codecvt_base::result.
       */
      result
      out(state_type& __state, const intern_type* __from,
	  const intern_type* __from_end, const intern_type*& __from_next,
	  extern_type* __to, extern_type* __to_end,
	  extern_type*& __to_next) const
      {
	return this->do_out(__state, __from, __from_end, __from_next,
			    __to, __to_end, __to_next);
      }

      /**
       *  @brief  Reset conversion state.
       *
       *  Writes characters to output that would restore @a state to initial
       *  conditions.  The idea is that if a partial conversion occurs, then
       *  the converting the characters written by this function would leave
       *  the state in initial conditions, rather than partial conversion
       *  state.  It does this by calling codecvt::do_unshift().
       *
       *  For example, if 4 external characters always converted to 1 internal
       *  character, and input to in() had 6 external characters with state
       *  saved, this function would write two characters to the output and
       *  set the state to initialized conditions.
       *
       *  The source and destination character sets are determined by the
       *  facet's locale, internal and external types.
       *
       *  The result returned is a member of codecvt_base::result.  If the
       *  state could be reset and data written, returns codecvt_base::ok.  If
       *  no conversion is necessary, returns codecvt_base::noconv.  If the
       *  output has insufficient space, returns codecvt_base::partial.
       *  Otherwise the reset failed and codecvt_base::error is returned.
       *
       *  @param __state  Persistent conversion state data.
       *  @param __to  Start of output buffer.
       *  @param __to_end  End of output buffer.
       *  @param __to_next  Returns start of unused output area.
       *  @return  codecvt_base::result.
       */
      result
      unshift(state_type& __state, extern_type* __to, extern_type* __to_end,
	      extern_type*& __to_next) const
      { return this->do_unshift(__state, __to,__to_end,__to_next); }

      /**
       *  @brief  Convert from external to internal character set.
       *
       *  Converts input string of extern_type to output string of
       *  intern_type.  This is analogous to mbsrtowcs.  It does this by
       *  calling codecvt::do_in.
       *
       *  The source and destination character sets are determined by the
       *  facet's locale, internal and external types.
       *
       *  The characters in [from,from_end) are converted and written to
       *  [to,to_end).  from_next and to_next are set to point to the
       *  character following the last successfully converted character,
       *  respectively.  If the result needed no conversion, from_next and
       *  to_next are not affected.
       *
       *  The @a state argument should be initialized if the input is at the
       *  beginning and carried from a previous call if continuing
       *  conversion.  There are no guarantees about how @a state is used.
       *
       *  The result returned is a member of codecvt_base::result.  If
       *  all the input is converted, returns codecvt_base::ok.  If no
       *  conversion is necessary, returns codecvt_base::noconv.  If
       *  the input ends early or there is insufficient space in the
       *  output, returns codecvt_base::partial.  Otherwise the
       *  conversion failed and codecvt_base::error is returned.
       *
       *  @param __state  Persistent conversion state data.
       *  @param __from  Start of input.
       *  @param __from_end  End of input.
       *  @param __from_next  Returns start of unconverted data.
       *  @param __to  Start of output buffer.
       *  @param __to_end  End of output buffer.
       *  @param __to_next  Returns start of unused output area.
       *  @return  codecvt_base::result.
       */
      result
      in(state_type& __state, const extern_type* __from,
	 const extern_type* __from_end, const extern_type*& __from_next,
	 intern_type* __to, intern_type* __to_end,
	 intern_type*& __to_next) const
      {
	return this->do_in(__state, __from, __from_end, __from_next,
			   __to, __to_end, __to_next);
      }

      // Per the standard [22.2.1.5]: returns -1 if the encoding is
      // state-dependent, 0 if variable-width, otherwise the constant
      // number of external characters per internal character.
      // Forwards to do_encoding().
      int
      encoding() const throw()
      { return this->do_encoding(); }

      // True iff no conversion is ever needed between the internal and
      // external types.  Forwards to do_always_noconv().
      bool
      always_noconv() const throw()
      { return this->do_always_noconv(); }

      // Number of external characters in [__from,__end) that would be
      // consumed to produce at most __max internal characters.
      // Forwards to do_length().
      int
      length(state_type& __state, const extern_type* __from,
	     const extern_type* __end, size_t __max) const
      { return this->do_length(__state, __from, __end, __max); }

      // Maximum number of external characters needed to produce one
      // internal character.  Forwards to do_max_length().
      int
      max_length() const throw()
      { return this->do_max_length(); }

    protected:
      explicit
      __codecvt_abstract_base(size_t __refs = 0) : locale::facet(__refs) { }

      virtual
      ~__codecvt_abstract_base() { }

      /**
       *  @brief  Convert from internal to external character set.
       *
       *  Converts input string of intern_type to output string of
       *  extern_type.  This function is a hook for derived classes to change
       *  the value returned.  @see out for more information.
       */
      virtual result
      do_out(state_type& __state, const intern_type* __from,
	     const intern_type* __from_end, const intern_type*& __from_next,
	     extern_type* __to, extern_type* __to_end,
	     extern_type*& __to_next) const = 0;

      virtual result
      do_unshift(state_type& __state, extern_type* __to,
		 extern_type* __to_end, extern_type*& __to_next) const = 0;

      virtual result
      do_in(state_type& __state, const extern_type* __from,
	    const extern_type* __from_end, const extern_type*& __from_next,
	    intern_type* __to, intern_type* __to_end,
	    intern_type*& __to_next) const = 0;

      virtual int
      do_encoding() const throw() = 0;

      virtual bool
      do_always_noconv() const throw() = 0;

      virtual int
      do_length(state_type&, const extern_type* __from,
		const extern_type* __end, size_t __max) const = 0;

      virtual int
      do_max_length() const throw() = 0;
    };
/**
* @brief Primary class template codecvt.
* @ingroup locales
*
* NB: Generic, mostly useless implementation.
*
*/
  template<typename _InternT, typename _ExternT, typename _StateT>
    class codecvt
    : public __codecvt_abstract_base<_InternT, _ExternT, _StateT>
    {
    public:
      // Types:
      typedef codecvt_base::result result;
      typedef _InternT intern_type;
      typedef _ExternT extern_type;
      typedef _StateT state_type;

    protected:
      // Handle to the underlying C library locale used for conversion.
      __c_locale _M_c_locale_codecvt;

    public:
      // Facet identifier used by std::locale for facet lookup.
      static locale::id id;

      /// Construct with @a __refs references held by the locale machinery.
      explicit
      codecvt(size_t __refs = 0)
      : __codecvt_abstract_base<_InternT, _ExternT, _StateT> (__refs),
	_M_c_locale_codecvt(0)
      { }

      /// Construct from an existing C locale handle.
      explicit
      codecvt(__c_locale __cloc, size_t __refs = 0);

    protected:
      virtual
      ~codecvt() { }

      // Virtual hooks implementing the public interface; see
      // __codecvt_abstract_base for the full member contracts.
      virtual result
      do_out(state_type& __state, const intern_type* __from,
	     const intern_type* __from_end, const intern_type*& __from_next,
	     extern_type* __to, extern_type* __to_end,
	     extern_type*& __to_next) const;

      virtual result
      do_unshift(state_type& __state, extern_type* __to,
		 extern_type* __to_end, extern_type*& __to_next) const;

      virtual result
      do_in(state_type& __state, const extern_type* __from,
	    const extern_type* __from_end, const extern_type*& __from_next,
	    intern_type* __to, intern_type* __to_end,
	    intern_type*& __to_next) const;

      virtual int
      do_encoding() const throw();

      virtual bool
      do_always_noconv() const throw();

      virtual int
      do_length(state_type&, const extern_type* __from,
		const extern_type* __end, size_t __max) const;

      virtual int
      do_max_length() const throw();
    };

  // Definition of the primary template's static facet identifier.
  template<typename _InternT, typename _ExternT, typename _StateT>
    locale::id codecvt<_InternT, _ExternT, _StateT>::id;
/// class codecvt<char, char, mbstate_t> specialization.
  template<>
    class codecvt<char, char, mbstate_t>
    : public __codecvt_abstract_base<char, char, mbstate_t>
    {
    public:
      // Types: internal and external types coincide for this
      // specialization (char-to-char conversion).
      typedef char intern_type;
      typedef char extern_type;
      typedef mbstate_t state_type;

    protected:
      // Handle to the underlying C library locale.
      __c_locale _M_c_locale_codecvt;

    public:
      static locale::id id;

      explicit
      codecvt(size_t __refs = 0);

      explicit
      codecvt(__c_locale __cloc, size_t __refs = 0);

    protected:
      virtual
      ~codecvt();

      // Virtual hooks; see __codecvt_abstract_base for the full
      // contracts of the corresponding public members.
      virtual result
      do_out(state_type& __state, const intern_type* __from,
	     const intern_type* __from_end, const intern_type*& __from_next,
	     extern_type* __to, extern_type* __to_end,
	     extern_type*& __to_next) const;

      virtual result
      do_unshift(state_type& __state, extern_type* __to,
		 extern_type* __to_end, extern_type*& __to_next) const;

      virtual result
      do_in(state_type& __state, const extern_type* __from,
	    const extern_type* __from_end, const extern_type*& __from_next,
	    intern_type* __to, intern_type* __to_end,
	    intern_type*& __to_next) const;

      virtual int
      do_encoding() const throw();

      virtual bool
      do_always_noconv() const throw();

      virtual int
      do_length(state_type&, const extern_type* __from,
		const extern_type* __end, size_t __max) const;

      virtual int
      do_max_length() const throw();
    };
#ifdef _GLIBCXX_USE_WCHAR_T
/// class codecvt<wchar_t, char, mbstate_t> specialization.
  template<>
    class codecvt<wchar_t, char, mbstate_t>
    : public __codecvt_abstract_base<wchar_t, char, mbstate_t>
    {
    public:
      // Types: wide internal characters, narrow (multibyte) external
      // characters.
      typedef wchar_t intern_type;
      typedef char extern_type;
      typedef mbstate_t state_type;

    protected:
      // Handle to the underlying C library locale.
      __c_locale _M_c_locale_codecvt;

    public:
      static locale::id id;

      explicit
      codecvt(size_t __refs = 0);

      explicit
      codecvt(__c_locale __cloc, size_t __refs = 0);

    protected:
      virtual
      ~codecvt();

      // Virtual hooks; see __codecvt_abstract_base for the full
      // contracts of the corresponding public members.
      virtual result
      do_out(state_type& __state, const intern_type* __from,
	     const intern_type* __from_end, const intern_type*& __from_next,
	     extern_type* __to, extern_type* __to_end,
	     extern_type*& __to_next) const;

      virtual result
      do_unshift(state_type& __state,
		 extern_type* __to, extern_type* __to_end,
		 extern_type*& __to_next) const;

      virtual result
      do_in(state_type& __state,
	    const extern_type* __from, const extern_type* __from_end,
	    const extern_type*& __from_next,
	    intern_type* __to, intern_type* __to_end,
	    intern_type*& __to_next) const;

      virtual
      int do_encoding() const throw();

      virtual
      bool do_always_noconv() const throw();

      virtual
      int do_length(state_type&, const extern_type* __from,
		    const extern_type* __end, size_t __max) const;

      virtual int
      do_max_length() const throw();
    };
#endif //_GLIBCXX_USE_WCHAR_T
/// class codecvt_byname [22.2.1.6].
  template<typename _InternT, typename _ExternT, typename _StateT>
    class codecvt_byname : public codecvt<_InternT, _ExternT, _StateT>
    {
    public:
      /**
       *  @brief  Construct a codecvt facet for the named locale @a __s.
       *
       *  For the "C" and "POSIX" names the default C locale handle
       *  inherited from codecvt is kept; for any other name the handle
       *  is destroyed and re-created from the named locale.
       */
      explicit
      codecvt_byname(const char* __s, size_t __refs = 0)
      : codecvt<_InternT, _ExternT, _StateT>(__refs)
      {
	if (__builtin_strcmp(__s, "C") != 0
	    && __builtin_strcmp(__s, "POSIX") != 0)
	  {
	    this->_S_destroy_c_locale(this->_M_c_locale_codecvt);
	    this->_S_create_c_locale(this->_M_c_locale_codecvt, __s);
	  }
      }

    protected:
      virtual
      ~codecvt_byname() { }
    };
// Inhibit implicit instantiations for required instantiations,
// which are defined via explicit instantiations elsewhere.
#if _GLIBCXX_EXTERN_TEMPLATE
extern template class codecvt_byname<char, char, mbstate_t>;
extern template
const codecvt<char, char, mbstate_t>&
use_facet<codecvt<char, char, mbstate_t> >(const locale&);
extern template
bool
has_facet<codecvt<char, char, mbstate_t> >(const locale&);
#ifdef _GLIBCXX_USE_WCHAR_T
extern template class codecvt_byname<wchar_t, char, mbstate_t>;
extern template
const codecvt<wchar_t, char, mbstate_t>&
use_facet<codecvt<wchar_t, char, mbstate_t> >(const locale&);
extern template
bool
has_facet<codecvt<wchar_t, char, mbstate_t> >(const locale&);
#endif
#endif
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif // _CODECVT_H
| {
"pile_set_name": "Github"
} |
First the regular steps, i.e. working on the fork and merging the dev-branch of the fork with master of the fork
A> Fork the repo from the upstream remote repo to my personal github
B> Create a Branch in the forked repository (This is Optional)
C> Work on that branch, and at the end merge it with the Master Branch (in the forked repo itself, i.e if I have actually worked on a separate Branch )
Let's say my upstream repo is named institutions-web, under the organization MyOrganization.
Fork the repo from MyOrganization to my personal github > then clone the repo into local machine to work locally > after development-work is done in my locally cloned repo from the root of the project.
Then run following command
`$ git remote add upstream git@github.com:MyOrganization/institutions-web.git`
i.e. the very first time before running < git fetch >
The above step configures a git remote for a fork. It is only required ONCE per repository: it adds a remote named `upstream` pointing at the original repository, so the fork can later be synced with it.
#### Now, I actually have to sync my local forked Repo with the remote Repo, before raising a new PR, this is VERY IMPORTANT as it will avoid all merge-conflict later on. So run below command, to Fetch project branches from the upstream repository to get all the commits.
`$ git fetch upstream`
Followed - https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork
#### 1> First sync it, by fetching latest changes from Upstream post the merge
`git fetch upstream`
I got below
remote: Enumerating objects: 1, done.
remote: Counting objects: 100% (1/1), done.
remote: Total 1 (delta 0), reused 0 (delta 0), pack-reused 0
Unpacking objects: 100% (1/1), done.
From github.com:MyOrganization/institutions-web
a6cd1e82..68ce35a3 master -> upstream/master
#### After fetching from the remote branch, you would still have to merge the commits. So you can actually replace
`$ git fetch upstream`
with
`$ git pull upstream master`
since git pull is essentially git fetch + git merge.
#### 2> Merge the changes from upstream/master into your local master branch. This brings your fork's master branch into sync with the upstream repository, without losing your local changes.
`$ git checkout master`
`$ git merge upstream/master`
#### Generally follow the below principles
You will probably find things more manageable if you work on a feature branch rather than your own master branch. That is, start with git checkout -b my-feature before you start making any changes. If you will be regularly contributing upstream, your life is easiest if your master branch always reflects the state of the upstream master branch at some point in time, because this permits you to update your local master branch with a simple
`git pull upstream master`
Avoid merging code into your feature branches; instead, regularly rebase on upstream/master if you need to incorporate upstream changes.
| {
"pile_set_name": "Github"
} |
//
// MMHandwritingTemplateView.m
// LooseLeaf
//
// Created by Adam Wulf on 4/5/17.
// Copyright © 2017 Milestone Made, LLC. All rights reserved.
//
#import "MMHandwritingTemplateView.h"
#import "MMScrapViewState.h"
#import "MMScrapBackgroundView.h"
#import "NSFileManager+DirectoryOptimizations.h"
#import "UIView+MPHelpers.h"
#import "Constants.h"
@implementation MMHandwritingTemplateView
// Designated-style initializer: defers to the superclass (which stores the
// frame, original page size and properties), then adds this template's
// guide-line layers via -finishInit.
-(instancetype) initWithFrame:(CGRect)frame andOriginalSize:(CGSize)_originalSize andProperties:(NSDictionary*)properties{
    if(self = [super initWithFrame:frame andOriginalSize:_originalSize andProperties:properties]){
        [self finishInit];
    }
    return self;
}

// Convenience initializer without an explicit original size; the superclass
// is expected to derive it from the frame/properties.
-(instancetype) initWithFrame:(CGRect)frame andProperties:(NSDictionary*)properties{
    if(self = [super initWithFrame:frame andProperties:properties]){
        [self finishInit];
    }
    return self;
}
// Shared tail of both initializers: installs two CAShapeLayers on this view,
// one stroking the solid blue writing lines and one stroking the dashed red
// midlines. Both layers have clear fills so only the strokes are visible.
-(void) finishInit{
    CAShapeLayer* blueLines = [CAShapeLayer layer];
    blueLines.path = [[self pathForBlueLines] CGPath];
    blueLines.backgroundColor = [UIColor clearColor].CGColor;
    blueLines.strokeColor = [self lightBlue].CGColor;
    blueLines.fillColor = [UIColor clearColor].CGColor;
    [[self layer] addSublayer:blueLines];

    CAShapeLayer* redLines = [CAShapeLayer layer];
    redLines.path = [[self pathForRedLines] CGPath];
    redLines.backgroundColor = [UIColor clearColor].CGColor;
    redLines.strokeColor = [self lightRed].CGColor;
    redLines.fillColor = [UIColor clearColor].CGColor;
    // Dash lengths are multiplied by the horizontal scale so the pattern keeps
    // its on-screen proportions after the paths are scaled by [self scale].
    redLines.lineDashPattern = @[@(8 * [self scale].x), @(6 * [self scale].x)];
    redLines.lineDashPhase = 0;
    [[self layer] addSublayer:redLines];
}
// The blue used for the solid writing lines (RGB 16, 178, 242).
-(UIColor*)lightBlue{
    CGFloat const red = 16.0 / 255.0;
    CGFloat const green = 178.0 / 255.0;
    CGFloat const blue = 242.0 / 255.0;
    return [UIColor colorWithRed:red green:green blue:blue alpha:1.0];
}

// The pink/red used for the dashed midlines (RGB 238, 91, 162).
-(UIColor*)lightRed{
    CGFloat const red = 238.0 / 255.0;
    CGFloat const green = 91.0 / 255.0;
    CGFloat const blue = 162.0 / 255.0;
    return [UIColor colorWithRed:red green:green blue:blue alpha:1.0];
}
// Builds the path for the solid blue guide lines: pairs of horizontal lines
// one inch apart, each pair separated by .75in, inset by the page margins.
// All measurements convert physical inches to points via the device's
// pixels-per-inch divided by the screen scale. The path is computed in the
// template's unscaled page coordinates and then scaled by [self scale].
// NOTE(review): assumes `originalSize` is an inherited ivar holding the
// unscaled page size — confirm against the superclass.
-(UIBezierPath*) pathForBlueLines{
    CGFloat verticalMargin = [UIDevice ppi] * 1.2 / [[UIScreen mainScreen] scale];
    CGFloat verticalSpacing = [UIDevice ppi] * .75 / [[UIScreen mainScreen] scale];
    CGFloat horizontalMargin = [UIDevice ppi] * .75 / [[UIScreen mainScreen] scale];
    CGFloat singleLineSpacing = [UIDevice ppi] * 1 / [[UIScreen mainScreen] scale];

    UIBezierPath* path = [UIBezierPath bezierPath];
    CGFloat y = verticalMargin;
    // Emit line pairs until the bottom margin is reached.
    while (y < originalSize.height - verticalMargin) {
        [path moveToPoint:CGPointMake(horizontalMargin, y)];
        [path addLineToPoint:CGPointMake(originalSize.width - horizontalMargin, y)];
        y += singleLineSpacing;
        [path moveToPoint:CGPointMake(horizontalMargin, y)];
        [path addLineToPoint:CGPointMake(originalSize.width - horizontalMargin, y)];
        y += verticalSpacing;
    }
    // Map from page coordinates into this view's coordinate space.
    [path applyTransform:CGAffineTransformMakeScale([self scale].x, [self scale].y)];
    return path;
}
// Builds the path for the dashed red midlines: one horizontal line centered
// vertically inside each pair of blue lines produced by -pathForBlueLines
// (same margins and spacing constants, advanced by half the pair height on
// each side of the line). The dash pattern is set directly on the path so
// that stroking it in -drawInContext:forSize: is dashed as well.
-(UIBezierPath*) pathForRedLines{
    CGFloat verticalMargin = [UIDevice ppi] * 1.2 / [[UIScreen mainScreen] scale];
    CGFloat verticalSpacing = [UIDevice ppi] * .75 / [[UIScreen mainScreen] scale];
    CGFloat horizontalMargin = [UIDevice ppi] * .75 / [[UIScreen mainScreen] scale];
    CGFloat singleLineSpacing = [UIDevice ppi] * 1 / [[UIScreen mainScreen] scale];

    UIBezierPath* path = [UIBezierPath bezierPath];
    // Same 8-on / 6-off dash as the CAShapeLayer configured in -finishInit.
    CGFloat outsideBorderPattern[] = {8 * [self scale].x, 6 * [self scale].x};
    [path setLineDash:outsideBorderPattern count:2 phase:0];

    CGFloat y = verticalMargin;
    // NOTE(review): this loop tests against originalSize.height without
    // subtracting the bottom margin (unlike the blue-line loop) — confirm
    // whether a final midline below the last blue pair is intentional.
    while (y < originalSize.height) {
        y += singleLineSpacing / 2;
        [path moveToPoint:CGPointMake(horizontalMargin, y)];
        [path addLineToPoint:CGPointMake(originalSize.width - horizontalMargin, y)];
        y += singleLineSpacing / 2;
        y += verticalSpacing;
    }
    [path applyTransform:CGAffineTransformMakeScale([self scale].x, [self scale].y)];
    return path;
}
// Renders the template into an arbitrary CGContext (e.g. for export),
// scaling/translating so the page content fills `size`, then stroking the
// same blue and red paths used by the on-screen layers.
// NOTE(review): assumes `pageSize` and CGSizeFill/
// CGContextSaveThenRestoreForBlock are provided by the superclass /
// Constants.h — confirm; they are not visible in this file.
-(void) drawInContext:(CGContextRef)context forSize:(CGSize)size{
    CGRect scaledScreen = CGSizeFill(originalSize, size);

    CGContextSaveThenRestoreForBlock(context, ^{
        // Scraps
        // adjust so that (0,0) is the origin of the content rect in the PDF page,
        // since the PDF may be much taller/wider than our screen
        CGContextScaleCTM(context, size.width / pageSize.width, size.height / pageSize.height);
        CGContextTranslateCTM(context, -scaledScreen.origin.x, -scaledScreen.origin.y);

        [[self lightBlue] setStroke];
        [[self pathForBlueLines] stroke];

        [[self lightRed] setStroke];
        // The red path carries its own dash settings (set in -pathForRedLines).
        [[self pathForRedLines] stroke];
    });
}
@end
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* The "beaconinfo" collection of methods.
* Typical usage is:
* <code>
* $proximitybeaconService = new Google_Service_Proximitybeacon(...);
* $beaconinfo = $proximitybeaconService->beaconinfo;
* </code>
*/
class Google_Service_Proximitybeacon_Resource_Beaconinfo extends Google_Service_Resource
{
  /**
   * Given one or more beacon observations, returns any beacon information and
   * attachments accessible to your application. Authorize by using the [API
   * key](https://developers.google.com/beacons/proximity/get-
   * started#request_a_browser_api_key) for the application.
   * (beaconinfo.getforobserved)
   *
   * @param Google_Service_Proximitybeacon_GetInfoForObservedBeaconsRequest $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Proximitybeacon_GetInfoForObservedBeaconsResponse
   */
  public function getforobserved(Google_Service_Proximitybeacon_GetInfoForObservedBeaconsRequest $postBody, $optParams = array())
  {
    // Merge the required request body with any caller-supplied options and
    // dispatch through the generic resource call mechanism.
    $params = array_merge(array('postBody' => $postBody), $optParams);
    return $this->call('getforobserved', array($params), "Google_Service_Proximitybeacon_GetInfoForObservedBeaconsResponse");
  }
}
| {
"pile_set_name": "Github"
} |
package com.yiqiniu.easytrans.rpc;
import java.util.List;
import java.util.Map;
import com.yiqiniu.easytrans.filter.EasyTransFilter;
import com.yiqiniu.easytrans.protocol.BusinessIdentifer;
import com.yiqiniu.easytrans.protocol.RpcBusinessProvider;
/**
 * SPI for plugging an RPC framework into EasyTrans. An implementation
 * publishes business service implementations over a concrete RPC stack and
 * lets EasyTrans insert its own filters into that stack's call chain.
 */
public interface EasyTransRpcProvider {

	/**
	 * start the service list offered
	 * @param businessInterface the service interface to expose
	 * @param businessList business implementations (detailServiceImpl), keyed by their {@link BusinessIdentifer}
	 */
	void startService(Class<?> businessInterface,Map<BusinessIdentifer,RpcBusinessProvider<?>> businessList);

	/**
	 * add EasyTransFilter to RPC filters
	 * @param filters ordered filters; they are applied in list order
	 */
	void addEasyTransFilter(List<EasyTransFilter> filters);
}
| {
"pile_set_name": "Github"
} |
#include "os.h"
#include <libsec.h>
/*
* This MD4 is implemented from the description in Stinson's Cryptography,
* theory and practice. -- presotto
*/
/*
 * Per-step left-rotation amounts used in the algorithm
 * (the s-values of RFC 1320: S<round><step-in-group>).
 */
enum
{
	S11=	3,
	S12=	7,
	S13=	11,
	S14=	19,

	S21=	3,
	S22=	5,
	S23=	9,
	S24=	13,

	S31=	3,
	S32=	9,
	S33=	11,
	S34=	15,
};
/*
 * One entry per step of the 48-step compression loop: which of the 16
 * message words to add, and how far to rotate left afterwards.
 */
typedef struct MD4Table MD4Table;
struct MD4Table
{
	uchar	x;	/* index into data block */
	uchar	rot;	/* amount to rotate left by */
};

static MD4Table tab[] =
{
	/* round 1: words in order 0..15 */
/*[0]*/	{	0,	S11},
	{	1,	S12},
	{	2,	S13},
	{	3,	S14},
	{	4,	S11},
	{	5,	S12},
	{	6,	S13},
	{	7,	S14},
	{	8,	S11},
	{	9,	S12},
	{	10,	S13},
	{	11,	S14},
	{	12,	S11},
	{	13,	S12},
	{	14,	S13},
	{	15,	S14},

	/* round 2: column order (0,4,8,12), (1,5,9,13), ... */
/*[16]*/{	0,	S21},
	{	4,	S22},
	{	8,	S23},
	{	12,	S24},
	{	1,	S21},
	{	5,	S22},
	{	9,	S23},
	{	13,	S24},
	{	2,	S21},
	{	6,	S22},
	{	10,	S23},
	{	14,	S24},
	{	3,	S21},
	{	7,	S22},
	{	11,	S23},
	{	15,	S24},

	/* round 3: bit-reversed order (0,8,4,12), (2,10,6,14), ... */
/*[32]*/{	0,	S31},
	{	8,	S32},
	{	4,	S33},
	{	12,	S34},
	{	2,	S31},
	{	10,	S32},
	{	6,	S33},
	{	14,	S34},
	{	1,	S31},
	{	9,	S32},
	{	5,	S33},
	{	13,	S34},
	{	3,	S31},
	{	11,	S32},
	{	7,	S33},
	{	15,	S34},
};
static void encode(uchar*, u32int*, ulong);
static void decode(u32int*, uchar*, ulong);
/*
 * Run the MD4 compression function over one or more complete 64-byte
 * blocks starting at p (len must be a multiple of 64), folding each
 * block into s->state and advancing s->len by 64 per block.
 */
static void
md4block(uchar *p, ulong len, MD4state *s)
{
	int i;
	u32int a, b, c, d, tmp;
	MD4Table *t;
	uchar *end;
	u32int x[16];

	for(end = p+len; p < end; p += 64){
		a = s->state[0];
		b = s->state[1];
		c = s->state[2];
		d = s->state[3];

		/* split the block into sixteen little-endian 32-bit words */
		decode(x, p, 64);

		/* 48 steps; i>>4 selects the round's boolean function */
		for(i = 0; i < 48; i++){
			t = tab + i;
			switch(i>>4){
			case 0:
				/* round 1: F = (b&c)|(~b&d), no additive constant */
				a += (b & c) | (~b & d);
				break;
			case 1:
				/* round 2: G = majority(b,c,d), constant = floor(2^30*sqrt(2)) */
				a += ((b & c) | (b & d) | (c & d)) + 0x5A827999;
				break;
			case 2:
				/* round 3: H = b^c^d, constant = floor(2^30*sqrt(3)) */
				a += (b ^ c ^ d) + 0x6ED9EBA1;
				break;
			}
			a += x[t->x];
			a = (a << t->rot) | (a >> (32 - t->rot));	/* rotate left */

			/* rotate variables */
			tmp = d;
			d = c;
			c = b;
			b = a;
			a = tmp;
		}
		s->state[0] += a;
		s->state[1] += b;
		s->state[2] += c;
		s->state[3] += d;
		s->len += 64;
	}
}
/*
 * Incremental MD4 (RFC 1320).
 *
 * p/len:	next chunk of input.
 * digest:	nil for intermediate calls; non-nil (MD4dlen bytes) on the
 *		final call, which pads the remaining input, writes the
 *		digest, and frees the state if it was allocated here.
 * s:		running state, or nil on the first call to have one
 *		allocated (and zeroed) internally.
 *
 * Returns the state to pass on the next call, or nil after the final
 * call (also nil if allocation fails).
 */
MD4state*
md4(uchar *p, ulong len, uchar *digest, MD4state *s)
{
	u32int x[16];		/* only x[0..1] used, for the bit-length trailer */
	uchar buf[128];		/* scratch for the final padded block(s) */
	int i;
	uchar *e;

	if(s == nil){
		s = malloc(sizeof(*s));
		if(s == nil)
			return nil;
		memset(s, 0, sizeof(*s));
		s->malloced = 1;
	}

	if(s->seeded == 0){
		/* seed the state, these constants would look nicer big-endian */
		s->state[0] = 0x67452301;
		s->state[1] = 0xefcdab89;
		s->state[2] = 0x98badcfe;
		s->state[3] = 0x10325476;
		s->seeded = 1;
	}

	/* fill out the partial 64 byte block from previous calls */
	if(s->blen){
		i = 64 - s->blen;
		if(len < i)
			i = len;
		memmove(s->buf + s->blen, p, i);
		len -= i;
		s->blen += i;
		p += i;
		/* only hash the carry buffer once it holds a full block */
		if(s->blen == 64){
			md4block(s->buf, s->blen, s);
			s->blen = 0;
		}
	}

	/* do 64 byte blocks */
	i = len & ~0x3f;
	if(i){
		md4block(p, i, s);
		len -= i;
		p += i;
	}

	/* save the left overs if not last call */
	if(digest == 0){
		if(len){
			memmove(s->buf, p, len);
			s->blen += len;
		}
		return s;
	}

	/*
	 *  this is the last time through, pad what's left with 0x80,
	 *  0's, and the input count to create a multiple of 64 bytes
	 */
	if(s->blen){
		p = s->buf;
		len = s->blen;
	} else {
		memmove(buf, p, len);
		p = buf;
	}
	s->len += len;
	e = p + len;
	/* pad to 56 mod 64, leaving 8 bytes for the 64-bit bit count */
	if(len < 56)
		i = 56 - len;
	else
		i = 120 - len;
	memset(e, 0, i);
	*e = 0x80;
	len += i;

	/* append the count */
	x[0] = s->len<<3;	/* total length in bits, low word */
	x[1] = s->len>>29;	/* high word */
	encode(p+len, x, 8);

	/* digest the last part */
	md4block(p, len+8, s);

	/* return result and free state */
	encode(digest, s->state, MD4dlen);
	if(s->malloced == 1)
		free(s);
	return nil;
}
/*
 * Serialize an array of 32-bit words into bytes, little-endian.
 * len is the number of output bytes and is assumed to be a
 * multiple of 4.
 */
static void
encode(uchar *output, u32int *input, ulong len)
{
	ulong i;
	u32int w;

	for(i = 0; i < len; i += 4){
		w = input[i/4];
		output[i] = w & 0xff;
		output[i+1] = (w >> 8) & 0xff;
		output[i+2] = (w >> 16) & 0xff;
		output[i+3] = (w >> 24) & 0xff;
	}
}
/*
 * Deserialize little-endian bytes into an array of 32-bit words.
 * len is the number of input bytes and is assumed to be a
 * multiple of 4.
 */
static void
decode(u32int *output, uchar *input, ulong len)
{
	ulong i;

	for(i = 0; i < len; i += 4)
		output[i/4] = (u32int)input[i]
			| ((u32int)input[i+1] << 8)
			| ((u32int)input[i+2] << 16)
			| ((u32int)input[i+3] << 24);
}
| {
"pile_set_name": "Github"
} |
package api2go
import (
"fmt"
"net/http"
"net/url"
"github.com/manyminds/api2go/jsonapi"
)
// The Response struct implements api2go.Responder and can be used as a default
// implementation for your responses.
// You can fill the field `Meta` with all the metadata your application needs,
// like license, tokens, etc.
type Response struct {
	Res        interface{}            // the actual result payload
	Code       int                    // HTTP status code to return
	Meta       map[string]interface{} // optional top-level meta object
	Pagination Pagination             // source of the next/prev/first/last links
}

// Metadata returns additional meta data
func (r Response) Metadata() map[string]interface{} {
	return r.Meta
}

// Result returns the actual payload
func (r Response) Result() interface{} {
	return r.Res
}

// StatusCode sets the return status code
func (r Response) StatusCode() int {
	return r.Code
}
// buildLink constructs a jsonapi.Link for base, carrying over the incoming
// request's query parameters and setting/overriding one "page[<key>]"
// parameter per entry of the pagination map.
func buildLink(base string, r *http.Request, pagination map[string]string) jsonapi.Link {
	// Start from the request's existing query so filters, sorts, etc. survive.
	params := r.URL.Query()
	for k, v := range pagination {
		qk := fmt.Sprintf("page[%s]", k)
		params.Set(qk, v)
	}
	if len(params) == 0 {
		return jsonapi.Link{Href: base}
	}
	// NOTE(review): the encoded query is unescaped, presumably to keep
	// "page[x]" readable in the emitted link; this assumes parameter values
	// contain no characters ('&', '=', '%…') that become ambiguous once
	// unescaped — confirm. The QueryUnescape error is deliberately ignored.
	query, _ := url.QueryUnescape(params.Encode())
	return jsonapi.Link{Href: fmt.Sprintf("%s?%s", base, query)}
}
// Links returns a jsonapi.Links object to include in the top-level response.
// A link is emitted for each pagination direction that is populated.
func (r Response) Links(req *http.Request, baseURL string) (ret jsonapi.Links) {
	ret = jsonapi.Links{}
	directions := []struct {
		name   string
		params map[string]string
	}{
		{"next", r.Pagination.Next},
		{"prev", r.Pagination.Prev},
		{"first", r.Pagination.First},
		{"last", r.Pagination.Last},
	}
	for _, d := range directions {
		if d.params != nil {
			ret[d.name] = buildLink(baseURL, req, d.params)
		}
	}
	return
}
| {
"pile_set_name": "Github"
} |
# Makefile.in generated by automake 1.14.1 from Makefile.am.
# packages/Linux/RPM/Makefile. Generated from Makefile.in by configure.
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/curl
pkgincludedir = $(includedir)/curl
pkglibdir = $(libdir)/curl
pkglibexecdir = $(libexecdir)/curl
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = x86_64-unknown-linux-gnu
host_triplet = x86_64-unknown-linux-gnu
subdir = packages/Linux/RPM
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/mkinstalldirs $(srcdir)/curl.spec.in \
$(srcdir)/curl-ssl.spec.in README
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/curl-compilers.m4 \
$(top_srcdir)/m4/curl-confopts.m4 \
$(top_srcdir)/m4/curl-functions.m4 \
$(top_srcdir)/m4/curl-openssl.m4 \
$(top_srcdir)/m4/curl-override.m4 \
$(top_srcdir)/m4/curl-reentrant.m4 $(top_srcdir)/m4/libtool.m4 \
$(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \
$(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \
$(top_srcdir)/m4/xc-am-iface.m4 \
$(top_srcdir)/m4/xc-cc-check.m4 \
$(top_srcdir)/m4/xc-lt-iface.m4 \
$(top_srcdir)/m4/xc-translit.m4 \
$(top_srcdir)/m4/xc-val-flgs.m4 \
$(top_srcdir)/m4/zz40-xc-ovr.m4 \
$(top_srcdir)/m4/zz50-xc-ovr.m4 \
$(top_srcdir)/m4/zz60-xc-ovr.m4 $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
CONFIG_HEADER = $(top_builddir)/lib/curl_config.h \
$(top_builddir)/include/curl/curlbuild.h
CONFIG_CLEAN_FILES = curl.spec curl-ssl.spec
CONFIG_CLEAN_VPATH_FILES =
AM_V_P = $(am__v_P_$(V))
am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY))
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_$(V))
am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_$(V))
am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY))
am__v_at_0 = @
am__v_at_1 =
SOURCES =
DIST_SOURCES =
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = ${SHELL} "/home/sniper/SurviveServer/deps/curl-7.38.0/missing" --run aclocal-1.14
AMTAR = $${TAR-tar}
AM_DEFAULT_VERBOSITY = 1
AR = /usr/bin/ar
AS = as
AUTOCONF = ${SHELL} "/home/sniper/SurviveServer/deps/curl-7.38.0/missing" --run autoconf
AUTOHEADER = ${SHELL} "/home/sniper/SurviveServer/deps/curl-7.38.0/missing" --run autoheader
AUTOMAKE = ${SHELL} "/home/sniper/SurviveServer/deps/curl-7.38.0/missing" --run automake-1.14
AWK = mawk
BLANK_AT_MAKETIME =
CC = gcc
CCDEPMODE = depmode=gcc3
CFLAGS = -O2 -Wno-system-headers
CFLAG_CURL_SYMBOL_HIDING = -fvisibility=hidden
CONFIGURE_OPTIONS = ""
CPP = gcc -E
CPPFLAGS =
CPPFLAG_CURL_STATICLIB =
CURLVERSION = 7.38.0
CURL_CA_BUNDLE = "/etc/ssl/certs/ca-certificates.crt"
CURL_CFLAG_EXTRAS =
CURL_DISABLE_DICT =
CURL_DISABLE_FILE =
CURL_DISABLE_FTP =
CURL_DISABLE_GOPHER =
CURL_DISABLE_HTTP =
CURL_DISABLE_IMAP =
CURL_DISABLE_LDAP = 1
CURL_DISABLE_LDAPS = 1
CURL_DISABLE_POP3 =
CURL_DISABLE_PROXY =
CURL_DISABLE_RTSP =
CURL_DISABLE_SMTP =
CURL_DISABLE_TELNET =
CURL_DISABLE_TFTP =
CURL_LT_SHLIB_VERSIONED_FLAVOUR =
CURL_NETWORK_AND_TIME_LIBS =
CURL_NETWORK_LIBS =
CYGPATH_W = echo
DEFS = -DHAVE_CONFIG_H
DEPDIR = .deps
DLLTOOL = false
DSYMUTIL =
DUMPBIN =
ECHO_C =
ECHO_N = -n
ECHO_T =
EGREP = /bin/grep -E
ENABLE_SHARED = yes
ENABLE_STATIC = yes
EXEEXT =
FGREP = /bin/grep -F
GREP = /bin/grep
HAVE_GNUTLS_SRP =
HAVE_LDAP_SSL = 1
HAVE_LIBZ = 1
HAVE_SSLEAY_SRP = 1
IDN_ENABLED =
INSTALL = /usr/bin/install -c
INSTALL_DATA = ${INSTALL} -m 644
INSTALL_PROGRAM = ${INSTALL}
INSTALL_SCRIPT = ${INSTALL}
INSTALL_STRIP_PROGRAM = $(install_sh) -c -s
IPV6_ENABLED = 1
LD = /usr/bin/ld -m elf_x86_64
LDFLAGS =
LIBCURL_LIBS = -lssl -lcrypto -lssl -lcrypto -lz
LIBMETALINK_CPPFLAGS =
LIBMETALINK_LDFLAGS =
LIBMETALINK_LIBS =
LIBOBJS =
LIBS = -lssl -lcrypto -lssl -lcrypto -lz
LIBTOOL = $(SHELL) $(top_builddir)/libtool
LIPO =
LN_S = ln -s
LTLIBOBJS =
MAINT = #
MAKEINFO = ${SHELL} "/home/sniper/SurviveServer/deps/curl-7.38.0/missing" --run makeinfo
MANIFEST_TOOL = :
MANOPT = -man
MKDIR_P = /bin/mkdir -p
NM = /usr/bin/nm -B
NMEDIT =
NROFF = /usr/bin/nroff
NSS_LIBS =
OBJDUMP = objdump
OBJEXT = o
OTOOL =
OTOOL64 =
PACKAGE = curl
PACKAGE_BUGREPORT = a suitable curl mailing list: http://curl.haxx.se/mail/
PACKAGE_NAME = curl
PACKAGE_STRING = curl -
PACKAGE_TARNAME = curl
PACKAGE_URL =
PACKAGE_VERSION = -
PATH_SEPARATOR = :
PERL = /usr/bin/perl
PKGADD_NAME = cURL - a client that groks URLs
PKGADD_PKG = HAXXcurl
PKGADD_VENDOR = curl.haxx.se
PKGCONFIG = no
RANDOM_FILE = /dev/urandom
RANLIB = ranlib
REQUIRE_LIB_DEPS = no
SED = /bin/sed
SET_MAKE =
SHELL = /bin/bash
SSL_ENABLED = 1
STRIP = strip
SUPPORT_FEATURES = SSL IPv6 libz NTLM NTLM_WB TLS-SRP
SUPPORT_PROTOCOLS = DICT FILE FTP FTPS GOPHER HTTP HTTPS IMAP IMAPS POP3 POP3S RTSP SMTP SMTPS TELNET TFTP
USE_ARES =
USE_AXTLS =
USE_CYASSL =
USE_DARWINSSL =
USE_GNUTLS =
USE_GNUTLS_NETTLE =
USE_LIBRTMP =
USE_LIBSSH2 =
USE_NGHTTP2 =
USE_NSS =
USE_OPENLDAP =
USE_POLARSSL =
USE_SCHANNEL =
USE_SSLEAY = 1
USE_WINDOWS_SSPI =
VERSION = -
VERSIONNUM = 072600
ZLIB_LIBS = -lz
abs_builddir = /home/sniper/SurviveServer/deps/curl-7.38.0/packages/Linux/RPM
abs_srcdir = /home/sniper/SurviveServer/deps/curl-7.38.0/packages/Linux/RPM
abs_top_builddir = /home/sniper/SurviveServer/deps/curl-7.38.0
abs_top_srcdir = /home/sniper/SurviveServer/deps/curl-7.38.0
ac_ct_AR =
ac_ct_CC = gcc
ac_ct_DUMPBIN =
am__include = include
am__leading_dot = .
am__quote =
am__tar = $${TAR-tar} chof - "$$tardir"
am__untar = $${TAR-tar} xf -
bindir = ${exec_prefix}/bin
build = x86_64-unknown-linux-gnu
build_alias =
build_cpu = x86_64
build_os = linux-gnu
build_vendor = unknown
builddir = .
datadir = ${datarootdir}
datarootdir = ${prefix}/share
docdir = ${datarootdir}/doc/${PACKAGE_TARNAME}
dvidir = ${docdir}
exec_prefix = ${prefix}
host = x86_64-unknown-linux-gnu
host_alias =
host_cpu = x86_64
host_os = linux-gnu
host_vendor = unknown
htmldir = ${docdir}
includedir = ${prefix}/include
infodir = ${datarootdir}/info
install_sh = ${SHELL} /home/sniper/SurviveServer/deps/curl-7.38.0/install-sh
libdir = ${exec_prefix}/lib
libexecdir = ${exec_prefix}/libexec
libext = a
localedir = ${datarootdir}/locale
localstatedir = ${prefix}/var
mandir = ${datarootdir}/man
mkdir_p = $(MKDIR_P)
oldincludedir = /usr/include
pdfdir = ${docdir}
prefix = /usr/local
program_transform_name = s,x,x,
psdir = ${docdir}
sbindir = ${exec_prefix}/sbin
sharedstatedir = ${prefix}/com
srcdir = .
subdirs =
sysconfdir = ${prefix}/etc
target_alias =
top_build_prefix = ../../../
top_builddir = ../../..
top_srcdir = ../../..
EXTRA_DIST = README curl-ssl.spec.in curl.spec.in make_curl_rpm
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu packages/Linux/RPM/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --gnu packages/Linux/RPM/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: # $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): # $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
curl.spec: $(top_builddir)/config.status $(srcdir)/curl.spec.in
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
curl-ssl.spec: $(top_builddir)/config.status $(srcdir)/curl-ssl.spec.in
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
tags TAGS:
ctags CTAGS:
cscope cscopelist:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile
installdirs:
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic clean-libtool mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am:
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am:
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am:
.MAKE: install-am install-strip
.PHONY: all all-am check check-am clean clean-generic clean-libtool \
cscopelist-am ctags-am distclean distclean-generic \
distclean-libtool distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am install-dvi \
install-dvi-am install-exec install-exec-am install-html \
install-html-am install-info install-info-am install-man \
install-pdf install-pdf-am install-ps install-ps-am \
install-strip installcheck installcheck-am installdirs \
maintainer-clean maintainer-clean-generic mostlyclean \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags-am uninstall uninstall-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
| {
"pile_set_name": "Github"
} |
import { Activity, ConnectionStatus, IBotConnection, Message, User } from 'botframework-directlinejs';
import { Strings } from './Strings';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import { ActivityOrID } from './Types';
import { HostConfig } from 'adaptivecards';
import { Reducer } from 'redux';
/**
 * State machine for the shell's listening (speech/microphone) input —
 * presumably drives the Listening_* actions below; confirm against the
 * implementation.
 */
export declare enum ListeningState {
    /** Not listening. */
    STOPPED = 0,
    /** Transitioning from stopped to started. */
    STARTING = 1,
    /** Actively listening. */
    STARTED = 2,
    /** Transitioning from started to stopped. */
    STOPPING = 3,
}
export declare const sendMessage: (text: string, from: User, locale: string) => ChatActions;
export declare const sendFiles: (files: FileList, from: User, locale: string) => ChatActions;
export interface ShellState {
sendTyping: boolean;
input: string;
listeningState: ListeningState;
lastInputViaSpeech: boolean;
}
export declare type ShellAction = {
type: 'Update_Input';
input: string;
source: "text" | "speech";
} | {
type: 'Listening_Starting';
} | {
type: 'Listening_Start';
} | {
type: 'Listening_Stopping';
} | {
type: 'Listening_Stop';
} | {
type: 'Stop_Speaking';
} | {
type: 'Card_Action_Clicked';
} | {
type: 'Set_Send_Typing';
sendTyping: boolean;
} | {
type: 'Send_Message';
activity: Activity;
} | {
type: 'Speak_SSML';
ssml: string;
locale: string;
autoListenAfterSpeak: boolean;
};
export declare const shell: Reducer<ShellState>;
export interface FormatState {
chatTitle: boolean | string;
locale: string;
showUploadButton: boolean;
strings: Strings;
carouselMargin: number;
}
export declare type FormatAction = {
type: 'Set_Chat_Title';
chatTitle: boolean | string;
} | {
type: 'Set_Locale';
locale: string;
} | {
type: 'Set_Measurements';
carouselMargin: number;
} | {
type: 'Toggle_Upload_Button';
showUploadButton: boolean;
};
export declare const format: Reducer<FormatState>;
export interface SizeState {
height: number;
width: number;
}
export declare type SizeAction = {
type: 'Set_Size';
width: number;
height: number;
};
export declare const size: Reducer<SizeState>;
export interface ConnectionState {
connectionStatus: ConnectionStatus;
botConnection: IBotConnection;
selectedActivity: BehaviorSubject<ActivityOrID>;
user: User;
bot: User;
}
export declare type ConnectionAction = {
type: 'Start_Connection';
botConnection: IBotConnection;
user: User;
bot: User;
selectedActivity: BehaviorSubject<ActivityOrID>;
} | {
type: 'Connection_Change';
connectionStatus: ConnectionStatus;
};
export declare const connection: Reducer<ConnectionState>;
export interface HistoryState {
activities: Activity[];
clientActivityBase: string;
clientActivityCounter: number;
selectedActivity: Activity;
}
export declare type HistoryAction = {
type: 'Receive_Message' | 'Send_Message' | 'Show_Typing' | 'Receive_Sent_Message';
activity: Activity;
} | {
type: 'Send_Message_Try' | 'Send_Message_Fail' | 'Send_Message_Retry';
clientActivityId: string;
} | {
type: 'Send_Message_Succeed';
clientActivityId: string;
id: string;
} | {
type: 'Select_Activity';
selectedActivity: Activity;
} | {
type: 'Take_SuggestedAction';
message: Message;
} | {
type: 'Clear_Typing';
id: string;
};
export declare const history: Reducer<HistoryState>;
export interface AdaptiveCardsState {
hostConfig: HostConfig;
}
export declare type AdaptiveCardsAction = {
type: 'Set_AdaptiveCardsHostConfig';
payload: any;
};
export declare const adaptiveCards: Reducer<AdaptiveCardsState>;
export declare type ChatActions = ShellAction | FormatAction | SizeAction | ConnectionAction | HistoryAction | AdaptiveCardsAction;
export interface ChatState {
adaptiveCards: AdaptiveCardsState;
connection: ConnectionState;
format: FormatState;
history: HistoryState;
shell: ShellState;
size: SizeState;
}
import 'rxjs/add/operator/catch';
import 'rxjs/add/operator/delay';
import 'rxjs/add/operator/do';
import 'rxjs/add/operator/filter';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/merge';
import 'rxjs/add/operator/mergeMap';
import 'rxjs/add/operator/throttleTime';
import 'rxjs/add/operator/takeUntil';
import 'rxjs/add/observable/bindCallback';
import 'rxjs/add/observable/empty';
import 'rxjs/add/observable/of';
import { Store } from 'redux';
export declare const createStore: () => Store<ChatState>;
export declare type ChatStore = Store<ChatState>;
| {
"pile_set_name": "Github"
} |
// To regenerate api.pb.go run hack/update-generated-runtime.sh
syntax = 'proto3';
package runtime.v1alpha2;
option go_package = "v1alpha2";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.goproto_getters_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_unrecognized_all) = false;
// Runtime service defines the public APIs for remote container runtimes
service RuntimeService {
// Version returns the runtime name, runtime version, and runtime API version.
rpc Version(VersionRequest) returns (VersionResponse) {}
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes must ensure
// the sandbox is in the ready state on success.
rpc RunPodSandbox(RunPodSandboxRequest) returns (RunPodSandboxResponse) {}
// StopPodSandbox stops any running process that is part of the sandbox and
// reclaims network resources (e.g., IP addresses) allocated to the sandbox.
// If there are any running containers in the sandbox, they must be forcibly
// terminated.
// This call is idempotent, and must not return an error if all relevant
// resources have already been reclaimed. kubelet will call StopPodSandbox
// at least once before calling RemovePodSandbox. It will also attempt to
// reclaim resources eagerly, as soon as a sandbox is not needed. Hence,
// multiple StopPodSandbox calls are expected.
rpc StopPodSandbox(StopPodSandboxRequest) returns (StopPodSandboxResponse) {}
// RemovePodSandbox removes the sandbox. If there are any running containers
// in the sandbox, they must be forcibly terminated and removed.
// This call is idempotent, and must not return an error if the sandbox has
// already been removed.
rpc RemovePodSandbox(RemovePodSandboxRequest) returns (RemovePodSandboxResponse) {}
// PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not
// present, returns an error.
rpc PodSandboxStatus(PodSandboxStatusRequest) returns (PodSandboxStatusResponse) {}
// ListPodSandbox returns a list of PodSandboxes.
rpc ListPodSandbox(ListPodSandboxRequest) returns (ListPodSandboxResponse) {}
// CreateContainer creates a new container in specified PodSandbox
rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {}
// StartContainer starts the container.
rpc StartContainer(StartContainerRequest) returns (StartContainerResponse) {}
// StopContainer stops a running container with a grace period (i.e., timeout).
// This call is idempotent, and must not return an error if the container has
// already been stopped.
// TODO: what must the runtime do after the grace period is reached?
rpc StopContainer(StopContainerRequest) returns (StopContainerResponse) {}
// RemoveContainer removes the container. If the container is running, the
// container must be forcibly removed.
// This call is idempotent, and must not return an error if the container has
// already been removed.
rpc RemoveContainer(RemoveContainerRequest) returns (RemoveContainerResponse) {}
// ListContainers lists all containers by filters.
rpc ListContainers(ListContainersRequest) returns (ListContainersResponse) {}
// ContainerStatus returns status of the container. If the container is not
// present, returns an error.
rpc ContainerStatus(ContainerStatusRequest) returns (ContainerStatusResponse) {}
// UpdateContainerResources updates ContainerConfig of the container.
rpc UpdateContainerResources(UpdateContainerResourcesRequest) returns (UpdateContainerResourcesResponse) {}
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
rpc ReopenContainerLog(ReopenContainerLogRequest) returns (ReopenContainerLogResponse) {}
// ExecSync runs a command in a container synchronously.
rpc ExecSync(ExecSyncRequest) returns (ExecSyncResponse) {}
// Exec prepares a streaming endpoint to execute a command in the container.
rpc Exec(ExecRequest) returns (ExecResponse) {}
// Attach prepares a streaming endpoint to attach to a running container.
rpc Attach(AttachRequest) returns (AttachResponse) {}
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
rpc PortForward(PortForwardRequest) returns (PortForwardResponse) {}
// ContainerStats returns stats of the container. If the container does not
// exist, the call returns an error.
rpc ContainerStats(ContainerStatsRequest) returns (ContainerStatsResponse) {}
// ListContainerStats returns stats of all running containers.
rpc ListContainerStats(ListContainerStatsRequest) returns (ListContainerStatsResponse) {}
// UpdateRuntimeConfig updates the runtime configuration based on the given request.
rpc UpdateRuntimeConfig(UpdateRuntimeConfigRequest) returns (UpdateRuntimeConfigResponse) {}
// Status returns the status of the runtime.
rpc Status(StatusRequest) returns (StatusResponse) {}
}
// ImageService defines the public APIs for managing images.
service ImageService {
// ListImages lists existing images.
rpc ListImages(ListImagesRequest) returns (ListImagesResponse) {}
// ImageStatus returns the status of the image. If the image is not
// present, returns a response with ImageStatusResponse.Image set to
// nil.
rpc ImageStatus(ImageStatusRequest) returns (ImageStatusResponse) {}
// PullImage pulls an image with authentication config.
rpc PullImage(PullImageRequest) returns (PullImageResponse) {}
// RemoveImage removes the image.
// This call is idempotent, and must not return an error if the image has
// already been removed.
rpc RemoveImage(RemoveImageRequest) returns (RemoveImageResponse) {}
// ImageFSInfo returns information of the filesystem that is used to store images.
rpc ImageFsInfo(ImageFsInfoRequest) returns (ImageFsInfoResponse) {}
}
message VersionRequest {
// Version of the kubelet runtime API.
string version = 1;
}
message VersionResponse {
// Version of the kubelet runtime API.
string version = 1;
// Name of the container runtime.
string runtime_name = 2;
// Version of the container runtime. The string must be
// semver-compatible.
string runtime_version = 3;
// API version of the container runtime. The string must be
// semver-compatible.
string runtime_api_version = 4;
}
// DNSConfig specifies the DNS servers and search domains of a sandbox.
message DNSConfig {
// List of DNS servers of the cluster.
repeated string servers = 1;
// List of DNS search domains of the cluster.
repeated string searches = 2;
// List of DNS options. See https://linux.die.net/man/5/resolv.conf
// for all available options.
repeated string options = 3;
}
// Protocol specifies the transport protocol of a port mapping.
enum Protocol {
TCP = 0;
UDP = 1;
SCTP = 2;
}
// PortMapping specifies the port mapping configurations of a sandbox.
message PortMapping {
// Protocol of the port mapping.
Protocol protocol = 1;
// Port number within the container. Default: 0 (not specified).
int32 container_port = 2;
// Port number on the host. Default: 0 (not specified).
int32 host_port = 3;
// Host IP.
string host_ip = 4;
}
enum MountPropagation {
// No mount propagation ("private" in Linux terminology).
PROPAGATION_PRIVATE = 0;
// Mounts get propagated from the host to the container ("rslave" in Linux).
PROPAGATION_HOST_TO_CONTAINER = 1;
// Mounts get propagated from the host to the container and from the
// container to the host ("rshared" in Linux).
PROPAGATION_BIDIRECTIONAL = 2;
}
// Mount specifies a host volume to mount into a container.
message Mount {
// Path of the mount within the container.
string container_path = 1;
// Path of the mount on the host. If the hostPath doesn't exist, then runtimes
// should report error. If the hostpath is a symbolic link, runtimes should
// follow the symlink and mount the real destination to container.
string host_path = 2;
// If set, the mount is read-only.
bool readonly = 3;
// If set, the mount needs SELinux relabeling.
bool selinux_relabel = 4;
// Requested propagation mode.
MountPropagation propagation = 5;
}
// A NamespaceMode describes the intended namespace configuration for each
// of the namespaces (Network, PID, IPC) in NamespaceOption. Runtimes should
// map these modes as appropriate for the technology underlying the runtime.
enum NamespaceMode {
// A POD namespace is common to all containers in a pod.
// For example, a container with a PID namespace of POD expects to view
// all of the processes in all of the containers in the pod.
POD = 0;
// A CONTAINER namespace is restricted to a single container.
// For example, a container with a PID namespace of CONTAINER expects to
// view only the processes in that container.
CONTAINER = 1;
// A NODE namespace is the namespace of the Kubernetes node.
// For example, a container with a PID namespace of NODE expects to view
// all of the processes on the host running the kubelet.
NODE = 2;
}
// NamespaceOption provides options for Linux namespaces.
message NamespaceOption {
// Network namespace for this container/sandbox.
// Note: There is currently no way to set CONTAINER scoped network in the Kubernetes API.
// Namespaces currently set by the kubelet: POD, NODE
NamespaceMode network = 1;
// PID namespace for this container/sandbox.
// Note: The CRI default is POD, but the v1.PodSpec default is CONTAINER.
// The kubelet's runtime manager will set this to CONTAINER explicitly for v1 pods.
// Namespaces currently set by the kubelet: POD, CONTAINER, NODE
NamespaceMode pid = 2;
// IPC namespace for this container/sandbox.
// Note: There is currently no way to set CONTAINER scoped IPC in the Kubernetes API.
// Namespaces currently set by the kubelet: POD, NODE
NamespaceMode ipc = 3;
}
// Int64Value is the wrapper of int64.
message Int64Value {
// The value.
int64 value = 1;
}
// LinuxSandboxSecurityContext holds linux security configuration that will be
// applied to a sandbox. Note that:
// 1) It does not apply to containers in the pods.
// 2) It may not be applicable to a PodSandbox which does not contain any running
// process.
message LinuxSandboxSecurityContext {
// Configurations for the sandbox's namespaces.
// This will be used only if the PodSandbox uses namespace for isolation.
NamespaceOption namespace_options = 1;
// Optional SELinux context to be applied.
SELinuxOption selinux_options = 2;
// UID to run sandbox processes as, when applicable.
Int64Value run_as_user = 3;
// GID to run sandbox processes as, when applicable. run_as_group should only
// be specified when run_as_user is specified; otherwise, the runtime MUST error.
Int64Value run_as_group = 8;
// If set, the root filesystem of the sandbox is read-only.
bool readonly_rootfs = 4;
// List of groups applied to the first process run in the sandbox, in
// addition to the sandbox's primary GID.
repeated int64 supplemental_groups = 5;
// Indicates whether the sandbox will be asked to run a privileged
// container. If a privileged container is to be executed within it, this
// MUST be true.
// This allows a sandbox to take additional security precautions if no
// privileged containers are expected to be run.
bool privileged = 6;
// Seccomp profile for the sandbox, candidate values are:
// * runtime/default: the default profile for the container runtime
// * unconfined: unconfined profile, ie, no seccomp sandboxing
// * localhost/<full-path-to-profile>: the profile installed on the node.
// <full-path-to-profile> is the full path of the profile.
// Default: "", which is identical with unconfined.
string seccomp_profile_path = 7;
}
// LinuxPodSandboxConfig holds platform-specific configurations for Linux
// host platforms and Linux-based containers.
message LinuxPodSandboxConfig {
// Parent cgroup of the PodSandbox.
// The cgroupfs style syntax will be used, but the container runtime can
// convert it to systemd semantics if needed.
string cgroup_parent = 1;
// LinuxSandboxSecurityContext holds sandbox security attributes.
LinuxSandboxSecurityContext security_context = 2;
// Sysctls holds linux sysctls config for the sandbox.
map<string, string> sysctls = 3;
}
// PodSandboxMetadata holds all necessary information for building the sandbox name.
// The container runtime is encouraged to expose the metadata associated with the
// PodSandbox in its user interface for better user experience. For example,
// the runtime can construct a unique PodSandboxName based on the metadata.
message PodSandboxMetadata {
// Pod name of the sandbox. Same as the pod name in the PodSpec.
string name = 1;
// Pod UID of the sandbox. Same as the pod UID in the PodSpec.
string uid = 2;
// Pod namespace of the sandbox. Same as the pod namespace in the PodSpec.
string namespace = 3;
// Attempt number of creating the sandbox. Default: 0.
uint32 attempt = 4;
}
// PodSandboxConfig holds all the required and optional fields for creating a
// sandbox.
message PodSandboxConfig {
// Metadata of the sandbox. This information will uniquely identify the
// sandbox, and the runtime should leverage this to ensure correct
// operation. The runtime may also use this information to improve UX, such
// as by constructing a readable name.
PodSandboxMetadata metadata = 1;
// Hostname of the sandbox.
string hostname = 2;
// Path to the directory on the host in which container log files are
// stored.
// By default the log of a container going into the LogDirectory will be
// hooked up to STDOUT and STDERR. However, the LogDirectory may contain
// binary log files with structured logging data from the individual
// containers. For example, the files might be newline separated JSON
// structured logs, systemd-journald journal files, gRPC trace files, etc.
// E.g.,
// PodSandboxConfig.LogDirectory = `/var/log/pods/<podUID>/`
// ContainerConfig.LogPath = `containerName/Instance#.log`
//
// WARNING: Log management and how kubelet should interface with the
// container logs are under active discussion in
// https://issues.k8s.io/24677. There *may* be future change of direction
// for logging as the discussion carries on.
string log_directory = 3;
// DNS config for the sandbox.
DNSConfig dns_config = 4;
// Port mappings for the sandbox.
repeated PortMapping port_mappings = 5;
// Key-value pairs that may be used to scope and select individual resources.
map<string, string> labels = 6;
// Unstructured key-value map that may be set by the kubelet to store and
// retrieve arbitrary metadata. This will include any annotations set on a
// pod through the Kubernetes API.
//
// Annotations MUST NOT be altered by the runtime; the annotations stored
// here MUST be returned in the PodSandboxStatus associated with the pod
// this PodSandboxConfig creates.
//
// In general, in order to preserve a well-defined interface between the
// kubelet and the container runtime, annotations SHOULD NOT influence
// runtime behaviour.
//
// Annotations can also be useful for runtime authors to experiment with
// new features that are opaque to the Kubernetes APIs (both user-facing
// and the CRI). Whenever possible, however, runtime authors SHOULD
// consider proposing new typed fields for any new features instead.
map<string, string> annotations = 7;
// Optional configurations specific to Linux hosts.
LinuxPodSandboxConfig linux = 8;
}
message RunPodSandboxRequest {
// Configuration for creating a PodSandbox.
PodSandboxConfig config = 1;
// Named runtime configuration to use for this PodSandbox.
// If the runtime handler is unknown, this request should be rejected. An
// empty string should select the default handler, equivalent to the
// behavior before this feature was added.
// See https://git.k8s.io/community/keps/sig-node/0014-runtime-class.md
string runtime_handler = 2;
}
message RunPodSandboxResponse {
// ID of the PodSandbox to run.
string pod_sandbox_id = 1;
}
message StopPodSandboxRequest {
// ID of the PodSandbox to stop.
string pod_sandbox_id = 1;
}
message StopPodSandboxResponse {}
message RemovePodSandboxRequest {
// ID of the PodSandbox to remove.
string pod_sandbox_id = 1;
}
message RemovePodSandboxResponse {}
message PodSandboxStatusRequest {
// ID of the PodSandbox for which to retrieve status.
string pod_sandbox_id = 1;
// Verbose indicates whether to return extra information about the pod sandbox.
bool verbose = 2;
}
// PodSandboxNetworkStatus is the status of the network for a PodSandbox.
message PodSandboxNetworkStatus {
// IP address of the PodSandbox.
string ip = 1;
}
// Namespace contains paths to the namespaces.
message Namespace {
// Namespace options for Linux namespaces.
// NOTE(review): field numbering starts at 2; tag 1 appears to have been
// removed from this message — confirm it is reserved upstream before any
// future field reuses the number.
NamespaceOption options = 2;
}
// LinuxPodSandboxStatus contains status specific to Linux sandboxes.
message LinuxPodSandboxStatus {
// Paths to the sandbox's namespaces.
Namespace namespaces = 1;
}
enum PodSandboxState {
SANDBOX_READY = 0;
SANDBOX_NOTREADY = 1;
}
// PodSandboxStatus contains the status of the PodSandbox.
message PodSandboxStatus {
// ID of the sandbox.
string id = 1;
// Metadata of the sandbox.
PodSandboxMetadata metadata = 2;
// State of the sandbox.
PodSandboxState state = 3;
// Creation timestamp of the sandbox in nanoseconds. Must be > 0.
int64 created_at = 4;
// Network contains network status if network is handled by the runtime.
PodSandboxNetworkStatus network = 5;
// Linux-specific status to a pod sandbox.
LinuxPodSandboxStatus linux = 6;
// Labels are key-value pairs that may be used to scope and select individual resources.
map<string, string> labels = 7;
// Unstructured key-value map holding arbitrary metadata.
// Annotations MUST NOT be altered by the runtime; the value of this field
// MUST be identical to that of the corresponding PodSandboxConfig used to
// instantiate the pod sandbox this status represents.
map<string, string> annotations = 8;
}
message PodSandboxStatusResponse {
// Status of the PodSandbox.
PodSandboxStatus status = 1;
// Info is extra information of the PodSandbox. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. network namespace for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
map<string, string> info = 2;
}
// PodSandboxStateValue is the wrapper of PodSandboxState.
message PodSandboxStateValue {
// State of the sandbox.
PodSandboxState state = 1;
}
// PodSandboxFilter is used to filter a list of PodSandboxes.
// All those fields are combined with 'AND'
message PodSandboxFilter {
// ID of the sandbox.
string id = 1;
// State of the sandbox.
PodSandboxStateValue state = 2;
// LabelSelector to select matches.
// Only api.MatchLabels is supported for now and the requirements
// are ANDed. MatchExpressions is not supported yet.
map<string, string> label_selector = 3;
}
message ListPodSandboxRequest {
// PodSandboxFilter to filter a list of PodSandboxes.
PodSandboxFilter filter = 1;
}
// PodSandbox contains minimal information about a sandbox.
message PodSandbox {
// ID of the PodSandbox.
string id = 1;
// Metadata of the PodSandbox.
PodSandboxMetadata metadata = 2;
// State of the PodSandbox.
PodSandboxState state = 3;
// Creation timestamps of the PodSandbox in nanoseconds. Must be > 0.
int64 created_at = 4;
// Labels of the PodSandbox.
map<string, string> labels = 5;
// Unstructured key-value map holding arbitrary metadata.
// Annotations MUST NOT be altered by the runtime; the value of this field
// MUST be identical to that of the corresponding PodSandboxConfig used to
// instantiate this PodSandbox.
map<string, string> annotations = 6;
}
message ListPodSandboxResponse {
// List of PodSandboxes.
repeated PodSandbox items = 1;
}
// ImageSpec is an internal representation of an image. Currently, it wraps the
// value of a Container's Image field (e.g. imageID or imageDigest), but in the
// future it will include more detailed information about the different image types.
message ImageSpec {
string image = 1;
}
message KeyValue {
string key = 1;
string value = 2;
}
// LinuxContainerResources specifies Linux specific configuration for
// resources.
// TODO: Consider using Resources from opencontainers/runtime-spec/specs-go
// directly.
message LinuxContainerResources {
// CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified).
int64 cpu_period = 1;
// CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified).
int64 cpu_quota = 2;
// CPU shares (relative weight vs. other containers). Default: 0 (not specified).
int64 cpu_shares = 3;
// Memory limit in bytes. Default: 0 (not specified).
int64 memory_limit_in_bytes = 4;
// OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified).
int64 oom_score_adj = 5;
// CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified).
string cpuset_cpus = 6;
// CpusetMems constrains the allowed set of memory nodes. Default: "" (not specified).
string cpuset_mems = 7;
}
// SELinuxOption are the labels to be applied to the container.
message SELinuxOption {
string user = 1;
string role = 2;
string type = 3;
string level = 4;
}
// Capability contains the container capabilities to add or drop
message Capability {
// List of capabilities to add.
repeated string add_capabilities = 1;
// List of capabilities to drop.
repeated string drop_capabilities = 2;
}
// LinuxContainerSecurityContext holds linux security configuration that will be applied to a container.
message LinuxContainerSecurityContext {
// Capabilities to add or drop.
Capability capabilities = 1;
// If set, run container in privileged mode.
// Privileged mode is incompatible with the following options. If
// privileged is set, the following features MAY have no effect:
// 1. capabilities
// 2. selinux_options
// 3. seccomp
// 4. apparmor
//
// Privileged mode implies the following specific options are applied:
// 1. All capabilities are added.
// 2. Sensitive paths, such as kernel module paths within sysfs, are not masked.
// 3. Any sysfs and procfs mounts are mounted RW.
// 4. Apparmor confinement is not applied.
// 5. Seccomp restrictions are not applied.
// 6. The device cgroup does not restrict access to any devices.
// 7. All devices from the host's /dev are available within the container.
// 8. SELinux restrictions are not applied (e.g. label=disabled).
bool privileged = 2;
// Configurations for the container's namespaces.
// Only used if the container uses namespace for isolation.
NamespaceOption namespace_options = 3;
// SELinux context to be optionally applied.
SELinuxOption selinux_options = 4;
// UID to run the container process as. Only one of run_as_user and
// run_as_username can be specified at a time.
Int64Value run_as_user = 5;
// GID to run the container process as. run_as_group should only be specified
// when run_as_user or run_as_username is specified; otherwise, the runtime
// MUST error.
// (Tag 12: this field was added after fields 6-11, hence the out-of-order
// number next to run_as_user.)
Int64Value run_as_group = 12;
// User name to run the container process as. If specified, the user MUST
// exist in the container image (i.e. in the /etc/passwd inside the image),
// and be resolved there by the runtime; otherwise, the runtime MUST error.
string run_as_username = 6;
// If set, the root filesystem of the container is read-only.
bool readonly_rootfs = 7;
// List of groups applied to the first process run in the container, in
// addition to the container's primary GID.
repeated int64 supplemental_groups = 8;
// AppArmor profile for the container, candidate values are:
// * runtime/default: equivalent to not specifying a profile.
// * unconfined: no profiles are loaded
// * localhost/<profile_name>: profile loaded on the node
// (localhost) by name. The possible profile names are detailed at
// http://wiki.apparmor.net/index.php/AppArmor_Core_Policy_Reference
string apparmor_profile = 9;
// Seccomp profile for the container, candidate values are:
// * runtime/default: the default profile for the container runtime
// * unconfined: unconfined profile, ie, no seccomp sandboxing
// * localhost/<full-path-to-profile>: the profile installed on the node.
// <full-path-to-profile> is the full path of the profile.
// Default: "", which is identical with unconfined.
string seccomp_profile_path = 10;
// no_new_privs defines if the flag for no_new_privs should be set on the
// container.
bool no_new_privs = 11;
// masked_paths is a slice of paths that should be masked by the container
// runtime, this can be passed directly to the OCI spec.
repeated string masked_paths = 13;
// readonly_paths is a slice of paths that should be set as readonly by the
// container runtime, this can be passed directly to the OCI spec.
repeated string readonly_paths = 14;
}
// LinuxContainerConfig contains platform-specific configuration for
// Linux-based containers.
message LinuxContainerConfig {
// Resources specification for the container.
LinuxContainerResources resources = 1;
// LinuxContainerSecurityContext configuration for the container.
LinuxContainerSecurityContext security_context = 2;
}
// WindowsContainerSecurityContext holds windows security configuration that will be applied to a container.
message WindowsContainerSecurityContext {
// User name to run the container process as. If specified, the user MUST
// exist in the container image and be resolved there by the runtime;
// otherwise, the runtime MUST return error.
string run_as_username = 1;
}
// WindowsContainerConfig contains platform-specific configuration for
// Windows-based containers.
message WindowsContainerConfig {
// Resources specification for the container.
WindowsContainerResources resources = 1;
// WindowsContainerSecurityContext configuration for the container.
WindowsContainerSecurityContext security_context = 2;
}
// WindowsContainerResources specifies Windows specific configuration for
// resources.
message WindowsContainerResources {
// CPU shares (relative weight vs. other containers). Default: 0 (not specified).
int64 cpu_shares = 1;
// Number of CPUs available to the container. Default: 0 (not specified).
int64 cpu_count = 2;
// Specifies the portion of processor cycles that this container can use as a percentage times 100.
int64 cpu_maximum = 3;
// Memory limit in bytes. Default: 0 (not specified).
int64 memory_limit_in_bytes = 4;
}
// ContainerMetadata holds all necessary information for building the container
// name. The container runtime is encouraged to expose the metadata in its user
// interface for better user experience. E.g., runtime can construct a unique
// container name based on the metadata. Note that (name, attempt) is unique
// within a sandbox for the entire lifetime of the sandbox.
message ContainerMetadata {
// Name of the container. Same as the container name in the PodSpec.
string name = 1;
// Attempt number of creating the container. Default: 0.
uint32 attempt = 2;
}
// Device specifies a host device to mount into a container.
message Device {
// Path of the device within the container.
string container_path = 1;
// Path of the device on the host.
string host_path = 2;
// Cgroups permissions of the device, candidates are one or more of
// * r - allows container to read from the specified device.
// * w - allows container to write to the specified device.
// * m - allows container to create device files that do not yet exist.
string permissions = 3;
}
// ContainerConfig holds all the required and optional fields for creating a
// container.
message ContainerConfig {
// Metadata of the container. This information will uniquely identify the
// container, and the runtime should leverage this to ensure correct
// operation. The runtime may also use this information to improve UX, such
// as by constructing a readable name.
ContainerMetadata metadata = 1;
// Image to use.
ImageSpec image = 2;
// Command to execute (i.e., entrypoint for docker)
repeated string command = 3;
// Args for the Command (i.e., command for docker)
repeated string args = 4;
// Current working directory of the command.
string working_dir = 5;
// List of environment variable to set in the container.
repeated KeyValue envs = 6;
// Mounts for the container.
repeated Mount mounts = 7;
// Devices for the container.
repeated Device devices = 8;
// Key-value pairs that may be used to scope and select individual resources.
// Label keys are of the form:
// label-key ::= prefixed-name | name
// prefixed-name ::= prefix '/' name
// prefix ::= DNS_SUBDOMAIN
// name ::= DNS_LABEL
map<string, string> labels = 9;
// Unstructured key-value map that may be used by the kubelet to store and
// retrieve arbitrary metadata.
//
// Annotations MUST NOT be altered by the runtime; the annotations stored
// here MUST be returned in the ContainerStatus associated with the container
// this ContainerConfig creates.
//
// In general, in order to preserve a well-defined interface between the
// kubelet and the container runtime, annotations SHOULD NOT influence
// runtime behaviour.
map<string, string> annotations = 10;
// Path relative to PodSandboxConfig.LogDirectory for container to store
// the log (STDOUT and STDERR) on the host.
// E.g.,
// PodSandboxConfig.LogDirectory = `/var/log/pods/<podUID>/`
// ContainerConfig.LogPath = `containerName/Instance#.log`
//
// WARNING: Log management and how kubelet should interface with the
// container logs are under active discussion in
// https://issues.k8s.io/24677. There *may* be future change of direction
// for logging as the discussion carries on.
string log_path = 11;
// Variables for interactive containers, these have very specialized
// use-cases (e.g. debugging).
// TODO: Determine if we need to continue supporting these fields that are
// part of Kubernetes's Container Spec.
bool stdin = 12;
bool stdin_once = 13;
bool tty = 14;
// Configuration specific to Linux containers.
LinuxContainerConfig linux = 15;
// Configuration specific to Windows containers.
WindowsContainerConfig windows = 16;
}
message CreateContainerRequest {
// ID of the PodSandbox in which the container should be created.
string pod_sandbox_id = 1;
// Config of the container.
ContainerConfig config = 2;
// Config of the PodSandbox. This is the same config that was passed
// to RunPodSandboxRequest to create the PodSandbox. It is passed again
// here just for easy reference. The PodSandboxConfig is immutable and
// remains the same throughout the lifetime of the pod.
PodSandboxConfig sandbox_config = 3;
}
message CreateContainerResponse {
// ID of the created container.
string container_id = 1;
}
message StartContainerRequest {
// ID of the container to start.
string container_id = 1;
}
message StartContainerResponse {}
message StopContainerRequest {
// ID of the container to stop.
string container_id = 1;
// Timeout in seconds to wait for the container to stop before forcibly
// terminating it. Default: 0 (forcibly terminate the container immediately)
int64 timeout = 2;
}
message StopContainerResponse {}
message RemoveContainerRequest {
// ID of the container to remove.
string container_id = 1;
}
message RemoveContainerResponse {}
enum ContainerState {
CONTAINER_CREATED = 0;
CONTAINER_RUNNING = 1;
CONTAINER_EXITED = 2;
CONTAINER_UNKNOWN = 3;
}
// ContainerStateValue is the wrapper of ContainerState.
message ContainerStateValue {
// State of the container.
ContainerState state = 1;
}
// ContainerFilter is used to filter containers.
// All those fields are combined with 'AND'
message ContainerFilter {
// ID of the container.
string id = 1;
// State of the container.
ContainerStateValue state = 2;
// ID of the PodSandbox.
string pod_sandbox_id = 3;
// LabelSelector to select matches.
// Only api.MatchLabels is supported for now and the requirements
// are ANDed. MatchExpressions is not supported yet.
map<string, string> label_selector = 4;
}
message ListContainersRequest {
ContainerFilter filter = 1;
}
// Container provides the runtime information for a container, such as ID, hash,
// state of the container.
message Container {
// ID of the container, used by the container runtime to identify
// a container.
string id = 1;
// ID of the sandbox to which this container belongs.
string pod_sandbox_id = 2;
// Metadata of the container.
ContainerMetadata metadata = 3;
// Spec of the image.
ImageSpec image = 4;
// Reference to the image in use. For most runtimes, this should be an
// image ID.
string image_ref = 5;
// State of the container.
ContainerState state = 6;
// Creation time of the container in nanoseconds.
int64 created_at = 7;
// Key-value pairs that may be used to scope and select individual resources.
map<string, string> labels = 8;
// Unstructured key-value map holding arbitrary metadata.
// Annotations MUST NOT be altered by the runtime; the value of this field
// MUST be identical to that of the corresponding ContainerConfig used to
// instantiate this Container.
map<string, string> annotations = 9;
}
message ListContainersResponse {
// List of containers.
repeated Container containers = 1;
}
message ContainerStatusRequest {
    // ID of the container for which to retrieve status.
    string container_id = 1;
    // Verbose indicates whether to return extra information about the container.
    bool verbose = 2;
}

// ContainerStatus represents the status of a container.
message ContainerStatus {
    // ID of the container.
    string id = 1;
    // Metadata of the container.
    ContainerMetadata metadata = 2;
    // Status of the container.
    ContainerState state = 3;
    // Creation time of the container in nanoseconds.
    int64 created_at = 4;
    // Start time of the container in nanoseconds. Default: 0 (not specified).
    int64 started_at = 5;
    // Finish time of the container in nanoseconds. Default: 0 (not specified).
    int64 finished_at = 6;
    // Exit code of the container. Only required when finished_at != 0. Default: 0.
    int32 exit_code = 7;
    // Spec of the image.
    ImageSpec image = 8;
    // Reference to the image in use. For most runtimes, this should be an
    // image ID
    string image_ref = 9;
    // Brief CamelCase string explaining why container is in its current state.
    string reason = 10;
    // Human-readable message indicating details about why container is in its
    // current state.
    string message = 11;
    // Key-value pairs that may be used to scope and select individual resources.
    map<string,string> labels = 12;
    // Unstructured key-value map holding arbitrary metadata.
    // Annotations MUST NOT be altered by the runtime; the value of this field
    // MUST be identical to that of the corresponding ContainerConfig used to
    // instantiate the Container this status represents.
    map<string,string> annotations = 13;
    // Mounts for the container.
    repeated Mount mounts = 14;
    // Log path of container.
    string log_path = 15;
}

message ContainerStatusResponse {
    // Status of the container.
    ContainerStatus status = 1;
    // Info is extra information of the Container. The key could be arbitrary string, and
    // value should be in json format. The information could include anything useful for
    // debug, e.g. pid for linux container based container runtime.
    // It should only be returned non-empty when Verbose is true.
    map<string, string> info = 2;
}

message UpdateContainerResourcesRequest {
    // ID of the container to update.
    string container_id = 1;
    // Resource configuration specific to Linux containers.
    LinuxContainerResources linux = 2;
}

// Intentionally empty: success is conveyed by the RPC completing without error.
message UpdateContainerResourcesResponse {}

message ExecSyncRequest {
    // ID of the container.
    string container_id = 1;
    // Command to execute.
    repeated string cmd = 2;
    // Timeout in seconds to stop the command. Default: 0 (run forever).
    int64 timeout = 3;
}

message ExecSyncResponse {
    // Captured command stdout output.
    bytes stdout = 1;
    // Captured command stderr output.
    bytes stderr = 2;
    // Exit code the command finished with. Default: 0 (success).
    int32 exit_code = 3;
}
message ExecRequest {
    // ID of the container in which to execute the command.
    string container_id = 1;
    // Command to execute.
    repeated string cmd = 2;
    // Whether to exec the command in a TTY.
    bool tty = 3;
    // Whether to stream stdin.
    // One of `stdin`, `stdout`, and `stderr` MUST be true.
    bool stdin = 4;
    // Whether to stream stdout.
    // One of `stdin`, `stdout`, and `stderr` MUST be true.
    bool stdout = 5;
    // Whether to stream stderr.
    // One of `stdin`, `stdout`, and `stderr` MUST be true.
    // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported
    // in this case. The output of stdout and stderr will be combined to a
    // single stream.
    bool stderr = 6;
}

message ExecResponse {
    // Fully qualified URL of the exec streaming server.
    string url = 1;
}

message AttachRequest {
    // ID of the container to which to attach.
    string container_id = 1;
    // Whether to stream stdin.
    // One of `stdin`, `stdout`, and `stderr` MUST be true.
    bool stdin = 2;
    // Whether the process being attached is running in a TTY.
    // This must match the TTY setting in the ContainerConfig.
    bool tty = 3;
    // Whether to stream stdout.
    // One of `stdin`, `stdout`, and `stderr` MUST be true.
    bool stdout = 4;
    // Whether to stream stderr.
    // One of `stdin`, `stdout`, and `stderr` MUST be true.
    // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported
    // in this case. The output of stdout and stderr will be combined to a
    // single stream.
    bool stderr = 5;
}

message AttachResponse {
    // Fully qualified URL of the attach streaming server.
    string url = 1;
}

message PortForwardRequest {
    // ID of the pod sandbox to which to forward the port(s).
    string pod_sandbox_id = 1;
    // Ports to forward.
    repeated int32 port = 2;
}

message PortForwardResponse {
    // Fully qualified URL of the port-forward streaming server.
    string url = 1;
}
message ImageFilter {
    // Spec of the image.
    ImageSpec image = 1;
}

message ListImagesRequest {
    // Filter to list images.
    ImageFilter filter = 1;
}

// Basic information about a container image.
message Image {
    // ID of the image.
    string id = 1;
    // Other names by which this image is known.
    repeated string repo_tags = 2;
    // Digests by which this image is known.
    repeated string repo_digests = 3;
    // Size of the image in bytes. Must be > 0.
    uint64 size = 4;
    // UID that will run the command(s). This is used as a default if no user is
    // specified when creating the container. UID and the following user name
    // are mutually exclusive.
    Int64Value uid = 5;
    // User name that will run the command(s). This is used if UID is not set
    // and no user is specified when creating container.
    string username = 6;
}

message ListImagesResponse {
    // List of images.
    repeated Image images = 1;
}

message ImageStatusRequest {
    // Spec of the image.
    ImageSpec image = 1;
    // Verbose indicates whether to return extra information about the image.
    bool verbose = 2;
}

message ImageStatusResponse {
    // Status of the image.
    Image image = 1;
    // Info is extra information of the Image. The key could be arbitrary string, and
    // value should be in json format. The information could include anything useful
    // for debug, e.g. image config for oci image based container runtime.
    // It should only be returned non-empty when Verbose is true.
    map<string, string> info = 2;
}

// AuthConfig contains authorization information for connecting to a registry.
message AuthConfig {
    // Username for basic authentication against the registry.
    string username = 1;
    // Password for basic authentication against the registry.
    string password = 2;
    // Base64-encoded "username:password" credential pair.
    string auth = 3;
    // Address of the registry the credentials apply to.
    string server_address = 4;
    // IdentityToken is used to authenticate the user and get
    // an access token for the registry.
    string identity_token = 5;
    // RegistryToken is a bearer token to be sent to a registry
    string registry_token = 6;
}

message PullImageRequest {
    // Spec of the image.
    ImageSpec image = 1;
    // Authentication configuration for pulling the image.
    AuthConfig auth = 2;
    // Config of the PodSandbox, which is used to pull image in PodSandbox context.
    PodSandboxConfig sandbox_config = 3;
}

message PullImageResponse {
    // Reference to the image in use. For most runtimes, this should be an
    // image ID or digest.
    string image_ref = 1;
}

message RemoveImageRequest {
    // Spec of the image to remove.
    ImageSpec image = 1;
}

// Intentionally empty: success is conveyed by the RPC completing without error.
message RemoveImageResponse {}
message NetworkConfig {
    // CIDR to use for pod IP addresses. If the CIDR is empty, runtimes
    // should omit it.
    string pod_cidr = 1;
}

message RuntimeConfig {
    // Runtime networking configuration.
    NetworkConfig network_config = 1;
}

message UpdateRuntimeConfigRequest {
    // New runtime configuration to apply.
    RuntimeConfig runtime_config = 1;
}

// Intentionally empty: success is conveyed by the RPC completing without error.
message UpdateRuntimeConfigResponse {}

// RuntimeCondition contains condition information for the runtime.
// There are 2 kinds of runtime conditions:
// 1. Required conditions: Conditions are required for kubelet to work
// properly. If any required condition is unmet, the node will be not ready.
// The required conditions include:
//   * RuntimeReady: RuntimeReady means the runtime is up and ready to accept
//   basic containers e.g. container only needs host network.
//   * NetworkReady: NetworkReady means the runtime network is up and ready to
//   accept containers which require container network.
// 2. Optional conditions: Conditions are informative to the user, but kubelet
// will not rely on. Since condition type is an arbitrary string, all conditions
// not required are optional. These conditions will be exposed to users to help
// them understand the status of the system.
message RuntimeCondition {
    // Type of runtime condition.
    string type = 1;
    // Status of the condition, one of true/false. Default: false.
    bool status = 2;
    // Brief CamelCase string containing reason for the condition's last transition.
    string reason = 3;
    // Human-readable message indicating details about last transition.
    string message = 4;
}

// RuntimeStatus is information about the current status of the runtime.
message RuntimeStatus {
    // List of current observed runtime conditions.
    repeated RuntimeCondition conditions = 1;
}

message StatusRequest {
    // Verbose indicates whether to return extra information about the runtime.
    bool verbose = 1;
}

message StatusResponse {
    // Status of the Runtime.
    RuntimeStatus status = 1;
    // Info is extra information of the Runtime. The key could be arbitrary string, and
    // value should be in json format. The information could include anything useful for
    // debug, e.g. plugins used by the container runtime.
    // It should only be returned non-empty when Verbose is true.
    map<string, string> info = 2;
}
// Intentionally empty: the request carries no parameters.
message ImageFsInfoRequest {}

// UInt64Value is the wrapper of uint64.
message UInt64Value {
    // The value.
    uint64 value = 1;
}

// FilesystemIdentifier uniquely identify the filesystem.
message FilesystemIdentifier{
    // Mountpoint of a filesystem.
    string mountpoint = 1;
}

// FilesystemUsage provides the filesystem usage information.
message FilesystemUsage {
    // Timestamp in nanoseconds at which the information were collected. Must be > 0.
    int64 timestamp = 1;
    // The unique identifier of the filesystem.
    FilesystemIdentifier fs_id = 2;
    // UsedBytes represents the bytes used for images on the filesystem.
    // This may differ from the total bytes used on the filesystem and may not
    // equal CapacityBytes - AvailableBytes.
    UInt64Value used_bytes = 3;
    // InodesUsed represents the inodes used by the images.
    // This may not equal InodesCapacity - InodesAvailable because the underlying
    // filesystem may also be used for purposes other than storing images.
    UInt64Value inodes_used = 4;
}

message ImageFsInfoResponse {
    // Information of image filesystem(s).
    repeated FilesystemUsage image_filesystems = 1;
}

message ContainerStatsRequest{
    // ID of the container for which to retrieve stats.
    string container_id = 1;
}

message ContainerStatsResponse {
    // Stats of the container.
    ContainerStats stats = 1;
}

message ListContainerStatsRequest{
    // Filter for the list request.
    ContainerStatsFilter filter = 1;
}

// ContainerStatsFilter is used to filter containers.
// All those fields are combined with 'AND'
message ContainerStatsFilter {
    // ID of the container.
    string id = 1;
    // ID of the PodSandbox.
    string pod_sandbox_id = 2;
    // LabelSelector to select matches.
    // Only api.MatchLabels is supported for now and the requirements
    // are ANDed. MatchExpressions is not supported yet.
    map<string, string> label_selector = 3;
}

message ListContainerStatsResponse {
    // Stats of all containers matched by the filter.
    repeated ContainerStats stats = 1;
}

// ContainerAttributes provides basic information of the container.
message ContainerAttributes {
    // ID of the container.
    string id = 1;
    // Metadata of the container.
    ContainerMetadata metadata = 2;
    // Key-value pairs that may be used to scope and select individual resources.
    map<string,string> labels = 3;
    // Unstructured key-value map holding arbitrary metadata.
    // Annotations MUST NOT be altered by the runtime; the value of this field
    // MUST be identical to that of the corresponding ContainerConfig used to
    // instantiate the Container this status represents.
    map<string,string> annotations = 4;
}

// ContainerStats provides the resource usage statistics for a container.
message ContainerStats {
    // Information of the container.
    ContainerAttributes attributes = 1;
    // CPU usage gathered from the container.
    CpuUsage cpu = 2;
    // Memory usage gathered from the container.
    MemoryUsage memory = 3;
    // Usage of the writeable layer.
    FilesystemUsage writable_layer = 4;
}

// CpuUsage provides the CPU usage information.
message CpuUsage {
    // Timestamp in nanoseconds at which the information were collected. Must be > 0.
    int64 timestamp = 1;
    // Cumulative CPU usage (sum across all cores) since object creation.
    UInt64Value usage_core_nano_seconds = 2;
}

// MemoryUsage provides the memory usage information.
message MemoryUsage {
    // Timestamp in nanoseconds at which the information were collected. Must be > 0.
    int64 timestamp = 1;
    // The amount of working set memory in bytes.
    UInt64Value working_set_bytes = 2;
}

message ReopenContainerLogRequest {
    // ID of the container for which to reopen the log.
    string container_id = 1;
}

// Intentionally empty: success is conveyed by the RPC completing without error.
message ReopenContainerLogResponse{
}
| {
"pile_set_name": "Github"
} |
//
// Author: Christian Schulz <christian.schulz.phone@gmail.com>
//
#ifndef GREEDY_NS_LOCAL_SEARCH_P9KLE4NH
#define GREEDY_NS_LOCAL_SEARCH_P9KLE4NH
#include "definitions.h"
#include "partition_config.h"
#include "data_structure/graph_access.h"
#include "data_structure/priority_queues/maxNodeHeap.h"
// Greedy local search refinement for the node-separator problem: vertices in
// the separator (block 2) are greedily moved into one of the two sides
// (blocks 0 and 1) using gain-keyed priority queues, one per target block.
class greedy_ns_local_search {
public:
    greedy_ns_local_search();
    virtual ~greedy_ns_local_search();

    // Run the refinement on G. Returns the improvement achieved
    // (presumably the separator-weight reduction — TODO confirm against
    // the .cpp implementation, which is not in this header).
    EdgeWeight perform_refinement(const PartitionConfig & config, graph_access & G);

private:
    // Compute the gain of moving `node` from the separator into block 0
    // (written to toLHS) resp. block 1 (written to toRHS).
    void compute_gain( graph_access & G, NodeID node, Gain & toLHS, Gain & toRHS);

    // Move `node` from the separator into `to_block`, pulling affected
    // neighbours from `other_block` into the separator and updating block
    // weights, the moved flags, and the gain priority queues.
    void move_node( graph_access & G, NodeID & node, PartitionID & to_block, PartitionID & other_block,
                    std::vector< NodeWeight > & block_weights,
                    std::vector< bool > & moved_out_of_S,
                    std::vector< maxNodeHeap > & heaps);
};
inline
void greedy_ns_local_search::compute_gain( graph_access & G, NodeID node, Gain & toLHS, Gain & toRHS) {
    // Moving `node` out of the separator frees its own weight on either
    // side, but every neighbour that currently sits in the *opposite*
    // block would have to enter the separator, costing its weight.
    toLHS = G.getNodeWeight(node);
    toRHS = G.getNodeWeight(node);

    forall_out_edges(G, e, node) {
        NodeID target = G.getEdgeTarget(e);
        if( G.getPartitionIndex(target) == 0) {
            // neighbour in block 0: it penalizes a move of `node` to block 1
            toRHS -= G.getNodeWeight(target);
        } else if( G.getPartitionIndex(target) == 1 ) {
            // neighbour in block 1: it penalizes a move of `node` to block 0
            toLHS -= G.getNodeWeight(target);
        }
        // neighbours already in the separator (block 2) do not affect the gain
    } endfor
}
inline
void greedy_ns_local_search::move_node( graph_access & G, NodeID & node, PartitionID & to_block, PartitionID & other_block,
                                        std::vector< NodeWeight > & block_weights,
                                        std::vector< bool > & moved_out_of_S,
                                        std::vector< maxNodeHeap > & queues) {
    // Commit the move: `node` leaves the separator (block 2) for `to_block`.
    G.setPartitionIndex(node, to_block);
    block_weights[to_block] += G.getNodeWeight(node);
    block_weights[2] -= G.getNodeWeight(node);
    moved_out_of_S[node] = true;

    std::vector< NodeID > to_be_added;    // neighbours newly pulled into the separator
    std::vector< NodeID > to_be_updated;  // separator vertices whose gains changed

    Gain gain_achieved = G.getNodeWeight(node);
    forall_out_edges(G, e, node) {
        NodeID target = G.getEdgeTarget(e);
        if( G.getPartitionIndex( target ) == other_block ) {
            // Neighbour on the opposite side must enter the separator to
            // keep it a valid vertex separator.
            G.setPartitionIndex(target, 2);
            block_weights[other_block] -= G.getNodeWeight(target);
            block_weights[2] += G.getNodeWeight(target);
            gain_achieved -= G.getNodeWeight(target);

            // Vertices already moved out of S this pass are not re-queued.
            if( !moved_out_of_S[target] ) {
                to_be_added.push_back(target);
            }

            // Gains of target's queued neighbours change as well.
            // NOTE(review): membership is checked only in queues[0]; this
            // assumes both queues always contain the same vertex set —
            // confirm against the queue-maintenance code in the .cpp file.
            forall_out_edges(G, e_bar, target) {
                NodeID v = G.getEdgeTarget(e_bar);
                if( queues[0].contains(v) ) {
                    to_be_updated.push_back(v);
                }
            } endfor
        } else if( G.getPartitionIndex( target ) == 2 ) {
            // Neighbour stays in the separator, but its gain changed.
            to_be_updated.push_back(target);
        }
    } endfor

    Gain toLHS = 0;
    Gain toRHS = 0;
    // Insert fresh separator vertices into both gain queues ...
    for( NodeID node : to_be_added ) {
        compute_gain( G, node, toLHS, toRHS);
        queues[0].insert(node, toLHS);
        queues[1].insert(node, toRHS);
    }

    // ... and refresh the keys of vertices whose gains were invalidated.
    for( NodeID node : to_be_updated) {
        compute_gain( G, node, toLHS, toRHS);
        queues[0].changeKey(node, toLHS);
        queues[1].changeKey(node, toRHS);
    }
}
#endif /* end of include guard: GREEDY_NS_LOCAL_SEARCH_P9KLE4NH */
| {
"pile_set_name": "Github"
} |
/* Page canvas: flush edges, white fallback colour behind a tiled
   background texture. */
body{
    margin: 0;
    padding: 0;
    background: #ffffff;
    background-image: url('../img/nature.png');
}

/* Vertically-offset container for the banner bands. */
.band_wrapper{
    height: 220px;
    margin: auto;
    top: 50%;
    transform: translateY(80%);
}

/* Icons floated to the left of the bands; negative top margins pull
   them up to overlap the band above. */
.page_icon{
    float: left;
    margin-top: -30px;
    margin-right: 50px;
    margin-left: 170px;
}

.server_icon{
    float: left;
    margin-right: 50px;
    margin-left: 170px;
    margin-top: -60px;
}

/* Two horizontal bands built from tiled texture images. */
.big_grey_band{
    background-image: url('../img/dark_grey.png');
    background-repeat: repeat;
    height: 150px;
}

.small_grey_light_band{
    background-image: url('../img/light_grey.png');
    background-repeat: repeat;
    height: 70px;
}

/* Shared heading typography inside the bands. */
.big_grey_band h1, .small_grey_light_band h4{
    margin: 0;
    font-family: Arial;
    font-weight: lighter;
    color: white;
}

.big_grey_band h1{
    font-size: 150px;
}

.small_grey_light_band h4{
    font-size: 50px;
    padding: 5px;
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env ruby
$:<< '../lib' << 'lib'
require 'goliath'
#
# A test endpoint that will:
# * with 'delay' parameter, take the given time to respond
# * with 'drop' parameter, drop connection before responding
# * with 'fail' parameter, raise an error of the given type (eg 400 raises a BadRequestError)
# * with 'echo_status', 'echo_headers', or 'echo_body' parameter, replace the given component directly.
#
# If the delay param is given, sleep for that many seconds
#
# Note that though this is non-blocking, the call chain does *not* proceed in parallel
class Delay
  include Goliath::Rack::AsyncMiddleware

  # After the downstream app has responded, optionally sleep (non-blocking,
  # via EM::Synchrony) for the requested number of seconds, clamped to the
  # range [0, 5], and record both the requested and the actually elapsed
  # delay in the response body.
  def post_process(env, status, headers, body)
    requested = env.params['delay']
    if requested
      seconds = [0, [requested.to_f, 5].min].max
      EM::Synchrony.sleep(seconds)
      body.merge!(:delay => seconds, :actual => (Time.now.to_f - env[:start_time]))
    end
    [status, headers, body]
  end
end
# if the middleware_failure parameter is given, raise an error causing that
# status code
class MiddlewareFailure
  include Goliath::Rack::AsyncMiddleware

  # If a 'fail' parameter is present, raise a Goliath validation error with
  # that status code before the downstream app runs; otherwise pass through.
  def call(env)
    code = env.params['fail']
    raise Goliath::Validation::Error.new(code.to_i, "Middleware error #{code}") if code
    super
  end
end
# if the drop_pre parameter is given, close the connection before headers are sent
# This works, but probably does awful awful things to Goliath's innards
class DropConnection
  include Goliath::Rack::AsyncMiddleware

  # When the 'drop' parameter is exactly 'true', close the stream before any
  # headers are sent and return an empty zero-status triple; otherwise
  # continue down the middleware chain.
  def call(env)
    return super unless env.params['drop'].to_s == 'true'

    env.logger.info "Dropping connection"
    env.stream_close
    [0, {}, '']
  end
end
# if echo_status, echo_headers or echo_body are given, blindly substitute their
# value, clobbering whatever was there.
#
# If you are going to use echo_headers you probably need to use a JSON post body:
# curl -v -H "Content-Type: application/json" --data-ascii '{"echo_headers":{"X-Question":"What is brown and sticky"},"echo_body":{"answer":"a stick"}}' 'http://127.0.0.1:9001/'
#
class Echo
  include Goliath::Rack::AsyncMiddleware

  # Blindly substitute the response status, headers and/or body with the
  # values of the echo_status / echo_headers / echo_body parameters,
  # clobbering whatever the downstream app produced.
  def post_process env, status, headers, body
    params  = env.params
    status  = params['echo_status'].to_i if params['echo_status']
    headers = params['echo_headers']     if params['echo_headers']
    body    = params['echo_body']        if params['echo_body']
    [status, headers, body]
  end
end
# Rescue validation errors and send them up the chain as normal non-200 responses
class ExceptionHandler
  include Goliath::Rack::AsyncMiddleware
  include Goliath::Rack::Validator

  # Rescue validation errors raised further down the chain and convert them
  # into ordinary non-200 responses.
  def call(env)
    super
  rescue Goliath::Validation::Error => e
    validation_error(e.status_code, e.message)
  end
end
# Goliath endpoint wired with the middleware stack defined above.  Responds
# to any request by echoing back the request's params, headers, path and
# method both as response headers and as the (JSON-rendered) body.
# NOTE: the `use` order below is significant — each middleware wraps the
# ones registered after it.
class TestRig < Goliath::API
  use Goliath::Rack::Tracer             # log trace statistics
  use Goliath::Rack::Params             # parse & merge query and body parameters
  #
  use Goliath::Rack::DefaultMimeType    # cleanup accepted media types
  use Goliath::Rack::Render, 'json'     # auto-negotiate response format
  #
  use ExceptionHandler                  # turn raised errors into HTTP responses
  use MiddlewareFailure                 # make response fail if 'fail' param
  use DropConnection                    # drop connection if 'drop' param
  use Echo                              # replace status, headers or body if 'echo_status' etc given
  use Delay                             # make response take X seconds if 'delay' param

  # Stash the client's request headers so response() can echo them back.
  def on_headers(env, headers)
    env['client-headers'] = headers
  end

  def response(env)
    # Flatten params/headers into "key: value" strings for the echo headers.
    query_params = env.params.collect { |param| param.join(": ") }
    query_headers = env['client-headers'].collect { |param| param.join(": ") }

    headers = {
      "Special" => "Header",
      "Params" => query_params.join("|"),
      "Path" => env[Goliath::Request::REQUEST_PATH],
      "Headers" => query_headers.join("|"),
      "Method" => env[Goliath::Request::REQUEST_METHOD]}
    # Body is a copy of the headers hash; the Render middleware serializes it.
    [200, headers, headers.dup]
  end
end
| {
"pile_set_name": "Github"
} |
using Opc.Ua.Client;

namespace OpcPublisher
{
    using Opc.Ua;

    /// <summary>
    /// Class to encapsulate OPC UA monitored item API. Every member simply
    /// forwards to the wrapped <see cref="MonitoredItem"/> instance.
    /// </summary>
    public class OpcUaMonitoredItem : IOpcUaMonitoredItem
    {
        private readonly MonitoredItem _monitoredItem;

        public OpcUaMonitoredItem()
        {
            _monitoredItem = new MonitoredItem();
        }

        public MonitoredItem MonitoredItem => _monitoredItem;

        public uint AttributeId
        {
            get => _monitoredItem.AttributeId;
            set => _monitoredItem.AttributeId = value;
        }

        public bool DiscardOldest
        {
            get => _monitoredItem.DiscardOldest;
            set => _monitoredItem.DiscardOldest = value;
        }

        public string DisplayName
        {
            get => _monitoredItem.DisplayName;
            set => _monitoredItem.DisplayName = value;
        }

        public MonitoringMode MonitoringMode
        {
            get => _monitoredItem.MonitoringMode;
            set => _monitoredItem.MonitoringMode = value;
        }

        public uint QueueSize
        {
            get => _monitoredItem.QueueSize;
            set => _monitoredItem.QueueSize = value;
        }

        public int SamplingInterval
        {
            get => _monitoredItem.SamplingInterval;
            set => _monitoredItem.SamplingInterval = value;
        }

        public NodeId StartNodeId
        {
            get => _monitoredItem.StartNodeId;
            set => _monitoredItem.StartNodeId = value;
        }

        public event MonitoredItemNotificationEventHandler Notification
        {
            add { _monitoredItem.Notification += value; }
            remove { _monitoredItem.Notification -= value; }
        }
    }
}
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>
/*
 * generic_ide_suspend - driver-model suspend hook for an IDE device.
 *
 * Builds an ATA_PRIV_PM_SUSPEND request carrying an ide_pm_state and runs
 * it synchronously through the block layer.  On ACPI-capable ports, the
 * channel timing is saved (_GTM) before suspending and the port is put
 * into ACPI power-off state only after both drives on the channel are
 * down.  Returns 0 on success or -EIO if the PM request failed.
 */
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	ide_req(rq)->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	/* pre-thaw is handled exactly like a freeze */
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
/*
 * ide_pm_execute_rq - synchronously execute a power-management request.
 *
 * If the queue is already dying, the request is completed immediately
 * with -ENXIO instead of being queued.  Returns 0 on success, -EIO on
 * device error, or -ENXIO for a dead queue.
 */
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;	/* don't log the failure */
		scsi_req(rq)->result = -ENXIO;
		blk_mq_end_request(rq, BLK_STS_OK);
		return -ENXIO;
	}
	blk_execute_rq(q, NULL, rq, true);

	return scsi_req(rq)->result ? -EIO : 0;
}
/*
 * generic_ide_resume - driver-model resume hook for an IDE device.
 *
 * Restarts the (stopped) hardware queues, restores ACPI port state and
 * timing where available, then runs an ATA_PRIV_PM_RESUME request.  On
 * success the bound ide_driver's ->resume() callback, if any, is invoked.
 */
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	/* queues were stopped at suspend time in ide_complete_pm_rq() */
	blk_mq_start_stopped_hw_queues(drive->queue, true);

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	/* BLK_MQ_REQ_PREEMPT lets the request run on the still-blocked queue */
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	ide_req(rq)->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
/*
 * ide_complete_power_step - advance the PM state machine after a step.
 *
 * Only disk devices walk the multi-step sequences; for other media the
 * step counter is left untouched.  Suspend: FLUSH_CACHE -> STANDBY (or
 * straight to COMPLETED on freeze).  Resume: RESTORE_PIO -> IDLE ->
 * RESTORE_DMA.
 */
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		/* freeze does not need the drive spun down */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
/*
 * ide_start_power_step - issue the taskfile for the current PM step.
 *
 * Depending on pm->pm_step this either queues an ATA command (flush,
 * standby, idle) via do_rw_taskfile(), or performs host-side work
 * (restore PIO mode, re-enable DMA) and returns ide_stopped.  Steps that
 * do not apply to the device fall through and mark the sequence
 * IDE_PM_COMPLETED.
 */
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}
/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.  It is a no-op unless the state machine has reached
 *	IDE_PM_COMPLETED.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = ide_req(rq)->special;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	/* suspend stops the queues; resume clears the blocked flag instead */
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_mq_stop_hw_queues(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;

	drive->hwif->rq = NULL;

	blk_mq_end_request(rq, BLK_STS_OK);
}
/*
 * ide_check_pm_state - bookkeeping at the start of a PM request.
 *
 * At the first suspend step the drive is marked blocked so normal I/O is
 * held off.  At the first resume step the function busy-waits for the
 * (possibly still POSTing) device before restarting the queues.
 */
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
	         ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
	         pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
		blk_mq_start_hw_queues(q);
	}
}
| {
"pile_set_name": "Github"
} |
from __future__ import print_function, division
from sympy.core import Mul
from sympy.functions import DiracDelta, Heaviside
from sympy.solvers import solve
from sympy.core.compatibility import default_sort_key
def change_mul(node, x):
    """change_mul(node, x)

    Rearranges the operands of a product, bringing to front any simple
    DiracDelta expression.

    If no simple DiracDelta expression was found, then all the DiracDelta
    expressions are simplified (using DiracDelta.simplify).

    Return: (dirac, new node)
    Where:
        o dirac is either a simple DiracDelta expression or None (if no simple
          expression was found);
        o new node is either a simplified DiracDelta expressions or None (if it
          could not be simplified).

    NOTE: when ``node`` is neither a Mul nor a Pow the node itself is
    returned (not a tuple); the recursive call below relies on this.

    Examples
    ========

    >>> from sympy import DiracDelta, cos
    >>> from sympy.integrals.deltafunctions import change_mul
    >>> from sympy.abc import x, y

    >>> change_mul(x*y*DiracDelta(x)*cos(x), x)
    (DiracDelta(x), x*y*cos(x))

    >>> change_mul(x*y*DiracDelta(x**2 - 1)*cos(x), x)
    (None, x*y*cos(x)*DiracDelta(x - 1)/2 + x*y*cos(x)*DiracDelta(x + 1)/2)

    >>> change_mul(x*y*DiracDelta(cos(x))*cos(x), x)
    (None, None)

    See Also
    ========

    sympy.functions.special.delta_functions.DiracDelta
    deltaintegrate
    """
    # Non-products are returned unchanged (bare node, not a tuple) — see NOTE
    # in the docstring; this branch feeds the recursion further down.
    if not (node.is_Mul or node.is_Pow):
        return node

    new_args = []
    dirac = None

    #Sorting is needed so that we consistently collapse the same delta;
    #However, we must preserve the ordering of non-commutative terms
    c, nc = node.args_cnc()
    sorted_args = sorted(c, key=default_sort_key)
    sorted_args.extend(nc)

    for arg in sorted_args:
        if arg.is_Pow and arg.base.func is DiracDelta:
            # Peel one DiracDelta factor off the power and inspect the base.
            new_args.append(arg.func(arg.base, arg.exp - 1))
            arg = arg.base
        if dirac is None and (arg.func is DiracDelta and arg.is_simple(x)
                and (len(arg.args) <= 1 or arg.args[1] == 0)):
            # First simple, underived DiracDelta found: pull it to the front.
            dirac = arg
        else:
            new_args.append(arg)
    if not dirac:  # there was no simple dirac
        new_args = []
        for arg in sorted_args:
            # Simplify every DiracDelta (or power thereof); recurse into
            # any remaining sub-products.
            if arg.func is DiracDelta:
                new_args.append(arg.simplify(x))
            elif arg.is_Pow and arg.base.func is DiracDelta:
                new_args.append(arg.func(arg.base.simplify(x), arg.exp))
            else:
                new_args.append(change_mul(arg, x))
        if new_args != sorted_args:
            nnode = Mul(*new_args).expand()
        else:  # if the node didn't change there is nothing to do
            nnode = None
        return (None, nnode)
    return (dirac, Mul(*new_args))
def deltaintegrate(f, x):
    """
    deltaintegrate(f, x)

    The idea for integration is the following:

    - If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)),
      we try to simplify it.

      If we could simplify it, then we integrate the resulting expression.
      We already know we can integrate a simplified expression, because only
      simple DiracDelta expressions are involved.

      If we couldn't simplify it, there are two cases:

      1) The expression is a simple expression: we return the integral,
         taking care if we are dealing with a Derivative or with a proper
         DiracDelta.

      2) The expression is not simple (i.e. DiracDelta(cos(x))): we can do
         nothing at all.

    - If the node is a multiplication node having a DiracDelta term:

      First we expand it.

      If the expansion did work, the we try to integrate the expansion.

      If not, we try to extract a simple DiracDelta term, then we have two
      cases:

      1) We have a simple DiracDelta term, so we return the integral.

      2) We didn't have a simple term, but we do have an expression with
         simplified DiracDelta terms, so we integrate this expression.

    Returns None when ``f`` contains no DiracDelta or cannot be handled.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> from sympy.integrals.deltafunctions import deltaintegrate
    >>> from sympy import sin, cos, DiracDelta, Heaviside

    >>> deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x)
    sin(1)*cos(1)*Heaviside(x - 1)
    >>> deltaintegrate(y**2*DiracDelta(x - z)*DiracDelta(y - z), y)
    z**2*DiracDelta(x - z)*Heaviside(y - z)

    See Also
    ========

    sympy.functions.special.delta_functions.DiracDelta
    sympy.integrals.integrals.Integral
    """
    if not f.has(DiracDelta):
        return None

    from sympy.integrals import Integral, integrate

    # g(x) = DiracDelta(h(x))
    if f.func == DiracDelta:
        h = f.simplify(x)
        if h == f:  # can't simplify the expression
            #FIXME: the second term tells whether is DeltaDirac or Derivative
            #For integrating derivatives of DiracDelta we need the chain rule
            if f.is_simple(x):
                if (len(f.args) <= 1 or f.args[1] == 0):
                    # plain DiracDelta(a*x + b): integral is a step function
                    return Heaviside(f.args[0])
                else:
                    # derivative of DiracDelta: lower the order and divide by
                    # the leading coefficient of the (linear) argument
                    return (DiracDelta(f.args[0], f.args[1] - 1) /
                        f.args[0].as_poly().LC())
        else:  # let's try to integrate the simplified expression
            fh = integrate(h, x)
            return fh
    elif f.is_Mul or f.is_Pow:  # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e
        g = f.expand()
        if f != g:  # the expansion worked
            fh = integrate(g, x)
            if fh is not None and not isinstance(fh, Integral):
                return fh
        else:
            # no expansion performed, try to extract a simple DiracDelta term
            dg, rest_mult = change_mul(f, x)

            if not dg:
                # no simple delta extracted; integrate the simplified rest
                if rest_mult:
                    fh = integrate(rest_mult, x)
                    return fh
            else:
                dg = dg.simplify(x)
                if dg.is_Mul:  # Take out any extracted factors
                    dg, rest_mult_2 = change_mul(dg, x)
                    rest_mult = rest_mult*rest_mult_2
                # sift: the delta collapses the integrand to its value at
                # the root of dg's argument, times a step function
                point = solve(dg.args[0], x)[0]
                return (rest_mult.subs(x, point)*Heaviside(x - point))
    return None
| {
"pile_set_name": "Github"
} |
using System;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// Assembly identity metadata (title/company/product/copyright) plus COM
// visibility and CLS-compliance settings.
[assembly: AssemblyTitle("Title")]
[assembly: AssemblyCompany("Company.")]
[assembly: AssemblyProduct("Product")]
[assembly: AssemblyCopyright("Copyright © 2012-2016 Generic Inc.")]
[assembly: ComVisible(false)]
[assembly: CLSCompliant(true)]
// NOTE(review): the informational (display) version 7.61.0 differs from the
// binding AssemblyVersion 7.0 declared below -- confirm this split between
// marketing and binding version is intentional.
[assembly: AssemblyInformationalVersion("7.61.0")]
[assembly: AssemblyVersion("7.0")] | {
"pile_set_name": "Github"
} |
/// @ref gtc_bitfield
/// @file glm/gtc/bitfield.inl
#include "../simd/integer.h"
namespace glm{
namespace detail
{
	// Forward declarations of the bit-interleave workers; the explicit
	// specializations below implement them for each width combination.
	template<typename PARAM, typename RET>
	GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y);

	template<typename PARAM, typename RET>
	GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z);

	template<typename PARAM, typename RET>
	GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w);
template<>
GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave(glm::uint8 x, glm::uint8 y)
{
glm::uint16 REG1(x);
glm::uint16 REG2(y);
REG1 = ((REG1 << 4) | REG1) & glm::uint16(0x0F0F);
REG2 = ((REG2 << 4) | REG2) & glm::uint16(0x0F0F);
REG1 = ((REG1 << 2) | REG1) & glm::uint16(0x3333);
REG2 = ((REG2 << 2) | REG2) & glm::uint16(0x3333);
REG1 = ((REG1 << 1) | REG1) & glm::uint16(0x5555);
REG2 = ((REG2 << 1) | REG2) & glm::uint16(0x5555);
return REG1 | (REG2 << 1);
}
	// Interleave two 16-bit values into 32 bits: even bits from x, odd from y.
	template<>
	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint16 x, glm::uint16 y)
	{
		glm::uint32 REG1(x);
		glm::uint32 REG2(y);

		// Spread each input so one zero bit separates every data bit.
		REG1 = ((REG1 << 8) | REG1) & glm::uint32(0x00FF00FF);
		REG2 = ((REG2 << 8) | REG2) & glm::uint32(0x00FF00FF);

		REG1 = ((REG1 << 4) | REG1) & glm::uint32(0x0F0F0F0F);
		REG2 = ((REG2 << 4) | REG2) & glm::uint32(0x0F0F0F0F);

		REG1 = ((REG1 << 2) | REG1) & glm::uint32(0x33333333);
		REG2 = ((REG2 << 2) | REG2) & glm::uint32(0x33333333);

		REG1 = ((REG1 << 1) | REG1) & glm::uint32(0x55555555);
		REG2 = ((REG2 << 1) | REG2) & glm::uint32(0x55555555);

		return REG1 | (REG2 << 1);
	}
	// Interleave two 32-bit values into 64 bits: even bits from x, odd from y.
	template<>
	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y)
	{
		glm::uint64 REG1(x);
		glm::uint64 REG2(y);

		// Spread each input so one zero bit separates every data bit.
		REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFFull);
		REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFFull);

		REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FFull);
		REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FFull);

		REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0Full);
		REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0Full);

		REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333ull);
		REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333ull);

		REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555ull);
		REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555ull);

		return REG1 | (REG2 << 1);
	}
	// Three-way interleave of bytes: result bits 3i / 3i+1 / 3i+2 come from
	// x / y / z bit i (24 significant result bits).
	// NOTE(review): the mask literals are 64-bit wide and are truncated to
	// uint32 here; the surviving low 32 bits (0xFF0000FF, 0x0F00F00F,
	// 0xC30C30C3, 0x49249249) are the intended masks, but the truncation
	// provokes compiler warnings.
	template<>
	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z)
	{
		glm::uint32 REG1(x);
		glm::uint32 REG2(y);
		glm::uint32 REG3(z);

		REG1 = ((REG1 << 16) | REG1) & glm::uint32(0x00FF0000FF0000FF);
		REG2 = ((REG2 << 16) | REG2) & glm::uint32(0x00FF0000FF0000FF);
		REG3 = ((REG3 << 16) | REG3) & glm::uint32(0x00FF0000FF0000FF);

		REG1 = ((REG1 << 8) | REG1) & glm::uint32(0xF00F00F00F00F00F);
		REG2 = ((REG2 << 8) | REG2) & glm::uint32(0xF00F00F00F00F00F);
		REG3 = ((REG3 << 8) | REG3) & glm::uint32(0xF00F00F00F00F00F);

		REG1 = ((REG1 << 4) | REG1) & glm::uint32(0x30C30C30C30C30C3);
		REG2 = ((REG2 << 4) | REG2) & glm::uint32(0x30C30C30C30C30C3);
		REG3 = ((REG3 << 4) | REG3) & glm::uint32(0x30C30C30C30C30C3);

		REG1 = ((REG1 << 2) | REG1) & glm::uint32(0x9249249249249249);
		REG2 = ((REG2 << 2) | REG2) & glm::uint32(0x9249249249249249);
		REG3 = ((REG3 << 2) | REG3) & glm::uint32(0x9249249249249249);

		return REG1 | (REG2 << 1) | (REG3 << 2);
	}
	// Three-way interleave of 16-bit values into a 48-bit Morton code
	// stored in the low bits of a uint64.
	template<>
	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z)
	{
		glm::uint64 REG1(x);
		glm::uint64 REG2(y);
		glm::uint64 REG3(z);

		// Spread each input so two zero bits separate every data bit.
		REG1 = ((REG1 << 32) | REG1) & glm::uint64(0xFFFF00000000FFFFull);
		REG2 = ((REG2 << 32) | REG2) & glm::uint64(0xFFFF00000000FFFFull);
		REG3 = ((REG3 << 32) | REG3) & glm::uint64(0xFFFF00000000FFFFull);

		REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x00FF0000FF0000FFull);
		REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x00FF0000FF0000FFull);
		REG3 = ((REG3 << 16) | REG3) & glm::uint64(0x00FF0000FF0000FFull);

		REG1 = ((REG1 << 8) | REG1) & glm::uint64(0xF00F00F00F00F00Full);
		REG2 = ((REG2 << 8) | REG2) & glm::uint64(0xF00F00F00F00F00Full);
		REG3 = ((REG3 << 8) | REG3) & glm::uint64(0xF00F00F00F00F00Full);

		REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x30C30C30C30C30C3ull);
		REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x30C30C30C30C30C3ull);
		REG3 = ((REG3 << 4) | REG3) & glm::uint64(0x30C30C30C30C30C3ull);

		REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x9249249249249249ull);
		REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x9249249249249249ull);
		REG3 = ((REG3 << 2) | REG3) & glm::uint64(0x9249249249249249ull);

		return REG1 | (REG2 << 1) | (REG3 << 2);
	}
	// Three-way interleave of 32-bit values. 3 x 32 = 96 bits cannot fit a
	// uint64; the masking discards the high bits of each input
	// (presumably only ~21 low bits per component are meaningful --
	// TODO confirm intended input range).
	template<>
	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y, glm::uint32 z)
	{
		glm::uint64 REG1(x);
		glm::uint64 REG2(y);
		glm::uint64 REG3(z);

		REG1 = ((REG1 << 32) | REG1) & glm::uint64(0xFFFF00000000FFFFull);
		REG2 = ((REG2 << 32) | REG2) & glm::uint64(0xFFFF00000000FFFFull);
		REG3 = ((REG3 << 32) | REG3) & glm::uint64(0xFFFF00000000FFFFull);

		REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x00FF0000FF0000FFull);
		REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x00FF0000FF0000FFull);
		REG3 = ((REG3 << 16) | REG3) & glm::uint64(0x00FF0000FF0000FFull);

		REG1 = ((REG1 << 8) | REG1) & glm::uint64(0xF00F00F00F00F00Full);
		REG2 = ((REG2 << 8) | REG2) & glm::uint64(0xF00F00F00F00F00Full);
		REG3 = ((REG3 << 8) | REG3) & glm::uint64(0xF00F00F00F00F00Full);

		REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x30C30C30C30C30C3ull);
		REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x30C30C30C30C30C3ull);
		REG3 = ((REG3 << 4) | REG3) & glm::uint64(0x30C30C30C30C30C3ull);

		REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x9249249249249249ull);
		REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x9249249249249249ull);
		REG3 = ((REG3 << 2) | REG3) & glm::uint64(0x9249249249249249ull);

		return REG1 | (REG2 << 1) | (REG3 << 2);
	}
	// Four-way interleave of bytes: result bit 4i+k comes from the k-th
	// input's bit i.
	// NOTE(review): mask literals are 64-bit wide, truncated to uint32; the
	// surviving low 32 bits (0x000F000F, 0x03030303, 0x11111111) are the
	// intended masks, but the truncation provokes compiler warnings.
	template<>
	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w)
	{
		glm::uint32 REG1(x);
		glm::uint32 REG2(y);
		glm::uint32 REG3(z);
		glm::uint32 REG4(w);

		REG1 = ((REG1 << 12) | REG1) & glm::uint32(0x000F000F000F000F);
		REG2 = ((REG2 << 12) | REG2) & glm::uint32(0x000F000F000F000F);
		REG3 = ((REG3 << 12) | REG3) & glm::uint32(0x000F000F000F000F);
		REG4 = ((REG4 << 12) | REG4) & glm::uint32(0x000F000F000F000F);

		REG1 = ((REG1 << 6) | REG1) & glm::uint32(0x0303030303030303);
		REG2 = ((REG2 << 6) | REG2) & glm::uint32(0x0303030303030303);
		REG3 = ((REG3 << 6) | REG3) & glm::uint32(0x0303030303030303);
		REG4 = ((REG4 << 6) | REG4) & glm::uint32(0x0303030303030303);

		REG1 = ((REG1 << 3) | REG1) & glm::uint32(0x1111111111111111);
		REG2 = ((REG2 << 3) | REG2) & glm::uint32(0x1111111111111111);
		REG3 = ((REG3 << 3) | REG3) & glm::uint32(0x1111111111111111);
		REG4 = ((REG4 << 3) | REG4) & glm::uint32(0x1111111111111111);

		return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3);
	}
	// Four-way interleave of 16-bit values into a uint64: result bit 4i+k
	// comes from the k-th input's bit i.
	template<>
	GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z, glm::uint16 w)
	{
		glm::uint64 REG1(x);
		glm::uint64 REG2(y);
		glm::uint64 REG3(z);
		glm::uint64 REG4(w);

		// Spread each input so three zero bits separate every data bit.
		REG1 = ((REG1 << 24) | REG1) & glm::uint64(0x000000FF000000FFull);
		REG2 = ((REG2 << 24) | REG2) & glm::uint64(0x000000FF000000FFull);
		REG3 = ((REG3 << 24) | REG3) & glm::uint64(0x000000FF000000FFull);
		REG4 = ((REG4 << 24) | REG4) & glm::uint64(0x000000FF000000FFull);

		REG1 = ((REG1 << 12) | REG1) & glm::uint64(0x000F000F000F000Full);
		REG2 = ((REG2 << 12) | REG2) & glm::uint64(0x000F000F000F000Full);
		REG3 = ((REG3 << 12) | REG3) & glm::uint64(0x000F000F000F000Full);
		REG4 = ((REG4 << 12) | REG4) & glm::uint64(0x000F000F000F000Full);

		REG1 = ((REG1 << 6) | REG1) & glm::uint64(0x0303030303030303ull);
		REG2 = ((REG2 << 6) | REG2) & glm::uint64(0x0303030303030303ull);
		REG3 = ((REG3 << 6) | REG3) & glm::uint64(0x0303030303030303ull);
		REG4 = ((REG4 << 6) | REG4) & glm::uint64(0x0303030303030303ull);

		REG1 = ((REG1 << 3) | REG1) & glm::uint64(0x1111111111111111ull);
		REG2 = ((REG2 << 3) | REG2) & glm::uint64(0x1111111111111111ull);
		REG3 = ((REG3 << 3) | REG3) & glm::uint64(0x1111111111111111ull);
		REG4 = ((REG4 << 3) | REG4) & glm::uint64(0x1111111111111111ull);

		return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3);
	}
}//namespace detail
// Build a value whose low 'Bits' bits are set; saturates to all ones when
// 'Bits' covers (or exceeds) the whole width of the type.
template<typename genIUType>
GLM_FUNC_QUALIFIER genIUType mask(genIUType Bits)
{
	GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'mask' accepts only integer values");

	genIUType const Zero = static_cast<genIUType>(0);
	genIUType const One = static_cast<genIUType>(1);

	if(Bits >= sizeof(genIUType) * 8)
		return ~Zero;
	return (One << Bits) - One;
}
// Component-wise mask(): applies the scalar overload to every lane of v.
template<length_t L, typename T, precision P, template<length_t, typename, precision> class vecIUType>
GLM_FUNC_QUALIFIER vecIUType<L, T, P> mask(vecIUType<L, T, P> const& v)
{
	GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'mask' accepts only integer values");
	return detail::functor1<L, T, T, P>::call(mask, v);
}
// Rotate the bits of 'In' by 'Shift' positions.
// NOTE(review): like the vector overload below, this shifts left for
// "RotateRight" -- confirm the naming convention before relying on the
// direction.
// Fixes: 'BitSize' is an int, so it is now computed with static_cast<int>
// (it was cast to genIType and implicitly narrowed); the shift amount is
// normalized so Shift == 0 (or a multiple of the width) no longer produces
// an undefined shift by the full type width in 'In >> (BitSize - Shift)'.
template<typename genIType>
GLM_FUNC_QUALIFIER genIType bitfieldRotateRight(genIType In, int Shift)
{
	GLM_STATIC_ASSERT(std::numeric_limits<genIType>::is_integer, "'bitfieldRotateRight' accepts only integer values");

	int const BitSize = static_cast<int>(sizeof(genIType) * 8);

	// Reduce the shift into [0, BitSize); a zero shift is the identity.
	Shift %= BitSize;
	if(Shift < 0)
		Shift += BitSize;
	if(Shift == 0)
		return In;

	return (In << static_cast<genIType>(Shift)) | (In >> static_cast<genIType>(BitSize - Shift));
}
// Component-wise bit rotation by a common shift amount.
// NOTE(review): this shifts left for "RotateRight", and Shift == 0 makes
// 'In >> BitSize' a shift by the full type width (undefined behavior) --
// confirm callers always pass 0 < Shift < BitSize.
template<length_t L, typename T, precision P, template<length_t, typename, precision> class vecType>
GLM_FUNC_QUALIFIER vecType<L, T, P> bitfieldRotateRight(vecType<L, T, P> const & In, int Shift)
{
	GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldRotateRight' accepts only integer values");
	int const BitSize = static_cast<int>(sizeof(T) * 8);
	return (In << static_cast<T>(Shift)) | (In >> static_cast<T>(BitSize - Shift));
}
// Rotate the bits of 'In' by 'Shift' positions.
// NOTE(review): like the vector overload below, this shifts right for
// "RotateLeft" -- confirm the naming convention before relying on the
// direction.
// Fixes: 'BitSize' is an int, so it is now computed with static_cast<int>
// (it was cast to genIType and implicitly narrowed); the shift amount is
// normalized so Shift == 0 (or a multiple of the width) no longer produces
// an undefined shift by the full type width in 'In << (BitSize - Shift)'.
template<typename genIType>
GLM_FUNC_QUALIFIER genIType bitfieldRotateLeft(genIType In, int Shift)
{
	GLM_STATIC_ASSERT(std::numeric_limits<genIType>::is_integer, "'bitfieldRotateLeft' accepts only integer values");

	int const BitSize = static_cast<int>(sizeof(genIType) * 8);

	// Reduce the shift into [0, BitSize); a zero shift is the identity.
	Shift %= BitSize;
	if(Shift < 0)
		Shift += BitSize;
	if(Shift == 0)
		return In;

	return (In >> static_cast<genIType>(Shift)) | (In << static_cast<genIType>(BitSize - Shift));
}
// Component-wise bit rotation by a common shift amount.
// NOTE(review): this shifts right for "RotateLeft", and Shift == 0 makes
// 'In << BitSize' a shift by the full type width (undefined behavior) --
// confirm callers always pass 0 < Shift < BitSize.
template<length_t L, typename T, precision P, template<length_t, typename, precision> class vecType>
GLM_FUNC_QUALIFIER vecType<L, T, P> bitfieldRotateLeft(vecType<L, T, P> const& In, int Shift)
{
	GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldRotateLeft' accepts only integer values");
	int const BitSize = static_cast<int>(sizeof(T) * 8);
	return (In >> static_cast<T>(Shift)) | (In << static_cast<T>(BitSize - Shift));
}
// Set to one the BitCount-bit field starting at bit FirstBit of Value.
template<typename genIUType>
GLM_FUNC_QUALIFIER genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount)
{
	genIUType const FieldMask = static_cast<genIUType>(mask(BitCount) << FirstBit);
	return Value | FieldMask;
}
// Component-wise: set to one the BitCount-bit field starting at FirstBit.
template<length_t L, typename T, precision P, template<length_t, typename, precision> class vecType>
GLM_FUNC_QUALIFIER vecType<L, T, P> bitfieldFillOne(vecType<L, T, P> const& Value, int FirstBit, int BitCount)
{
	return Value | static_cast<T>(mask(BitCount) << FirstBit);
}
// Clear to zero the BitCount-bit field starting at bit FirstBit of Value.
template<typename genIUType>
GLM_FUNC_QUALIFIER genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount)
{
	genIUType const KeepMask = static_cast<genIUType>(~(mask(BitCount) << FirstBit));
	return Value & KeepMask;
}
// Component-wise: clear to zero the BitCount-bit field starting at FirstBit.
template<length_t L, typename T, precision P, template<length_t, typename, precision> class vecType>
GLM_FUNC_QUALIFIER vecType<L, T, P> bitfieldFillZero(vecType<L, T, P> const& Value, int FirstBit, int BitCount)
{
	return Value & static_cast<T>(~(mask(BitCount) << FirstBit));
}
// Signed wrapper: reinterpret the signed inputs as unsigned bit patterns,
// interleave, then reinterpret the packed result back as signed.
GLM_FUNC_QUALIFIER int16 bitfieldInterleave(int8 x, int8 y)
{
	union sign8
	{
		int8 i;
		uint8 u;
	} Bits_x, Bits_y;
	union sign16
	{
		int16 i;
		uint16 u;
	} Packed;

	Bits_x.i = x;
	Bits_y.i = y;
	Packed.u = bitfieldInterleave(Bits_x.u, Bits_y.u);
	return Packed.i;
}
// Unsigned 8-bit pair interleave; forwards to the specialized worker.
GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(uint8 x, uint8 y)
{
	return detail::bitfieldInterleave<uint8, uint16>(x, y);
}
// Signed wrapper: reinterpret the signed inputs as unsigned bit patterns,
// interleave, then reinterpret the packed result back as signed.
GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int16 x, int16 y)
{
	union sign16
	{
		int16 i;
		uint16 u;
	} Bits_x, Bits_y;
	union sign32
	{
		int32 i;
		uint32 u;
	} Packed;

	Bits_x.i = x;
	Bits_y.i = y;
	Packed.u = bitfieldInterleave(Bits_x.u, Bits_y.u);
	return Packed.i;
}
// Unsigned 16-bit pair interleave; forwards to the specialized worker.
GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint16 x, uint16 y)
{
	return detail::bitfieldInterleave<uint16, uint32>(x, y);
}
// Signed wrapper: reinterpret the signed inputs as unsigned bit patterns,
// interleave, then reinterpret the packed result back as signed.
GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y)
{
	union sign32
	{
		int32 i;
		uint32 u;
	} Bits_x, Bits_y;
	union sign64
	{
		int64 i;
		uint64 u;
	} Packed;

	Bits_x.i = x;
	Bits_y.i = y;
	Packed.u = bitfieldInterleave(Bits_x.u, Bits_y.u);
	return Packed.i;
}
// Unsigned 32-bit pair interleave; forwards to the specialized worker.
GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y)
{
	return detail::bitfieldInterleave<uint32, uint64>(x, y);
}
// Signed wrapper for the three-way byte interleave: reinterpret inputs as
// unsigned, interleave, reinterpret the result back as signed.
GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z)
{
	union sign8
	{
		int8 i;
		uint8 u;
	} Bits_x, Bits_y, Bits_z;
	union sign32
	{
		int32 i;
		uint32 u;
	} Packed;

	Bits_x.i = x;
	Bits_y.i = y;
	Bits_z.i = z;
	Packed.u = bitfieldInterleave(Bits_x.u, Bits_y.u, Bits_z.u);
	return Packed.i;
}
// Unsigned three-way byte interleave; forwards to the specialized worker.
GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z)
{
	return detail::bitfieldInterleave<uint8, uint32>(x, y, z);
}
// Signed wrapper for the three-way 16-bit interleave: reinterpret inputs as
// unsigned, interleave, reinterpret the result back as signed.
GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z)
{
	union sign16
	{
		int16 i;
		uint16 u;
	} Bits_x, Bits_y, Bits_z;
	union sign64
	{
		int64 i;
		uint64 u;
	} Packed;

	Bits_x.i = x;
	Bits_y.i = y;
	Bits_z.i = z;
	Packed.u = bitfieldInterleave(Bits_x.u, Bits_y.u, Bits_z.u);
	return Packed.i;
}
// Unsigned three-way 16-bit interleave; forwards to the specialized worker.
// Fix: forwarded with PARAM = uint32 although the arguments are uint16.
// The uint32 worker happens to compute the same result today, but the
// uint16 worker matches the argument types and the convention of every
// sibling overload, and stays correct if the specializations ever diverge.
GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z)
{
	return detail::bitfieldInterleave<uint16, uint64>(x, y, z);
}
// Signed wrapper for the three-way 32-bit interleave: reinterpret inputs as
// unsigned, interleave, reinterpret the result back as signed.
// Fix: the input bit-pattern union was misnamed 'sign16' although it holds
// 32-bit members; renamed to 'sign32' for consistency with the sibling
// overloads (local type name only -- no behavioral change).
GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y, int32 z)
{
	union sign32
	{
		int32 i;
		uint32 u;
	} sign_x, sign_y, sign_z;
	union sign64
	{
		int64 i;
		uint64 u;
	} result;

	sign_x.i = x;
	sign_y.i = y;
	sign_z.i = z;
	result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u);
	return result.i;
}
// Unsigned three-way 32-bit interleave; forwards to the specialized worker.
GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z)
{
	return detail::bitfieldInterleave<uint32, uint64>(x, y, z);
}
// Signed wrapper for the four-way byte interleave: reinterpret inputs as
// unsigned, interleave, reinterpret the result back as signed.
GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w)
{
	union sign8
	{
		int8 i;
		uint8 u;
	} Bits_x, Bits_y, Bits_z, Bits_w;
	union sign32
	{
		int32 i;
		uint32 u;
	} Packed;

	Bits_x.i = x;
	Bits_y.i = y;
	Bits_z.i = z;
	Bits_w.i = w;
	Packed.u = bitfieldInterleave(Bits_x.u, Bits_y.u, Bits_z.u, Bits_w.u);
	return Packed.i;
}
// Unsigned four-way byte interleave; forwards to the specialized worker.
GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w)
{
	return detail::bitfieldInterleave<uint8, uint32>(x, y, z, w);
}
// Signed wrapper for the four-way 16-bit interleave: reinterpret inputs as
// unsigned, interleave, reinterpret the result back as signed.
GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w)
{
	union sign16
	{
		int16 i;
		uint16 u;
	} Bits_x, Bits_y, Bits_z, Bits_w;
	union sign64
	{
		int64 i;
		uint64 u;
	} Packed;

	Bits_x.i = x;
	Bits_y.i = y;
	Bits_z.i = z;
	Bits_w.i = w;
	Packed.u = bitfieldInterleave(Bits_x.u, Bits_y.u, Bits_z.u, Bits_w.u);
	return Packed.i;
}
// Unsigned four-way 16-bit interleave; forwards to the specialized worker.
GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w)
{
	return detail::bitfieldInterleave<uint16, uint64>(x, y, z, w);
}
}//namespace glm
| {
"pile_set_name": "Github"
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
is.autoload=true
javac.compilerargs=-Xlint -Xlint:-serial
javac.source=1.8
javadoc.main.page=org/openide/actions/doc-files/api.html
javadoc.arch=${basedir}/arch.xml
javadoc.apichanges=${basedir}/apichanges.xml
test.config.stableBTD.includes=**/*Test.class
test.config.stableBTD.excludes=\
**/PageSetupActionTest.class
| {
"pile_set_name": "Github"
} |
package railo.runtime.debug;
/**
 * Read-only record of one implicit access observed by the debugger --
 * presumably a variable resolved without an explicit scope prefix (note the
 * {@code railo.runtime.debug} package); confirm against the implementation.
 * Tracks where the access happened and how often it occurred.
 */
public interface ImplicitAccess {

	/** Increments the occurrence counter for this access record. */
	public void inc();

	/**
	 * @return the count
	 */
	public int getCount();

	/**
	 * @return the scope
	 */
	public String getScope();

	/**
	 * @return the template
	 */
	public String getTemplate();

	/**
	 * @return the line
	 */
	public int getLine();

	/**
	 * @return the name
	 */
	public String getName();
}
| {
"pile_set_name": "Github"
} |
// Copyright 2002 Michael E. Stillman
#ifndef _schorder_hpp_
#define _schorder_hpp_
#include "buffer.hpp"
#include "intarray.hpp"
#include "monoid.hpp"
class GBMatrix;
class Matrix;
// A Schreyer order on a free module: for each basis element it stores a
// comparison number and an (encoded) base monomial, which together drive the
// monomial comparisons used below.
class SchreyerOrder : public our_new_delete
{
  const Monoid *M;
  intarray _order;  // Each 'entry' is an array of ints of length _nslots:
                    // compare_num, followed by the (encoded) monomial.
  int _nslots;      // ints per entry: 1 (compare_num) + M->monomial_size()
  int _rank;        // number of entries currently stored

  // Private: instances are built through the static create() functions.
  SchreyerOrder(const Monoid *m)
      : M(m), _nslots(m->monomial_size() + 1), _rank(0)
  {
  }

  // NOTE(review): direct destruction aborts; lifetime appears to be managed
  // via create()/remove() instead of delete -- confirm before changing.
  ~SchreyerOrder() { abort(); }

 public:
  // Factory functions building an order from a monoid or from the column
  // data of a matrix.
  static SchreyerOrder *create(const Monoid *m);
  static SchreyerOrder *create(const Matrix *m);
  static SchreyerOrder *create(const GBMatrix *m);
  void remove();

  int rank() const { return _rank; }
  int compare_num(int i) const { return _order[i * _nslots]; }
  // Encoded base monomial of the i-th entry (points into _order).
  const int *base_monom(int i) const { return _order.raw() + i * _nslots + 1; }
  const Monoid *getMonoid() const { return M; }

  bool is_equal(const SchreyerOrder *G) const;

  // Constructions mirroring the corresponding free-module operations.
  SchreyerOrder *copy() const;
  SchreyerOrder *sub_space(int n) const;
  SchreyerOrder *sub_space(M2_arrayint a) const;
  void append_order(const SchreyerOrder *G);
  SchreyerOrder *direct_sum(const SchreyerOrder *G) const;
  SchreyerOrder *tensor(const SchreyerOrder *G) const;
  SchreyerOrder *exterior(int p) const;
  SchreyerOrder *symm(int n) const;

  void append(int compare_num, const int *base_monom);
  // Copies the monomial

  // Multiply/divide m by the base monomial of component 'comp'.
  void schreyer_up(const int *m, int comp, int *result) const
  // 'result' is allowed to be 'm'.
  {
    M->mult(m, base_monom(comp), result);
  }
  void schreyer_down(const int *m, int comp, int *result) const
  // 'result' is allowed to be 'm'.
  {
    M->divide(m, base_monom(comp), result);
  }

  // Compare two (monomial, component) pairs under the Schreyer order; the
  // _encoded variant takes monomials already in encoded form.
  int schreyer_compare(const int *m,
                       int m_comp,
                       const int *n,
                       int n_comp) const;
  int schreyer_compare_encoded(const int *m,
                               int m_comp,
                               const int *n,
                               int n_comp) const;

  void text_out(buffer &o) const;
};
#endif
// Local Variables:
// compile-command: "make -C $M2BUILDDIR/Macaulay2/e "
// indent-tabs-mode: nil
// End:
| {
"pile_set_name": "Github"
} |
//
// SSZipArchive.h
// SSZipArchive
//
// Created by Sam Soffes on 7/21/10.
// Copyright (c) Sam Soffes 2010-2015. All rights reserved.
//
#ifndef _SSZIPARCHIVE_H
#define _SSZIPARCHIVE_H
#import <Foundation/Foundation.h>
#include "SSZipCommon.h"
NS_ASSUME_NONNULL_BEGIN
@protocol SSZipArchiveDelegate;
/// Creation and extraction of ZIP archives, built on the minizip `unz_*`
/// types declared in SSZipCommon.h. Class methods cover one-shot zip/unzip;
/// the instance API supports incremental archive building.
@interface SSZipArchive : NSObject

// Password check
+ (BOOL)isFilePasswordProtectedAtPath:(NSString *)path;

// Unzip
// Convenience overloads layer optional overwrite/password/error/delegate,
// progress and completion handlers on top of the same extraction.
+ (BOOL)unzipFileAtPath:(NSString *)path toDestination:(NSString *)destination;
+ (BOOL)unzipFileAtPath:(NSString *)path toDestination:(NSString *)destination delegate:(nullable id<SSZipArchiveDelegate>)delegate;
+ (BOOL)unzipFileAtPath:(NSString *)path toDestination:(NSString *)destination overwrite:(BOOL)overwrite password:(nullable NSString *)password error:(NSError * *)error;
+ (BOOL)unzipFileAtPath:(NSString *)path toDestination:(NSString *)destination overwrite:(BOOL)overwrite password:(nullable NSString *)password error:(NSError * *)error delegate:(nullable id<SSZipArchiveDelegate>)delegate NS_REFINED_FOR_SWIFT;
+ (BOOL)unzipFileAtPath:(NSString *)path
          toDestination:(NSString *)destination
     preserveAttributes:(BOOL)preserveAttributes
              overwrite:(BOOL)overwrite
               password:(nullable NSString *)password
                  error:(NSError * *)error
               delegate:(nullable id<SSZipArchiveDelegate>)delegate;
+ (BOOL)unzipFileAtPath:(NSString *)path
          toDestination:(NSString *)destination
        progressHandler:(void (^)(NSString *entry, unz_file_info zipInfo, long entryNumber, long total))progressHandler
      completionHandler:(void (^)(NSString *path, BOOL succeeded, NSError *error))completionHandler;
+ (BOOL)unzipFileAtPath:(NSString *)path
          toDestination:(NSString *)destination
              overwrite:(BOOL)overwrite
               password:(nullable NSString *)password
        progressHandler:(void (^)(NSString *entry, unz_file_info zipInfo, long entryNumber, long total))progressHandler
      completionHandler:(void (^)(NSString *path, BOOL succeeded, NSError *error))completionHandler;

// Zip
// without password
+ (BOOL)createZipFileAtPath:(NSString *)path withFilesAtPaths:(NSArray *)paths;
+ (BOOL)createZipFileAtPath:(NSString *)path withContentsOfDirectory:(NSString *)directoryPath;
+ (BOOL)createZipFileAtPath:(NSString *)path withContentsOfDirectory:(NSString *)directoryPath keepParentDirectory:(BOOL)keepParentDirectory;

// with password, password could be nil
+ (BOOL)createZipFileAtPath:(NSString *)path withFilesAtPaths:(NSArray *)paths withPassword:(nullable NSString *)password;
+ (BOOL)createZipFileAtPath:(NSString *)path withContentsOfDirectory:(NSString *)directoryPath withPassword:(nullable NSString *)password;
+ (BOOL)createZipFileAtPath:(NSString *)path withContentsOfDirectory:(NSString *)directoryPath keepParentDirectory:(BOOL)keepParentDirectory withPassword:(nullable NSString *)password;

// Incremental archive building: init with the target path, open, write
// entries, then close.
- (instancetype)initWithPath:(NSString *)path;
@property (NS_NONATOMIC_IOSONLY, readonly) BOOL open;
- (BOOL)writeFile:(NSString *)path withPassword:(nullable NSString *)password;
- (BOOL)writeFolderAtPath:(NSString *)path withFolderName:(NSString *)folderName withPassword:(nullable NSString *)password;
- (BOOL)writeFileAtPath:(NSString *)path withFileName:(nullable NSString *)fileName withPassword:(nullable NSString *)password;
- (BOOL)writeData:(NSData *)data filename:(nullable NSString *)filename withPassword:(nullable NSString *)password;
@property (NS_NONATOMIC_IOSONLY, readonly) BOOL close;

@end
/// Optional callbacks reporting unzip lifecycle and per-entry progress.
@protocol SSZipArchiveDelegate <NSObject>

@optional

// Archive-level begin/end notifications.
- (void)zipArchiveWillUnzipArchiveAtPath:(NSString *)path zipInfo:(unz_global_info)zipInfo;
- (void)zipArchiveDidUnzipArchiveAtPath:(NSString *)path zipInfo:(unz_global_info)zipInfo unzippedPath:(NSString *)unzippedPath;

// Per-entry callbacks; returning NO from the 'Should' variant presumably
// skips the entry -- confirm against the implementation.
- (BOOL)zipArchiveShouldUnzipFileAtIndex:(NSInteger)fileIndex totalFiles:(NSInteger)totalFiles archivePath:(NSString *)archivePath fileInfo:(unz_file_info)fileInfo;
- (void)zipArchiveWillUnzipFileAtIndex:(NSInteger)fileIndex totalFiles:(NSInteger)totalFiles archivePath:(NSString *)archivePath fileInfo:(unz_file_info)fileInfo;
- (void)zipArchiveDidUnzipFileAtIndex:(NSInteger)fileIndex totalFiles:(NSInteger)totalFiles archivePath:(NSString *)archivePath fileInfo:(unz_file_info)fileInfo;
- (void)zipArchiveDidUnzipFileAtIndex:(NSInteger)fileIndex totalFiles:(NSInteger)totalFiles archivePath:(NSString *)archivePath unzippedFilePath:(NSString *)unzippedFilePath;

// Byte-level progress.
- (void)zipArchiveProgressEvent:(unsigned long long)loaded total:(unsigned long long)total;
- (void)zipArchiveDidUnzipArchiveFile:(NSString *)zipFile entryPath:(NSString *)entryPath destPath:(NSString *)destPath;

@end
NS_ASSUME_NONNULL_END
#endif /* _SSZIPARCHIVE_H */
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0
/* Atheros AR71xx built-in ethernet mac driver
*
* Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
*
* List of authors contributed to this driver before mainlining:
* Alexander Couzens <lynxis@fe80.eu>
* Christian Lamparter <chunkeey@gmail.com>
* Chuanhong Guo <gch981213@gmail.com>
* Daniel F. Dickinson <cshored@thecshore.com>
* David Bauer <mail@david-bauer.net>
* Felix Fietkau <nbd@nbd.name>
* Gabor Juhos <juhosg@freemail.hu>
* Hauke Mehrtens <hauke@hauke-m.de>
* Johann Neuhauser <johann@it-neuhauser.de>
* John Crispin <john@phrozen.org>
* Jo-Philipp Wich <jo@mein.io>
* Koen Vandeputte <koen.vandeputte@ncentric.com>
* Lucian Cristian <lucian.cristian@gmail.com>
* Matt Merhar <mattmerhar@protonmail.com>
* Milan Krstic <milan.krstic@gmail.com>
* Petr Štetiar <ynezz@true.cz>
* Rosen Penev <rosenp@gmail.com>
* Stephen Walker <stephendwalker+github@gmail.com>
* Vittorio Gambaletta <openwrt@vittgam.net>
* Weijie Gao <hackpascal@gmail.com>
* Imre Kaloz <kaloz@openwrt.org>
*/
#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>
/* For our NAPI weight bigger does *NOT* mean better - it means more
* D-cache misses and lots more wasted cycles than we'll ever
* possibly gain from saving instructions.
*/
#define AG71XX_NAPI_WEIGHT 32
#define AG71XX_OOM_REFILL (1 + HZ / 10)
#define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX (AG71XX_INT_TX_PS)
#define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)
#define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL)
#define AG71XX_TX_MTU_LEN 1540
#define AG71XX_TX_RING_SPLIT 512
#define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT 128
#define AG71XX_RX_RING_SIZE_DEFAULT 256
#define AG71XX_MDIO_RETRY 1000
#define AG71XX_MDIO_DELAY 5
#define AG71XX_MDIO_MAX_CLK 5000000
/* Register offsets */
#define AG71XX_REG_MAC_CFG1 0x0000
#define MAC_CFG1_TXE BIT(0) /* Tx Enable */
#define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */
#define MAC_CFG1_RXE BIT(2) /* Rx Enable */
#define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */
#define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */
#define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */
#define MAC_CFG1_SR BIT(31) /* Soft Reset */
#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
MAC_CFG1_SRX | MAC_CFG1_STX)
#define AG71XX_REG_MAC_CFG2 0x0004
#define MAC_CFG2_FDX BIT(0)
#define MAC_CFG2_PAD_CRC_EN BIT(2)
#define MAC_CFG2_LEN_CHECK BIT(4)
#define MAC_CFG2_IF_1000 BIT(9)
#define MAC_CFG2_IF_10_100 BIT(8)
#define AG71XX_REG_MAC_MFL 0x0010
#define AG71XX_REG_MII_CFG 0x0020
#define MII_CFG_CLK_DIV_4 0
#define MII_CFG_CLK_DIV_6 2
#define MII_CFG_CLK_DIV_8 3
#define MII_CFG_CLK_DIV_10 4
#define MII_CFG_CLK_DIV_14 5
#define MII_CFG_CLK_DIV_20 6
#define MII_CFG_CLK_DIV_28 7
#define MII_CFG_CLK_DIV_34 8
#define MII_CFG_CLK_DIV_42 9
#define MII_CFG_CLK_DIV_50 10
#define MII_CFG_CLK_DIV_58 11
#define MII_CFG_CLK_DIV_66 12
#define MII_CFG_CLK_DIV_74 13
#define MII_CFG_CLK_DIV_82 14
#define MII_CFG_CLK_DIV_98 15
#define MII_CFG_RESET BIT(31)
#define AG71XX_REG_MII_CMD 0x0024
#define MII_CMD_READ BIT(0)
#define AG71XX_REG_MII_ADDR 0x0028
#define MII_ADDR_SHIFT 8
#define AG71XX_REG_MII_CTRL 0x002c
#define AG71XX_REG_MII_STATUS 0x0030
#define AG71XX_REG_MII_IND 0x0034
#define MII_IND_BUSY BIT(0)
#define MII_IND_INVALID BIT(2)
#define AG71XX_REG_MAC_IFCTL 0x0038
#define MAC_IFCTL_SPEED BIT(16)
#define AG71XX_REG_MAC_ADDR1 0x0040
#define AG71XX_REG_MAC_ADDR2 0x0044
#define AG71XX_REG_FIFO_CFG0 0x0048
#define FIFO_CFG0_WTM BIT(0) /* Watermark Module */
#define FIFO_CFG0_RXS BIT(1) /* Rx System Module */
#define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */
#define FIFO_CFG0_TXS BIT(3) /* Tx System Module */
#define FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */
#define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
| FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
#define FIFO_CFG0_ENABLE_SHIFT 8
#define AG71XX_REG_FIFO_CFG1 0x004c
#define AG71XX_REG_FIFO_CFG2 0x0050
#define AG71XX_REG_FIFO_CFG3 0x0054
#define AG71XX_REG_FIFO_CFG4 0x0058
#define FIFO_CFG4_DE BIT(0) /* Drop Event */
#define FIFO_CFG4_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG4_FC BIT(2) /* False Carrier */
#define FIFO_CFG4_CE BIT(3) /* Code Error */
#define FIFO_CFG4_CR BIT(4) /* CRC error */
#define FIFO_CFG4_LM BIT(5) /* Length Mismatch */
#define FIFO_CFG4_LO BIT(6) /* Length out of range */
#define FIFO_CFG4_OK BIT(7) /* Packet is OK */
#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
#define FIFO_CFG4_DR BIT(10) /* Dribble */
#define FIFO_CFG4_LE BIT(11) /* Long Event */
#define FIFO_CFG4_CF BIT(12) /* Control Frame */
#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
FIFO_CFG4_VT)
#define AG71XX_REG_FIFO_CFG5 0x005c
/* FIFO_CFG5: per-frame receive match/filter event bits. A set bit allows
 * frames flagged with that condition to pass the RX FIFO.
 */
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
/* Default filter: everything except length-mismatch, unsupported-opcode
 * and byte-mode (BM is toggled at runtime for gigabit links).
 */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)
/* DMA engine control/status registers. */
#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */
#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */
#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */
/* Poll count / per-iteration delay (ms) used while waiting for DMA stop. */
#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1
#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */
#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)
#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4
/* Extra bytes reserved in the MFL for an embedded switch header. */
#define ETH_SWITCH_HEADER_LEN	2
#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	| NETIF_MSG_PROBE		\
	| NETIF_MSG_LINK		\
	| NETIF_MSG_TIMER		\
	| NETIF_MSG_IFDOWN		\
	| NETIF_MSG_IFUP		\
	| NETIF_MSG_RX_ERR		\
	| NETIF_MSG_TX_ERR)
/* Descriptor ctrl-word layout. */
#define DESC_EMPTY	BIT(31)	/* descriptor is owned by software */
#define DESC_MORE	BIT(24)	/* frame continues in the next descriptor */
#define DESC_PKTLEN_M	0xfff	/* packet length mask (SoC dependent) */
/* Hardware DMA descriptor as consumed by the MAC engine. */
struct ag71xx_desc {
	u32 data;	/* bus address of the packet buffer */
	u32 ctrl;	/* DESC_EMPTY/DESC_MORE flags + packet length */
	u32 next;	/* bus address of the next descriptor in the ring */
	u32 pad;
} __aligned(4);
/* Per-descriptor stride in the coherent DMA area, cache-line padded. */
#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)
/* Software bookkeeping for one ring slot; a slot is either TX or RX,
 * never both, hence the union.
 */
struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;	/* skb to free on completion */
			unsigned int len;	/* bytes queued for this skb */
		} tx;
		struct {
			dma_addr_t dma_addr;	/* mapped RX buffer address */
			void *rx_buf;		/* page-frag backing store */
		} rx;
	};
};
/* One DMA ring (TX or RX). Ring size is always a power of two (1 << order)
 * so curr/dirty are free-running counters masked on use.
 */
struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;	/* producer index (next slot to fill) */
	unsigned int dirty;	/* consumer index (next slot to reclaim) */
	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;		/* log2 of the number of descriptors */
	u16 desc_split;		/* max bytes per TX descriptor, 0 = no split */
	dma_addr_t descs_dma;	/* bus address of the descriptor array */
	u8 *descs_cpu;		/* CPU mapping of the descriptor array */
};
/* Supported Atheros/Qualcomm SoC families; selects errata and FIFO data. */
enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};
/* Per-SoC device configuration, selected via the OF match table. */
struct ag71xx_dcfg {
	u32 max_frame_len;		/* hardware frame length limit */
	const u32 *fifodata;		/* FIFO_CFG1..3 initial values */
	u16 desc_pktlen_mask;		/* valid bits of ctrl packet length */
	bool tx_hang_workaround;	/* enable DMA-stuck detection */
	enum ag71xx_type type;
};
/* Driver-private state, one per MAC instance. */
struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;
	u16 rx_buf_size;	/* usable bytes per RX buffer */
	u8 rx_buf_offset;	/* headroom before packet data */
	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;		/* netif_msg_* verbosity mask */
	const struct ag71xx_dcfg *dcfg;
	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;
	/* dummy descriptor parked in the DMA engine while rings are torn down */
	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;
	phy_interface_t phy_if_mode;
	struct phylink *phylink;
	struct phylink_config phylink_config;
	struct delayed_work restart_work;	/* full restart after TX hang */
	struct timer_list oom_timer;		/* reschedules NAPI after OOM */
	struct reset_control *mac_reset;
	u32 fifodata[3];	/* mutable copy of dcfg->fifodata */
	int mac_idx;		/* 0 or 1, derived from the MMIO base address */
	struct reset_control *mdio_reset;
	struct mii_bus *mii_bus;
	struct clk *clk_mdio;
	struct clk *clk_eth;
};
/* Nonzero when the hardware has released this descriptor back to software. */
static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return !!(desc->ctrl & DESC_EMPTY);
}
static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}
/* Smallest power-of-two order that can hold @size ring entries. */
static int ag71xx_ring_size_order(int size)
{
	int order = fls(size - 1);

	return order;
}
/* True when this MAC instance belongs to the given SoC family. */
static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return type == ag->dcfg->type;
}
/* Write a MAC register and flush the posted write by reading it back, so
 * the access has reached the device before the caller proceeds.
 */
static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}
static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
return ioread32(ag->mac_base + reg);
}
/* Set bits in a MAC register (read-modify-write), flushing the write. */
static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;
	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}
/* Clear bits in a MAC register (read-modify-write), flushing the write. */
static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;
	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}
/* Unmask the given interrupt sources. */
static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}
/* Mask the given interrupt sources. */
static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
/* Poll the MII indicator register until the bus is idle.
 *
 * Returns 0 when idle, -ETIMEDOUT after AG71XX_MDIO_RETRY attempts.
 */
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;
	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;
		udelay(AG71XX_MDIO_DELAY);
		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;
		udelay(AG71XX_MDIO_DELAY);
	}
	netif_err(ag, link, ndev, "MDIO operation timed out\n");
	return -ETIMEDOUT;
}
/* mii_bus .read callback: clause-22 read of @reg on PHY @addr.
 *
 * Sequence: wait idle -> program address -> pulse read command ->
 * wait idle -> fetch result -> drop read mode.
 * Returns the register value, or a negative errno on bus timeout.
 */
static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;
	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;
	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;
	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);
	return val;
}
/* mii_bus .write callback: clause-22 write of @val to @reg on PHY @addr.
 * Writing MII_CTRL starts the transaction; completion is awaited before
 * returning so back-to-back accesses do not collide.
 */
static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;
	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);
	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
	return ag71xx_mdio_wait_busy(ag);
}
/* MDC divider tables, indexed by the MII_CFG clock-select field; the entry
 * is the reference-clock divisor it selects. Which table applies depends on
 * the SoC family (see ag71xx_mdio_get_divider()).
 */
static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};
static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};
static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};
/* Pick the smallest divider index whose resulting MDC frequency does not
 * exceed AG71XX_MDIO_MAX_CLK.
 *
 * @div: out parameter, receives the MII_CFG clock-select index.
 * Returns 0 on success, -EINVAL if the reference clock rate is unknown,
 * -ENOENT if no table entry is slow enough.
 */
static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;
	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;
	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}
	for (i = 0; i < ndivs; i++) {
		unsigned long t;
		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}
	return -ENOENT;
}
/* mii_bus .reset callback: program the MDC divider and pulse the MII_CFG
 * reset bit, with settle delays around each write.
 */
static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;
	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);
	return 0;
}
/* Set up the built-in MDIO bus: clock, optional reset line, and
 * registration of child PHYs from the "mdio" OF subnode.
 *
 * On success ag->mii_bus is set; on failure the mdio clock is released
 * and a negative errno is returned.
 */
static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	/* Fix: this local must not be 'static' - a function-local static
	 * pointer would be shared between MAC instances on dual-MAC SoCs.
	 */
	struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;
	np = dev->of_node;
	ag->mii_bus = NULL;
	ag->clk_mdio = devm_clk_get(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}
	err = clk_prepare_enable(ag->clk_mdio);
	if (err) {
		netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
		return err;
	}
	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus) {
		err = -ENOMEM;
		goto mdio_err_put_clk;
	}
	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
		err = PTR_ERR(ag->mdio_reset);
		goto mdio_err_put_clk;
	}
	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
	if (!IS_ERR(ag->mdio_reset)) {
		reset_control_assert(ag->mdio_reset);
		msleep(100);
		reset_control_deassert(ag->mdio_reset);
		msleep(200);
	}
	mnp = of_get_child_by_name(np, "mdio");
	err = of_mdiobus_register(mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		goto mdio_err_put_clk;
	ag->mii_bus = mii_bus;
	return 0;
mdio_err_put_clk:
	clk_disable_unprepare(ag->clk_mdio);
	return err;
}
/* Tear down the MDIO bus registered by ag71xx_mdio_probe(). Safe to call
 * when probe failed before registration (ag->mii_bus is then NULL).
 */
static void ag71xx_mdio_remove(struct ag71xx *ag)
{
	if (ag->mii_bus)
		mdiobus_unregister(ag->mii_bus);
	clk_disable_unprepare(ag->clk_mdio);
}
/* Quiesce the MAC: mask all interrupts and stop both DMA engines. */
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}
/* Heuristic detection of the hardware TX-hang erratum: after at least
 * 100 ms without TX progress on a link that is up, two specific RX/TX
 * state-machine signatures indicate a wedged DMA engine.
 *
 * Returns true when a full restart should be scheduled. The magic state
 * values come from vendor errata; keep them in sync with the workaround.
 */
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;
	timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;
	if (!netif_carrier_ok(ag->ndev))
		return false;
	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;
	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;
	return false;
}
/* Reclaim completed TX descriptors.
 *
 * @flush: when true, force-complete every queued descriptor (ring teardown);
 *         when false, stop at the first descriptor still owned by hardware.
 *
 * A multi-descriptor frame carries its skb only on the final descriptor,
 * so skb-less descriptors are counted in @n and acknowledged together once
 * the frame's last descriptor completes. Returns the number of skbs freed.
 */
static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);
	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;
		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;
		if (!flush && !ag71xx_desc_empty(desc)) {
			/* hardware still owns this descriptor; check for the
			 * TX-hang erratum before giving up for this pass
			 */
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}
		if (flush)
			desc->ctrl |= DESC_EMPTY;
		n++;
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
		ring->buf[i].tx.skb = NULL;
		bytes_compl += ring->buf[i].tx.len;
		sent++;
		ring->dirty += n;
		/* ack one TX_STATUS "packet sent" event per descriptor */
		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}
	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
	if (!sent)
		return 0;
	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;
	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	/* restart the queue once the ring drops below 3/4 occupancy */
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);
	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);
	return sent;
}
/* Wait (bounded) for both DMA engines to report disabled after their
 * enable bits were cleared; logs an error on timeout but does not fail.
 */
static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;
	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;
		mdelay(AG71XX_DMA_DELAY);
		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}
	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}
/* Bring both DMA engines to a clean idle state: stop them, wait for the
 * hardware to quiesce, park the descriptor pointers on the stop descriptor,
 * then drain all pending completion and error status bits.
 */
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;
	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
	/* give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);
	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}
	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);
	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
	/* mask out reserved bits */
	val &= ~0xff000000;
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}
/* Program baseline MAC and FIFO configuration. The max frame length is
 * deliberately zeroed here; the real value is written when the interface
 * is opened (ag71xx_open) or fast-reset.
 */
static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;
	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}
/* On-wire frame length for a given MTU: switch header + Ethernet header +
 * VLAN tag + payload + FCS.
 */
static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	unsigned int overhead;

	overhead = ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	return mtu + overhead;
}
/* Load the station MAC address into the two hardware address registers.
 * ADDR1 holds bytes 5..2 (MSB first), ADDR2 holds bytes 1..0 in its top
 * half - the hardware's expected layout.
 */
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;
	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
/* Reset the MAC block without a full interface bounce (used on link
 * reconfiguration for SoCs other than AR7100/AR9130). Registers that the
 * reset clobbers - MII config, RX descriptor pointer, max frame length,
 * MAC address - are saved/rewritten around the reset pulse.
 */
static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;
	ag71xx_hw_stop(ag);
	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
	/* force-complete all in-flight TX so the ring can be restarted */
	ag71xx_tx_packets(ag, true);
	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);
	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);
	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
/* Resume traffic: enable the RX engine, unmask interrupts and wake the
 * transmit queue. TX DMA is kicked lazily from xmit.
 */
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
	netif_wake_queue(ag->ndev);
}
/* phylink .mac_config callback. In non-inband modes a fast reset is
 * required (except AR7100/AR9130), and FIFO_CFG3's upper half is derived
 * from the TX descriptor split threshold when splitting is active.
 */
static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	if (phylink_autoneg_inband(mode))
		return;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);
	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}
/* phylink .validate callback: restrict supported/advertised link modes to
 * what this SoC family and MAC index can physically drive. Each interface
 * mode is only wired up on specific (SoC, mac_idx) combinations; anything
 * else clears the supported mask entirely.
 */
static void ag71xx_mac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	switch (state->interface) {
	case PHY_INTERFACE_MODE_NA:
		break;
	case PHY_INTERFACE_MODE_MII:
		if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
		    ag71xx_is(ag, AR9340) ||
		    ag71xx_is(ag, QCA9530) ||
		    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_GMII:
		if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
		    (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
		    (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_SGMII:
		if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_RMII:
		if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
			break;
		goto unsupported;
	case PHY_INTERFACE_MODE_RGMII:
		if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
		    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
			break;
		goto unsupported;
	default:
		goto unsupported;
	}
	/* 10/100 is available on every supported combination ... */
	phylink_set(mask, MII);
	phylink_set(mask, Autoneg);
	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);
	/* ... gigabit only on interfaces that can carry it */
	if (state->interface == PHY_INTERFACE_MODE_NA ||
	    state->interface == PHY_INTERFACE_MODE_SGMII ||
	    state->interface == PHY_INTERFACE_MODE_RGMII ||
	    state->interface == PHY_INTERFACE_MODE_GMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}
	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;
unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
/* phylink .mac_pcs_get_state callback: the MAC has no in-band PCS status,
 * so always report link down and let the PHY drive link state.
 */
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	state->link = 0;
}
/* phylink .mac_an_restart callback: in-band autonegotiation restart is
 * not supported by this MAC; intentionally a no-op.
 */
static void ag71xx_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}
/* phylink .mac_link_down callback: stop DMA and mask interrupts. */
static void ag71xx_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	ag71xx_hw_stop(ag);
}
/* phylink .mac_link_up callback: program MAC_CFG2 interface width and
 * duplex, the IFCTL speed bit and the FIFO byte-mode bit to match the
 * resolved link, then restart traffic. Unknown speeds leave the MAC down.
 */
static void ag71xx_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= duplex ? MAC_CFG2_FDX : 0;
	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);
	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;
	switch (speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;	/* byte mode required for gigabit */
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		return;
	}
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
	ag71xx_hw_start(ag);
}
/* phylink MAC operations for this driver. */
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
	.validate = ag71xx_mac_validate,
	.mac_pcs_get_state = ag71xx_mac_pcs_get_state,
	.mac_an_restart = ag71xx_mac_an_restart,
	.mac_config = ag71xx_mac_config,
	.mac_link_down = ag71xx_mac_link_down,
	.mac_link_up = ag71xx_mac_link_up,
};
/* Create the phylink instance for this netdev using the interface mode
 * parsed from the device tree. Returns 0 or a negative errno.
 */
static int ag71xx_phylink_setup(struct ag71xx *ag)
{
	struct phylink *phylink;
	ag->phylink_config.dev = &ag->ndev->dev;
	ag->phylink_config.type = PHYLINK_NETDEV;
	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);
	ag->phylink = phylink;
	return 0;
}
/* Drop every outstanding TX frame (interface teardown path): hand all
 * descriptors back to software, free the attached skbs and account the
 * not-yet-completed ones as TX errors.
 */
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;
	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}
		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}
	/* flush descriptors */
	wmb();
	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}
/* Initialize the TX ring: chain every descriptor's next pointer into a
 * circular list, mark them all software-owned, and reset the indices.
 */
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}
	/* flush descriptors */
	wmb();
	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}
/* Unmap and free every RX buffer in the ring. Safe to call before the
 * ring was ever allocated (ring->buf NULL).
 */
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;
	if (!ring->buf)
		return;
	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}
/* Total allocation size for one RX buffer: payload area plus the aligned
 * skb_shared_info needed by build_skb().
 */
static int ag71xx_buffer_size(struct ag71xx *ag)
{
	int shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return ag->rx_buf_size + shinfo_size;
}
/* Allocate (via @alloc, e.g. netdev_alloc_frag or napi_alloc_frag) and
 * DMA-map a fresh RX buffer for the given ring slot, and point the slot's
 * descriptor at it (shifted by @offset for headroom).
 *
 * Returns false on allocation failure.
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() here - pre-existing behavior, verify upstream.
 */
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;
	/* recover the slot index from the buf pointer */
	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;
	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}
/* Initialize the RX ring: link the descriptors into a circle, then give
 * each slot a freshly allocated buffer and hand it to the hardware.
 *
 * Returns 0, or -ENOMEM if any buffer allocation fails (slots filled so
 * far are left in place; teardown reclaims them).
 */
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;
	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}
		desc->ctrl = DESC_EMPTY;
	}
	/* flush descriptors */
	wmb();
	ring->curr = 0;
	ring->dirty = 0;
	return ret;
}
/* Re-arm consumed RX slots with fresh buffers (NAPI context). Stops early
 * on allocation failure, leaving the remaining slots for the OOM timer.
 * Returns the number of descriptors re-armed.
 */
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;
	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;
		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;
		desc->ctrl = DESC_EMPTY;
		count++;
	}
	/* flush descriptors */
	wmb();
	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);
	return count;
}
/* Allocate and initialize both rings. The buf array and the coherent
 * descriptor area are each a single allocation shared by TX (first) and
 * RX (offset by the TX size). Returns 0 or -ENOMEM.
 */
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;
	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);
	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;
	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}
	/* RX views into the tail of the shared allocations */
	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}
/* Release the shared ring allocations made by ag71xx_rings_init(). The
 * TX ring owns both allocations; RX pointers are just views and are only
 * NULLed out.
 */
static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;
	ring_size = BIT(tx->order) + BIT(rx->order);
	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);
	kfree(tx->buf);
	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}
/* Full ring teardown: reclaim RX buffers, drop queued TX frames, free the
 * shared allocations and reset the qdisc byte-queue state.
 */
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);
	netdev_reset_queue(ag->ndev);
}
/* One-time hardware bring-up: soft-reset the MAC core, pulse the external
 * reset line with generous settle times, then apply baseline config and
 * reset the DMA engines.
 */
static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);
	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);
	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);
	ag71xx_hw_setup(ag);
	ag71xx_dma_reset(ag);
}
/* Allocate rings, enable NAPI, point the DMA engines at the rings and
 * start the transmit queue. Returns 0 or a negative errno from ring init.
 */
static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;
	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;
	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);
	return 0;
}
/* Inverse of ag71xx_hw_enable(): stop the queue and hardware, quiesce DMA,
 * disable NAPI, cancel the OOM timer and free the rings.
 */
static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);
	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);
	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);
	ag71xx_rings_cleanup(ag);
}
/* ndo_open: connect the PHY via phylink, size the RX buffers for the
 * current MTU, program the hardware frame limit and MAC address, enable
 * the datapath and start phylink. Returns 0 or a negative errno.
 */
static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;
	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
	if (ret) {
		/* fix typo in error message: "filed" -> "failed" */
		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
			  ret);
		goto err;
	}
	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;
	phylink_start(ag->phylink);
	return 0;
err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
/* ndo_stop: stop phylink, detach the PHY and disable the datapath. */
static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	phylink_stop(ag->phylink);
	phylink_disconnect_phy(ag->phylink);
	ag71xx_hw_disable(ag);
	return 0;
}
/* Fill one or more TX descriptors for a frame at bus address @addr of
 * @len bytes, splitting it into chunks of at most ring->desc_split bytes
 * (0 = no splitting).
 *
 * Returns the number of descriptors consumed, or -1 if the ring has no
 * free descriptor at the required position. The first descriptor is left
 * marked DESC_EMPTY; the caller clears it last so the hardware cannot
 * start on a half-built chain.
 */
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;
	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;
	if (!split)
		split = len;
	while (len > 0) {
		unsigned int cur_len = len;
		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc))
			return -1;
		if (cur_len > split) {
			cur_len = split;
			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}
		desc->data = addr;
		addr += cur_len;
		len -= cur_len;
		/* chain flag lives in the same word as the length */
		if (len > 0)
			cur_len |= DESC_MORE;
		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;
		desc->ctrl = cur_len;
		ndesc++;
	}
	return ndesc;
}
/* ndo_start_xmit: map the skb for DMA, build the descriptor chain, attach
 * the skb to the final descriptor, release the first descriptor to the
 * hardware and kick the TX engine.
 *
 * Always returns NETDEV_TX_OK; undeliverable frames are dropped and
 * counted in tx_dropped.
 */
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);
	/* DMA transfers of <= 4 bytes hang the engine */
	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}
	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	/* fix: never hand a failed mapping to the hardware */
	if (dma_mapping_error(&ag->pdev->dev, dma_addr)) {
		netif_dbg(ag, tx_err, ndev, "DMA mapping error\n");
		goto err_drop;
	}
	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);
	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;
	/* the skb is freed when the last descriptor of the chain completes */
	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;
	netdev_sent_queue(ndev, skb->len);
	skb_tx_timestamp(skb);
	/* release the first descriptor only after the whole chain is built */
	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;
	/* flush descriptor */
	wmb();
	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;
	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}
	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
	return NETDEV_TX_OK;
err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
err_drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* OOM retry timer: reschedule NAPI so RX refill can be retried after an
 * earlier buffer allocation failure.
 */
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);
	napi_schedule(&ag->napi);
}
/* ndo_tx_timeout: defer a full datapath restart to process context. */
static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(ndev);
	netif_err(ag, tx_err, ndev, "tx timeout\n");
	schedule_delayed_work(&ag->restart_work, 1);
}
/* Worker behind restart_work: bounce the datapath and phylink under RTNL
 * to recover from TX hangs/timeouts.
 */
static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);
	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);
	phylink_stop(ag->phylink);
	phylink_start(ag->phylink);
	rtnl_unlock();
}
/* NAPI RX path: harvest up to @limit completed descriptors, wrap each
 * buffer in an skb with build_skb(), batch-deliver via
 * netif_receive_skb_list(), then refill the consumed slots.
 * Returns the number of descriptors processed.
 */
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct sk_buff *next, *skb;
	struct ag71xx_ring *ring;
	struct list_head rx_list;
	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);
	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);
	INIT_LIST_HEAD(&rx_list);
	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		/* NOTE(review): err is never set non-zero below, so the
		 * rx_dropped branch is currently unreachable.
		 */
		int err = 0;
		if (ag71xx_desc_empty(desc))
			break;
		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}
		/* ack the "packet received" event for this descriptor */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;
		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;
		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}
		skb_reserve(skb, offset);
		skb_put(skb, pktlen);
		if (err) {
			ndev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = ndev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}
next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;
		ring->curr++;
	}
	ag71xx_ring_rx_refill(ag);
	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);
	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);
	return done;
}
/* NAPI poll: reclaim TX completions, receive up to @limit packets, handle
 * RX overflow, and decide whether to stay in polling mode.
 *
 * If the refill left the next slot without a buffer, we are out of memory:
 * arm the OOM timer and complete NAPI so the timer can re-trigger it.
 */
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;
	tx_done = ag71xx_tx_packets(ag, false);
	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);
	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;
	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;
		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}
	if (rx_done < limit) {
		/* more work arrived while we were processing - keep polling */
		if (status & RX_STATUS_PR)
			goto more;
		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;
		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d,limit=%d\n",
			  rx_done, tx_done, limit);
		napi_complete(napi);
		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}
more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;
oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");
	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}
/* Hard IRQ handler: log and ack bus errors, then mask the poll-related
 * sources and hand the rest of the work to NAPI.
 */
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag;
	u32 status;
	ag = netdev_priv(ndev);
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	if (unlikely(!status))
		return IRQ_NONE;	/* not ours (shared line) */
	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			netif_err(ag, intr, ndev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			netif_err(ag, intr, ndev, "RX BUS error\n");
		}
	}
	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		netif_dbg(ag, intr, ndev, "enable polling mode\n");
		napi_schedule(&ag->napi);
	}
	return IRQ_HANDLED;
}
/* ndo_change_mtu: store the new MTU and reprogram the hardware frame
 * length limit. Range checking is done by the core via min_mtu/max_mtu.
 */
static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);
	ndev->mtu = new_mtu;
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));
	return 0;
}
/* net_device operations for the ag71xx MAC. */
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/* MMIO base addresses of the two MACs; the index into this table is the
 * MAC index (ag->mac_idx) derived in probe.
 */
static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};
static int ag71xx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct ag71xx_dcfg *dcfg;
struct net_device *ndev;
struct resource *res;
const void *mac_addr;
int tx_size, err, i;
struct ag71xx *ag;
if (!np)
return -ENODEV;
ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
if (!ndev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
dcfg = of_device_get_match_data(&pdev->dev);
if (!dcfg)
return -EINVAL;
ag = netdev_priv(ndev);
ag->mac_idx = -1;
for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
if (ar71xx_addr_ar7100[i] == res->start)
ag->mac_idx = i;
}
if (ag->mac_idx < 0) {
netif_err(ag, probe, ndev, "unknown mac idx\n");
return -EINVAL;
}
ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
if (IS_ERR(ag->clk_eth)) {
netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
return PTR_ERR(ag->clk_eth);
}
SET_NETDEV_DEV(ndev, &pdev->dev);
ag->pdev = pdev;
ag->ndev = ndev;
ag->dcfg = dcfg;
ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
if (IS_ERR(ag->mac_reset)) {
netif_err(ag, probe, ndev, "missing mac reset\n");
err = PTR_ERR(ag->mac_reset);
goto err_free;
}
ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
}
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
0x0, dev_name(&pdev->dev), ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
ndev->irq);
goto err_free;
}
ndev->netdev_ops = &ag71xx_netdev_ops;
INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
ndev->min_mtu = 68;
ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);
ag->rx_buf_offset = NET_SKB_PAD;
if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
ag->rx_buf_offset += NET_IP_ALIGN;
if (ag71xx_is(ag, AR7100)) {
ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
tx_size *= AG71XX_TX_RING_DS_PER_PKT;
}
ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
if (!ag->stop_desc) {
err = -ENOMEM;
goto err_free;
}
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_random_addr(ndev->dev_addr);
}
err = of_get_phy_mode(np, &ag->phy_if_mode);
if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
goto err_free;
}
netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
err = clk_prepare_enable(ag->clk_eth);
if (err) {
netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
goto err_free;
}
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
ag71xx_hw_init(ag);
err = ag71xx_mdio_probe(ag);
if (err)
goto err_put_clk;
platform_set_drvdata(pdev, ndev);
err = ag71xx_phylink_setup(ag);
if (err) {
netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
goto err_mdio_remove;
}
err = register_netdev(ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
platform_set_drvdata(pdev, NULL);
goto err_mdio_remove;
}
netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
(unsigned long)ag->mac_base, ndev->irq,
phy_modes(ag->phy_if_mode));
return 0;
err_mdio_remove:
ag71xx_mdio_remove(ag);
err_put_clk:
clk_disable_unprepare(ag->clk_eth);
err_free:
free_netdev(ndev);
return err;
}
/*
 * Unbind the driver: tear down the netdev and MDIO bus and gate the clock.
 * The net_device itself and all devm-managed resources are released by the
 * driver core after this returns (no explicit free_netdev() here).
 */
static int ag71xx_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ag71xx *ag;

	if (!ndev)
		return 0;

	ag = netdev_priv(ndev);
	unregister_netdev(ndev);
	ag71xx_mdio_remove(ag);
	clk_disable_unprepare(ag->clk_eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/*
 * Per-SoC FIFO configuration values, copied into ag->fifodata at probe
 * time (presumably programmed into the FIFO_CFG registers during hw init;
 * confirm against ag71xx_hw_init).
 */
static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};

/*
 * Per-SoC capabilities: FIFO table, maximum frame length, the mask used
 * to extract the packet length from a descriptor, and whether the TX
 * hang workaround is needed.
 */
static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};
/* DT compatible strings -> per-SoC configuration */
static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};
/* platform driver glue; module init/exit generated by the macro below */
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= ag71xx_remove,
	.driver = {
		.name	= "ag71xx",
		.of_match_table = ag71xx_match,
	}
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");
| {
"pile_set_name": "Github"
} |
<?php
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Request signer producing HMAC-SHA256 signatures.
 */
class ShaHmac256Signer implements ISigner
{
    /**
     * Sign $source with $accessSecret and return the Base64-encoded
     * raw HMAC-SHA256 digest.
     */
    public function signString($source, $accessSecret)
    {
        $rawDigest = hash_hmac('sha256', $source, $accessSecret, true);
        return base64_encode($rawDigest);
    }

    /** Algorithm identifier advertised alongside the signature. */
    public function getSignatureMethod()
    {
        return "HMAC-SHA256";
    }

    /** Signature scheme version. */
    public function getSignatureVersion()
    {
        return "1.0";
    }
}
| {
"pile_set_name": "Github"
} |
"""Implementation of JSONEncoder
"""
from __future__ import absolute_import
import re
from operator import itemgetter
# Do not import Decimal directly to avoid reload issues
import decimal
from .compat import unichr, binary_type, text_type, string_types, integer_types, PY3
def _import_speedups():
try:
from . import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
# Bind the fast C implementations when available; the pure-Python
# fallbacks defined below are used otherwise.
c_encode_basestring_ascii, c_make_encoder = _import_speedups()

from .decoder import PosInf
from .raw_json import RawJSON

# Characters that must be escaped inside a JSON string.
ESCAPE = re.compile(r'[\x00-\x1f\\"]')
# As above, plus everything outside printable ASCII (ensure_ascii mode).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes that need decoding from UTF-8 on Python 2.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short-form escapes; all remaining control characters get \uXXXX below.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))

# repr() yields the shortest representation that round-trips the float.
FLOAT_REPR = repr
def encode_basestring(s, _PY3=PY3, _q=u'"'):
"""Return a JSON representation of a Python string
"""
if _PY3:
if isinstance(s, bytes):
s = str(s, 'utf-8')
elif type(s) is not str:
# convert an str subclass instance to exact str
# raise a TypeError otherwise
s = str.__str__(s)
else:
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = unicode(s, 'utf-8')
elif type(s) not in (str, unicode):
# convert an str subclass instance to exact str
# convert a unicode subclass instance to exact unicode
# raise a TypeError otherwise
if isinstance(s, str):
s = str.__str__(s)
else:
s = unicode.__getnewargs__(s)[0]
def replace(match):
return ESCAPE_DCT[match.group(0)]
return _q + ESCAPE.sub(replace, s) + _q
def py_encode_basestring_ascii(s, _PY3=PY3):
    """Return an ASCII-only JSON representation of a Python string.

    All non-ASCII code points are written as ``\\uXXXX`` escapes
    (as a surrogate pair for code points above the BMP).
    """
    if _PY3:
        if isinstance(s, bytes):
            s = str(s, 'utf-8')
        elif type(s) is not str:
            # convert an str subclass instance to exact str
            # raise a TypeError otherwise
            s = str.__str__(s)
    else:
        if isinstance(s, str) and HAS_UTF8.search(s) is not None:
            s = unicode(s, 'utf-8')
        elif type(s) not in (str, unicode):
            # convert an str subclass instance to exact str
            # convert a unicode subclass instance to exact unicode
            # raise a TypeError otherwise
            if isinstance(s, str):
                s = str.__str__(s)
            else:
                s = unicode.__getnewargs__(s)[0]
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'

# Prefer the C implementation when the speedups extension is present.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict, namedtuple  | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Default separators; tightened in __init__ when indent is given.
    item_separator = ', '
    key_separator = ': '

    def __init__(self, skipkeys=False, ensure_ascii=True,
                 check_circular=True, allow_nan=True, sort_keys=False,
                 indent=None, separators=None, encoding='utf-8', default=None,
                 use_decimal=True, namedtuple_as_object=True,
                 tuple_as_array=True, bigint_as_string=False,
                 item_sort_key=None, for_json=False, ignore_nan=False,
                 int_as_string_bitcount=None, iterable_as_array=False):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.

        If specified, separators should be an (item_separator, key_separator)
        tuple.  The default is (', ', ': ') if *indent* is ``None`` and
        (',', ': ') otherwise.  To get the most compact JSON representation,
        you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        If use_decimal is true (default: ``True``), ``decimal.Decimal`` will
        be supported directly by the encoder.  For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.

        If namedtuple_as_object is true (the default), objects with
        ``_asdict()`` methods will be encoded as JSON objects.

        If tuple_as_array is true (the default), tuple (and subclasses) will
        be encoded as JSON arrays.

        If *iterable_as_array* is true (default: ``False``),
        any object not in the above table that implements ``__iter__()``
        will be encoded as a JSON array.

        If bigint_as_string is true (not the default), ints 2**53 and higher
        or lower than -2**53 will be encoded as strings.  This is to avoid the
        rounding that happens in Javascript otherwise.

        If int_as_string_bitcount is a positive number (n), then int of size
        greater than or equal to 2**n or lower than or equal to -2**n will be
        encoded as strings.

        If specified, item_sort_key is a callable used to sort the items in
        each dictionary.  This is useful if you want to sort items other than
        in alphabetical order by key.

        If for_json is true (not the default), objects with a ``for_json()``
        method will use the return value of that method for encoding as JSON
        instead of the object.

        If *ignore_nan* is true (default: ``False``), then out of range
        :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized
        as ``null`` in compliance with the ECMA-262 specification. If true,
        this will override *allow_nan*.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        self.namedtuple_as_object = namedtuple_as_object
        self.tuple_as_array = tuple_as_array
        self.iterable_as_array = iterable_as_array
        self.bigint_as_string = bigint_as_string
        self.item_sort_key = item_sort_key
        self.for_json = for_json
        self.ignore_nan = ignore_nan
        self.int_as_string_bitcount = int_as_string_bitcount
        # Back-compat: an integer indent means that many spaces.
        if indent is not None and not isinstance(indent, string_types):
            indent = indent * ' '
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            # No trailing whitespace before the newline in indented output.
            self.item_separator = ','
        if default is not None:
            self.default = default
        self.encoding = encoding

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError('Object of type %s is not JSON serializable' %
                        o.__class__.__name__)

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, binary_type):
            _encoding = self.encoding
            if (_encoding is not None and not (_encoding == 'utf-8')):
                o = text_type(o, _encoding)
        if isinstance(o, string_types):
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8' and self.encoding is not None:
            # Wrap the encoder so bytes are decoded with the configured
            # (non-UTF-8) encoding first.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, binary_type):
                    o = text_type(o, _encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan, ignore_nan=self.ignore_nan,
                     _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                if type(o) != float:
                    # See #118, do not trust custom str/repr
                    o = float(o)
                return _repr(o)
            if ignore_nan:
                text = 'null'
            elif not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text

        key_memo = {}
        # bigint_as_string is shorthand for a 53-bit cutoff (JS safe ints).
        int_as_string_bitcount = (
            53 if self.bigint_as_string else self.int_as_string_bitcount)
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array,
                int_as_string_bitcount,
                self.item_sort_key, self.encoding, self.for_json,
                self.ignore_nan, decimal.Decimal, self.iterable_as_array)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array,
                int_as_string_bitcount,
                self.item_sort_key, self.encoding, self.for_json,
                self.iterable_as_array, Decimal=decimal.Decimal)
        try:
            return _iterencode(o, 0)
        finally:
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    To embed JSON content in, say, a script tag on a web page, the
    characters &amp;, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. &amp;amp;) because they are not expanded
    within <script> tags.

    This class also escapes the line separator and paragraph separator
    characters U+2028 and U+2029, irrespective of the ensure_ascii setting,
    as these characters are not valid in JavaScript strings (see
    http://timelessrepo.com/json-isnt-a-javascript-subset).
    """

    def encode(self, o):
        # Override JSONEncoder.encode because it has hacks for
        # performance that make things more complicated.
        chunks = self.iterencode(o, True)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
        for chunk in chunks:
            # Replace HTML-significant characters with \uXXXX escapes,
            # which are equivalent inside a JSON string.
            chunk = chunk.replace('&', '\\u0026')
            chunk = chunk.replace('<', '\\u003c')
            chunk = chunk.replace('>', '\\u003e')
            if not self.ensure_ascii:
                chunk = chunk.replace(u'\u2028', '\\u2028')
                chunk = chunk.replace(u'\u2029', '\\u2029')
            yield chunk
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal, _namedtuple_as_object, _tuple_as_array,
        _int_as_string_bitcount, _item_sort_key,
        _encoding, _for_json,
        _iterable_as_array,
        ## HACK: hand-optimized bytecode; turn globals into locals
        _PY3=PY3,
        ValueError=ValueError,
        string_types=string_types,
        Decimal=None,
        dict=dict,
        float=float,
        id=id,
        integer_types=integer_types,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
        iter=iter,
    ):
    """Build and return the pure-Python ``_iterencode`` generator closure.

    This is the fallback used when the C speedups are unavailable or when
    indented output is requested; all encoder options are captured as
    closure locals for speed.
    """
    if _use_decimal and Decimal is None:
        Decimal = decimal.Decimal
    if _item_sort_key and not callable(_item_sort_key):
        raise TypeError("item_sort_key must be None or callable")
    elif _sort_keys and not _item_sort_key:
        _item_sort_key = itemgetter(0)

    if (_int_as_string_bitcount is not None and
            (_int_as_string_bitcount <= 0 or
             not isinstance(_int_as_string_bitcount, integer_types))):
        raise TypeError("int_as_string_bitcount must be a positive integer")

    def _encode_int(value):
        # Emit large ints as quoted strings when a bitcount cutoff is set
        # (protects JavaScript consumers from precision loss).
        skip_quoting = (
            _int_as_string_bitcount is None
            or
            _int_as_string_bitcount < 1
        )
        if type(value) not in integer_types:
            # See #118, do not trust custom str/repr
            value = int(value)
        if (
            skip_quoting or
            (-1 << _int_as_string_bitcount)
            < value <
            (1 << _int_as_string_bitcount)
        ):
            return str(value)
        return '"' + str(value) + '"'

    def _iterencode_list(lst, _current_indent_level):
        # Yields the JSON array representation of lst chunk by chunk.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first element, each chunk starts with the
                # separator instead of the opening bracket.
                buf = separator
            if isinstance(value, string_types):
                yield buf + _encoder(value)
            elif _PY3 and isinstance(value, bytes) and _encoding is not None:
                yield buf + _encoder(value)
            elif isinstance(value, RawJSON):
                yield buf + value.encoded_json
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, integer_types):
                yield buf + _encode_int(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                yield buf
                for_json = _for_json and getattr(value, 'for_json', None)
                if for_json and callable(for_json):
                    chunks = _iterencode(for_json(), _current_indent_level)
                elif isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                else:
                    _asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
                    if _asdict and callable(_asdict):
                        chunks = _iterencode_dict(_asdict(),
                                                  _current_indent_level)
                    elif _tuple_as_array and isinstance(value, tuple):
                        chunks = _iterencode_list(value, _current_indent_level)
                    elif isinstance(value, dict):
                        chunks = _iterencode_dict(value, _current_indent_level)
                    else:
                        chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if first:
            # iterable_as_array misses the fast path at the top
            yield '[]'
        else:
            if newline_indent is not None:
                _current_indent_level -= 1
                yield '\n' + (_indent * _current_indent_level)
            yield ']'
        if markers is not None:
            del markers[markerid]

    def _stringify_key(key):
        # Coerce a non-string dict key to its JSON string form, or return
        # None to skip it (only when _skipkeys is set).
        if isinstance(key, string_types): # pragma: no cover
            pass
        elif _PY3 and isinstance(key, bytes) and _encoding is not None:
            key = str(key, _encoding)
        elif isinstance(key, float):
            key = _floatstr(key)
        elif key is True:
            key = 'true'
        elif key is False:
            key = 'false'
        elif key is None:
            key = 'null'
        elif isinstance(key, integer_types):
            if type(key) not in integer_types:
                # See #118, do not trust custom str/repr
                key = int(key)
            key = str(key)
        elif _use_decimal and isinstance(key, Decimal):
            key = str(key)
        elif _skipkeys:
            key = None
        else:
            raise TypeError('keys must be str, int, float, bool or None, '
                            'not %s' % key.__class__.__name__)
        return key

    def _iterencode_dict(dct, _current_indent_level):
        # Yields the JSON object representation of dct chunk by chunk.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _PY3:
            iteritems = dct.items()
        else:
            iteritems = dct.iteritems()
        if _item_sort_key:
            # Keys must be stringified *before* sorting so the sort key
            # sees the final representation.
            items = []
            for k, v in dct.items():
                if not isinstance(k, string_types):
                    k = _stringify_key(k)
                    if k is None:
                        continue
                items.append((k, v))
            items.sort(key=_item_sort_key)
        else:
            items = iteritems
        for key, value in items:
            if not (_item_sort_key or isinstance(key, string_types)):
                key = _stringify_key(key)
                if key is None:
                    # _skipkeys must be True
                    continue
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, string_types):
                yield _encoder(value)
            elif _PY3 and isinstance(value, bytes) and _encoding is not None:
                yield _encoder(value)
            elif isinstance(value, RawJSON):
                yield value.encoded_json
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, integer_types):
                yield _encode_int(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                for_json = _for_json and getattr(value, 'for_json', None)
                if for_json and callable(for_json):
                    chunks = _iterencode(for_json(), _current_indent_level)
                elif isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                else:
                    _asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
                    if _asdict and callable(_asdict):
                        chunks = _iterencode_dict(_asdict(),
                                                  _current_indent_level)
                    elif _tuple_as_array and isinstance(value, tuple):
                        chunks = _iterencode_list(value, _current_indent_level)
                    elif isinstance(value, dict):
                        chunks = _iterencode_dict(value, _current_indent_level)
                    else:
                        chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch: scalars inline, containers via the helpers,
        # everything else through for_json/_asdict/iterable/default hooks.
        if isinstance(o, string_types):
            yield _encoder(o)
        elif _PY3 and isinstance(o, bytes) and _encoding is not None:
            yield _encoder(o)
        elif isinstance(o, RawJSON):
            yield o.encoded_json
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, integer_types):
            yield _encode_int(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        else:
            for_json = _for_json and getattr(o, 'for_json', None)
            if for_json and callable(for_json):
                for chunk in _iterencode(for_json(), _current_indent_level):
                    yield chunk
            elif isinstance(o, list):
                for chunk in _iterencode_list(o, _current_indent_level):
                    yield chunk
            else:
                _asdict = _namedtuple_as_object and getattr(o, '_asdict', None)
                if _asdict and callable(_asdict):
                    for chunk in _iterencode_dict(_asdict(),
                            _current_indent_level):
                        yield chunk
                elif (_tuple_as_array and isinstance(o, tuple)):
                    for chunk in _iterencode_list(o, _current_indent_level):
                        yield chunk
                elif isinstance(o, dict):
                    for chunk in _iterencode_dict(o, _current_indent_level):
                        yield chunk
                elif _use_decimal and isinstance(o, Decimal):
                    yield str(o)
                else:
                    while _iterable_as_array:
                        # Markers are not checked here because it is valid for
                        # an iterable to return self.
                        try:
                            o = iter(o)
                        except TypeError:
                            break
                        for chunk in _iterencode_list(o, _current_indent_level):
                            yield chunk
                        return
                    if markers is not None:
                        markerid = id(o)
                        if markerid in markers:
                            raise ValueError("Circular reference detected")
                        markers[markerid] = o
                    o = _default(o)
                    for chunk in _iterencode(o, _current_indent_level):
                        yield chunk
                    if markers is not None:
                        del markers[markerid]
    return _iterencode
| {
"pile_set_name": "Github"
} |
// NOTE(review): this snippet does not compile as written — `fn foo() -> {`
// is missing a return type after `->`, and `some()` is not a standard V
// builtin.  It looks like a deliberately-broken compiler test fixture, so
// the code is intentionally left unchanged.
fn foo() -> {
	x := [5]
	return some(x)
}

fn main() {
	x := foo()
	println(x)
}
| {
"pile_set_name": "Github"
} |
import * as React from "react";
import { storiesOf } from "@storybook/react";
import DocRefEditor, { useDocRefEditor } from ".";
import { useDocumentTree } from "components/DocumentEditors/api/explorer";
import { iterateNodes } from "lib/treeUtils/treeUtils";
import DocRefTypePicker from "components/DocRefTypePicker";
import {
useDocumentApi,
ResourcesByDocType,
} from "components/DocumentEditors/useDocumentApi";
import JsonDebug from "testing/JsonDebug";
// Storybook harness: lets the user pick a document type, locates the first
// node of that type in the explorer tree and mounts a DocRefEditor for it,
// alongside a JSON dump of the editor state for debugging.
const TestHarness: React.FunctionComponent = () => {
  const { documentTree } = useDocumentTree();

  // Currently selected document type; the picker hands back a plain string,
  // so it is narrowed to the keys the document API understands.
  const [docRefType, setDocRefType] = React.useState<keyof ResourcesByDocType>(
    "Dictionary",
  );
  const setDocRefTypeSafe = React.useCallback(
    (d) => setDocRefType(d as keyof ResourcesByDocType),
    [setDocRefType],
  );
  const documentApi = useDocumentApi(docRefType);

  // UUID of the first node matching the selected type anywhere in the tree,
  // falling back to the tree root when none exists.
  const docRefUuid = React.useMemo(() => {
    let d;
    iterateNodes(documentTree, (_, node) => {
      if (node.type === docRefType) {
        d = node.uuid;
        return true; // stop iterating on first match
      }
      return false;
    });
    return d || documentTree.uuid;
  }, [docRefType, documentTree]);

  const { editorProps, onDocumentChange } = useDocRefEditor({
    docRefUuid,
    documentApi,
  });
  const { docRefContents } = editorProps;

  // Contents load asynchronously; render a placeholder until they arrive.
  return !!docRefContents ? (
    <DocRefEditor {...editorProps}>
      <DocRefTypePicker value={docRefType} onChange={setDocRefTypeSafe} />
      <JsonDebug
        value={{
          documentApi: Object.keys(documentApi),
          docRefContents,
          onDocumentChange: JSON.stringify(onDocumentChange),
        }}
      />
    </DocRefEditor>
  ) : (
    <div>Nowt Available</div>
  );
};
// Register the harness as a storybook entry.
storiesOf("Document Editors", module).add("Document Editors", () => (
  <TestHarness />
));
| {
"pile_set_name": "Github"
} |
package com.lxj.xrefreshlayout.loadinglayout;
import android.animation.ArgbEvaluator;
import android.animation.IntEvaluator;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.RectF;
import android.util.AttributeSet;
import android.view.View;
import com.lxj.xrefreshlayout.util.DensityUtil;
/**
* Created by dance on 2017/5/8.
*/
/**
 * Circular "pie" loading indicator: draws a filled arc that sweeps from a
 * start angle (default 270째, i.e. 12 o'clock) through 360째 * progress,
 * fading in as progress grows.
 */
public class CircleLoadingView extends View {
    private int progressColor = Color.BLUE;
    // Inset between the view edge and the arc, in pixels.
    private int progressBorderWidth = 4;
    // Current progress in [0, 1].
    private float progress = 0;
    // Start angle of the arc in degrees.
    private float start = 270;
    private Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    IntEvaluator intEval = new IntEvaluator();
    ArgbEvaluator argbEval = new ArgbEvaluator();

    public CircleLoadingView(Context context) {
        this(context, null);
    }

    public CircleLoadingView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public CircleLoadingView(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
        progressBorderWidth = DensityUtil.dip2px(context, 4);
        setPaint();
    }

    private void setPaint() {
        paint.setColor(progressColor);
        paint.setStyle(Paint.Style.FILL_AND_STROKE);
        paint.setStrokeWidth(progressBorderWidth);
    }

    // Cached arc bounds; lazily (re)built in onDraw once the size is known.
    RectF rect = null;

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        if (rect == null) {
            rect = new RectF(progressBorderWidth, progressBorderWidth,
                    getWidth() - progressBorderWidth,
                    getHeight() - progressBorderWidth);
        }
        // Fade in with progress (alpha 30..255), then sweep 0..360 degrees.
        paint.setAlpha(intEval.evaluate(progress, 30, 255));
        float p = 360 * progress;
        canvas.drawArc(rect, start, p, true, paint);
    }

    @Override
    protected void onSizeChanged(int w, int h, int oldw, int oldh) {
        super.onSizeChanged(w, h, oldw, oldh);
        // Fix: the cached bounds were never refreshed, so a resized view
        // kept drawing with stale geometry.
        rect = null;
    }

    /**
     * Set the start angle of the arc in degrees (default 270).
     */
    public void setStart(float start) {
        this.start = start;
    }

    /**
     * Set the progress, in the range [0, 1], and redraw.
     */
    public void setProgress(float progress) {
        this.progress = progress;
        invalidate();
    }

    /**
     * Set the arc color. Default is {@link Color#BLUE}.
     */
    public void setProgressColor(int color) {
        this.progressColor = color;
        paint.setColor(progressColor);
    }

    /**
     * Set the border/inset width in pixels.
     * Fix: previously this only stored the field, so the paint stroke width
     * and the already-computed arc bounds never reflected the new value.
     */
    public void setProgressBorderWidth(int borderWidth) {
        this.progressBorderWidth = borderWidth;
        paint.setStrokeWidth(borderWidth);
        rect = null; // bounds depend on the inset; recompute on next draw
    }
}
| {
"pile_set_name": "Github"
} |
# Change Log
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
# [1.0.0-gamma.9](https://github.com/aws/aws-sdk-js-v3/compare/@aws-sdk/client-pinpoint-email@1.0.0-gamma.8...@aws-sdk/client-pinpoint-email@1.0.0-gamma.9) (2020-09-15)
### Bug Fixes
* default import package.json for spec compatibility ([#1505](https://github.com/aws/aws-sdk-js-v3/issues/1505)) ([797ba7d](https://github.com/aws/aws-sdk-js-v3/commit/797ba7dc7743eb65e8f81536bcf70e5c225ef861))
* toposort and chunk shape models ([#1510](https://github.com/aws/aws-sdk-js-v3/issues/1510)) ([bee87d8](https://github.com/aws/aws-sdk-js-v3/commit/bee87d8fcc5ea82a361386309ebf9330fe39c816))
# [1.0.0-gamma.8](https://github.com/aws/aws-sdk-js-v3/compare/@aws-sdk/client-pinpoint-email@1.0.0-gamma.7...@aws-sdk/client-pinpoint-email@1.0.0-gamma.8) (2020-09-01)
### Features
* add LoggerConfig to all clients ([#1472](https://github.com/aws/aws-sdk-js-v3/issues/1472)) ([d55a812](https://github.com/aws/aws-sdk-js-v3/commit/d55a81278fee13281b20bfa60d89d2b111245dd4))
# [1.0.0-gamma.7](https://github.com/aws/aws-sdk-js-v3/compare/@aws-sdk/client-pinpoint-email@1.0.0-gamma.6...@aws-sdk/client-pinpoint-email@1.0.0-gamma.7) (2020-08-25)
### Features
* adds client paginators ([#1458](https://github.com/aws/aws-sdk-js-v3/issues/1458)) ([0c7f7ee](https://github.com/aws/aws-sdk-js-v3/commit/0c7f7eee39eef17a5baee3c0db78682a54b1028a))
* get partition of given region ([#1435](https://github.com/aws/aws-sdk-js-v3/issues/1435)) ([c18bfe4](https://github.com/aws/aws-sdk-js-v3/commit/c18bfe489db77d945d0bcc4ae7194ff46cd461a9))
* refactor nodejs region loader ([#1437](https://github.com/aws/aws-sdk-js-v3/issues/1437)) ([5d79645](https://github.com/aws/aws-sdk-js-v3/commit/5d79645eb622b111c94a7de1918c8357c83a7bf8))
* refactor nodejs retry config loader ([#1438](https://github.com/aws/aws-sdk-js-v3/issues/1438)) ([5478012](https://github.com/aws/aws-sdk-js-v3/commit/5478012147b475bdce07a2cbe393a972e502c93f))
* update clients with smithy models as of 08/20 ([#1457](https://github.com/aws/aws-sdk-js-v3/issues/1457)) ([f95cce3](https://github.com/aws/aws-sdk-js-v3/commit/f95cce338fcdc49ead6e3ca6d178a6fd58ae556f))
# [1.0.0-gamma.6](https://github.com/aws/aws-sdk-js-v3/compare/@aws-sdk/client-pinpoint-email@1.0.0-gamma.5...@aws-sdk/client-pinpoint-email@1.0.0-gamma.6) (2020-08-04)
**Note:** Version bump only for package @aws-sdk/client-pinpoint-email
# [1.0.0-gamma.5](https://github.com/aws/aws-sdk-js-v3/compare/@aws-sdk/client-pinpoint-email@1.0.0-gamma.4...@aws-sdk/client-pinpoint-email@1.0.0-gamma.5) (2020-07-21)
**Note:** Version bump only for package @aws-sdk/client-pinpoint-email
# [1.0.0-gamma.4](https://github.com/aws/aws-sdk-js-v3/compare/@aws-sdk/client-pinpoint-email@1.0.0-gamma.3...@aws-sdk/client-pinpoint-email@1.0.0-gamma.4) (2020-07-13)
**Note:** Version bump only for package @aws-sdk/client-pinpoint-email
# 1.0.0-gamma.3 (2020-07-08)
### Features
* add filterSensitiveLog method to Structure namespaces ([#1130](https://github.com/aws/aws-sdk-js-v3/issues/1130)) ([8eff087](https://github.com/aws/aws-sdk-js-v3/commit/8eff0875580e30e12f2e0abd5fa402973790e697))
# 1.0.0-gamma.2 (2020-05-26)
# 1.0.0-gamma.1 (2020-05-21)
### Bug Fixes
* add default value to stream collector ([#1131](https://github.com/aws/aws-sdk-js-v3/issues/1131)) ([030082a](https://github.com/aws/aws-sdk-js-v3/commit/030082a0378f873da34c5381c7889754c5bde9d3))
* use JS url parser in ReactNative ([#1129](https://github.com/aws/aws-sdk-js-v3/issues/1129)) ([efc8570](https://github.com/aws/aws-sdk-js-v3/commit/efc8570af4019ce4f07a94afde82661ad64bf3d4))
### Features
* bump up to gamma version ([#1192](https://github.com/aws/aws-sdk-js-v3/issues/1192)) ([a609075](https://github.com/aws/aws-sdk-js-v3/commit/a6090754f2a6c21e5b70bf0c8782cc0fbe59ee12))
* refactor http request handlers ([#1186](https://github.com/aws/aws-sdk-js-v3/issues/1186)) ([605ebc5](https://github.com/aws/aws-sdk-js-v3/commit/605ebc57d2ec140ae5dd1c152168ec786e6663d9))
# 1.0.0-beta.4 (2020-04-25)
### Bug Fixes
* move endpoint resolution to the serializers ([#1106](https://github.com/aws/aws-sdk-js-v3/issues/1106)) ([08c9420](https://github.com/aws/aws-sdk-js-v3/commit/08c9420db1ba9c3faf3ed26aa1244646bacff1d1))
* request default endpoints from serde context being overwritten ([#1097](https://github.com/aws/aws-sdk-js-v3/issues/1097)) ([299d2a1](https://github.com/aws/aws-sdk-js-v3/commit/299d2a19bddfbab1b70552fd7a6b669ef7762288))
### Features
* add default destroy function to clients ([#1081](https://github.com/aws/aws-sdk-js-v3/issues/1081)) ([7eb0f0e](https://github.com/aws/aws-sdk-js-v3/commit/7eb0f0e5debfafe08c51dc4f99dcf29d79dea358))
* codegen for issue fixed in protocol test ([#1086](https://github.com/aws/aws-sdk-js-v3/issues/1086)) ([8e077c7](https://github.com/aws/aws-sdk-js-v3/commit/8e077c7f1c1363a3a1f8522e6ee793bd57546c0e))
# 1.0.0-beta.3 (2020-03-30)
# 1.0.0-beta.2 (2020-03-28)
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-gamma.2 (2020-05-26)
# 1.0.0-gamma.1 (2020-05-21)
### Bug Fixes
* add default value to stream collector ([#1131](https://github.com/aws/aws-sdk-js-v3/issues/1131)) ([030082a](https://github.com/aws/aws-sdk-js-v3/commit/030082a0378f873da34c5381c7889754c5bde9d3))
* use JS url parser in ReactNative ([#1129](https://github.com/aws/aws-sdk-js-v3/issues/1129)) ([efc8570](https://github.com/aws/aws-sdk-js-v3/commit/efc8570af4019ce4f07a94afde82661ad64bf3d4))
### Features
* bump up to gamma version ([#1192](https://github.com/aws/aws-sdk-js-v3/issues/1192)) ([a609075](https://github.com/aws/aws-sdk-js-v3/commit/a6090754f2a6c21e5b70bf0c8782cc0fbe59ee12))
* refactor http request handlers ([#1186](https://github.com/aws/aws-sdk-js-v3/issues/1186)) ([605ebc5](https://github.com/aws/aws-sdk-js-v3/commit/605ebc57d2ec140ae5dd1c152168ec786e6663d9))
# 1.0.0-beta.4 (2020-04-25)
### Bug Fixes
* move endpoint resolution to the serializers ([#1106](https://github.com/aws/aws-sdk-js-v3/issues/1106)) ([08c9420](https://github.com/aws/aws-sdk-js-v3/commit/08c9420db1ba9c3faf3ed26aa1244646bacff1d1))
* request default endpoints from serde context being overwritten ([#1097](https://github.com/aws/aws-sdk-js-v3/issues/1097)) ([299d2a1](https://github.com/aws/aws-sdk-js-v3/commit/299d2a19bddfbab1b70552fd7a6b669ef7762288))
### Features
* add default destroy function to clients ([#1081](https://github.com/aws/aws-sdk-js-v3/issues/1081)) ([7eb0f0e](https://github.com/aws/aws-sdk-js-v3/commit/7eb0f0e5debfafe08c51dc4f99dcf29d79dea358))
* codegen for issue fixed in protocol test ([#1086](https://github.com/aws/aws-sdk-js-v3/issues/1086)) ([8e077c7](https://github.com/aws/aws-sdk-js-v3/commit/8e077c7f1c1363a3a1f8522e6ee793bd57546c0e))
# 1.0.0-beta.3 (2020-03-30)
# 1.0.0-beta.2 (2020-03-28)
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-gamma.1 (2020-05-21)
### Bug Fixes
* add default value to stream collector ([#1131](https://github.com/aws/aws-sdk-js-v3/issues/1131)) ([030082a](https://github.com/aws/aws-sdk-js-v3/commit/030082a0378f873da34c5381c7889754c5bde9d3))
* use JS url parser in ReactNative ([#1129](https://github.com/aws/aws-sdk-js-v3/issues/1129)) ([efc8570](https://github.com/aws/aws-sdk-js-v3/commit/efc8570af4019ce4f07a94afde82661ad64bf3d4))
### Features
* bump up to gamma version ([#1192](https://github.com/aws/aws-sdk-js-v3/issues/1192)) ([a609075](https://github.com/aws/aws-sdk-js-v3/commit/a6090754f2a6c21e5b70bf0c8782cc0fbe59ee12))
* refactor http request handlers ([#1186](https://github.com/aws/aws-sdk-js-v3/issues/1186)) ([605ebc5](https://github.com/aws/aws-sdk-js-v3/commit/605ebc57d2ec140ae5dd1c152168ec786e6663d9))
# 1.0.0-beta.4 (2020-04-25)
### Bug Fixes
* move endpoint resolution to the serializers ([#1106](https://github.com/aws/aws-sdk-js-v3/issues/1106)) ([08c9420](https://github.com/aws/aws-sdk-js-v3/commit/08c9420db1ba9c3faf3ed26aa1244646bacff1d1))
* request default endpoints from serde context being overwritten ([#1097](https://github.com/aws/aws-sdk-js-v3/issues/1097)) ([299d2a1](https://github.com/aws/aws-sdk-js-v3/commit/299d2a19bddfbab1b70552fd7a6b669ef7762288))
### Features
* add default destroy function to clients ([#1081](https://github.com/aws/aws-sdk-js-v3/issues/1081)) ([7eb0f0e](https://github.com/aws/aws-sdk-js-v3/commit/7eb0f0e5debfafe08c51dc4f99dcf29d79dea358))
* codegen for issue fixed in protocol test ([#1086](https://github.com/aws/aws-sdk-js-v3/issues/1086)) ([8e077c7](https://github.com/aws/aws-sdk-js-v3/commit/8e077c7f1c1363a3a1f8522e6ee793bd57546c0e))
# 1.0.0-beta.3 (2020-03-30)
# 1.0.0-beta.2 (2020-03-28)
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-beta.5 (2020-04-27)
### Features
* use exact @aws-sdk/* dependencies ([#1110](https://github.com/aws/aws-sdk-js-v3/issues/1110)) ([bcfd7a2](https://github.com/aws/aws-sdk-js-v3/commit/bcfd7a2faeca3a2605057fd4736d710aa4902b62))
# 1.0.0-beta.3 (2020-03-30)
# 1.0.0-beta.2 (2020-03-28)
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-beta.4 (2020-04-25)
### Bug Fixes
* move endpoint resolution to the serializers ([#1106](https://github.com/aws/aws-sdk-js-v3/issues/1106)) ([08c9420](https://github.com/aws/aws-sdk-js-v3/commit/08c9420db1ba9c3faf3ed26aa1244646bacff1d1))
* request default endpoints from serde context being overwritten ([#1097](https://github.com/aws/aws-sdk-js-v3/issues/1097)) ([299d2a1](https://github.com/aws/aws-sdk-js-v3/commit/299d2a19bddfbab1b70552fd7a6b669ef7762288))
### Features
* add default destroy function to clients ([#1081](https://github.com/aws/aws-sdk-js-v3/issues/1081)) ([7eb0f0e](https://github.com/aws/aws-sdk-js-v3/commit/7eb0f0e5debfafe08c51dc4f99dcf29d79dea358))
* codegen for issue fixed in protocol test ([#1086](https://github.com/aws/aws-sdk-js-v3/issues/1086)) ([8e077c7](https://github.com/aws/aws-sdk-js-v3/commit/8e077c7f1c1363a3a1f8522e6ee793bd57546c0e))
# 1.0.0-beta.3 (2020-03-30)
# 1.0.0-beta.2 (2020-03-28)
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-beta.3 (2020-03-30)
# 1.0.0-beta.2 (2020-03-28)
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-beta.2 (2020-03-28)
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-beta.1 (2020-03-25)
### Features
* bump packages to beta ([#1050](https://github.com/aws/aws-sdk-js-v3/issues/1050)) ([40501d4](https://github.com/aws/aws-sdk-js-v3/commit/40501d4394d04bc1bc91c10136fa48b1d3a67d8f))
# 1.0.0-alpha.28 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabls/smithy-typescript#141](https://github.com/awslabls/smithy-typescript/issues/141)
* codegen for fixing streaming member shape([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-alpha.25 (2020-03-20)
# 1.0.0-alpha.27 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabs/smithy-typescript#141](https://github.com/awslabs/smithy-typescript/issues/141)
* codegen for fixing streaming member shape ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-alpha.24 (2020-03-13)
### Bug Fixes
* codegen for using pure JS hasher in RN ([#998](https://github.com/aws/aws-sdk-js-v3/issues/998)) ([022cba5](https://github.com/aws/aws-sdk-js-v3/commit/022cba59168998bea8a263687395d27eae375d30)), closes [awslabs/smithy-typescript#144](https://github.com/awslabs/smithy-typescript/issues/144)
# 1.0.0-alpha.26 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabs/smithy-typescript#141](https://github.com/awslabs/smithy-typescript/issues/141)
* codegen for fixing streaming member shape ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-alpha.23 (2020-03-12)
# 1.0.0-alpha.25 (2020-03-09)
### Features
* codegen for fixing protocol tests ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabs/smithy-typescript#141](https://github.com/awslabs/smithy-typescript/issues/141)
* codegen for fixing streaming member shape ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-alpha.22 (2020-03-09)
### Features
* codegen for fixing protocol tests ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([15a66c7](https://github.com/aws/aws-sdk-js-v3/commit/15a66c720f49884087126d6d573c64b6a4a16dc5)), closes [awslabs/smithy-typescript#141](https://github.com/awslabs/smithy-typescript/issues/141)
* codegen for fixing streaming member shape ([#968](https://github.com/aws/aws-sdk-js-v3/issues/968)) ([c7f13dc](https://github.com/aws/aws-sdk-js-v3/commit/c7f13dc0eda6217452bd37b1b7fa04bcc931deab)), closes [awslabs/smithy-typescript#138](https://github.com/awslabs/smithy-typescript/issues/138) [awslabs/smithy-typescript#140](https://github.com/awslabs/smithy-typescript/issues/140)
# 1.0.0-alpha.24 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb235cebf6cc8d4e073b517a78621fa7eaf))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464fb0374a8a3ba5a344f6b8c6aea5c85f2a2)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4f57400126767ab217c7b08bdf97963a10))
# 1.0.0-alpha.21 (2020-02-19)
# 1.0.0-alpha.23 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.20 (2020-02-14)
# 1.0.0-alpha.22 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.19 (2020-02-11)
# 1.0.0-alpha.21 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.18 (2020-02-11)
# 1.0.0-alpha.20 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.17 (2020-02-09)
# 1.0.0-alpha.19 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.16 (2020-02-07)
# 1.0.0-alpha.18 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.15 (2020-02-06)
# 1.0.0-alpha.17 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.14 (2020-02-05)
# 1.0.0-alpha.16 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.13 (2020-02-04)
# 1.0.0-alpha.15 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.12 (2020-01-30)
# 1.0.0-alpha.14 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.11 (2020-01-28)
# 1.0.0-alpha.13 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.10 (2020-01-24)
# 1.0.0-alpha.12 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.9 (2020-01-22)
# 1.0.0-alpha.9 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.8 (2020-01-17)
# 1.0.0-alpha.8 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.7 (2020-01-16)
# 1.0.0-alpha.4 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.6 (2020-01-14)
# 1.0.0-alpha.3 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.5 (2020-01-14)
### Bug Fixes
* update clients with correct endpoint prefix ([#720](https://github.com/aws/aws-sdk-js-v3/issues/720)) ([5356dbb](https://github.com/aws/aws-sdk-js-v3/commit/5356dbb))
# 1.0.0-alpha.2 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.4 (2020-01-10)
### Features
* update clients ([#717](https://github.com/aws/aws-sdk-js-v3/issues/717)) ([dc9464f](https://github.com/aws/aws-sdk-js-v3/commit/dc9464f)), closes [#694](https://github.com/aws/aws-sdk-js-v3/issues/694) [smithy-typescript#66](https://github.com/smithy-typescript/issues/66) [smithy-typescript#87](https://github.com/smithy-typescript/issues/87)
# 1.0.0-alpha.1 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.3 (2020-01-10)
# 0.9.0 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.2 (2020-01-09)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
# 1.0.0-alpha.1 (2020-01-08)
### Features
* add client-pinpoint-email ([#643](https://github.com/aws/aws-sdk-js-v3/issues/643)) ([a2036c4](https://github.com/aws/aws-sdk-js-v3/commit/a2036c4))
| {
"pile_set_name": "Github"
} |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_BROWSER_NET_NETWORK_QUALITY_OBSERVER_IMPL_H_
#define CONTENT_BROWSER_NET_NETWORK_QUALITY_OBSERVER_IMPL_H_
#include <stdint.h>
#include <memory>
#include "base/macros.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "content/common/content_export.h"
#include "content/public/browser/notification_observer.h"
#include "content/public/browser/notification_registrar.h"
#include "net/nqe/effective_connection_type.h"
#include "net/nqe/network_quality.h"
#include "services/network/public/cpp/network_quality_tracker.h"
namespace content {
// Listens for changes to the network quality and manages sending updates to
// each RenderProcess via mojo.
//
// Registers itself with |network_quality_tracker| as an observer of both
// effective-connection-type changes and RTT/throughput estimate changes, and
// also listens to content notifications via |registrar_|.
class CONTENT_EXPORT NetworkQualityObserverImpl
    : public network::NetworkQualityTracker::EffectiveConnectionTypeObserver,
      public network::NetworkQualityTracker::RTTAndThroughputEstimatesObserver,
      public content::NotificationObserver {
 public:
  // |network_quality_tracker| is the source of the quality updates observed
  // by this object; it must remain valid for the lifetime of |this| (see the
  // comment on |network_quality_tracker_| below).
  explicit NetworkQualityObserverImpl(
      network::NetworkQualityTracker* network_quality_tracker);
  ~NetworkQualityObserverImpl() override;

 private:
  // content::NotificationObserver:
  void Observe(int type,
               const NotificationSource& source,
               const NotificationDetails& details) override;

  // net::EffectiveConnectionTypeObserver implementation:
  // Invoked when the tracker's effective connection type estimate changes.
  void OnEffectiveConnectionTypeChanged(
      net::EffectiveConnectionType type) override;

  // net::RTTAndThroughputEstimatesObserver implementation:
  // Invoked when new HTTP RTT, transport RTT, or downstream throughput
  // estimates are computed.
  void OnRTTOrThroughputEstimatesComputed(
      base::TimeDelta http_rtt,
      base::TimeDelta transport_rtt,
      int32_t downstream_throughput_kbps) override;

  // |network_quality_tracker_| is guaranteed to be non-null during the
  // lifetime of |this|.
  network::NetworkQualityTracker* network_quality_tracker_;

  // The network quality when the |ui_thread_observer_| was last notified.
  // Used to decide whether an update is worth re-sending.
  // NOTE(review): |ui_thread_observer_| is not declared in this header —
  // presumably it lives in the .cc; confirm the comment is still accurate.
  net::EffectiveConnectionType last_notified_type_;
  net::nqe::internal::NetworkQuality last_notified_network_quality_;

  // Registration for the content notifications handled in Observe().
  content::NotificationRegistrar registrar_;

  // Checks that all methods are invoked on the same thread.
  base::ThreadChecker thread_checker_;

  DISALLOW_COPY_AND_ASSIGN(NetworkQualityObserverImpl);
};
} // namespace content
#endif // CONTENT_BROWSER_NET_NETWORK_QUALITY_OBSERVER_IMPL_H_
| {
"pile_set_name": "Github"
} |
<testcase>
<info>
<keywords>
HTTP
HTTP GET
multi
</keywords>
</info>
# Server-side
<reply>
<data>
HTTP/1.1 200 all good!
Date: Thu, 09 Nov 2010 14:49:00 GMT
Server: test-server/fake
Content-Type: text/html
Content-Length: 12
Connection: close
Hello World
</data>
</reply>
# Client-side
<client>
<server>
http
</server>
<features>
http
</features>
# tool is what to use instead of 'curl'
<tool>
lib1500
</tool>
<name>
curl_multi_wait
</name>
<command>
http://%HOSTIP:%HTTPPORT/1500
</command>
</client>
</testcase>
| {
"pile_set_name": "Github"
} |
package org.telegram.tgnet;

/**
 * Callback interface for receiving a single time value from the networking
 * layer.
 *
 * <p>NOTE(review): the unit and meaning of {@code time} (e.g. request
 * timestamp vs. round-trip time, seconds vs. milliseconds) are not visible
 * from this file — confirm against the callers in the tgnet native bridge.
 */
public interface RequestTimeDelegate {
    /**
     * Invoked with the time value associated with a request.
     *
     * @param time the reported time value (unit defined by the caller)
     */
    void run(long time);
}
| {
"pile_set_name": "Github"
} |
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __SOC_NVIDIA_TEGRA_PINGROUP_H__
#define __SOC_NVIDIA_TEGRA_PINGROUP_H__
#include <stdint.h>
/*
 * Write the raw configuration word for the pin group at |group_index|.
 * |config| is a bitwise OR of the PINGROUP_* fields defined below.
 */
void pingroup_set_config(int group_index, uint32_t config);

/* Read back the raw configuration word for the pin group at |group_index|. */
uint32_t pingroup_get_config(int group_index);

/*
 * Bit-field layout of a Tegra pin group configuration register.
 * Each *_SHIFT/*_MASK pair selects a multi-bit field; single-bit flags are
 * expressed directly. Field semantics follow the Tegra TRM pingroup
 * registers — confirm exact meanings against the TRM for the target SoC.
 */
enum {
	PINGROUP_HSM = 1 << 2,			/* bit 2 flag */
	PINGROUP_SCHMT = 1 << 3,		/* bit 3 flag */
	PINGROUP_LPMD_SHIFT = 4,
	PINGROUP_LPMD_MASK = 3 << 4,		/* 2-bit field, bits 4-5 */
	PINGROUP_DRVDN_SHIFT = 12,
	PINGROUP_DRVDN_MASK = 0x7f << 12,	/* 7-bit field, bits 12-18 */
	PINGROUP_DRVUP_SHIFT = 20,
	PINGROUP_DRVUP_MASK = 0x7f << 20,	/* 7-bit field, bits 20-26 */
	PINGROUP_SLWR_SHIFT = 28,
	PINGROUP_SLWR_MASK = 0x3 << 28,		/* 2-bit field, bits 28-29 */
	PINGROUP_SLWF_SHIFT = 30,
	PINGROUP_SLWF_MASK = 0x3 << 30		/* 2-bit field, bits 30-31 */
};
#endif /* __SOC_NVIDIA_TEGRA_PINGROUP_H__ */
| {
"pile_set_name": "Github"
} |
//
// TweakBinding.swift
// SwiftTweaks
//
// Created by Bryan Clark on 11/17/15.
// Copyright © 2015 Khan Academy. All rights reserved.
//
import Foundation
/// Represents a Tweak and a closure that should be run whenever the Tweak changes.
internal struct TweakBinding<T: TweakableType>: TweakBindingType {
    /// The tweak being observed.
    let tweak: Tweak<T>
    /// Closure invoked with the tweak's new value on each change.
    let binding: (T) -> Void

    init(tweak: Tweak<T>, binding: @escaping (T) -> Void) {
        self.tweak = tweak
        self.binding = binding
    }

    func applyBindingWithValue(_ value: TweakableType) {
        // Every supported data type funnels into the same closure. The
        // force-cast is only safe when `value` really is a T — assumes the
        // caller looked this binding up by its own tweak; confirm at call site.
        switch type(of: value).tweakViewDataType {
        case .boolean, .integer, .cgFloat, .double, .string:
            binding(value as! T)
        }
    }
}
// A type-erasure around TweakBinding<T>, so we can gather them together in TweakStore.tweakBindings
internal struct AnyTweakBinding: TweakBindingType {
    // The wrapped, generic binding; only its protocol surface is exposed.
    private let tweakBinding: TweakBindingType

    init(tweakBinding: TweakBindingType) {
        self.tweakBinding = tweakBinding
    }

    // Forwards the new value straight to the wrapped binding.
    func applyBindingWithValue(_ value: TweakableType) {
        tweakBinding.applyBindingWithValue(value)
    }
}
// When combined with AnyTweakBinding, this provides our type-erasure around TweakBinding<T>
internal protocol TweakBindingType {
    /// Applies the (type-erased) value to the binding's closure.
    func applyBindingWithValue(_ value: TweakableType)
}
| {
"pile_set_name": "Github"
} |
<!--
Copyright (C) 2017 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="18.0dp"
android:height="18.0dp"
android:viewportWidth="24.0"
android:viewportHeight="24.0">
<path
android:fillColor="#FFFFFFFF"
android:pathData="M16.0,1.0L4.0,1.0c-1.1,0.0 -2.0,0.9 -2.0,2.0l0.0,14.0l2.0,0.0L4.0,3.0l12.0,0.0L16.0,1.0zm3.0,4.0L8.0,5.0c-1.1,0.0 -2.0,0.9 -2.0,2.0l0.0,14.0c0.0,1.0 0.9,2.0 2.0,2.0l11.0,0.0c1.1,0.0 2.0,-0.9 2.0,-2.0L21.0,7.0c0.0,-1.1 -0.9,-2.0 -2.0,-2.0zm0.0,16.0L8.0,21.0L8.0,7.0l11.0,0.0l0.0,14.0z"/>
</vector>
| {
"pile_set_name": "Github"
} |
/*
Copyright (c) 2008 - Chris Buckley.
Permission is granted for use and modification of this file for
research, non-commercial purposes.
*/
#include "common.h"
#include "sysfunc.h"
#include "trec_eval.h"
#include "functions.h"
#include "trec_format.h"
double log2(double x);
static int
te_calc_ndcg_rel (const EPI *epi, const REL_INFO *rel_info,
const RESULTS *results, const TREC_MEAS *tm, TREC_EVAL *eval);
static PARAMS default_ndcg_gains = { NULL, 0, NULL};
/* See trec_eval.h for definition of TREC_MEAS */
TREC_MEAS te_meas_ndcg_rel =
{"ndcg_rel",
" Normalized Discounted Cumulative Gain averaged over rel docs\n\
Experimental measure\n\
Compute a traditional nDCG measure according to Jarvelin and\n\
Kekalainen (ACM ToIS v. 20, pp. 422-446, 2002), averaged at rel docs.\n\
Idea behind ndcg_rel, is that the expected value of ndcg is a smoothly\n\
decreasing function, with discontinuities upward at each transistion\n\
between positive gain levels in the ideal ndcg. Once the gain level \n\
becomes 0, the expected value of ndcg then increases until all rel docs are\n\
retrieved. Thus averaging ndcg is problematic, because these transistions\n\
occur at different points for each topic. Since it is not unusual for\n\
ndcg to start off near 1.0, decrease to 0.25, and then increase to 0.75\n\
at various cutoffs, the points at which ndcg is measured are important.\n\
This version averages ndcg over each relevant doc, where relevant is\n\
defined as expected gain > 0. If a rel doc is not retrieved, then\n\
ndcg for the doc is the dcg at the end of the retrieval / ideal dcg. \n\
\n\
Gain values are set to the appropriate relevance level by default. \n\
The default gain can be overridden on the command line by having \n\
comma separated parameters 'rel_level=gain'.\n\
Eg, 'trec_eval -m ndcg_rel.1=3.5,2=9.0,4=7.0 ...'\n\
will give gains 3.5, 9.0, 3.0, 7.0 for relevance levels 1,2,3,4\n\
respectively (level 3 remains at the default).\n\
Gains are allowed to be 0 or negative, and relevance level 0\n\
can be given a gain.\n",
te_init_meas_s_float_p_pair,
te_calc_ndcg_rel,
te_acc_meas_s,
te_calc_avg_meas_s,
te_print_single_meas_s_float,
te_print_final_meas_s_float_p,
&default_ndcg_gains, -1};
/* Keep track of valid rel_levels and associated gains */
/* Initialized in setup_gains */
typedef struct {
long rel_level;
long num_at_level;
double gain;
} REL_GAIN;
typedef struct {
REL_GAIN *rel_gains;
long num_gains;
long total_num_at_levels;
} GAINS;
static int setup_gains (const TREC_MEAS *tm, const RES_RELS *res_rels,
GAINS *gains);
static double get_gain (const long rel_level, const GAINS *gains);
static int comp_rel_gain ();
/*
 * Compute ndcg_rel for a single topic: traditional nDCG (Jarvelin &
 * Kekalainen) averaged over the relevant documents rather than at a
 * fixed cutoff.  At each retrieved doc with positive gain, the running
 * results_dcg / ideal_dcg ratio is added to `sum`; relevant docs that
 * were never retrieved contribute the final end-of-run ratio.  The
 * topic score is sum / num_rel.  Returns 1 on success, UNDEF on error.
 */
static int
te_calc_ndcg_rel (const EPI *epi, const REL_INFO *rel_info,
		  const RESULTS *results, const TREC_MEAS *tm, TREC_EVAL *eval)
{
    RES_RELS res_rels;
    double results_gain, results_dcg;   /* DCG of the actual ranking */
    double ideal_gain, ideal_dcg;       /* DCG of the ideal ranking */
    double sum = 0.0;                   /* sum of ndcg values at rel docs */
    long num_rel_ret = 0;               /* rel docs actually retrieved */
    long num_rel = 0;                   /* rel docs overall (gain > 0) */
    long cur_level, num_at_level;       /* cursor through sorted gain levels */
    long i;
    GAINS gains;

    if (UNDEF == te_form_res_rels (epi, rel_info, results, &res_rels))
	return (UNDEF);

    if (UNDEF == setup_gains (tm, &res_rels, &gains))
	return (UNDEF);

    results_dcg = 0.0;
    ideal_dcg = 0.0;
    /* gains.rel_gains is sorted by increasing gain; the ideal ranking
     * consumes levels from the highest gain downward. */
    cur_level = gains.num_gains - 1;
    ideal_gain = (cur_level >= 0) ? gains.rel_gains[cur_level].gain : 0.0;
    num_at_level = 0;

    /* Phase 1: walk the retrieved list while the ideal ranking still has
     * docs with positive gain at this rank. */
    for (i = 0; i < res_rels.num_ret && ideal_gain > 0.0; i++) {
	/* Calculate change in results dcg */
	results_gain = get_gain (res_rels.results_rel_list[i], &gains);
	if (results_gain != 0)
	    /* Note: i+2 since doc i has rank i+1 */
	    results_dcg += results_gain / log2((double) (i+2));
	/* Calculate change in ideal dcg: step down to the next gain level
	 * once the current level's docs are exhausted. */
	num_at_level++;
	while (cur_level >= 0 &&
	       num_at_level > gains.rel_gains[cur_level].num_at_level) {
	    num_at_level = 1;
	    cur_level--;
	    ideal_gain = (cur_level >= 0) ? gains.rel_gains[cur_level].gain:0.0;
	}
	if (ideal_gain > 0.0) {
	    num_rel++;
	    ideal_dcg += ideal_gain / log2((double)(i + 2));
	}
	/* Average will include this point if rel */
	if (results_gain > 0) {
	    sum += results_dcg / ideal_dcg;
	    num_rel_ret++;
	}
	if (epi->debug_level > 0)
	    printf("ndcg_rel: %ld %ld %3.1f %6.4f %3.1f %6.4f %6.4f\n",
		   i, cur_level, results_gain, results_dcg,
		   ideal_gain, ideal_dcg, sum);
    }

    /* Phase 2: ideal gain is exhausted but docs remain in the ranking;
     * ideal_dcg is now fixed, only results_dcg can still grow. */
    if (i < res_rels.num_ret) {
	while (i < res_rels.num_ret) {
	    /* Calculate change in results dcg */
	    results_gain = get_gain (res_rels.results_rel_list[i], &gains);
	    if (results_gain != 0)
		results_dcg += results_gain / log2((double) (i+2));
	    /* Average will include this point if rel */
	    if (results_gain > 0) {
		sum += results_dcg / ideal_dcg;
		num_rel_ret++;
	    }
	    if (epi->debug_level > 0)
		printf("ndcg_rel: %ld %ld %3.1f %6.4f %3.1f %6.4f %6.4f\n",
		       i, cur_level, results_gain, results_dcg,
		       0.0, ideal_dcg, sum);
	    i++;
	}
    }

    /* Phase 3: ranking is exhausted but the ideal run still has rel docs;
     * keep extending ideal_dcg (and num_rel) past the retrieved depth. */
    while (ideal_gain > 0.0) {
	/* Calculate change in ideal dcg */
	num_at_level++;
	while (cur_level >= 0 &&
	       num_at_level > gains.rel_gains[cur_level].num_at_level) {
	    num_at_level = 1;
	    cur_level--;
	    ideal_gain = (cur_level >= 0) ? gains.rel_gains[cur_level].gain:0.0;
	}
	if (ideal_gain > 0.0) {
	    num_rel++;
	    ideal_dcg += ideal_gain / log2((double)(i + 2));
	}
	if (epi->debug_level > 0)
	    printf("ndcg_rel: %ld %ld %3.1f %6.4f %3.1f %6.4f\n",
		   i, cur_level, 0.0, results_dcg,
		   ideal_gain, ideal_dcg);
	i++;
    }

    /* Unretrieved rel docs each contribute the final end-of-run ratio.
     * NOTE(review): if the topic has no docs with positive gain, this is
     * 0 * 0.0 / 0.0 (NaN); the NaN is then discarded by the sum > 0.0
     * guard below, so no value is recorded — confirm that is intended. */
    sum += ((double) (num_rel - num_rel_ret)) * results_dcg / ideal_dcg;
    if (epi->debug_level > 0)
	printf("ndcg_rel: %ld %ld %6.4f %6.4f %6.4f\n",
	       i, cur_level, results_dcg, ideal_dcg, sum);

    if (sum > 0.0)
	eval->values[tm->eval_index].value = sum / num_rel;

    Free (gains.rel_gains);
    return (1);
}
/*
 * Build the per-topic gain table: one REL_GAIN entry per relevance
 * level, taking explicit "level=gain" pairs from the measure parameters
 * and defaulting every other observed level's gain to the level number
 * itself.  On return gains->rel_gains is sorted by increasing gain and
 * must be freed by the caller.  Returns 1 on success, UNDEF on
 * allocation failure.
 */
static int
setup_gains (const TREC_MEAS *tm, const RES_RELS *res_rels, GAINS *gains)
{
    FLOAT_PARAM_PAIR *pairs = NULL;
    long num_pairs = 0;
    long i,j;
    long num_gains;

    if (tm->meas_params) {
	pairs = (FLOAT_PARAM_PAIR *) tm->meas_params->param_values;
	num_pairs = tm->meas_params->num_params;
    }

    /* Worst case: every parameter names a level not present in the
     * judgments, so allocate room for both sets. */
    if (NULL == (gains->rel_gains = Malloc(res_rels->num_rel_levels + num_pairs,
					   REL_GAIN)))
	return (UNDEF);

    num_gains = 0;
    /* First, the levels whose gain was overridden on the command line. */
    for (i = 0; i < num_pairs; i++) {
	gains->rel_gains[num_gains].rel_level = atol (pairs[i].name);
	gains->rel_gains[num_gains].gain = (double) pairs[i].value;
	gains->rel_gains[num_gains].num_at_level = 0;
	num_gains++;
    }
    /* Then every relevance level observed in the judgments. */
    for (i = 0; i < res_rels->num_rel_levels; i++) {
	/* Linear scan — num_gains is tiny (a handful of levels). */
	for (j = 0; j < num_gains && gains->rel_gains[j].rel_level != i; j++)
	    ;
	if (j < num_gains)
	    /* Was included in list of parameters.  Update occurrence info */
	    gains->rel_gains[j].num_at_level = res_rels->rel_levels[i];
	else {
	    /* Not included in list of parameters.  New gain level */
	    gains->rel_gains[num_gains].rel_level = i;
	    gains->rel_gains[num_gains].gain = (double) i;
	    gains->rel_gains[num_gains].num_at_level = res_rels->rel_levels[i];
	    num_gains++;
	}
    }

    /* Sort gains by increasing gain value */
    qsort ((char *) gains->rel_gains,
	   (int) num_gains,
	   sizeof (REL_GAIN),
	   comp_rel_gain);

    /* Total number of judged docs across all levels (retrieved or not). */
    gains->total_num_at_levels = 0;
    for (i = 0; i < num_gains; i++)
	gains->total_num_at_levels += gains->rel_gains[i].num_at_level;

    gains->num_gains = num_gains;
    return (1);
}
static int comp_rel_gain (REL_GAIN *ptr1, REL_GAIN *ptr2)
{
return (ptr1->gain - ptr2->gain);
}
static double
get_gain (const long rel_level, const GAINS *gains)
{
long i;
for (i = 0; i < gains->num_gains; i++)
if (rel_level == gains->rel_gains[i].rel_level)
return (gains->rel_gains[i].gain);
return (0.0); /* Print Error ?? */
}
| {
"pile_set_name": "Github"
} |
# [CentOS](https://hub.docker.com/_/centos)
## 基本信息
[CentOS](https://en.wikipedia.org/wiki/CentOS) 是流行的 Linux 发行版,其软件包大多跟 RedHat 系列保持一致。
该仓库位于 `https://hub.docker.com/_/centos` ,提供了 CentOS 从 5 ~ 8 各个版本的镜像。
## 使用方法
默认会启动一个最小化的 CentOS 环境。
```bash
$ docker run --name centos -it centos bash
bash-4.2#
```
## Dockerfile
请到 https://github.com/docker-library/docs/tree/master/centos 查看。
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
/*
 * Minimal watchdog keep-alive program: writes one byte to /dev/watchdog
 * every 10 seconds so the hardware watchdog does not reset the machine.
 * Runs until a write fails, then closes the device and returns -1.
 */
int main(void)
{
	int status = 0;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd == -1) {
		perror("watchdog");
		exit(EXIT_FAILURE);
	}
	for (;;) {
		/* A single NUL byte is enough to pet the watchdog. */
		if (write(fd, "\0", 1) != 1) {
			status = -1;
			break;
		}
		sleep(10);
	}
	close(fd);
	return status;
}
| {
"pile_set_name": "Github"
} |
// Uniform global targeted by the atomic swap in f_f below.
uniform float s = 100.;

// Test kernel: atomically swaps 1.0 into the uniform global `s`, then
// writes the post-swap value of `s` into each program instance's slot
// of RET.  `a` and `b` are intentionally unused beyond forcing the
// load/swap to happen.
export void f_f(uniform float RET[], uniform float aFOO[]) {
    float a = aFOO[programIndex];
    uniform float b = atomic_swap_local(&s, 1.);
    RET[programIndex] = s;
}

// Expected output for the test harness: every lane should see 1.0.
export void result(uniform float RET[]) {
    RET[programIndex] = 1.;
}
| {
"pile_set_name": "Github"
} |
/* This file was generated with JastAdd2 (http://jastadd.org) version R20130212 (r1031) */
package soot.JastAddJ;
import java.util.HashSet;
import java.io.File;
import java.util.*;
import beaver.*;
import java.util.ArrayList;
import java.util.zip.*;
import java.io.*;
import java.io.FileNotFoundException;
import java.util.Collection;
import soot.*;
import soot.util.*;
import soot.jimple.*;
import soot.coffi.ClassFile;
import soot.coffi.method_info;
import soot.coffi.CONSTANT_Utf8_info;
import soot.tagkit.SourceFileTag;
import soot.coffi.CoffiMethodSource;
/**
* @production XorBitwiseExpr : {@link BitwiseExpr};
* @ast node
* @declaredat /Users/eric/Documents/workspaces/clara-soot/JastAddJ/Java1.4Frontend/java.ast:169
*/
public class XorBitwiseExpr extends BitwiseExpr implements Cloneable {
/**
* @apilevel low-level
*/
public void flushCache() {
super.flushCache();
}
/**
* @apilevel internal
*/
public void flushCollectionCache() {
super.flushCollectionCache();
}
/**
* @apilevel internal
*/
@SuppressWarnings({"unchecked", "cast"})
public XorBitwiseExpr clone() throws CloneNotSupportedException {
XorBitwiseExpr node = (XorBitwiseExpr)super.clone();
node.in$Circle(false);
node.is$Final(false);
return node;
}
/**
* @apilevel internal
*/
@SuppressWarnings({"unchecked", "cast"})
public XorBitwiseExpr copy() {
try {
XorBitwiseExpr node = (XorBitwiseExpr) clone();
node.parent = null;
if(children != null)
node.children = (ASTNode[]) children.clone();
return node;
} catch (CloneNotSupportedException e) {
throw new Error("Error: clone not supported for " +
getClass().getName());
}
}
/**
* Create a deep copy of the AST subtree at this node.
* The copy is dangling, i.e. has no parent.
* @return dangling copy of the subtree at this node
* @apilevel low-level
*/
@SuppressWarnings({"unchecked", "cast"})
public XorBitwiseExpr fullCopy() {
XorBitwiseExpr tree = (XorBitwiseExpr) copy();
if (children != null) {
for (int i = 0; i < children.length; ++i) {
ASTNode child = (ASTNode) children[i];
if(child != null) {
child = child.fullCopy();
tree.setChild(child, i);
}
}
}
return tree;
}
/**
* @ast method
* @aspect Expressions
* @declaredat /Users/eric/Documents/workspaces/clara-soot/JastAddExtensions/JimpleBackend/Expressions.jrag:835
*/
public soot.Value emitOperation(Body b, soot.Value left, soot.Value right) {
return asLocal(b, b.newXorExpr(asImmediate(b, left), asImmediate(b, right), this));
}
/**
* @ast method
*
*/
public XorBitwiseExpr() {
super();
}
/**
* Initializes the child array to the correct size.
* Initializes List and Opt nta children.
* @apilevel internal
* @ast method
* @ast method
*
*/
public void init$Children() {
children = new ASTNode[2];
}
/**
* @ast method
*
*/
public XorBitwiseExpr(Expr p0, Expr p1) {
setChild(p0, 0);
setChild(p1, 1);
}
/**
* @apilevel low-level
* @ast method
*
*/
protected int numChildren() {
return 2;
}
/**
* @apilevel internal
* @ast method
*
*/
public boolean mayHaveRewrite() {
return false;
}
/**
* Replaces the LeftOperand child.
* @param node The new node to replace the LeftOperand child.
* @apilevel high-level
* @ast method
*
*/
public void setLeftOperand(Expr node) {
setChild(node, 0);
}
/**
* Retrieves the LeftOperand child.
* @return The current node used as the LeftOperand child.
* @apilevel high-level
* @ast method
*
*/
public Expr getLeftOperand() {
return (Expr)getChild(0);
}
/**
* Retrieves the LeftOperand child.
* <p><em>This method does not invoke AST transformations.</em></p>
* @return The current node used as the LeftOperand child.
* @apilevel low-level
* @ast method
*
*/
public Expr getLeftOperandNoTransform() {
return (Expr)getChildNoTransform(0);
}
/**
* Replaces the RightOperand child.
* @param node The new node to replace the RightOperand child.
* @apilevel high-level
* @ast method
*
*/
public void setRightOperand(Expr node) {
setChild(node, 1);
}
/**
* Retrieves the RightOperand child.
* @return The current node used as the RightOperand child.
* @apilevel high-level
* @ast method
*
*/
public Expr getRightOperand() {
return (Expr)getChild(1);
}
/**
* Retrieves the RightOperand child.
* <p><em>This method does not invoke AST transformations.</em></p>
* @return The current node used as the RightOperand child.
* @apilevel low-level
* @ast method
*
*/
public Expr getRightOperandNoTransform() {
return (Expr)getChildNoTransform(1);
}
/**
* @attribute syn
* @aspect ConstantExpression
* @declaredat /Users/eric/Documents/workspaces/clara-soot/JastAddJ/Java7Frontend/ConstantExpression.jrag:91
*/
public Constant constant() {
ASTNode$State state = state();
try { return type().xorBitwise(getLeftOperand().constant(), getRightOperand().constant()); }
finally {
}
}
/**
* @attribute syn
* @aspect PrettyPrint
* @declaredat /Users/eric/Documents/workspaces/clara-soot/JastAddJ/Java1.4Frontend/PrettyPrint.jadd:400
*/
public String printOp() {
ASTNode$State state = state();
try { return " ^ "; }
finally {
}
}
/**
* @apilevel internal
*/
public ASTNode rewriteTo() {
return super.rewriteTo();
}
}
| {
"pile_set_name": "Github"
} |
package godot
import (
"github.com/shadowapex/godot-go/gdnative"
)
/*------------------------------------------------------------------------------
// This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated. Any updates should be done in
// "class.go.tmpl" so they can be included in the generated
// code.
//----------------------------------------------------------------------------*/
//func NewAcceptDialogFromPointer(ptr gdnative.Pointer) AcceptDialog {
func newAcceptDialogFromPointer(ptr gdnative.Pointer) AcceptDialog {
owner := gdnative.NewObjectFromPointer(ptr)
obj := AcceptDialog{}
obj.SetBaseObject(owner)
return obj
}
/*
This dialog is useful for small notifications to the user about an event. It can only be accepted or closed, with the same result.
*/
type AcceptDialog struct {
WindowDialog
owner gdnative.Object
}
func (o *AcceptDialog) BaseClass() string {
return "AcceptDialog"
}
/*
Undocumented
Args: [{ false arg0 String}], Returns: void
*/
func (o *AcceptDialog) X_BuiltinTextEntered(arg0 gdnative.String) {
//log.Println("Calling AcceptDialog.X_BuiltinTextEntered()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 1, 1)
ptrArguments[0] = gdnative.NewPointerFromString(arg0)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "_builtin_text_entered")
// Call the parent method.
// void
retPtr := gdnative.NewEmptyVoid()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
}
/*
Undocumented
Args: [{ false arg0 String}], Returns: void
*/
func (o *AcceptDialog) X_CustomAction(arg0 gdnative.String) {
//log.Println("Calling AcceptDialog.X_CustomAction()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 1, 1)
ptrArguments[0] = gdnative.NewPointerFromString(arg0)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "_custom_action")
// Call the parent method.
// void
retPtr := gdnative.NewEmptyVoid()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
}
/*
Undocumented
Args: [], Returns: void
*/
func (o *AcceptDialog) X_Ok() {
//log.Println("Calling AcceptDialog.X_Ok()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 0, 0)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "_ok")
// Call the parent method.
// void
retPtr := gdnative.NewEmptyVoid()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
}
/*
Adds a button with label [i]text[/i] and a custom [i]action[/i] to the dialog and returns the created button. [i]action[/i] will be passed to the [custom_action] signal when pressed. If [code]true[/code], [i]right[/i] will place the button to the right of any sibling buttons. Default value: [code]false[/code].
Args: [{ false text String} {False true right bool} { true action String}], Returns: Button
*/
func (o *AcceptDialog) AddButton(text gdnative.String, right gdnative.Bool, action gdnative.String) ButtonImplementer {
//log.Println("Calling AcceptDialog.AddButton()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 3, 3)
ptrArguments[0] = gdnative.NewPointerFromString(text)
ptrArguments[1] = gdnative.NewPointerFromBool(right)
ptrArguments[2] = gdnative.NewPointerFromString(action)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "add_button")
// Call the parent method.
// Button
retPtr := gdnative.NewEmptyObject()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
// If we have a return type, convert it from a pointer into its actual object.
ret := newButtonFromPointer(retPtr)
// Check to see if we already have an instance of this object in our Go instance registry.
if instance, ok := InstanceRegistry.Get(ret.GetBaseObject().ID()); ok {
return instance.(ButtonImplementer)
}
// Check to see what kind of class this is and create it. This is generally used with
// GetNode().
className := ret.GetClass()
if className != "Button" {
actualRet := getActualClass(className, ret.GetBaseObject())
return actualRet.(ButtonImplementer)
}
return &ret
}
/*
Adds a button with label [i]name[/i] and a cancel action to the dialog and returns the created button.
Args: [{ false name String}], Returns: Button
*/
func (o *AcceptDialog) AddCancel(name gdnative.String) ButtonImplementer {
//log.Println("Calling AcceptDialog.AddCancel()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 1, 1)
ptrArguments[0] = gdnative.NewPointerFromString(name)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "add_cancel")
// Call the parent method.
// Button
retPtr := gdnative.NewEmptyObject()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
// If we have a return type, convert it from a pointer into its actual object.
ret := newButtonFromPointer(retPtr)
// Check to see if we already have an instance of this object in our Go instance registry.
if instance, ok := InstanceRegistry.Get(ret.GetBaseObject().ID()); ok {
return instance.(ButtonImplementer)
}
// Check to see what kind of class this is and create it. This is generally used with
// GetNode().
className := ret.GetClass()
if className != "Button" {
actualRet := getActualClass(className, ret.GetBaseObject())
return actualRet.(ButtonImplementer)
}
return &ret
}
/*
Undocumented
Args: [], Returns: bool
*/
func (o *AcceptDialog) GetHideOnOk() gdnative.Bool {
//log.Println("Calling AcceptDialog.GetHideOnOk()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 0, 0)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "get_hide_on_ok")
// Call the parent method.
// bool
retPtr := gdnative.NewEmptyBool()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
// If we have a return type, convert it from a pointer into its actual object.
ret := gdnative.NewBoolFromPointer(retPtr)
return ret
}
/*
Return the label used for built-in text.
Args: [], Returns: Label
*/
func (o *AcceptDialog) GetLabel() LabelImplementer {
//log.Println("Calling AcceptDialog.GetLabel()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 0, 0)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "get_label")
// Call the parent method.
// Label
retPtr := gdnative.NewEmptyObject()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
// If we have a return type, convert it from a pointer into its actual object.
ret := newLabelFromPointer(retPtr)
// Check to see if we already have an instance of this object in our Go instance registry.
if instance, ok := InstanceRegistry.Get(ret.GetBaseObject().ID()); ok {
return instance.(LabelImplementer)
}
// Check to see what kind of class this is and create it. This is generally used with
// GetNode().
className := ret.GetClass()
if className != "Label" {
actualRet := getActualClass(className, ret.GetBaseObject())
return actualRet.(LabelImplementer)
}
return &ret
}
/*
Return the OK Button.
Args: [], Returns: Button
*/
func (o *AcceptDialog) GetOk() ButtonImplementer {
//log.Println("Calling AcceptDialog.GetOk()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 0, 0)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "get_ok")
// Call the parent method.
// Button
retPtr := gdnative.NewEmptyObject()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
// If we have a return type, convert it from a pointer into its actual object.
ret := newButtonFromPointer(retPtr)
// Check to see if we already have an instance of this object in our Go instance registry.
if instance, ok := InstanceRegistry.Get(ret.GetBaseObject().ID()); ok {
return instance.(ButtonImplementer)
}
// Check to see what kind of class this is and create it. This is generally used with
// GetNode().
className := ret.GetClass()
if className != "Button" {
actualRet := getActualClass(className, ret.GetBaseObject())
return actualRet.(ButtonImplementer)
}
return &ret
}
/*
Undocumented
Args: [], Returns: String
*/
func (o *AcceptDialog) GetText() gdnative.String {
//log.Println("Calling AcceptDialog.GetText()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 0, 0)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "get_text")
// Call the parent method.
// String
retPtr := gdnative.NewEmptyString()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
// If we have a return type, convert it from a pointer into its actual object.
ret := gdnative.NewStringFromPointer(retPtr)
return ret
}
/*
Registers a [LineEdit] in the dialog. When the enter key is pressed, the dialog will be accepted.
Args: [{ false line_edit Object}], Returns: void
*/
func (o *AcceptDialog) RegisterTextEnter(lineEdit ObjectImplementer) {
//log.Println("Calling AcceptDialog.RegisterTextEnter()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 1, 1)
ptrArguments[0] = gdnative.NewPointerFromObject(lineEdit.GetBaseObject())
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "register_text_enter")
// Call the parent method.
// void
retPtr := gdnative.NewEmptyVoid()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
}
/*
Undocumented
Args: [{ false enabled bool}], Returns: void
*/
func (o *AcceptDialog) SetHideOnOk(enabled gdnative.Bool) {
//log.Println("Calling AcceptDialog.SetHideOnOk()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 1, 1)
ptrArguments[0] = gdnative.NewPointerFromBool(enabled)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "set_hide_on_ok")
// Call the parent method.
// void
retPtr := gdnative.NewEmptyVoid()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
}
/*
Undocumented
Args: [{ false text String}], Returns: void
*/
func (o *AcceptDialog) SetText(text gdnative.String) {
//log.Println("Calling AcceptDialog.SetText()")
// Build out the method's arguments
ptrArguments := make([]gdnative.Pointer, 1, 1)
ptrArguments[0] = gdnative.NewPointerFromString(text)
// Get the method bind
methodBind := gdnative.NewMethodBind("AcceptDialog", "set_text")
// Call the parent method.
// void
retPtr := gdnative.NewEmptyVoid()
gdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)
}
// AcceptDialogImplementer is an interface that implements the methods
// of the AcceptDialog class.
type AcceptDialogImplementer interface {
WindowDialogImplementer
X_BuiltinTextEntered(arg0 gdnative.String)
X_CustomAction(arg0 gdnative.String)
X_Ok()
AddButton(text gdnative.String, right gdnative.Bool, action gdnative.String) ButtonImplementer
AddCancel(name gdnative.String) ButtonImplementer
GetHideOnOk() gdnative.Bool
GetLabel() LabelImplementer
GetOk() ButtonImplementer
GetText() gdnative.String
RegisterTextEnter(lineEdit ObjectImplementer)
SetHideOnOk(enabled gdnative.Bool)
SetText(text gdnative.String)
}
| {
"pile_set_name": "Github"
} |
1.0.1 / 2015-04-07
==================
* Fix `TypeError`s when under `'use strict'` code
* Fix useless type name on auto-generated messages
* Support io.js 1.x
* Support Node.js 0.12
1.0.0 / 2014-09-17
==================
* No changes
0.4.5 / 2014-09-09
==================
* Improve call speed to functions using the function wrapper
* Support Node.js 0.6
0.4.4 / 2014-07-27
==================
* Work-around v8 generating empty stack traces
0.4.3 / 2014-07-26
==================
* Fix exception when global `Error.stackTraceLimit` is too low
0.4.2 / 2014-07-19
==================
* Correct call site for wrapped functions and properties
0.4.1 / 2014-07-19
==================
* Improve automatic message generation for function properties
0.4.0 / 2014-07-19
==================
* Add `TRACE_DEPRECATION` environment variable
* Remove non-standard grey color from color output
* Support `--no-deprecation` argument
* Support `--trace-deprecation` argument
* Support `deprecate.property(fn, prop, message)`
0.3.0 / 2014-06-16
==================
* Add `NO_DEPRECATION` environment variable
0.2.0 / 2014-06-15
==================
* Add `deprecate.property(obj, prop, message)`
* Remove `supports-color` dependency for node.js 0.8
0.1.0 / 2014-06-15
==================
* Add `deprecate.function(fn, message)`
* Add `process.on('deprecation', fn)` emitter
* Automatically generate message when omitted from `deprecate()`
0.0.1 / 2014-06-15
==================
* Fix warning for dynamic calls at single call site
0.0.0 / 2014-06-15
==================
* Initial implementation
| {
"pile_set_name": "Github"
} |
g $ Feb A ˜ .
% $ Feb Á ˜ . | {
"pile_set_name": "Github"
} |
{
"images": [
{
"filename": "ic_fluent_image_copy_20_filled.pdf",
"idiom": "universal"
}
],
"info": {
"author": "xcode",
"version": 1
},
"properties": {
"preserves-vector-representation": true,
"template-rendering-intent": "template"
}
} | {
"pile_set_name": "Github"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package ipv4
import "golang.org/x/net/internal/socket"
// setControlMessage is the fallback for platforms without per-packet
// socket-option support (see the build constraint above); it ignores
// all arguments and always reports errNotImplemented.
func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {
	return errNotImplemented
}
| {
"pile_set_name": "Github"
} |
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "qtrle",
"name": "QuickTime Animation",
"defaults": {
"_pixelFormat": "argb"
},
"groups": []
} | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GradleSettings">
<option name="linkedExternalProjectsSettings">
<GradleProjectSettings>
<option name="distributionType" value="DEFAULT_WRAPPED" />
<option name="externalProjectPath" value="$PROJECT_DIR$" />
<option name="gradleJvm" value="1.8" />
<option name="modules">
<set>
<option value="$PROJECT_DIR$" />
<option value="$PROJECT_DIR$/app" />
<option value="$PROJECT_DIR$/materiallockview" />
</set>
</option>
</GradleProjectSettings>
</option>
</component>
</project> | {
"pile_set_name": "Github"
} |
/*
* Copyright (c) Dominick Baier. All rights reserved.
*
* This code is licensed under the Microsoft Permissive License (Ms-PL)
*
* SEE: http://www.microsoft.com/resources/sharedsource/licensingbasics/permissivelicense.mspx
*
*/
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
namespace Thinktecture.IdentityServer.Tests
{
/// <summary>
/// Test-suite helper extensions for HttpClient and query-string building.
/// </summary>
internal static class Extensions
{
    /// <summary>
    /// Sets the HTTP Basic authentication header from a user name and
    /// password: "user:password" encoded in ISO-8859-1 then Base64,
    /// matching the historical Basic scheme encoding.
    /// </summary>
    public static void SetBasicAuthenticationHeader(this HttpClient client, string userName, string password)
    {
        Encoding encoding = Encoding.GetEncoding("iso-8859-1");
        string credential = String.Format("{0}:{1}", userName, password);
        var encoded = Convert.ToBase64String(encoding.GetBytes(credential));
        client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Basic", encoded);
    }

    /// <summary>
    /// Sets the Authorization header to the given token with an explicit
    /// scheme (e.g. "Bearer").
    /// </summary>
    public static void SetAccessToken(this HttpClient client, string token, string tokenType)
    {
        client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(tokenType, token);
    }

    /// <summary>
    /// Renders the dictionary as "?k1=v1&amp;k2=v2".
    /// NOTE(review): keys/values are not URL-encoded here, and an empty
    /// dictionary yields just "?" — fine for fixed test inputs, but
    /// confirm callers never pass values that need escaping.
    /// </summary>
    public static string ToQueryString(this Dictionary<string, string> dictionary)
    {
        var sb = new StringBuilder(128);
        sb.Append("?");
        foreach (var entry in dictionary)
        {
            sb.AppendFormat("{0}={1}&", entry.Key, entry.Value);
        }
        return sb.ToString().TrimEnd('&');
    }
}
| {
"pile_set_name": "Github"
} |
// Fillers are random number generators that fills a blob using the specified
// algorithm. The expectation is that they are only going to be used during
// initialization time and will not involve any GPUs.
#ifndef CAFFE_FILLER_HPP
#define CAFFE_FILLER_HPP
#include <string>
#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
/// @brief Fills a Blob with constant or randomly-generated data.
template <typename Dtype>
class Filler {
 public:
  // Copies the parameter message so Fill() can read scheme-specific fields.
  explicit Filler(const FillerParameter& param) : filler_param_(param) {}
  virtual ~Filler() {}
  // Writes values into the blob's CPU data; each subclass defines the scheme.
  virtual void Fill(Blob<Dtype>* blob) = 0;
 protected:
  FillerParameter filler_param_;
};  // class Filler
/// @brief Fills a Blob with constant values @f$ x = 0 @f$.
template <typename Dtype>
class ConstantFiller : public Filler<Dtype> {
 public:
  explicit ConstantFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  virtual void Fill(Blob<Dtype>* blob) {
    Dtype* data = blob->mutable_cpu_data();
    const int count = blob->count();
    // Every element gets FillerParameter::value() (proto default is 0).
    const Dtype value = this->filler_param_.value();
    CHECK(count);  // refuse to fill an empty blob
    for (int i = 0; i < count; ++i) {
      data[i] = value;
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
};
/// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$.
template <typename Dtype>
class UniformFiller : public Filler<Dtype> {
 public:
  explicit UniformFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(blob->count());
    // Draw every element i.i.d. from U(min, max) as given in the parameters.
    caffe_rng_uniform<Dtype>(blob->count(), Dtype(this->filler_param_.min()),
        Dtype(this->filler_param_.max()), blob->mutable_cpu_data());
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
};
/// @brief Fills a Blob with Gaussian-distributed values
/// @f$ x \sim N(\mu, \sigma^2) @f$.
template <typename Dtype>
class GaussianFiller : public Filler<Dtype> {
 public:
  explicit GaussianFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  virtual void Fill(Blob<Dtype>* blob) {
    Dtype* data = blob->mutable_cpu_data();
    CHECK(blob->count());
    // Fill with N(mean, std^2) samples; `data` aliases the same CPU buffer.
    caffe_rng_gaussian<Dtype>(blob->count(), Dtype(this->filler_param_.mean()),
        Dtype(this->filler_param_.std()), blob->mutable_cpu_data());
    int sparse = this->filler_param_.sparse();
    CHECK_GE(sparse, -1);  // -1 means "dense"; >= 0 requests sparsification
    if (sparse >= 0) {
      // Sparse initialization is implemented for "weight" blobs; i.e. matrices.
      // These have num == channels == 1; width is number of inputs; height is
      // number of outputs.  The 'sparse' variable specifies the mean number
      // of non-zero input weights for a given output.
      CHECK_GE(blob->num_axes(), 1);
      const int num_outputs = blob->shape(0);
      Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs);
      // rand_vec_ is a member so the Bernoulli mask outlives this call.
      rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int)));
      int* mask = reinterpret_cast<int*>(rand_vec_->mutable_cpu_data());
      caffe_rng_bernoulli(blob->count(), non_zero_probability, mask);
      // Zero out the elements whose mask entry is 0.
      for (int i = 0; i < blob->count(); ++i) {
        data[i] *= mask[i];
      }
    }
  }

 protected:
  shared_ptr<SyncedMemory> rand_vec_;
};
/** @brief Fills a Blob with values @f$ x \in [0, 1] @f$
* such that @f$ \forall i \sum_j x_{ij} = 1 @f$.
*/
template <typename Dtype>
class PositiveUnitballFiller : public Filler<Dtype> {
 public:
  explicit PositiveUnitballFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  virtual void Fill(Blob<Dtype>* blob) {
    Dtype* data = blob->mutable_cpu_data();
    DCHECK(blob->count());
    // Start from U(0, 1) samples, then normalize each row to sum to 1.
    caffe_rng_uniform<Dtype>(blob->count(), 0, 1, blob->mutable_cpu_data());
    // We expect the filler to not be called very frequently, so we will
    // just use a simple implementation
    int dim = blob->count() / blob->num();  // elements per output row
    CHECK(dim);
    for (int i = 0; i < blob->num(); ++i) {
      Dtype sum = 0;
      for (int j = 0; j < dim; ++j) {
        sum += data[i * dim + j];
      }
      // sum > 0 in practice since the samples are drawn from U(0, 1).
      for (int j = 0; j < dim; ++j) {
        data[i * dim + j] /= sum;
      }
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
};
/**
* @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is
* set inversely proportional to number of incoming nodes, outgoing
* nodes, or their average.
*
* A Filler based on the paper [Bengio and Glorot 2010]: Understanding
 * the difficulty of training deep feedforward neural networks.
*
* It fills the incoming matrix by randomly sampling uniform data from [-scale,
* scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their
* average, depending on the variance_norm option. You should make sure the
* input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c
* = fan_out. Note that this is currently not the case for inner product layers.
*
* TODO(dox): make notation in above comment consistent with rest & use LaTeX.
*/
template <typename Dtype>
class XavierFiller : public Filler<Dtype> {
 public:
  explicit XavierFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(blob->count());
    // fan_in = a*b*c, fan_out = num*b*c for a (num, a, b, c) blob.
    int fan_in = blob->count() / blob->num();
    int fan_out = blob->count() / blob->channels();
    Dtype n = fan_in;  // default to fan_in
    if (this->filler_param_.variance_norm() ==
        FillerParameter_VarianceNorm_AVERAGE) {
      n = (fan_in + fan_out) / Dtype(2);
    } else if (this->filler_param_.variance_norm() ==
        FillerParameter_VarianceNorm_FAN_OUT) {
      n = fan_out;
    }
    // U(-scale, scale) with scale = sqrt(3/n) has variance 1/n.
    Dtype scale = sqrt(Dtype(3) / n);
    caffe_rng_uniform<Dtype>(blob->count(), -scale, scale,
        blob->mutable_cpu_data());
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
};
/**
* @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where
* @f$ \sigma^2 @f$ is set inversely proportional to number of incoming
* nodes, outgoing nodes, or their average.
*
* A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically
* accounts for ReLU nonlinearities.
*
* Aside: for another perspective on the scaling factor, see the derivation of
* [Saxe, McClelland, and Ganguli 2013 (v3)].
*
* It fills the incoming matrix by randomly sampling Gaussian data with std =
* sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on
* the variance_norm option. You should make sure the input blob has shape (num,
* a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this
* is currently not the case for inner product layers.
*/
template <typename Dtype>
class MSRAFiller : public Filler<Dtype> {
 public:
  explicit MSRAFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(blob->count());
    // fan_in = a*b*c, fan_out = num*b*c for a (num, a, b, c) blob.
    int fan_in = blob->count() / blob->num();
    int fan_out = blob->count() / blob->channels();
    Dtype n = fan_in;  // default to fan_in
    if (this->filler_param_.variance_norm() ==
        FillerParameter_VarianceNorm_AVERAGE) {
      n = (fan_in + fan_out) / Dtype(2);
    } else if (this->filler_param_.variance_norm() ==
        FillerParameter_VarianceNorm_FAN_OUT) {
      n = fan_out;
    }
    // N(0, 2/n): the He et al. scaling that compensates for ReLU halving.
    Dtype std = sqrt(Dtype(2) / n);
    caffe_rng_gaussian<Dtype>(blob->count(), Dtype(0), std,
        blob->mutable_cpu_data());
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
};
/*!
@brief Fills a Blob with coefficients for bilinear interpolation.
A common use case is with the DeconvolutionLayer acting as upsampling.
You can upsample a feature map with shape of (B, C, H, W) by any integer factor
using the following proto.
\code
layer {
name: "upsample", type: "Deconvolution"
bottom: "{{bottom_name}}" top: "{{top_name}}"
convolution_param {
kernel_size: {{2 * factor - factor % 2}} stride: {{factor}}
num_output: {{C}} group: {{C}}
pad: {{ceil((factor - 1) / 2.)}}
weight_filler: { type: "bilinear" } bias_term: false
}
param { lr_mult: 0 decay_mult: 0 }
}
\endcode
Please use this by replacing `{{}}` with your values. By specifying
`num_output: {{C}} group: {{C}}`, it behaves as
channel-wise convolution. The filter shape of this deconvolution layer will be
(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K)
interpolation kernel for every channel of the filter identically. The resulting
shape of the top feature map will be (B, C, factor * H, factor * W).
Note that the learning rate and the
weight decay are set to 0 in order to keep coefficient values of bilinear
interpolation unchanged during training. If you apply this to an image, this
operation is equivalent to the following call in Python with Scikit.Image.
\code{.py}
out = skimage.transform.rescale(img, factor, mode='constant', cval=0)
\endcode
*/
template <typename Dtype>
class BilinearFiller : public Filler<Dtype> {
 public:
  explicit BilinearFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim.";
    CHECK_EQ(blob->width(), blob->height()) << "Filter must be square";
    Dtype* data = blob->mutable_cpu_data();
    // f is the upsampling factor implied by the kernel size K = 2f - f%2;
    // c is the center of the interpolation kernel in units of f.
    int f = ceil(blob->width() / 2.);
    float c = (2 * f - 1 - f % 2) / (2. * f);
    // Identical (K, K) triangle kernel written into every filter channel.
    for (int i = 0; i < blob->count(); ++i) {
      float x = i % blob->width();
      float y = (i / blob->width()) % blob->height();
      data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c));
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
};
/**
* @brief Use file to initialize the weights or bias
*/
template <typename Dtype>
class FileFiller : public Filler<Dtype> {
 public:
  explicit FileFiller(const FillerParameter& param)
      : Filler<Dtype>(param) {}
  // Reads blob->count() whitespace-separated values, in storage order, from
  // the text file named by FillerParameter::file().
  // Fails fast (CHECK) if the file is missing or contains too few / malformed
  // values, instead of silently filling the blob with stale data.
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(this->filler_param_.has_file())
         << "FileFiller requires the 'file' filler parameter.";
    std::ifstream file(this->filler_param_.file().c_str());
    CHECK(file.is_open())
         << "Failed to open weight file " << this->filler_param_.file();
    Dtype* data = blob->mutable_cpu_data();
    const int count = blob->count();
    Dtype temp;
    for (int i = 0; i < count; ++i) {
      // operator>> leaves the stream in a failed state on EOF or parse error.
      CHECK(file >> temp)
           << "Weight file " << this->filler_param_.file()
           << " ended or had a bad value at element " << i
           << " (expected " << count << " values).";
      data[i] = temp;
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
};
/**
* @brief Get a specific filler from the specification given in FillerParameter.
*
* Ideally this would be replaced by a factory pattern, but we will leave it
* this way for now.
*/
template <typename Dtype>
Filler<Dtype>* GetFiller(const FillerParameter& param) {
  // Dispatch on the textual type name; the caller owns the returned pointer.
  const std::string& type = param.type();
  if (type == "constant") {
    return new ConstantFiller<Dtype>(param);
  } else if (type == "gaussian") {
    return new GaussianFiller<Dtype>(param);
  } else if (type == "positive_unitball") {
    return new PositiveUnitballFiller<Dtype>(param);
  } else if (type == "uniform") {
    return new UniformFiller<Dtype>(param);
  } else if (type == "xavier") {
    return new XavierFiller<Dtype>(param);
  } else if (type == "msra") {
    return new MSRAFiller<Dtype>(param);
  } else if (type == "file") {
    return new FileFiller<Dtype>(param);
  } else if (type == "bilinear") {
    return new BilinearFiller<Dtype>(param);
  } else {
    // CHECK(false) aborts, so the return below is never reached; it only
    // silences compilers that cannot see that.
    CHECK(false) << "Unknown filler name: " << param.type();
  }
  return (Filler<Dtype>*)(NULL);
}
} // namespace caffe
#endif // CAFFE_FILLER_HPP_
| {
"pile_set_name": "Github"
} |
cmake_minimum_required(VERSION 3.1...3.14)

project(chapter_3 LANGUAGES CXX)

# One executable per book listing; each listing_<n>.cpp is self-contained.
foreach(listing 3_1 3_2 3_3 3_4 3_5 3_6 3_7 3_8 3_10 3_11 3_18)
  add_executable(listing_${listing} listing_${listing}.cpp)
endforeach()
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- Full-screen container holding a single RecyclerView list.
     NOTE(review): android:orientation has no effect on FrameLayout and could
     be removed; child layout changes are animated via animateLayoutChanges. -->
<FrameLayout
    xmlns:android="http://schemas.android.com/apk/res/android"
    android:id="@+id/root"
    android:orientation="vertical"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:animateLayoutChanges="true">

    <androidx.recyclerview.widget.RecyclerView
        android:id="@+id/recyclerView"
        android:layout_width="match_parent"
        android:layout_height="match_parent"
        android:cacheColorHint="@android:color/transparent"
        android:scrollbars="none"
        android:divider="@null"
        android:descendantFocusability="blocksDescendants" />

</FrameLayout>
"pile_set_name": "Github"
} |
/*
* ImgTec IR Decoder setup for Philips RC-5 protocol.
*
* Copyright 2012-2014 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include "img-ir-hw.h"
/*
 * Convert RC5 data to a scancode.
 * @len and @enabled_protocols are unused here; the signature is fixed by the
 * img_ir_decoder scancode callback.
 * Returns IMG_IR_SCANCODE on success, -EINVAL if the start bit is clear.
 */
static int img_ir_rc5_scancode(int len, u64 raw, u64 enabled_protocols,
			       struct img_ir_scancode_req *request)
{
	unsigned int addr, cmd, tgl, start;

	/* Quirk in the decoder shifts everything by 2 to the left. */
	raw >>= 2;

	/* Frame layout after the shift: start(13), toggle(11), addr(10:6),
	 * cmd(5:0); bit 12 is the (inverted) extended-command bit. */
	start = (raw >> 13) & 0x01;
	tgl   = (raw >> 11) & 0x01;
	addr  = (raw >>  6) & 0x1f;
	cmd   =  raw        & 0x3f;
	/*
	 * 12th bit is used to extend the command in extended RC5 and has
	 * no effect on standard RC5.
	 */
	cmd  += ((raw >> 12) & 0x01) ? 0 : 0x40;

	/* A valid RC5 frame always carries a set start bit. */
	if (!start)
		return -EINVAL;

	request->protocol = RC_TYPE_RC5;
	request->scancode = addr << 8 | cmd;
	request->toggle   = tgl;
	return IMG_IR_SCANCODE;
}
/*
 * Convert RC5 scancode to RC5 data filter.
 * Always returns -EINVAL: scancode filtering for RC-5 is not supported by
 * the hardware, so no filter is ever produced in @out.
 */
static int img_ir_rc5_filter(const struct rc_scancode_filter *in,
				 struct img_ir_filter *out, u64 protocols)
{
	/* Not supported by the hw. */
	return -EINVAL;
}
/*
 * RC-5 decoder
 * see http://www.sbprojects.com/knowledge/ir/rc5.php
 */
struct img_ir_decoder img_ir_rc5 = {
	.type = RC_BIT_RC5,
	.control = {
		.bitoriend2 = 1,
		.code_type = IMG_IR_CODETYPE_BIPHASE,
		.decodend2 = 1,
	},
	/* main timings */
	.tolerance = 16,
	/* one bi-phase half-bit: 32 carrier cycles at 36 kHz, i.e.
	 * 1/36k*32 = 888.888 microseconds (value presumably in ns) */
	.unit = 888888,	/* 1/36k*32=888.888microseconds */
	.timings = {
		/* 10 symbol */
		.s10 = {
			.pulse = { 1 },
			.space = { 1 },
		},

		/* 11 symbol */
		.s11 = {
			.pulse = { 1 },
			.space = { 1 },
		},

		/* free time: a full frame is 14 units, then at least 5 idle */
		.ft = {
			.minlen = 14,
			.maxlen = 14,
			.ft_min = 5,
		},
	},

	/* scancode logic */
	.scancode = img_ir_rc5_scancode,
	.filter = img_ir_rc5_filter,
};
| {
"pile_set_name": "Github"
} |
// { dg-do compile }
// 2007-09-20 Benjamin Kosnik <bkoz@redhat.com>
// Copyright (C) 2007-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <numeric>
#include <testsuite_character.h>
// Explicit instantiation of std::inner_product with a minimal POD type:
// a compile-only check that the algorithm places no extra requirements on
// its value type.  Injecting names into namespace std is deliberate here --
// it is the libstdc++ testsuite convention for explicit instantiations.
namespace std
{
  using __gnu_test::pod_int;
  typedef pod_int value_type;
  typedef value_type* iterator_type;
  template value_type inner_product(iterator_type, iterator_type, iterator_type, value_type);
}
| {
"pile_set_name": "Github"
} |
[[1,1,1],[1,0,1],[1,1,1]]
[[1,0,1],[1,0,1],[1,0,1]] | {
"pile_set_name": "Github"
} |
\documentclass{article}
\usepackage[margin=0.5in]{geometry}
\usepackage{listings}
\lstset{numbers=left,frame=single}
\title{Quiz 3 - sizeof}
\author{CS100 - software construction}
\newcommand{\sizeof}[1]{\texttt{sizeof({#1})~~~}}
\newcommand{\ptr}[1]{\sizeof{{#1}} & \sizeof{{#1}*} & \sizeof{{#1}**}\vspace{0.45in}\\\hline}
\newcommand{\ptrU}[1]{
\ptr{{#1}}
\ptr{unsigned {#1}}
}
\begin{document}
\maketitle
On a 64 bit machine (e.g. one of the lab machines), what do the following applications of \texttt{sizeof} return?
\vspace{0.15in}
\noindent
\begin{tabular}{p{2.3in}p{2.3in}p{2.3in}}
\hline
\ptrU{char}
\ptrU{short}
\ptrU{int}
\ptrU{long}
\ptrU{long long}
\ptr{float}
\ptr{double}
\end{tabular}
\newpage
Given the following code:
\begin{lstlisting}
struct s1 { char a; };
struct s2 { char a; char b; };
struct s3 { char a; int b; };
struct s4 { char a; int b; char c; };
struct s5 { char a; char b; int c; };
\end{lstlisting}
Fill out the table below in the same manner.
\vspace{0.15in}
\noindent
\begin{tabular}{p{2.3in}p{2.3in}p{2.3in}}
\hline
\ptr{s1}
\ptr{s2}
\ptr{s3}
\ptr{s4}
\ptr{s5}
\end{tabular}
\vspace{0.5in}
Write the output of each \lstinline{cout} statement in the following code:
\begin{lstlisting}
#include <iostream>
using namespace std;
int main()
{
cout << (011 | 010) << endl;
cout << (011 & 010) << endl;
cout << (111 | 010) << endl;
cout << (111 & 010) << endl;
cout << 2 << 4 << endl;
cout << (2 << 4) << endl;
return 0;
}
\end{lstlisting}
\end{document}
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.IOException;
import java.util.Collections;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.TextField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
/**
 * Verifies IndexWriter's handling of per-field IndexOptions changes: options
 * may be set on a field that previously had NONE (or vice versa), but any
 * other change must throw IllegalArgumentException -- whether the conflicting
 * document arrives via addDocument, addIndexes(CodecReader) or
 * addIndexes(Directory).
 */
public class TestIndexOptions extends LuceneTestCase {

  // Exhaustive matrix over all (from, to) pairs, with/without a pre-existing
  // field, and with/without flushing the first document to its own segment.
  public void testChangeIndexOptionsViaAddDocument() throws IOException {
    for (IndexOptions from : IndexOptions.values()) {
      for (IndexOptions to : IndexOptions.values()) {
        for (boolean preExisting : new boolean[] { false, true }) {
          for (boolean onNewSegment : new boolean[] { false, true }) {
            doTestChangeIndexOptionsViaAddDocument(preExisting, onNewSegment, from, to);
          }
        }
      }
    }
  }

  private void doTestChangeIndexOptionsViaAddDocument(boolean preExistingField, boolean onNewSegment, IndexOptions from, IndexOptions to) throws IOException {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    if (preExistingField) {
      // Register "foo" first as a points-only field (no index options).
      w.addDocument(Collections.singleton(new IntPoint("foo", 1)));
      if (onNewSegment) {
        // Opening (and closing) a reader flushes the pending doc to a segment.
        DirectoryReader.open(w).close();
      }
    }
    FieldType ft1 = new FieldType(TextField.TYPE_STORED);
    ft1.setIndexOptions(from);
    w.addDocument(Collections.singleton(new Field("foo", "bar", ft1)));
    if (onNewSegment) {
      DirectoryReader.open(w).close();
    }
    FieldType ft2 = new FieldType(TextField.TYPE_STORED);
    ft2.setIndexOptions(to);
    if (from == IndexOptions.NONE || to == IndexOptions.NONE || from == to) {
      w.addDocument(Collections.singleton(new Field("foo", "bar", ft2))); // no exception
      w.forceMerge(1);
      try (LeafReader r = getOnlyLeafReader(DirectoryReader.open(w))) {
        // NONE is upgradeable: the surviving options are the non-NONE side.
        IndexOptions expected = from == IndexOptions.NONE ? to : from;
        assertEquals(expected, r.getFieldInfos().fieldInfo("foo").getIndexOptions());
      }
    } else {
      IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
          () -> w.addDocument(Collections.singleton(new Field("foo", "bar", ft2))));
      assertEquals("cannot change field \"foo\" from index options=" + from +
          " to inconsistent index options=" + to, e.getMessage());
    }
    w.close();
    dir.close();
  }

  // Same conflict matrix, but the second write arrives via addIndexes(CodecReader).
  public void testChangeIndexOptionsViaAddIndexesCodecReader() throws IOException {
    for (IndexOptions from : IndexOptions.values()) {
      for (IndexOptions to : IndexOptions.values()) {
        doTestChangeIndexOptionsAddIndexesCodecReader(from, to);
      }
    }
  }

  private void doTestChangeIndexOptionsAddIndexesCodecReader(IndexOptions from, IndexOptions to) throws IOException {
    Directory dir1 = newDirectory();
    IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig());
    FieldType ft1 = new FieldType(TextField.TYPE_STORED);
    ft1.setIndexOptions(from);
    w1.addDocument(Collections.singleton(new Field("foo", "bar", ft1)));
    // Build a second index whose "foo" field uses the target options.
    Directory dir2 = newDirectory();
    IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
    FieldType ft2 = new FieldType(TextField.TYPE_STORED);
    ft2.setIndexOptions(to);
    w2.addDocument(Collections.singleton(new Field("foo", "bar", ft2)));
    try (CodecReader cr = (CodecReader) getOnlyLeafReader(DirectoryReader.open(w2))) {
      if (from == IndexOptions.NONE || to == IndexOptions.NONE || from == to) {
        w1.addIndexes(cr); // no exception
        w1.forceMerge(1);
        try (LeafReader r = getOnlyLeafReader(DirectoryReader.open(w1))) {
          IndexOptions expected = from == IndexOptions.NONE ? to : from;
          assertEquals(expected, r.getFieldInfos().fieldInfo("foo").getIndexOptions());
        }
      } else {
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> w1.addIndexes(cr));
        assertEquals("cannot change field \"foo\" from index options=" + from +
            " to inconsistent index options=" + to, e.getMessage());
      }
    }
    IOUtils.close(w1, w2, dir1, dir2);
  }

  // Same conflict matrix, but the second write arrives via addIndexes(Directory).
  public void testChangeIndexOptionsViaAddIndexesDirectory() throws IOException {
    for (IndexOptions from : IndexOptions.values()) {
      for (IndexOptions to : IndexOptions.values()) {
        doTestChangeIndexOptionsAddIndexesDirectory(from, to);
      }
    }
  }

  private void doTestChangeIndexOptionsAddIndexesDirectory(IndexOptions from, IndexOptions to) throws IOException {
    Directory dir1 = newDirectory();
    IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig());
    FieldType ft1 = new FieldType(TextField.TYPE_STORED);
    ft1.setIndexOptions(from);
    w1.addDocument(Collections.singleton(new Field("foo", "bar", ft1)));
    Directory dir2 = newDirectory();
    IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
    FieldType ft2 = new FieldType(TextField.TYPE_STORED);
    ft2.setIndexOptions(to);
    w2.addDocument(Collections.singleton(new Field("foo", "bar", ft2)));
    // The source writer must be closed before its directory can be added.
    w2.close();
    if (from == IndexOptions.NONE || to == IndexOptions.NONE || from == to) {
      w1.addIndexes(dir2); // no exception
      w1.forceMerge(1);
      try (LeafReader r = getOnlyLeafReader(DirectoryReader.open(w1))) {
        IndexOptions expected = from == IndexOptions.NONE ? to : from;
        assertEquals(expected, r.getFieldInfos().fieldInfo("foo").getIndexOptions());
      }
    } else {
      IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
          () -> w1.addIndexes(dir2));
      assertEquals("cannot change field \"foo\" from index options=" + from +
          " to inconsistent index options=" + to, e.getMessage());
    }
    IOUtils.close(w1, dir1, dir2);
  }
}
| {
"pile_set_name": "Github"
} |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_dsz.py
import mcl.framework
import mcl.tasking
class dsz:
    # Decompiled constant bundle for the DSZ tasking plugin.
    # NOTE(review): the numeric values are magic numbers recovered by the
    # decompiler; their meanings are inferred from the names only -- confirm
    # against the mcl tasking framework before relying on them.
    INTERFACE = 16842801
    PFAM = 4131
    PROVIDER_ANY = 4131  # same value as PFAM in this build
    PROVIDER = 16846883
    # RPC descriptor built once at class-definition time from the constants above.
    RPC_INFO_ACTION = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 0])
"pile_set_name": "Github"
} |
/*
* GRUB -- GRand Unified Bootloader
* Copyright (c) 1999-2008 Igor Pavlov
* Copyright (C) 2008 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* This code was taken from LZMA SDK 4.58 beta, and was slightly modified
* to adapt it to GRUB's requirement.
*
* See <http://www.7-zip.org>, for more information about LZMA.
*/
#ifndef __LZHASH_H
#define __LZHASH_H

/* Sizes of the small per-length hash tables used by the match finder. */
#define kHash2Size (1 << 10)
#define kHash3Size (1 << 16)
#define kHash4Size (1 << 20)

/* Offsets of the main hash table within the combined hash array when 3-,
 * 4- or 5-byte hashing is selected: the smaller fixed tables come first. */
#define kFix3HashSize (kHash2Size)
#define kFix4HashSize (kHash2Size + kHash3Size)
#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)

/* Each HASHn_CALC macro expects `cur` (pointer to the current byte),
 * `p->crc` (CRC lookup table) and `p->hashMask` to be in scope; it assigns
 * `hashValue` and, for the longer variants, the smaller hash2Value /
 * hash3Value / hash4Value slots used for short-match lookups. */
#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);

#define HASH3_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  hash2Value = temp & (kHash2Size - 1); \
  hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }

#define HASH4_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  hash2Value = temp & (kHash2Size - 1); \
  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
  hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }

#define HASH5_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  hash2Value = temp & (kHash2Size - 1); \
  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
  hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
  hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
  hash4Value &= (kHash4Size - 1); }

/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */

#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;

/* MT_* variants compute only the short-length hashes (no main hashValue);
 * used by the multithreaded match finder. */
#define MT_HASH2_CALC \
  hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);

#define MT_HASH3_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  hash2Value = temp & (kHash2Size - 1); \
  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }

#define MT_HASH4_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  hash2Value = temp & (kHash2Size - 1); \
  hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
  hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }

#endif /* __LZHASH_H */
| {
"pile_set_name": "Github"
} |
<?php
/**
* Copyright since 2007 PrestaShop SA and Contributors
* PrestaShop is an International Registered Trademark & Property of PrestaShop SA
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.md.
* It is also available through the world-wide-web at this URL:
* https://opensource.org/licenses/OSL-3.0
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@prestashop.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade PrestaShop to newer
* versions in the future. If you wish to customize PrestaShop for your
* needs please refer to https://devdocs.prestashop.com/ for more information.
*
* @author PrestaShop SA and Contributors <contact@prestashop.com>
* @copyright Since 2007 PrestaShop SA and Contributors
* @license https://opensource.org/licenses/OSL-3.0 Open Software License (OSL 3.0)
*/
namespace PrestaShop\PrestaShop\Core\Domain\Language\Command;
use PrestaShop\PrestaShop\Core\Domain\Language\ValueObject\LanguageId;
/**
 * Command object identifying a single language to be deleted.
 */
class DeleteLanguageCommand
{
    /**
     * @var LanguageId identity of the language scheduled for deletion
     */
    private $id;

    /**
     * Wraps the raw identifier in a LanguageId value object at construction
     * time, so validation happens as early as possible.
     *
     * @param int $languageId
     */
    public function __construct($languageId)
    {
        $this->id = new LanguageId($languageId);
    }

    /**
     * @return LanguageId
     */
    public function getLanguageId()
    {
        return $this->id;
    }
}
| {
"pile_set_name": "Github"
} |
// This file was procedurally generated from the following sources:
// - src/dstr-binding/ary-ptrn-rest-obj-prop-id.case
// - src/dstr-binding/default/cls-decl-async-gen-meth-dflt.template
/*---
description: Rest element containing an object binding pattern (class expression async generator method (default parameters))
esid: sec-class-definitions-runtime-semantics-evaluation
features: [async-iteration]
flags: [generated, async]
info: |
ClassDeclaration : class BindingIdentifier ClassTail
1. Let className be StringValue of BindingIdentifier.
2. Let value be the result of ClassDefinitionEvaluation of ClassTail with
argument className.
[...]
14.5.14 Runtime Semantics: ClassDefinitionEvaluation
21. For each ClassElement m in order from methods
a. If IsStatic of m is false, then
i. Let status be the result of performing
PropertyDefinitionEvaluation for m with arguments proto and
false.
[...]
Runtime Semantics: PropertyDefinitionEvaluation
AsyncGeneratorMethod :
async [no LineTerminator here] * PropertyName ( UniqueFormalParameters )
{ AsyncGeneratorBody }
1. Let propKey be the result of evaluating PropertyName.
2. ReturnIfAbrupt(propKey).
3. If the function code for this AsyncGeneratorMethod is strict mode code, let strict be true.
Otherwise let strict be false.
4. Let scope be the running execution context's LexicalEnvironment.
5. Let closure be ! AsyncGeneratorFunctionCreate(Method, UniqueFormalParameters,
AsyncGeneratorBody, scope, strict).
[...]
13.3.3.6 Runtime Semantics: IteratorBindingInitialization
BindingRestElement : ... BindingPattern
1. Let A be ArrayCreate(0).
[...]
3. Repeat
[...]
b. If iteratorRecord.[[done]] is true, then
i. Return the result of performing BindingInitialization of
BindingPattern with A and environment as the arguments.
[...]
---*/
// Outer binding named `length`: the test below asserts it is NOT shadowed by
// the `length` property captured inside the destructuring pattern.
let length = "outer";

var callCount = 0;
class C {
  // The default [7, 8, 9] is spread into an array, then destructured through
  // an object binding pattern (indices 0-3 plus `length`).
  async *method([...{ 0: v, 1: w, 2: x, 3: y, length: z }] = [7, 8, 9]) {
    assert.sameValue(v, 7);
    assert.sameValue(w, 8);
    assert.sameValue(x, 9);
    assert.sameValue(y, undefined);
    assert.sameValue(z, 3);
    assert.sameValue(length, "outer", "the length prop is not set as a binding name");
    callCount = callCount + 1;
  }
};

// Drive the async generator one step so the parameter binding runs.
new C().method().next().then(() => {
  assert.sameValue(callCount, 1, 'invoked exactly once');
}).then($DONE, $DONE);
| {
"pile_set_name": "Github"
} |
package main
import (
"fmt"
"time"
"github.com/henrylee2cn/erpc/v6"
)
//go:generate go build $GOFILE

// main demonstrates erpc asynchronous calls: first a single AsyncCall that is
// polled until completion, then a batch of ten calls sharing one result channel.
func main() {
	defer erpc.SetLoggerLevel("INFO")()

	cli := erpc.NewPeer(erpc.PeerConfig{})
	defer cli.Close()

	sess, stat := cli.Dial(":9090")
	if !stat.OK() {
		erpc.Fatalf("%v", stat)
	}

	// Single asynchronous call
	var result string
	// The buffered channel (capacity 1) receives the command when it completes.
	callCmd := sess.AsyncCall(
		"/test/wait3s",
		"Single asynchronous call",
		&result,
		make(chan erpc.CallCmd, 1),
	)
WAIT:
	// Poll once per second until Done() is closed, then log the result.
	for {
		select {
		case <-callCmd.Done():
			erpc.Infof("test 1: result: %#v, error: %v", result, callCmd.Status())
			break WAIT
		default:
			erpc.Warnf("test 1: Not yet returned to the result, try again later...")
			time.Sleep(time.Second)
		}
	}

	// Batch asynchronous call: all ten commands report into one channel.
	batch := 10
	callCmdChan := make(chan erpc.CallCmd, batch)
	for i := 0; i < batch; i++ {
		sess.AsyncCall(
			"/test/wait3s",
			fmt.Sprintf("Batch asynchronous call %d", i+1),
			new(string),
			callCmdChan,
		)
	}
	// Drain exactly `batch` completions; the channel is never closed, so the
	// countdown (not range termination) ends the loop.
	for callCmd := range callCmdChan {
		result, stat := callCmd.Reply()
		if !stat.OK() {
			erpc.Errorf("test 2: error: %v", stat)
		} else {
			erpc.Infof("test 2: result: %v", *result.(*string))
		}
		batch--
		if batch == 0 {
			break
		}
	}
}
| {
"pile_set_name": "Github"
} |
<!doctype html>
<title>CodeMirror: PHP mode</title>
<meta charset="utf-8"/>
<link rel=stylesheet href="../../doc/docs.css">
<link rel="stylesheet" href="../../lib/codemirror.css">
<script src="../../lib/codemirror.js"></script>
<script src="../../addon/edit/matchbrackets.js"></script>
<script src="../htmlmixed/htmlmixed.js"></script>
<script src="../xml/xml.js"></script>
<script src="../javascript/javascript.js"></script>
<script src="../css/css.js"></script>
<script src="../clike/clike.js"></script>
<script src="php.js"></script>
<style type="text/css">.CodeMirror {border-top: 1px solid black; border-bottom: 1px solid black;}</style>
<div id=nav>
<a href="http://codemirror.net"><h1>CodeMirror</h1><img id=logo src="../../doc/logo.png"></a>
<ul>
<li><a href="../../index.html">Home</a>
<li><a href="../../doc/manual.html">Manual</a>
<li><a href="https://github.com/codemirror/codemirror">Code</a>
</ul>
<ul>
<li><a href="../index.html">Language modes</a>
<li><a class=active href="#">PHP</a>
</ul>
</div>
<article>
<h2>PHP mode</h2>
<form><textarea id="code" name="code">
<?php
$a = array('a' => 1, 'b' => 2, 3 => 'c');
echo "$a[a] ${a[3] /* } comment */} {$a[b]} \$a[a]";
function hello($who) {
return "Hello $who!";
}
?>
<p>The program says <?= hello("World") ?>.</p>
<script>
alert("And here is some JS code"); // also colored
</script>
</textarea></form>
<script>
// Attach a CodeMirror editor to the #code textarea, configured for
// mixed HTML/PHP highlighting with line numbers and bracket matching.
var textarea = document.getElementById("code");
var options = {
  mode: "application/x-httpd-php",  // HTML with embedded PHP
  lineNumbers: true,
  matchBrackets: true,
  indentUnit: 4,
  indentWithTabs: true
};
var editor = CodeMirror.fromTextArea(textarea, options);
</script>
<p>Simple HTML/PHP mode based on
the <a href="../clike">C-like</a> mode. Depends on XML,
JavaScript, CSS, HTMLMixed, and C-like modes.</p>
<p><strong>MIME types defined:</strong> <code>application/x-httpd-php</code> (HTML with PHP code), <code>text/x-php</code> (plain, non-wrapped PHP code).</p>
</article>
| {
"pile_set_name": "Github"
} |
{
"uri": "mongodb+srv://test1.test.build.10gen.cc/adminDB?replicaSet=repl0",
"seeds": [
"localhost.test.build.10gen.cc:27017",
"localhost.test.build.10gen.cc:27018"
],
"hosts": [
"localhost:27017",
"localhost:27018",
"localhost:27019"
],
"options": {
"replicaSet": "repl0",
"ssl": true
},
"parsed_options": {
"auth_database": "adminDB"
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE chapter SYSTEM "chapter.dtd">
<chapter>
<header>
<copyright>
<year>2011</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
</legalnotice>
<title>Usage</title>
<prepared></prepared>
<responsible></responsible>
<docno></docno>
<approved></approved>
<checked></checked>
<date></date>
<rev></rev>
<file>diameter_using.xml</file>
</header>
<p>
To be written.</p>
<!-- ===================================================================== -->
</chapter>
| {
"pile_set_name": "Github"
} |
201 Leonov
202 Central\s+American?
202 Yucatan
202 neighbou?ring Guatemala
202 Guatemala.*border
202 boundaries.*Guatemalan?
202 Gulf of Honduras.*border(ed)?
202 Honduras.*next\s+door
202 on a line between Colombia and the US
203 400\s+micrograms
204 suspension
204 supported\s+by\s+cables
204 sling
205 245,?000
206 2[345]0,?000\s+miles
206 221,?000\s+miles
206 238,?855\s+miles
207 national\s+anthem
207 Star\s*-?\s*Spangled\s+Banner
207 ode\s+to\s+Old\s+Glory
208 California
209 Johan\s+Vaaler'?s?
210 least\s+five
210 nine\s+dogs?
210 ((11)|(12)|(13)|(14)|(15)|(16)|(17)|(18))((\s*to\s+20)|(\s+dogs?))
211 ancient\s+Roman
212 Adolph\s+Rickenbacker
213 bats?
214 20
215 Gandhi
215 Rao
215 Nehru
215 Singh
216 English
217 (mature|aging)\s+trees?
217 meadows?
217 woods
217 scrub
217 oak\s+trees?
217 hardwood
218 zipper
219 12[0234]\s+million
219 123,622,541|123,150,000
219 122.6
219 123.3
219 122.74?\s+million
220 Hawke?
220 Keating
221 Ray
222 Egyptian\s*god
222 row.*underworld
223 Yugoslav(ia)?
223 borders?.*Serbia
224 light\s+amplification
225 Poseidon
226 Hungary|Hungarians?
226 Czechoslovakian?
226 Austrian?
226 Slovak(ia)?
226 Allmendingen
226 Bratislava
226 Romanian?
226 Gabcikovo
226 Ostrov
226 Rye Island
226 Bulgarian?
226 Serbia
226 Ukraine
226 Vidin
226 Ruse
226 Vienna
226 Belgrade
226 Budapest
226 Europe
226 Esztergom
227 temperature\s+at\s+which\s+moisture\s+would\s*start\s+to\s+condense
227 moist\s+air\s+and\s+chill
227 in\s+the\s+air
228 metals?
229 Rozsa
229 Gillingham
229 Evans
229 Sieben
229 Oppel
229 Barrowman
229 Mitchell
229 Wharton
229 Sanders
229 Meagher
229 Jager
229 Salnikov
229 Biondi
229 Stewart
229 Morale
229 Ball
229 Gross
230 1944
231 Petain
231 Laval
232 Jenkins
232 Zworykin
232 Parker
232 Farnsworth
233 Wrights?
233 Whitehead
234 Whitehead
234 Wrights?
235 12
236 Francisco\s+Vasquez\s+de\s+Coronado
236 Coronado\s+Trail\s+Association
236 bantamweight
236 Celestino\s+Coronado
236 Gabriel\s+Coronado
236 Elaine\s+Coronado
236 exploration
236 leader.*gang
237 Brahma
237 Shiva
237 Vishnu
238 Organi[sz]ation\s*of\s*American\s*States?
239 Rep
239 Representative
239 congresswoman
239 congress.*seat
239 member.*Congress
240 (75)|(76)|(78)
240 Seventy\s*-?\s*seven
241 craters?
241 lava\s+lake
242 Alamo
242 San Jacinto
243 Portugal
244 Doubleday
245 Carolinas?
245 N\.C\.?
246 sea\s*route\s*around\s*Africa
246 sea\s+route.*India
246 sail.*India
247 North
247 Union
248 reticulated?\s+pythons?
249 Egypt
249 Luxor
250 Yucatan
250 Mexico
250 Guatemala
250 Punta\s+de\s+Chimino
250 Huehuetenango
250 Central\s+American\s+countries
250 Belize\s+City
250 British\s+Honduras
250 El\s+Salvador
250 Tikal
251 12\s+million
252 1775
253 poets?
253 wrote.*lines
253 wrote.*poems?
253 long\s+poems
253 poetical
253 poetry
253 Immortality\s+Ode
254 quail
255 Diamond\s+Bar\s+consultant
256 Cheops
256 Khufu
257 plankton
257 krill
257 herring
257 shrimp-like\s+crustaceans
257 penguin\s+milkshakes
258 water.*cooler
258 crevices?
258 water.*full\s+of.*fish
259 dinosaurs?
260 North\s+American?\s+Free\s+Trade\s+Association
260 North\s+American?\s+Free\s+Trade\s+Agreement
261 Hallmark
262 Tokugawas?
263 1895
263 95\s+years\s+ago
264 Robert\s+B\.?\s*Thomas
265 pluto
266 Greek
267 rain\s+clouds
267 cumulonimbus
268 Brutus
269 towering\s+figure\s+of\s+art
269 painters?
269 artists?
269 sculpters?
269 self\s*-\s*portrait
269 20th\s+century\s+masters?
269 Picasso\s+oils?
269 Impressionist
269 Au\s+Lapin\s+Agile
269 Yo\s+Picasso
269 harlequin
269 rose\s+period
269 blue\s+period
270 Brazil
270 Venezuela
270 Colombia
271 16\s+to\s+18\s+feet
271 1[678]\s*-?\s*foot
271 6\s*feet
272 Australian?s?
272 New\s+Zealand
272 Queensland
272 Taiwan
272 Alaska
272 Northern\s+Territory
272 Outback
273 Nixon
274 Butts
275 nine\s+million
275 16\s+million
276 \$\s*24\.64\s*billion
276 \$\s*3[01]\s*billion
276 \$\s*25\s*billion
276 Dollars\s+30\s*billion
276 Dollars\s+37\s*bn
276 billions.*Sultan.*\$31.0
277 5,079,385
277 5.1m
278 (200)|(169)|(227)|(268)|(338)|(341)
279 psychoanalyst
279 French\s+analysts?
279 Jacques\s+Lacan
279 Lacan\s*'\s*s\s+writings?
279 writings?.*Lacan
279 Freud\s*,?\s*Jung\s*,?\s*Lacan
279 Baudrillard\s*,?\s*Habermas\s*,?\s*Lacan\s*,?\s*CS\s*Lewis
279 Lacan\s*,?\s*CS\s*Lewis\s*,?\s*le Corbusier\s*,?\s*McLuhan
279 implicated\s+Patrice\s+Pelat
279 investiga.*Pechiney\s*aluminium
279 found.*La\s+Truffe
279 Bas les masques
279 editor.*Le\s+Monde
279 French\s+journalists?
279 Jean\s*-?\s*Francois\s+Lacan
279 post\s*-?\s*structuralist
280 World\s+Trade\s+(Center|Centre)
281 1984
282 scale
282 plant\s+pests
282 leaf-?\s+and\s+flower-?\s*munching\s+pests
282 aphids?
283 Australia
283 outback
283 central\s+Australian\s+desert
284 age\s*70
285 1869
286 Keystone
287 black\s+South\s+African\s+leaders?
287 Archi?bish?op
287 bishop
287 Nobel\s+Peace
287 Nobel\s+prize
287 Nobel\s+laureates?
287 lead.*Anglican\s+Church
287 anti\s*-?\s*apartheid\s+cleric
287 anti\s*-?\s*apartheid\s+leader
288 1[89]0\s+mph
289 orators
289 senators
289 Sens\.
289 statesmen
289 Webster\s*'?\s*s\s+contemporaries
289 historic\s+figures
289 historical\s+allies
289 leading\s+political\s+figures
289 legislators
290 1989
291 8th
292 1m\s+square\s+miles
293 Capt.*Cook
293 Polynesians?
294 Sultan.*Brunei
294 Tsutsumi
294 Hassanal
294 Taikichiro\s+Mori
295 50
296 (\$|Dollars)\s*3.35
296 (\$|Dollars)\s*3.80
296 (\$|Dollars)\s*4.25
297 plants?
297 vegetation
297 pine\s+needles
298 redwoods?
299 29
299 30
299 43
300 blood.*cancer
300 cancer.*blood
300 bone\s*-?\s*marrow.*cancer
300 smoking\s*-?\s*related\s+disease
300 white\s+blood
301 Paul\s+Brown
301 Brown.*Christ\s+Hospital
301 coach.*Brown
301 1960s.*Brown
302 10\s+people
302 13\s+Americans
303 M[ou]hamm[ae]d
304 Texas\s+and\s+other.*states
304 Will\s+County
304 southwest.*Chicago
305 metals?
305 metallic\s+(substance)|(element)
305 strengthening\s+agent
305 alloy\s+in\s+steelmaking
305 alloying\s+agent
306 desert
306 Saudi\s+Arabia
306 Kenyan?
306 African?
306 Uganda
307 directors?
307 direction
307 directed
307 directs
307 Dead\s+Poet\s*'?\s*s\s+Society
308 714
309 William.*Cody
309 Buffalo\s+Bill\s*'?\s*'?\s+Cody
309 Buffalo\s+Bill\s*"\s+Cody
309 Bill\s+Cody
309 Indian\s+(scout)|(fighter)
309 pony\s+express
309 produced.*Wild\s+West\s+show
310 Burma
310 Thai(land)?
310 Kanchanaburi
310 90\s+miles\s+west.*Bangkok
311 ^\s*four\s*$
311 ^\s*4\s*$
311 ^\s*three\s*$
311 ^\s*3\s*$
311 four.*Super\s+Bowls?
311 in\s+four\s+tries
311 four\s+of\s+the\s+past\s+nine
311 fourth\s+in\s+six\s+years
311 three\s+(consecutive\s+)?Super
311 third\s+straight\s+Super
311 1982\s*,?\s*'?\s*85\s*,?\s*'?\s*89.*90
312 Olmsted
313 Ts.*Ai\s+Lun
313 Islamic\s+society
314 Furnier
315 small.*wings
315 wings.*too\s+small
316 Caspian
316 Javan
316 Bali
316 Tasmanian
317 west\s+of\s+the\s+International\s+Date\s+Line
317 western\s+Pacific
317 south\s+Pacific
317 half\s+a\s+world\s+away\s+from\s+Washington
317 south\s+of\s+the\s+Northern\s+Marianas
317 1\s*,?\s*500\s+miles\s+east\s+of\s+the\s+Philippines
317 U\s*\.?\s*S\s*\.?\s+territory\s+in\s+the\s+Pacific
318 Harvard
319 seven
319 7
320 Eastern\s+Europe
320 central\s+Europe
320 southeast\s+Europe
320 border.*Hungary
320 border.*Yugoslavia
320 Rhine\s*-?\s*Main\s*-?\s*Danube\s+trans\s*-?\s*European\s+axis
320 Rhine\s*-?\s*Main\s*-?\s*Danube\s+watercourse
321 100th\s+anniversary
321 1888
323 Queen Elizabeth
323 Elizabeth II
323 the Queen.*richest
324 Sacramento
325 size.*India
326 sea\s*grass
326 plant\s*-?\s*eating
326 herbivores?
326 unwanted\s+vegetation
326 lettuce
327 82\s+years\s+ago
327 1906
327 1849
327 april\s*18
327 '06
328 Hamilton
329 80\s+million
329 81\s+million
329 83\s+million
329 85\s+million
329 90\s+million
329 85\.\s*8\s+million
329 86\.\s*7\s+million
329 Population\(mn\) 87.8
330 1943
331 5\s*,?\s*400\s+degrees\s+Fahrenheit
331 9\s*,?\s*000\s+degrees\s+Fahrenheit
331 5\s*,?\s*000\s+degrees\s+Celsius
332 journey.*three\s+years
332 about\s+two\s+years
332 six\s*-?\s*month.*111\s*million\s*-?\s*mile
332 nine\s+months\s+to
332 nine\s+month\s+journey
332 minimum.*14\s+months?
332 flight.*18\s+months?
332 240\s+days
334 5bn\s+years?
335 8.*foot
335 9.*feet
335 9.*foot
335 10.*foot
335 10.*feet
335 10.*ft
336 197[56]
337 Dollars?\s*1\s*\.\s*2\s*m
337 Pounds?\s*770\s*,?\s*000
337 Dollars?\s*1m
337 \$\s*5\s*,?\s*800\s+a\s+week
337 \$\s*890\s*,?\s*844
337 \$\s*800\s*,?\s*000
337 \$\s*600\s*,?\s*000
337 \$\s*597\s*,?\s*000
337 \$\s*586\s*,?\s*816
337 \$\s*550\s*,?\s*000
337 \$\s*490\s*,?\s*000
337 \$\s*412\s*,?\s*520
337 \$\s*400\s*,?\s*000
338 Naismith
340 explorer
340 Army\s+lieutenant
340 head.*exploratory\s+troop
340 Pike\s+sighted
340 Pike\s+first\s+saw
341 2\s*,?\s*300\s+miles
341 2\s*,?\s*500\s+miles
342 ben[td]
342 separate
342 break\s+apart
342 split.*light
342 refract
342 spread
342 combines.*single\s+beam
343 Amazon.*longest
343 longest.*Amazon
343 Nile.*longest
344 Skinner
344 Jung
344 Pavlov
344 Charles.*Thomas
344 Sigmund\s+Freud
345 2\s*,?\s*467\s*,?\s*845
345 2\s*\.\s*49\s+million
346 poets?
346 writers?
346 poem.*Langston
346 Hughes.*poetry
346 Hughes.*poems?
346 Hughes.*wrote
346 poetry.*Hughes
347 a\s+Claude\s+Monet\s+impression
347 impressionists?
347 painting\s+is\s+the\s+stolen\s+Monet
347 painter.*Claude\s+Monet
347 artists\s+include.*Monet
347 painted\s+by\s+Monet?
347 Monet\s*'?\s*s\s+paintings?
347 paintings\s+included\s+five\s+Monet\s*'?\s*s
347 Impression Sunrise
347 treat\s+art\s+as\s+Monet\s+did
347 Monet\s+made\s+his\s+art
347 artist\s+of\s+immense\s+wealth
347 series.*paintings
347 Monet\s+painted
347 works?\s+by\s+Monet
347 painting\s+at\s+Orsay
347 paintings?\s+by\s+(Claude\s+)?Monet
347 Nature\s+into\s+Art
347 purchaser\s+of\s+the\s+Monet
347 Monet.*landscapes?
347 landscape.*Monet
347 father\s+of\s+impressionism
348 Zo[zs]er
349 Valley\s+of\s+the\s+Dolls
349 The\s+Civil\s+War
349 Fatherhood
349 Gone\s+with\s+the\s+Wind
349 Bible
350 1\s*,?\s*116
350 nearly\s+1\s*,?\s*120
350 650
351 pioneering\s+aviator
351 aviator.*Lindbergh
351 solo.*crossing
351 kidnapping.*son
351 kidnap.*Lindbergh
351 first.*solo.*Atlantic
351 first.*cross.*Atlantic
351 flight.*Atlantic.*1927
351 historic.*Atlantic\s+crossing
351 famous\s+solo\s+flight
351 flying\s+ace
351 hero\s+pilot
351 design\s+the\s+pontooned\s+planes
351 isolationists?\s+before\s+World\s+War\s+II
351 isolationist\s+movement
352 Robinson.*McGraw
352 Baltimore
352 Egyptians
353 Hindu-Arabic
354 worms?
354 Steinernema\s+feltiae
354 insects\s*'?\s+natural\s+enemies
354 eelworms?
354 roundworms?
354 parasitic
354 plant\s+parasite
354 pinewood\s+nematode
354 voracious\s+bug\s+killers
354 microscopic\s+organism
355 Bugatti\s+Royale
356 cocoa\s+bean
357 Alaska
358 looks\s+like\s+a\s+cross\s+between\s+a\s+dog\s+and\s+a\s+monkey
358 rodents?
358 mongoose
359 Australian?
359 Fla\.
360 6.2\s*-?\s*miles?
361 300\s*degrees\s+Fahrenheit
361 1\s*,?\s*800\s*degrees
361 1\s*,?\s*000\s*degrees
362 Ouagadougo?u
363 Port\s*-?\s*au\s*-?\s*Prince
364 1\s*,\s*605\s*,?\s*000
365 14\.[57]\s*million
365 15\)?\s*m(illion)?
366 San\s+Francisco
366 49\s*ers
367 Jan\.\)?\s*15
367 January\s+15
368 Caribbean
368 off.*coast.*Venezuela
369 Alaska
369 Soviet\s+Inuits?
369 Arctic
369 Greenland(ers)?
369 Canada
369 Canadian\s+Inuits?
369 Canadian\s+Eskimos?
369 Siberian?
369 Soviet\s+Union.*Yuit
369 Iqaluit
369 Nunavut
369 Ellesmere
370 skin
371 dogs?
371 pooches
372 (March\s*25\s*,?\s*)?1911
373 Africa
373 Botswana
373 west\s+of\s+Kariba
373 50\s+miles.*Kuruman
373 south\s+of\s+Victoria\s+Falls
374 ailment
374 metabolic\s+disorder
374 Poems?
375 Atlantic
376 Truman
377 30\s*km\s*/?\s*sec
378 Hirohito
378 Akihito
378 Showa
378 Taisho
379 100\s*,?\s*000\s+light\s*-?\s*years?
380 Portuguese
381 Czolgosz
382 7\s*th\s+century
383 saguaros?
383 pronounced\s+suh-WAH-row
384 Marconi
384 Lear
385 Busch\s+Gardens
385 Libya
385 Serengeti
385 Tanzania
385 Kenya
385 East\s+Africa\s*'?\s*s\s+Eden\s*-?\s*like\s+game\s+parks?
385 Zimbabwe
386 eating\s+disorders?
386 condition.*refusal.*eat
386 self\s*-?\s*starvation
386 an\s+avoidance\s+of\s+food
386 aversion\s+to\s+eating
386 psychological\s+problems?
387 1889
387 1989.*hundred.*years.*ago.*Montana
388 Nina.*Pinta.*Santa\s+Maria
388 Pinta.*Nina.*Santa\s+Maria
389 Arthur
390 Norfolk\s+County
390 Massachusetts
390 Braintree\s*,?\s*Mass
390 Quincy\s*,?\s*Mass
390 Franklin\s+St.*Quincy
390 in\s+Braintree
391 Manet
392 god.*hurricane
392 Aztec.*god
392 god.*ancient\s+Aztecs
392 priest\s*-?\s*leader\s+of\s+Tula
392 Toltec\s+god
392 brother.*Tezcatlipoca
392 the\s+god\s+Quetzalcoatl
392 god\s+of\s+learning
392 South\s+American\s+Indian.*god
393 brain
394 floccinaucinihilipilification
394 pneumonoultramicroscopicsilicovolcanoconiosis
395 ingredient.*gunpowder
395 potassium\s+nitrate
396 General\s+Electric
397 1788
397 18th\s+century
398 ten\s+days.*January 4
398 Christmas.*Boxing Day
398 26\s*December
398 December\s+26
398 day\s+after\s+Christmas
399 UK\s*currency.*Dollars\s*1\.4945
399 to\s+\$\s*1\.7091
400 Alef\s*-?\s*Beyz
401 ruler\s+of\s+Austria
401 Empress
401 Austria\s*'?\s*s\s+Queen
401 queen\s+of\s+Hungary.*Bohemia
401 Austrian\s+Archduchess
401 Maria\s+Theresa\s+of\s+Austria
402 ^\s*American\s*$
402 American.*art.*Pollock
402 character\s+of\s+American\s+art
402 American\s+artists?
402 Pollock\s*:?\s+An\s+American
402 An\s+American\s+Saga
402 contemporary\s+painter\s+in\s+America
402 an\s+American\s+art\s+scene
402 greatest\s+painter\s+in\s+America
402 born\s+in\s+rural\s+Georgia
402 Milledgeville\s*,?\s*Ga
403 Lexington
404 Magnolia
405 NSU
405 Mazda
406 Everest
406 Maxwell
407 Rushmore
407 carve.*mountain
407 carve.*granite\s+peak
408 ^\s*bears?\s*$
408 bear\s+of\s+little\s+brain
408 Pooh.+bears?
408 the\s+wall\s*,?\s+bear
408 famous\s+(little\s+)?bear
408 bear.+Milne
408 black\s+bear.+connection
408 bears\s+kind\s+of\s+talk
408 real\s+bear
408 sun\s+bears?
408 bears.+intelligent
409 Nutra\s*-?\s*Sweet
410 hazardous\s+materials?
411 champagne.*museum
411 cathedrals?
411 Champagne\s+country
411 champagne\s+houses?
412 Shopping
413 Cincinnati
414 amyo?troph?ic\s+lateral\s+sclerosis
415 Cable\s+News\s+Network
416 June\s+,?\s*1980
417 Turner
418 Salt\s+Lake\s+(City\s+)?Tribune
418 Tribune\s*,\s*Salt\s+Lake
418 Deseret\s+News
418 Salt\s+Lake\s+City\s+Deseret
418 Catalyst
419 naturalist
419 expert\s+on\s+chimps
419 chimpanzee\s+specialist
419 chimpanzee\s+researcher
419 chimpanzee\s*-?\s*observer
419 pioneered.*study\s+of\s+primates
419 anthropologist
419 ethnologist
419 ethologists?
419 primatologist
419 animal\s+behaviorist
419 scientist\s+of\s+unquestionable\s+reputation
419 most\s+recognizable\s+living\s+scientist
419 wife.*van\s*Lawick
420 sweet\s+Italian\s+bread
421 severe\s+blood\s+anemia
421 blood\s+disorders?
421 inherited\s+disorders?
421 disorder.*genetic\s+defect
421 genetic\s+disorders?
421 genetic\s+defects?
421 genetic\s+defect.*blood
421 lack.*gene.*hemoglobin
422 1981
423 Jolt
424 gaggles?
424 flocks?
425 ^\s*9\s*$
425 ^\s*nine\s*$
425 nine\s+months
426 Beta(max)?
426 Betacam
427 ^\s*Indian\s*$
427 Indian peoples?
427 Indian\s+culture
427 Indian\s+objects
427 Northwestern\s+Indian\s+nation
427 Nimpkish
427 Kwakiutl
427 British\s+Columbia.*Indian
428 Boston
429 Princeton
430 north.*Spain
430 Spain.*north(ern)?
430 south\s*-?\s*west(ern)?\s+France
430 France.*southwest(ern)?
430 Basque.*Spain
430 Basque.*France
430 Basque.*Europe
430 Spain.*Basque
430 Spain.*Madrid
430 France.*Basque
430 ^\s*Europe\s*$
430 ^\s*Spain\s*$
430 ^\s*France\s*$
430 controlee\s+district\s+in\s+France
430 Guipuzcoa
430 Pamplona
430 Pyrenees\s+foothills
431 cardio\s*-?\s*pulmonary\s+resuscitation
432 Maine
433 Skywalker
434 millionth\s+of\s+a\s+millimeter
434 millionth\s+of\s+a\s+millimetre
434 billionth\s+of\s+a\s+meter
434 billionth\s+of\s+a\s+metre
434 about\s+four\s+100\s*-?\s*millionths\s+of\s+an\s+inch
434 one\s*-?\s*hundred\s*-?\s*millionth\s+of\s+a\s+meter
435 cancer
435 brain\s+tumor
436 piano
437 30\s+November
437 November\s+30
438 metals?
438 alloys?
438 titanium\s+ore
438 Titanium\s+dioxide.*paint\s+pigment
438 Titanium\s+dioxide.*whitener
438 common\s+natural\s+element
439 rock.*group
439 eccentric\s+rock
439 industrial\s+dance\s*-?\s*machine
439 Ministry.*similar.*music
440 Boston
441 King\s+John
441 John.*crown
442 Pirates?
444 ^\s*Miami\s*$
444 ^\s*in\s+Miami\s*\.?\s*$
444 to\s+(\(REF:\s+)?Miami
444 at\s+(\(REF:\s+)?Miami
444 Miami\s*'?\s*s\s+downtown
444 Orange.*\s+in\s+.*Miami
444 Orange\s+Bowl\s*,\s*Miami
444 Miami\s*'?\s*s Orange
444 Dade\s+County
445 198[026]
446 OEMs?
447 spices?
447 reduces\s+aggression\s+among\s+cows
447 licorice\s*-?\s*scented\s+fragrance
447 fragrant\s+herbs
447 flavor(ed)?
447 flavoring
447 anise\s+seeds?
447 fennel
447 finocchio
447 anise\s+extract
447 ^\s*seed\s*$
447 potent\s+bear\s+bait
448 Lawrence\s+Township
448 Lawrenceville
448 New\s+Jersey
449 played\s+opposite\s+Cher
449 played\s+by
449 he\s+plays
449 Actors?
449 film\s+stars?
449 Hollywood.*stars?
449 Hollywood.*starring
449 movie\s+roles?
449 movie\s+starring
449 motion\s+pictures?
450 hot
450 warm
451 Las\s+Vegas
452 Henson
453 July\)?\s+14
453 14\s+July
453 1789
453 ^\s*Friday\s*$
453 Friday\s+is\s+Bastille
453 Friday\)?\s+on\s+Bastille
453 Bastille\s+day\s+on\s+Friday
454 Red\s+Crescent
455 JCS\s+Chairmanship
455 head.+joint\s+chiefs
455 chair.+joint\s+chiefs
455 chair.+joint\s+of\s+staff
455 first\s+black\s+chair(man)?
455 joint\s+chiefs\s+of\s+staff
455 chief\s+of\s+staff
456 summer
456 begins.*Memorial\s+Day
456 December\s+holiday
456 Thanksgiving
456 May\s*-?\s*August
457 Bienvenue.+Missouri
457 Missouri\s+highway
457 in\s+the\s+St\.?\s+Louis\s+suburbs?
457 Webster\s+University\s+in\s+St\.?\s+Louis
457 Webster\s+University\s*,?\s*(in\s+)?St\.?\s+Louis
457 St\.?\s+Louis\s*'?\s*s\s+Webster\s+University
457 Edgar\s+Road
458 Jungle\s+Golf
458 Wild\s+Dunes
459 1839
460 Irish\s*,?\s*or\s+Gaelic
460 ^\s*Irish\s*$
460 Irish\s+language
460 Irish\s+folk\s+music
460 Irish\s+yiddish
460 Gaelic\s*,?\s*Irish
460 Irish\s+Gaelic
460 Irish.+study.+their\s+language
461 Stoll
462 radioactivi?ty
462 radioactive
462 radiation
462 contamination
462 wastes?
462 RAO
462 nuclear.+polluted
462 tritium
462 iodine
462 xenon
463 guitars?
463 guitarist
464 Indianapolis
464 London
465 Yale\s+law
465 Wellesley
466 Scotland
467 Boswell
468 spunbonded\s+olefin
468 a\s+plastic
468 plastic.+surgical\s+gowns
468 Tyvek\s+notes
469 Gibson
470 Paz\s+Estenssoro
470 Zamora
470 Lo[sz]ada
470 Sanchez
471 1945
472 1865
473 Dickens
474 Yeager
474 Cochran
475 \$\s*96\s*,?\s*600
475 \$\s*89\s*,?\s*500
475 \$\s*87\s*,?\s*483
475 \$\s*120\s*,?\s*800
475 \$\s*125\s*,?\s*100
475 \$\s*125\s*,?\s*149
475 speaker.+\$\s*115\s*,?\s*000
476 pyramids?
476 hanging\s+gardens?
476 Pharos\s+Lighthouse
476 temple\s+of\s+Zeus
477 October\s+23\s*-?\s*November\s+23
477 Oct\.\s+23\s+thru\s+Nov\.\s+22
478 LAX
479 GTE\s+of\s+California
479 GTE\s*-?\s*California
479 Pacific\s+Bell
480 94539
480 94538
480 94537
480 94536
480 94555
481 Garrett
482 Elizabeth\s+II
482 Elizabeth\s+the\s+second
482 Queen\s+Elizabeth
483 rice\s+wine
483 beverage
483 made\s+from\s+rice
483 brewed.+rice
483 rice.+brewed
483 distill.+rice
483 processed\s+rice
484 Simba
485 Holland
485 in\s+Europe
485 the Netherlands
485 ^\s*Netherlands\s*$
485 Amsterdam\s*,?\s*Netherlands
486 about\s+45
486 forty\s*-?\s*four
487 Total\s+Recall
488 South\s+American?
489 mountains?
490 South\s+America
491 Bethel
491 Yasgur\s*'?s?\s+farm
491 in\s+upstate\s+New\s+York
491 ^\s*N\.Y\.\s*$
491 New\s+York\s+from\s+Aug\.\s+15\s*-\s*17
491 northwest\s+of\s+New\s+York\s+City
491 New\s+York\s+State
492 Varivax
493 made.+13\s*-\s*star\s+flag
493 making.+first.+flag
493 made.+first.+flag
493 made.+first.+Stars\s+and\s+Stripes
493 original\s+flag\s+designed
493 Betsy Ross\s*'\s*s?\s+flag
494 Seuss
494 Suess
494 Geisel
495 1931
495 1932
495 1930\s*'?\s*s
496 Morrison
497 Penny
498 Conrad
499 South\s+America
500 Orlando\s*,?\s+Fla
500 Sea\s+World.+in\s+(San\s+Diego\s*,?\s+)?Orlando
500 Sea\s+World.+of\.?\s+Orlando
500 Orlando\s+_\s+in\s+Florida
500 Orlando\s+area\s+is\s+home\s+to\s+Sea\s+World
500 the\s+Orlando\s+Sea\s+World
500 ^\s*Orlando\s*$
500 marine\s+park\s+in\s+Orlando
500 Orlando\s*-\s*based
500 Florida\s+Inc\.?\s*,?\s+Orlando
500 of\s+Florida\s+in\s+Orlando
500 In\s+addition\s+to\s+Orlando
500 Orlando\s+in\s+central\s+Florida
500 near\s+Orlando
500 of\s+Florida\s+\(\s*Orlando\s*\)
501 10,777,514
502 of\s+Minnesota\s*,?\s+Minneapolis
502 of\s+Minnesota\s*,?\s+\(REF:\s*Minneapolis
502 of\s+Minnesota\s+Medical\s+Center\s*,?\s+Minneapolis
502 University\s+of\s+Minnesota\s*-?\s*Duluth
502 University\s+of\s+Minnesota\s+in\s+Duluth
502 the\s*University\s+of\s+Minnesota\s+in\s+Duluth
502 in\s+Minneapolis
502 University\s+of\s+Minnesota.+Minneapolis
502 Crookston
503 NHL
503 hockey
504 Walton
505 Boston
506 Christian
506 McNiff
507 1985
508 Nev\.
508 Nevada
509 1770
510 Tony
511 Boston
511 Medford
511 in\s+Massachusetts
511 ^\s*Massachusetts\s*$
511 ^\s*Mass\.\s*$
511 Grafton
512 Young\s+Frankenstein
513 1986
514 Rice\s+Krispies
515 Parkinson(ism)?
516 music\s+by
516 pianists?
516 professor
517 Alberta
518 Suez\s+Canal
518 Israelis?
518 Israel
518 Egypt
518 Egyptians?
518 Jerusalem
519 07054
520 1959
521 circle\s+painted.+eye
522 electrocardiograms?
523 (the)?Chiricahua\s+mountains?
523 Apache\s+Indian
523 (sub)?tribes?
524 Western\s+Europe
525 sick\s+with\s+aches
525 fatigue
525 mild\s+fever
525 sore\s+throat
525 painful\s+lymph\s+nodes
525 swollen\s+glands
525 muscle\s+discomfort
525 pain\s+in\s+joints\s+and\s+muscles
525 flu\s*-?\s*like symptoms
525 lethargy
525 exhaustion
525 depression
526 Northwest\s+Territories
526 South\s+African?
526 Angolan?
526 Namibian?
526 Arkansas
526 North\s+America
526 Australia
526 Australian\s+diamond\s+mining
526 diamond\s+mine\s+in\s+Africa
526 Venetia
526 Zimbabwe
526 Botswana
526 Kalgoorlie
526 Sierra\s+Leone
526 Ghana
526 Mafikeng
526 River\s+Ranch
526 Argyle\s+diamond\s+mine
526 in\s+about\s+20\s+countries
526 Soviet\s+Union
526 Russian?
526 diamond\s+finds\s+in\s+the\s+US
526 Cuango
527 1920
528 lizards?
528 missile\s+systems?
528 systems.+of\s+Russian\s+manufacture
528 surface\s*-?\s*to\s*-?\s*air
529 film.+directors?
530 Our\s+Gang
531 Pete\s+the\s+Pup
531 Petie
532 pit\s+bulls?
533 ^\s*David\s+Kirk\s*$
533 Kirk\s*,?\s+the\s+All\s+Black
533 All\s+Black.+1987\s+world\s+cup\s+winners?
533 ^\s*New\s+Zealand\s*$
533 New\s+Zealand\s+won\s+the\s+first\s+world\s+cup
534 west\s+of\s+London
534 near\s+London
534 overlooking\s+the\s+River\s+Thames
535 Gilda
535 Radner
536 246.9\s+million
536 248\s+million
536 245,837,683
536 260\s+million
536 226.5\s+million
536 248.7\s+million
536 254\s+million
536 243\s+million
537 chicken
538 1948
539 shrubs?
539 pallida
539 gold\s*-?\s*yellow\s+flowers?
539 herb(al)?
539 book
539 by\s+Alice\s+Schertle
539 Alice\s+Schertle\s*'\s*s
540 L\.P\.
541 build\s+atomic\s+weapons?
541 atom\s+bomb
541 atomic\s+bombs?
541 design\s+and\s+build\s+the\s+two\s+bombs
541 development\s+of\s+the\s+A\s*-?\s*bomb
541 quest\s+for\s+a\s+superweapon
542 basal\s*-?\s*cells?
543 Gettysburg
543 Antietam
543 Manassas
543 Fredericksburg
543 Appomatox
543 Bull\s+Run
544 fruit.+pomegranates?
544 pomegranate.+fruits?
545 Mohr
545 Gruber
546 Mel\s+Gibson
547 1930
547 1936
547 1930s
548 Coliseum
549 kiss(ing)?
550 flat\s+rate
550 2\.1\s*\%
550 3\.1\s*\%
551 Creation\s+of\s+Adam
552 freak\s+storm
553 Pleasant
554 42\s*,?\s*396
554 43\s*,?\s*000
554 36\s*,?\s*000
555 Captain\s+Smith
555 Capt\.\s+Edward\s+Smith
556 500\s*,?\s*000
557 each\s+May
557 ^\s*May(\s*,?)?\s*$
557 Mich.+May
558 Lives\s+of\s+the\s+Artists
558 version.+model.+Mona\s+Lisa
558 biographer
559 Ian\s+Flemming
559 Fleming
560 K\s*-?\s*mart
561 Harley
561 Honda.+motorcycles?
561 motorcycl.+Honda
561 Indian\s+Motocycle\s+Manufacturing\s+Co
562 Carnival
563 The\s+Preppie\s+Murder
563 The\s+Bionic\s+Showdown
563 Who\s+Shot\s+Pat
563 Religion\s*,?\s*Inc
564 ^\s*N\.?J\.?\s*$
564 West\s+Orange
565 Tele\s*-?\s*Communications
566 ^\s*Missouri\s*$
566 Mo\.?\s+.+Missouri
566 Missouri.+Mo
567 Debi\s+Thomas
567 Katarina\s+Witt
567 Elizabeth\s+Manley
567 Tonya\s+Harding
567 Claudia\s+Leistner
567 Trenary
567 Rodnina
567 Ito
567 Linda\s+Fratianne
567 Tiffany\s+Chin
567 Healy
567 Peggy\s+Fleming
567 Rosalyn\s+Sumners
567 Surya\s+Bonaly
567 Yamaguchi
567 Nancy\s+Kerrigan
568 Filene\s*'?\s*s\s+(Basement\s*)?_?\s*Connecticut
568 ^\s*Maine\s*$
568 ^\s*Massachusetts\s*$
568 ^\s*Rhode\s+Island\s*$
568 ^\s*New\s+Hampshire\s*$
568 ^\s*New\s+York\s*$
568 ^\s*New\s+Jersey\s*$
568 ^\s*Pennsylvania\s*$
568 ^\s*New\s+Jersey\s*,\s+New\s+York\s*,\s+Pennsylvania\s*\.\s*$
569 800\s+milligrams
569 1\s*,?\s*000\s+milligrams
570 Gretzky\s+led\s+the\s+Oilers
570 former\s+team.+Edmonton\s+Oilers
570 leave\s+Edmonton
570 Los\s+Angeles\s+Kings\s+acquired.+Gretzky
570 Trade\s+Upsets\s+Oiler\s+Fans
570 trading.+Gretzky\s+to.+Kings
570 Oilers.+once\s+there\s+was\s+Wayne\s+Gretzky
570 to\s+the\s+Los\s+Angeles\s+Kings
570 to\s+the\s+struggling\s+Kings
570 Gretzky\s+left\s+for\s+Los\s+Angeles
570 Gretzky\s+to\s+(the\s+)?Los\s+Angeles
570 Los\s+Angeles\s+Kings\s+hockey\s+team
570 Los\s+Angeles\s+Kings\s*'?\s+superstar
570 Gretzky\s+and\s+the\s+L\s*\.?\s*A\s*\.?\s+Kings
570 Gretzky\s+\&\s+the\s+L\s*\.?\s*A\s*\.?\s+Kings
570 Los\s+Angeles\s+Kings\s+(hockey\s+)?star
570 Kings\s+star\s+Wayne\s+Gretzky
570 Gretzky.+plays\s+for\s+the\s+Kings
570 Gretzky.+play\s+hockey\s+for\s+the\s+Kings
570 Gretzky.+for\s+the\s+Los\s+Angeles\s+Kings?
570 Los\s+Angeles\s+Kings\s+center
570 Los\s+Angeles\s+Kings.+Gretzky
570 Gretzky\s+of\s+the\s+(Los\s+Angeles\s+)?Kings
570 Gretzky\s+scoreless.+Kings
570 All\s+Gretzky\s+did\s+was\s+jack\s+the\s+Kings\s+up
570 skated\s+for\s+the\s+Kings
570 the\s+Kings\s*'?\s+Wayne\s+Gretzky
570 leads\s+the\s+Kings\s+in\s+goals
570 teammate\s+at\s+Edmonton
570 ^\s*(the\s+)?Los\s+Angeles\s+Kings\s*$
570 boss\s*,?\s*Los\s+Angeles\s+Kings\s+owner
570 Wayne\s+Gretzky\s*,?\s+L\s*\.?\s*A\.?\s+Kings
570 Team\s+Canada\s+in\s+the.+Canada\s+Cup
570 good\s+hockey\s+team.+Gretzky\s+said.+Kings
570 Kings\s*,?\s+the\s+Smythe\s+Division\s+champions
571 L\s*'?\s*Oreal
571 brown\s+tint
571 Grecian\s+Formula\s+16
572 12\s+years
572 maturity\s+in\s+10\s+years
573 493
574 Twenty\s*-?\s*six
574 Twenty\s*-?\s*eight
574 ^\s*26\s*$
574 ^\s*28\s*$
574 ^\s*27\s*$
574 27\s+states?
574 28\s+states?
574 26\s+states?
574 30\s+states?
574 32\s+states?
574 33\s+states?
574 33\s+lottery\s+states?
574 ^\s*30\s*$
574 ^\s*31\s*$
574 ^\s*33\s*$
575 NCAA
576 Runaways
576 Blackhearts
577 Galbraith
577 Asprey
577 Worshipful\s+Company\s+of\s+Clockmakers
578 Who\s*'?\s*s\s+the\s+Boss
579 Ciccone
580 came\s+into\s+being\s+in\s+India
581 sunflowers?
581 iris(es)?
581 roses?
581 foxglove
581 poppies
582 KVOO
583 Parents\s+Just\s+Don\s*'?\s*t\s+Understand
584 National\s+Aeronautics\s+and\s+Space\s+Administration
585 Brinkley
586 nitrogen\s+\(?\s*N\s*\)?
586 N\s*-\s*P\s*-\s*K\s+numbers.*nitrogen
586 ^\s*N\s*$
587 One\s+Thousand\s+Nights\s+and\s+a\s+Night
587 Arabian\s+Nights?
588 1917
588 1914
589 Falls\s*,?\s+N\.?\s*Y\.?
589 ^\s*New\s+York\s*$
589 New\s+York.+Niagara
589 Niagara\s+Falls\s*,?\s+in\s+New\s+York\s+state
590 Foster
592 Grand\s+Canyon
592 China
592 Pennsylvania
592 Essex\s*,?\s+Conn
592 scenic\s+and\s+historic\s+sites
592 U\s*\.\s*S\s*\.
592 Paraguay
592 Brecon\s+Beacons\s+National\s+Park
592 Route\s+of\s+the\s+Redwoods
592 Chattanooga
592 Wales
592 Bear\s+Mountain
592 Poland
592 Nowa\s+Sol
593 1980
593 1976
594 P\s*-?\s*51
594 P\s*-?\s*3
594 FA\s*-?\s*18
594 B\s*-?\s*2
594 AC\s*-?\s*130s?
594 AWACS
595 on\s+the\s+Bay\s+of\s+Bengal
595 Thailand.+neighbor
595 Thai.+border
595 border.+Thailand
595 border.+Cambodia
595 border.+India
595 East\s+Asia
595 Moei\s+River
595 Burma.+Asia
595 Southeast\s+Asia
595 with\s+Thailand
595 linking\s*Thailand
596 43(\.4)?\s*mpg
597 Grapes\s+of\s+Wrath
597 In\s+Dubious\s+Battle
597 East\s+of\s+Eden
597 The\s+Pearl
597 Of\s+Mice\s+and\s+Men
597 Red\s+Pony
597 Cannery\s+Row
599 Leprechaun
600 Salmonella
600 could\s+lead.+rupture.+intestines?
600 infections?
600 economically\s+important\s+disease
601 about\s+50\s*,?\s*000\s+years?
601 60\s*,?\s*000\s*-?\s*years?
601 35\s*,?\s*000\s*-?\s*years?
601 100\s*,?\s*000\s*-?\s*years?
602 Adobe
603 ^\s*12\s*$
603 ^\s*11\s*$
603 12\s+gaming
603 12\s+(Atlantic\s+City\s+)?casi(nos)?
603 11\s+casinos?
604 Connecticut
604 Conn\.
605 Tereshkova
605 Vladinirovna
606 real\s*-?\s*estate\s+services\s+concern\s+based\s+in\s+New\s+York
606 private\s+real\s*-?\s*estate\s+services
606 real\s*-?\s*estate\s+services\s+unit\s+of\s+Rockefeller
606 real\s*-?\s*estate\s+broker(age)?
606 real\s*-?\s*estate\s+giant
606 real\s*-?\s*estate\s+firm
606 real\s*-?\s*estate\s+company
606 real\s*-?\s*estate\s+companies
606 real\s*-?\s*estate.*consultants
606 real\s*-?\s*estate.*commercial
606 New\s+York\s+brokerage\s+concern
606 commercial.*real\s*-?\s*estate
606 commercial\s+property\s+manager
606 commercial\s+property\s+firm
606 commercial\s+brokers?
606 industrial\s+brokerage
606 brokerage\s+firm\s+Cushman
607 hijacker
607 hijacking
607 skyjacking
607 skyjacker
607 commandeered.+flight
608 1503
609 1930\s*'?\s*s\s*,?\s+the\s+Great
609 depression\s+of\s+the\s+(early\s+)?1930\s*'?\s*s
609 1929.*1933
609 1873.*1896
610 screw
610 lever
610 inventions
610 On\s+the\s+Sphere\s+and\s+the\s+Cylinder
610 measure.*volume.+irregular
611 high\s+blood\s+pressure
611 elevation\s+in\s+blood\s+pressure
612 Davis
613 close\s+to\s+the\s+UK
613 between\s+Ireland\s+and\s+England
613 strip\s+of\s+water.+UK
614 Twain
614 Clemens
615 May\s+[45]
615 Jan\.?\)?\s+22
615 first\s+Thursday\s+of\s+(every\s+)?May
616 fool\s+police\s+radar
616 ^\s*stealth\s*$
617 contain\s+chlorophyll
617 make\s+chlorophyll
617 photosynthesis\s+occurs
617 compartment.+plant\s+cells.+contain\s+genes
617 tiny\s+components\s+of\s+plant\s+cells
617 solar\s+energy
618 Bonsai
619 900\s*-?\s*903\s*-?\s*3700
619 \(\s*606\s*\)\s*233\s*-?\s*6040
620 Hawthorne
621 Colnaghi
621 Knoedler
621 Riverrun
621 Henoch
621 Gagosian
621 Serpentine
621 Grey\s+Art\s+Gallery
621 Museum\s+of\s+Modern\s+Art
621 Metropolitan\s+Museum
621 Pierre Matisse
621 Vorpal
621 New\s+York\s+Academy\s+of\s+Art
621 Nerlino
621 Wildenstein Co
621 Bess\s+Cutler
621 Salander\s*-\s*O\s*'?\s*Reilly
621 Louver\s+Gallery\s+New\s+York
621 Guggenheim\s+Museum
621 Max\s+Protetch
621 Hammer\s+Galleries
621 Daedalus\s+Gallery
621 Lieberman\s+\&\s+Saul
622 pheasant
622 duck
622 quail
622 chukar
622 dove
622 birds?
622 small\s+game
622 Prairie\s+chickens?
623 Conservatives?
623 Tory
624 TR\s*'?\s*s\s+grand\s*-?\s*nephew\s+and\s+FDR\s*'?\s*s\s+eldest\s+son
624 distant\s+cousins
625 April\s+26\s*,?\s+1986
625 ^\s*((in\s+)?April\s+)?1986\s*$
625 ^\s*(occurred\s+)?in\s+1986\.?\s*$
625 1986\s+Chernobyl\s+nuclear
625 1986\s+Chernobyl\s+disaster
625 Chernobyl.+accident.+nuclear.+1986
625 Chernobyl.+site.+1986
625 n?uclear\s+accident.+1986
625 nuclear\s+disaster.+1986
625 ^\s*in\s+1986\s+in\s+the\s+Soviet Union\s*$
625 April\s+1986\s+after\s+the\s+reactor\s+exploded
625 radioactivity\s+spread.+April\s+1986
625 Chernobyl.+April\s*,?\s+1986
625 April\s*,?\s+1986.+Chernobyl
626 sudden infant death syndrome
627 Kerr
628 Dallas
629 Taft
630 19\s*,?\s*385\s+ft
630 19\s*,?\s*385\s+feet
631 Mahfouz
632 Tampa\s+(\(?Fla\.\)?\s+)?Tribune
632 Tampa\s+(\(?Fla\.\)?\s+)?Times
632 Tribune.*Tampa
632 ^\s*Tribune\s*$
633 70\s+years?
634 Westheimer
635 card\s+games?
635 favorite\s+games?
635 cribbage.+games?
635 game.+cribbage
636 wines?
636 woolmark\s+products
636 olive\s+oil
637 Ferrigno
638 Poe
639 Maytag
640 1889
640 one\s+hundred\s+years\s+ago
641 ^\s*75\s*$
641 ^\s*80\s*$
641 ^\s*100\s*$
641 75.+80\s+times.+minute
641 60.+75\s+beats.+minute
641 under\s+100\s+beat.+minute
642 Plant
642 Wolf
643 Wilde
644 anise\s*-?\s*flavored\s+drink
644 anise\s*-?\s*flavored\s+liquor
644 fiery\s+liquor
644 aperitifs?
644 Greek.+traditional\s+drink
644 Greek\s+cordial
644 Greek\s+liqueur
645 January 1987
645 January\s+8\s*,?\s+1987
645 01\/09\/87
645 Jan\.?\s+8\s*,?\s+1987
646 Anne.+Morrow
646 Anne\s+Lindbergh
647 200\s+miles
647 180\s+miles
647 225\s+miles
648 Cuellar
648 Bo?utr[ou]s\s*-?\s*Ghali
649 Williams
650 92
651 Hepburn
651 Harris
652 Croatia
652 Yugoslavian?
653 Indianapolis.*Ramada
653 Ramada.*Indianapolis
653 Indianapolis.*Radisson
653 Radisson.*Indianapolis
653 Howard\s+Johnson.*Indianapolis
653 Indianapolis.+Howard\s+Johnson
653 Indianapolis.*Westin
653 Westin.*Indianapolis
653 Indianapolis.*Holiday\s+Inn
653 Holiday\s+Inn.*Indianapolis
653 the\s+Canterbury
653 Holiday\s+Inn.+first\s*-?\s*rate
653 Westin.+black\s+tie
653 Ramada.+Air\s+Force
653 Air\s+Force.+Ramada
653 ^\s*Howard\s+Johnson\s*$
654 U\.S\..+Trademarks?\s+Office
654 U\.?S\.?\s+Patents?\s+and\s+Trademarks?\s+Office
654 Trademarks?\s+Office.+province
655 Monaghan
657 New\s+Zealand
658 Sierra
658 200\s+miles\s+north\s+of\s+Los\s+Angeles
658 east\s+of\s+Fresno
658 Calif.+parks?
658 California.+parks?
658 parks.+California
658 come\s+from\s+California
658 California\s+are\s+already\s+filling
658 California\s+in\s+brief
658 Central\s+California
659 Rochester\s*,?\s+Minn
659 Rochester\s*,?\s+Minnesota
659 ^\s*(in\s+)?Rochester\s*,?\s*$
659 ^\s*Minn\.?,?\s*$
659 ^\s*Minnesota,?\s*$
659 Mayo\s+Clinic\s*,?\s+Rochester
659 Clinic\s+in\s+Rochester
659 Minnesota\s*'?\s*s\s+Mayo
659 clinic\s+in\s+Minnesota
659 Rochester.+medical\s+regimen
659 Rochester.+Mayo\s+Clinic
659 Clinic.+Rochester
659 Scottsdale\s*,?\s+Ariz(ona)?
659 Jacksonville
660 301\s*-?\s*member
660 500\s*-?\s*members?
660 538\s*-?\s*members?
660 752\s*-?\s*members?
660 752\s*-?\s*seats?
660 60\s*,?\s*[06][06]0\s+senior
660 489.+49
661 DM60
661 Pounds\s+24
661 \$\s*50
661 \$?\s*100\s+to\s+\$\s*125
661 about\s+\$\s*30
662 Birmingham\s*,?\s+Ala
662 ^\s*(in\s+)?Birmingham\s*$
664 Poseidon
664 Zeus
664 Momus
664 Apollo
664 Mercury
664 Pan
664 Dionysus
664 Adonis
664 Zephyrus
664 Asclepius
664 Hermes
665 1895
666 2\.3\s+million
667 Calypso
668 Jean\s*-?\s*Michel
668 Philippe
669 island\s+of\s+Java
669 Java\s+island
669 home\s+for.+Indonesia
670 basketball.+college\s+championships?
670 NCAA.+tournament
670 NCAA.+basketball
670 college\s+basketball
670 basketball.+final\s+four
670 final\s+four.+basketball
670 NCAA.+final\s+four
670 World\s+Cup\s*'?\s*s\s+final\s+four
670 World\s+Cup\s+champion
671 ^\s*newspapers?\s*$
671 daily\s+newspapers
671 2[89]\s+newspapers
671 19\s+newspapers
671 Knight\s+newspapers?
671 Ridder.+newspapers
671 newspaper\s+publishing
671 newspaper\s+division
671 newspaper\s+chains?
671 Free\s+Press
671 excellent\s+and\s+competitive\s+newspaper\s+in\s+Detroit
671 Ridder\s+paper
671 at\s+the\s+News
671 (San\s+Jose\s+)?Mercury\s+News
671 Miami\s+Herald
671 Philadelphia\s+Inquirer
671 the\s+Inquirer
672 herding
673 basketball
674 1990\s+merger
674 ^\s*1990\s*$
674 October 1990
674 1990.+weakened
675 49\s*,?\s*675
675 49\s*,\s*000
676 ABT
676 American\s+Ballet\s+Theatre
676 American\s+Ballet\s+Theater
676 Kirov\s+Ballet
676 New\s+York\s+City\s+Ballet
676 Bolshoi\s+Ballet
676 White\s+Oaks\s+Project
677 Streets\s+of\s+San\s+Francisco
678 Waters
679 cut\s+it\s+off
679 hair\s+cut
680 Nikkei
681 Connery
682 Clydesdales?
682 draft\s+horses?
683 fish
683 eat\s+ducks\s*\?
683 ^\s*(a\s+)?ducks?\s*$
683 could\s+go\s+out\s+and\s+eat\s+a\s+bird
683 o?il\s*-?\s*contaminated\s+birds
684 12\s*th\s+century
684 medieval\s+Europe
685 1971\s*,?\s+Amtrak\s+began?
685 ^\s*1971\s*$
685 by\s+the\s+federal\s+government\s+in\s+1971
685 1971\s+with\s+what\s+was\s+intended\s+to\s+be
685 was\s+created\s+in\s+1971
686 Washington
687 1975
688 Belgians?
688 Belgium
689 ^\s*3[02]0\s*$
689 ^\s*332\s*$
689 3[02]0\s+islands
689 332\s+(coral\s*-?\s*fringed\s+)?islands
689 about\s+320
689 about\s+332
690 Hallmark
690 Second\s+Nature
690 Pink\s+Panther
690 Russ\s+Berrie
691 Knoll
691 Boots\s+pharmaceutical
691 prescription.+Boots
691 Boots.+prescription
691 Boots\s+products?
691 Boots\s+acquisition
691 BASF
692 1984
693 Bedrock
694 Reagan
695 Lincoln
696 Peru
696 Peruvian
696 Huallaga\s+Valley
696 Italy
696 Naples
696 Cuba
697 Wilson
698 Jamaican?
699 1986
700 myopia
701 ^\s*bears?\s*$
701 bear\s+of\s+little\s+brain
701 Pooh.+bears?
701 the\s+wall\s*,?\s+bear
701 famous\s+(little\s+)?bear
701 bear.+Milne
701 black\s+bear.+connection
701 bears\s+kind\s+of\s+talk
701 real\s+bear
701 sun\s+bears?
701 bears.+intelligent
702 ^\s*bears?\s*$
702 bear\s+of\s+little\s+brain
702 Pooh.+bears?
702 the\s+wall\s*,?\s+bear
702 famous\s+(little\s+)?bear
702 bear.+Milne
702 black\s+bear.+connection
702 bears\s+kind\s+of\s+talk
702 real\s+bear
702 sun\s+bears?
702 bears.+intelligent
703 ^\s*bears?\s*$
703 bear\s+of\s+little\s+brain
703 Pooh.+bears?
703 the\s+wall\s*,?\s+bear
703 famous\s+(little\s+)?bear
703 bear.+Milne
703 black\s+bear.+connection
703 bears\s+kind\s+of\s+talk
703 real\s+bear
703 sun\s+bears?
703 bears.+intelligent
704 ^\s*bears?\s*$
704 bear\s+of\s+little\s+brain
704 Pooh.+bears?
704 the\s+wall\s*,?\s+bear
704 famous\s+(little\s+)?bear
704 bear.+Milne
704 black\s+bear.+connection
704 bears\s+kind\s+of\s+talk
704 real\s+bear
704 sun\s+bears?
704 bears.+intelligent
705 Nutra\s*-?\s*Sweet
706 Nutra\s*-?\s*Sweet
707 Nutra\s*-?\s*Sweet
708 Nutra\s*-?\s*Sweet
709 hazardous\s+materials?
710 hazardous\s+materials?
711 champagne.*museum
711 champagne\s+cellars?
711 Le\s+Vigneron
711 Hotel\s+Les\s+Consuls
711 cathedrals?
711 Champagne\s+country
711 champagne\s+houses?
712 champagne.*museum
712 champagne\s+cellars?
712 Le\s+Vigneron
712 Hotel\s+Les\s+Consuls
712 cathedrals?
712 Champagne\s+country
712 champagne\s+houses?
713 champagne.*museum
713 champagne\s+cellars?
713 Le\s+Vigneron
713 Hotel\s+Les\s+Consuls
713 cathedrals?
713 Champagne\s+country
713 champagne\s+houses?
713 Valenciennes
713 TOUR\s+DE\s+FRANCE
714 champagne.*museum
714 champagne\s+cellars?
714 Le\s+Vigneron
714 Hotel\s+Les\s+Consuls
714 cathedrals?
714 Champagne\s+country
714 champagne\s+houses?
714 TOUR\s+DE\s+FRANCE
715 champagne.*museum
715 champagne\s+cellars?
715 Le\s+Vigneron
715 Hotel\s+Les\s+Consuls
715 cathedrals?
715 Champagne\s+country
715 champagne\s+houses?
715 new\s+exposition\s+hall
715 reception\s+centers
715 Red\s+Cross
715 grape\s+harvest
715 temporary\s+quarters
715 TOUR\s+DE\s+FRANCE
716 champagne.*museum
716 champagne\s+cellars?
716 new\s+exposition\s+hall
716 cathedrals?
716 Champagne\s+country
716 champagne\s+houses?
716 TOUR\s+DE\s+FRANCE
716 Cecilia\s+Bartoli\s+as\s+Rosina
717 champagne.*museum
717 champagne\s+cellars?
717 Le\s+Vigneron
717 Hotel\s+Les\s+Consuls
717 cathedrals?
717 Champagne\s+country
717 champagne\s+houses?
717 Royal\s+Champagne
717 new\s+exposition\s+hall
717 presidential\s+candidate
717 temporary\s+quarters
717 grapes\s+from\s+the\s+villages
717 buildings
717 Buchan
717 TOUR\s+DE\s+FRANCE
718 Shopping
719 Shopping
720 Shopping
721 Shopping
722 Shopping
723 Cincinnati
724 Cincinnati
724 Ohio\s*-?\s*based
725 Cincinnati
726 Cincinnati
727 Cincinn?att?i
727 Ohio\s*-?\s*based
728 400\s+micrograms
729 400\s+micrograms
730 400\s+micrograms
731 400\s+micrograms
732 Leonov
733 Leonov
734 Leonov
735 Cable\s+News\s+Network
736 Cable\s+News\s+Network
737 June\s+,?\s*1980
738 198[01]
738 1979
739 June\s+,?\s*1980
740 1980
740 1979
741 1980
742 Turner
743 Turner
744 Salt\s+Lake\s+(City\s+)?Tribune
744 Tribune\s*,\s*Salt\s+Lake
744 Deseret\s+News
744 Salt\s+Lake\s+City\s+Deseret
744 Catalyst
745 Salt\s+Lake\s+(City\s+)?Tribune
745 Tribune\s*,\s*Salt\s+Lake
745 Salt\s+Lake\s+City\s+Deseret
745 Deseret\s+News
745 Catalyst
746 chimpanzee\s+research
746 most\s+recognizable\s+living\s+scientist
746 famed\s+researcher
746 studies\s+of\s+wild\s+chimpanzees
746 celebrated\s+animal\s+behaviorist
746 famed\s+chimpanzee\s*-?\s*observer
747 chimpanzee\s+specialist
747 expert\s+on\s+chimps
747 chimpanzee\s+specialist
747 chimpanzee\s+research(er)?
747 study\s+apes.+wild
747 stud.+chimpanzees
747 Einstein.+physics.+Goodall.+behavioral\s+sciences
747 research\s+of.+chimpanzees
747 chimpanzee\s+studies
747 National\s+Geographic.+articles
747 pioneered.*study\s+of\s+primates
747 most\s+recognizable\s+living\s+scientist
747 animal\s+behaviorist
748 study\s+apes.+wild
748 chimpanzee\s+research
748 expert\s+on\s+chimps
748 chimpanzee\s+specialist
748 chimpanzee\s+researcher
748 chimpanzee\s*-?\s*observer
748 pioneered.*study\s+of\s+primates
748 scientist\s+of\s+unquestionable\s+reputation
748 stud.+chimpanzees
748 save\s+chimpanzees\s+from\s+extinction
748 Einstein.+physics.+Goodall.+behavioral\s+sciences
748 research\s+of.+chimpanzees
748 chimpanzee\s+studies
748 National\s+Geographic.+articles
748 animal\s+behaviorist
749 chimpanzee\s+research(er)?
749 study\s+apes.+wild
749 stud.+chimpanzees
749 research\s+of.+chimpanzees
749 chimpanzee\s+studies
749 National\s+Geographic.+articles
749 pioneered.*study\s+of\s+primates
749 save\s+chimpanzees\s+from\s+extinction
750 lack.*gene.*hemoglobin
750 disorder.*genetic\s+defect
750 severe\s+blood\s+anemia
750 inherited\s+disorders?
750 genetic\s+defects?
750 genetic\s+disorders?
750 blood\s+disorders?
751 severe\s+blood\s+anemia
751 blood\s+disorders?
751 inherited\s+disorders?
751 disorder.*genetic\s+defect
751 genetic\s+disorders?
751 genetic\s+defects?
751 genetic\s+defect.*blood
751 lack.*gene.*hemoglobin
752 severe\s+blood\s+anemia
752 blood\s+disorders?
752 inherited\s+disorders?
752 disorder.*genetic\s+defect
752 genetic\s+disorders?
752 genetic\s+defect.*blood
752 lack.*gene.*hemoglobin
752 genetic\s+defects?
753 Jolt
754 Jolt
755 Jolt
756 Jolt
757 Jolt
758 gaggles?
758 flocks?
759 gaggle.+geese
759 geese.+gaggles?
759 ^\s*gaggles?\s*$
759 flocks?
759 yakt
760 gaggles?
760 flocks?
761 gaggles?
761 flocks?
762 ^\s*9\s*$
762 ^\s*nine\s*$
762 nine\s+months
762 36\s+weeks?
763 ^\s*9\s*$
763 ^\s*nine\s*$
763 nine\s+months
764 ^\s*9\s*$
764 ^\s*nine\s*$
764 nine\s+months
765 ^\s*9\s*$
765 ^\s*nine\s*$
765 nine\s+months
766 Beta(max)?
766 Betacam
767 Beta(max)?
767 Betacam
767 movies?
767 8\s*-?\s*mm
767 8\)?\s*-?\s*millimeter
767 Video\s+8
767 Hi\s*-?\s*8
768 Beta(max)?
768 Betacam
769 ^\s*Indian\s*$
769 Indian\s+peoples?
769 Indian\s+culture
769 Indian\s+objects
769 Northwestern\s+Indian\s+nation
769 Nimpkish
769 Kwakiutl
769 British\s+Columbia.*Indian
769 objects\s+to\s+the\s+Indians
769 enticed\s+the\s+Indians
769 claim\s+to\s+being\s+Indian
769 Indian\s+woman.+potlatch
770 ^\s*Indian\s*$
770 Indian\s+peoples?
770 Indian\s+culture
770 Indian\s+objects
770 celebrated\s+(by\s+)?Indians
770 Northwestern\s+Indian\s+nation
770 important\s+to\s+the\s+Northwestern\s+Indian
770 Indian\s+woman.+potlatch
770 Native\s+American
770 North\s+American\s+Indian
770 British\s+Columbia.*Indian
770 Kwakiutl
771 ^\s*Indian\s*$
771 Indian peoples?
771 Indian\s+culture
771 Indian\s+objects
771 Northwestern\s+Indian\s+nation
771 Nimpkish
771 Kwakiutl
771 British\s+Columbia.*Indian
772 Boston
773 Boston
774 Boston
775 Boston
776 Boston
777 Boston
778 Princeton
779 Princeton
780 Princeton
781 cardio\s*-?\s*pulmonary\s+resuscitation
782 cardio\s*-?\s*pulmonary\s+resuscitation
783 cardio\s*-?\s*pulmonary\s+resuscitation
784 cardio\s*-?\s*pulmonary\s+resuscitation
784 applying\s+pressure\s+to\s+the\s+chest\s+to\s+keep\s+blood\s+pumping
785 Skywalker
786 Skywalker
787 cancer
787 brain\s+tumor
788 cancer
788 brain\s+tumor
789 cancer
789 brain\s+tumor
790 piano
791 piano
791 pianists?
792 piano
792 pianists?
793 piano
793 pianists?
795 30\s+November
795 November\s+30
797 Boston
798 Boston
799 Boston
800 King\s+John
800 John.*crown
800 John\s+signed\s+Magna
801 King\s+John
801 John.*crown
802 King\s+John
802 John.*crown
803 King\s+John
803 John.*crown
804 King\s+John
804 John.*crown
805 brain
806 brain
807 brain
808 floccinaucinihilipilification
808 pneumonoultramicroscopicsilicovolcanoconiosis
809 floccinaucinihilipilification
809 pneumonoultramicroscopicsilicovolcanoconiosis
810 floccinaucinihilipilification
810 pneumonoultram\s*-\s*icroscopicsilicovolcanoconiosis
812 General\s+Electric
813 General\s+Electric
814 1788
814 18th\s+century
815 ten\s+days.*January\s+4
815 ten\s+days.*4\s+Ja(nuary)?
815 26\s*December
815 December\s+26
815 day\s+after\s+Christmas
816 ten\s+days.*January 4
816 ten\s+days.*4\s+Ja(nuary)?
816 26\s*December
816 December\s+26
816 day\s+after\s+Christmas
817 ten\s+days.*January\s+4
817 ten\s+days.*4\s+Ja(nuary)?
817 26\s*December
817 December\s+26
817 day\s+after\s+Christmas
818 Las\s+Vegas
819 Las\s+Vegas
820 Las\s+Vegas
821 Las\s+Vegas
821 Nevada
822 Las\s+Vegas
823 Henson
824 Henson
825 Henson
826 Henson
827 Henson
828 900714
828 July\)?\s+14
828 14\s+July
828 14th\s+of\s+July
828 07\s*-\s*14
829 July\)?\s+14
829 14\s+July
829 14th\s+of\s+July
829 07\s*-\s*14
829 900714
830 Red\s+Crescent
831 Red\s+Crescent
832 Red\s+Crescent
833 Red\s+Crescent
834 Red\s+Crescent
835 JCS\s+Chairmanship
835 head.+joint\s+chiefs
835 chair.+joint\s+chiefs
835 chair.+joint\s+of\s+staff
835 first\s+black\s+chair(man)?
835 joint\s+chiefs\s+of\s+staff
835 chiefs?\s+of\s+staff
836 JCS\s+Chairmanship
836 head.+joint\s+chiefs
836 chair.+joint\s+chiefs
836 chair.+joint\s+of\s+staff
836 first\s+black\s+chair(man)?
836 joint\s+chiefs\s+of\s+staff
836 chiefs?\s+of\s+staff
837 economic\s+adviser
837 chief\s+adviser\s+to\s+the\s+States
837 national\s+security\s+advisers?
837 four\s*-?\s*star\s+(Army\s+)?general
837 Lt\.\s+Gen\.\s+(Colin\s+)?Powell
837 military\s+aide.+during.+1983
837 senior\s+military\s+officer
837 principal\s+adviser\s+to\s+the\s+defense\s+secretary
837 JCS\s+Chairmanship
837 head.+joint\s+chiefs
837 chair.+joint\s+chiefs
837 chair.+joint\s+of\s+staff
837 first\s+black\s+chair(man)?
837 chairman
837 ranking\s+black\s+military\s+officer
837 joint\s+chiefs\s+of\s+staff
837 chiefs?\s+of\s+staff
838 economic\s+adviser
838 chief\s+adviser\s+to\s+the\s+States
838 chairman
838 ranking\s+black\s+military\s+officer
838 national\s+security\s+advisers?
838 four\s*-?\s*star\s+(Army\s+)?general
838 Lt\.\s+Gen\.\s+(Colin\s+)?Powell
838 military\s+aide.+during.+1983
838 senior\s+military\s+officer
838 JCS\s+Chairmanship
838 head.+joint\s+chiefs
838 chair.+joint\s+chiefs
838 chair.+joint\s+of\s+staff
838 first\s+black\s+chair(man)?
838 joint\s+chiefs\s+of\s+staff
838 chiefs?\s+of\s+staff
839 summer
839 begins.*Memorial\s+Day
839 December\s+holiday
839 Thanksgiving
839 Christmas
839 May\s*-?\s*August
840 summer
840 begins.*Memorial\s+Day
840 December\s+holiday
840 Thanksgiving
840 May\s*-?\s*August
840 holiday\s+travel\s+period
840 Christmas
841 summer
841 heavy\s+holiday\s+demand
841 Christmas\s+holiday
841 begins.*Memorial\s+Day
841 December\s+holiday
841 Thanksgiving
841 May\s*-?\s*August
841 July
841 peak\s+Easter\s+travel
841 starts\s+in\s+April
841 day\s+after\s+Christmas
842 summer
842 heavy\s+holiday\s+demand
842 Christmas\s+holiday
842 begins.*Memorial\s+Day
842 December\s+holiday
842 Thanksgiving
842 May\s*-?\s*August
842 July
842 peak\s+Easter\s+travel
842 starts\s+in\s+April
842 day\s+after\s+Christmas
842 Golden Week holiday\s+in\s+early\s+May
842 Christmas\s*-?\s*New\s+Year\s+holiday\s+period
843 Jungle\s+Golf
843 Wild\s+Dunes
843 Keystone\s+Plantation
843 Jungle\s+Lagoon
843 Treasures\s+of\s+Baghdad
843 Golden\s+Dragon
844 Pirates?
845 Pirates?
846 Pirates?
847 Miami\s+police.+Orange\s+Bowl\s+Parade
847 Miami.+city\s*-?\s*owned\s+Orange\s+Bowl
847 city\s+officials,?\s+Miami
847 Miami\s+;
847 ^\s*Miami\s*$
847 ^\s*in\s+Miami\s*\.?\s*$
847 to\s+(\(REF:\s+)?Miami
847 at\s+(\(REF:\s+)?Miami
847 Miami\s*'*\s*s\s+downtown
847 Orange.*\s+in\s+.*Miami
847 Orange\s+Bowl\s*,?\s*Miami
847 Miami\s*'*\s*s Orange
848 Miami\s+police.+Orange\s+Bowl\s+Parade
848 Miami.+city\s*-?\s*owned\s+Orange\s+Bowl
848 city\s+officials,?\s+Miami
848 Miami\s+;
848 in\s+Miami\s+preparing.+Orange\s+Bowl
848 Miami.+Sugar\s+Bowl\s+in\s+New\s+Orleans
848 ^\s*Miami\s*$
848 ^\s*in\s+Miami\s*\.?\s*$
848 to\s+(\(REF:\s+)?Miami
848 at\s+(\(REF:\s+)?Miami
848 Miami\s*'*\s*s\s+downtown
848 Orange.*\s+in\s+.*Miami
848 Orange\s+Bowl\s*,?\s*Miami
848 Miami\s*'*\s*s Orange
849 Miami\s+police.+Orange\s+Bowl\s+Parade
849 Miami.+city\s*-?\s*owned\s+Orange\s+Bowl
849 city\s+officials,?\s+Miami
849 Miami\s+;
849 in\s+Miami\s+preparing.+Orange\s+Bowl
849 ^\s*Miami\s*$
849 ^\s*in\s+Miami\s*\.?\s*$
849 to\s+(\(REF:\s+)?Miami
849 at\s+(\(REF:\s+)?Miami
849 Miami\s*'*\s*s\s+downtown
849 Orange.*\s+in\s+.*Miami
849 Orange\s+Bowl\s*,?\s*Miami
849 Miami\s*'*\s*s Orange
849 Miami\s+Dolphins\s*:\s+the\s+Orange\s+Bowl
850 ^\s*Miami\s*$
850 ^\s*in\s+Miami\s*\.?\s*$
850 to\s+(\(REF:\s+)?Miami
850 at\s+(\(REF:\s+)?Miami
850 Miami\s*'?\s*s\s+downtown
850 Orange.*\s+in\s+.*Miami
850 Orange\s+Bowl\s*,\s*Miami
850 Miami\s*'?\s*s Orange
850 Dade\s+County
850 Orange\s+bowl.+hometown\s+Miami
850 Miami\s+had\s+its\s+parade
851 198[026]
851 1980s
851 1991
852 198[026]
852 1980s
853 1980s
853 198[026]
854 OEMs?
855 OEMs?
856 Lawrence\s+Township
856 Lawrenceville
856 New\s+Jersey
857 Lawrence\s+Township
857 Lawrenceville
857 New\s+Jersey
858 Lawrenceville
859 Lawrence\s+Township
859 Lawrenceville
859 New\s+Jersey
859 ^\s*N\.J\.\s*$
860 played\s+opposite\s+Cher
860 played\s+by
860 he\s+plays
860 Actors?
860 movie\s+roles?
861 played\s+opposite\s+Cher
861 played\s+by
861 he\s+plays
861 Cage\s+plays
861 Actors?
861 acting
861 movie\s+roles?
862 played\s+opposite\s+Cher
862 played\s+by
862 he\s+plays
862 Actors?
862 acting
862 movie\s+roles?
862 star.+in.+feature
863 hot
863 warm
864 hot
864 warm
865 hot
865 warm
865 thermal\s+springs
866 hot
866 warm
866 thermal\s+springs
867 Alef\s*-?\s*Beyz
868 Alef\s*-?\s*Beyz
869 Alef\s*-?\s*Beyz
870 ^\s*American\s*$
870 first\s+great\s+American
870 American\s+abstract\s+expressionism
870 ^\s*U\.S\.\s*$
870 U\.?S\.?\s+citizens?
870 American\s+;
870 American.*art.*Pollock
870 character\s+of\s+American\s+art
870 American\s+artists?
870 Pollock\s*:?\s+An\s+American
870 An\s+American\s+Saga
870 contemporary\s+painter\s+in\s+America
870 an\s+American\s+art\s+scene
870 greatest\s+painter\s+in\s+America
870 born\s+in\s+rural\s+Georgia
870 Milledgeville\s*,?\s*Ga
871 ^\s*U\.S\.\s*$
871 ^\s*American\s*$
871 American.*art.*Pollock
871 character\s+of\s+American\s+art
871 American\s+artists?
871 Pollock\s*:?\s+An\s+American
871 An\s+American\s+Saga
871 contemporary\s+painter\s+in\s+America
871 an\s+American\s+art\s+scene
871 greatest\s+painter\s+in\s+America
871 born\s+in\s+rural\s+Georgia
871 Milledgeville\s*,?\s*Ga
872 ^\s*U\.S\.\s*$
872 first\s+great\s+American
872 ^\s*American\s*$
872 American.*art.*Pollock
872 character\s+of\s+American\s+art
872 American\s+artists?
872 Pollock\s*:?\s+An\s+American
872 An\s+American\s+Saga
872 contemporary\s+painter\s+in\s+America
872 an\s+American\s+art\s+scene
872 greatest\s+painter\s+in\s+America
872 born\s+in\s+rural\s+Georgia
872 Milledgeville\s*,?\s*Ga
873 Lexington
874 Lexington
875 Lexington
875 ^\s*Kentucky\s+is\s*$
876 Lexington
877 Lexington
878 Magnolia
879 Magnolia
880 Magnolia
881 Magnolia
882 Magnolia
883 NSU
883 Mazda
883 Moll[ae]r
883 Matsuda
883 Norton
883 Deere
883 Tahoe
884 NSU
884 Mazda
885 NSU
885 Mazda
886 NSU
886 Mazda
886 Moll[ae]r
886 Matsuda
886 Norton
886 Deere
886 Tahoe
887 NSU
887 Mazda
887 Moll[ae]r
887 Matsuda
887 Norton
887 Deere
887 Tahoe
888 Everest
889 Everest
890 Everest
891 Everest
892 Rushmore
892 carve.*mountain
892 carve.*granite\s+peak
892 granite\s+carvings?
892 project\s+of\s+carving
892 triceratops?
892 Custer
892 horses
892 gambling
892 visages?
892 Roosevelt
892 Jefferson
892 motorcycle\s+mecca
892 Sioux.+traditional\s+stories
893 Rushmore
893 carve.*mountain
893 carve.*granite\s+peak
893 granite\s+carvings?
893 project\s+of\s+carving
893 triceratops?
893 horses
893 gambling
893 gaming\s+parlors
893 visages?
893 Roosevelt
893 Jefferson
893 motorcycle\s+mecca
893 motorcyclists
893 Motor\s+Classic\s+Board
893 races
893 Harney\s+Peak
893 Wild\s+Bill\s+Hickok
893 Sioux.+traditional\s+stories
893 gold
893 Black\s+Hills\s+Dome
893 Badlands
893 Custer\s+State\s+Park
893 Custer\s*,?\s+a\s+Black\s+Hills\s+tourist
893 Potato\s+Creek\s+Johnny
893 Bear\s+Butte
| {
"pile_set_name": "Github"
} |
{
"action": {
"hacking": {
"variety": [
"Unknown"
],
"vector": [
"Web application"
]
}
},
"actor": {
"external": {
"country": [
"CA"
],
"motive": [
"Unknown"
],
"region": [
"019021"
],
"variety": [
"Organized crime"
]
}
},
"asset": {
"assets": [
{
"amount": 1,
"variety": "S - Web application"
}
],
"cloud": [
"Unknown"
]
},
"attribute": {
"confidentiality": {
"data": [
{
"variety": "Personal"
}
],
"data_disclosure": "Potentially",
"data_victim": [
"Customer"
],
"state": [
"Stored"
]
},
"integrity": {
"variety": [
"Modify data"
]
}
},
"confidence": "Medium",
"discovery_method": {
"external": {
"variety": [
"Customer"
]
}
},
"incident_id": "e2b312f0-af5f-11e7-b9af-6daebc71dd3b",
"plus": {
"analysis_status": "Validated",
"analyst": "cdander",
"created": "2017-10-12T19:25:00.221Z",
"dbir_year": 2018,
"github": "8804",
"master_id": "b7bfb62f-2fef-461b-8d55-5c40bb07a1d3",
"modified": "2017-10-25T22:11:57.911Z"
},
"reference": "https://www.databreaches.net/department-of-national-defence-investigating-possible-hack-of-its-recruiting-site/",
"schema_version": "1.3.4",
"security_incident": "Confirmed",
"source_id": "vcdb",
"summary": "Canadian Department of National Defence investigating possible hack of its recruiting site",
"targeted": "Targeted",
"timeline": {
"compromise": {
"unit": "Days",
"value": 1
},
"discovery": {
"unit": "Days",
"value": 1
},
"incident": {
"day": 17,
"month": 11,
"year": 2016
}
},
"victim": {
"country": [
"CA"
],
"employee_count": "Unknown",
"industry": "928110",
"region": [
"019021"
],
"victim_id": "Department of National Defence"
}
} | {
"pile_set_name": "Github"
} |
"""
MIT License
Copyright (c) 2019 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import logging
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
logger = logging.getLogger('hrnet_backbone')
__all__ = ['hrnet18', 'hrnet32', 'hrnet48']
# URLs of ImageNet-pretrained HRNet classification checkpoints, keyed by
# architecture name (the OneDrive links published by the HRNet authors).
model_urls = {
    # all the checkpoints come from https://github.com/HRNet/HRNet-Image-Classification
    'hrnet18': 'https://opr0mq.dm.files.1drv.com/y4mIoWpP2n-LUohHHANpC0jrOixm1FZgO2OsUtP2DwIozH5RsoYVyv_De5wDgR6XuQmirMV3C0AljLeB-zQXevfLlnQpcNeJlT9Q8LwNYDwh3TsECkMTWXCUn3vDGJWpCxQcQWKONr5VQWO1hLEKPeJbbSZ6tgbWwJHgHF7592HY7ilmGe39o5BhHz7P9QqMYLBts6V7QGoaKrr0PL3wvvR4w',
    'hrnet32': 'https://opr74a.dm.files.1drv.com/y4mKOuRSNGQQlp6wm_a9bF-UEQwp6a10xFCLhm4bqjDu6aSNW9yhDRM7qyx0vK0WTh42gEaniUVm3h7pg0H-W0yJff5qQtoAX7Zze4vOsqjoIthp-FW3nlfMD0-gcJi8IiVrMWqVOw2N3MbCud6uQQrTaEAvAdNjtjMpym1JghN-F060rSQKmgtq5R-wJe185IyW4-_c5_ItbhYpCyLxdqdEQ',
    'hrnet48': 'https://optgaw.dm.files.1drv.com/y4mWNpya38VArcDInoPaL7GfPMgcop92G6YRkabO1QTSWkCbo7djk8BFZ6LK_KHHIYE8wqeSAChU58NVFOZEvqFaoz392OgcyBrq_f8XGkusQep_oQsuQ7DPQCUrdLwyze_NlsyDGWot0L9agkQ-M_SfNr10ETlCF5R7BdKDZdupmcMXZc-IE3Ysw1bVHdOH4l-XEbEKFAi6ivPUbeqlYkRMQ'
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding.

    Padding equals the dilation, so the spatial size is preserved when
    stride is 1.  Bias is omitted because a norm layer is expected to
    follow the convolution.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-layer residual block (3x3 -> 3x3) with an identity shortcut.

    ``downsample``, when given, is applied to the input to match the
    shape of the main branch before the residual addition.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        norm_layer = norm_layer or nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample path) carry the stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Three-layer residual bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand).

    The output channel count is ``planes * expansion``; ``downsample``,
    when given, adapts the input for the residual addition.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        norm_layer = norm_layer or nn.BatchNorm2d
        # Width of the inner 3x3 convolution (ResNeXt-style scaling).
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample path) carry the stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class HighResolutionModule(nn.Module):
def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
num_channels, fuse_method, multi_scale_output=True, norm_layer=None):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches, blocks, num_blocks, num_inchannels, num_channels)
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.norm_layer = norm_layer
self.num_inchannels = num_inchannels
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches, blocks, num_blocks, num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=True)
def _check_branches(self, num_branches, blocks, num_blocks,
num_inchannels, num_channels):
if num_branches != len(num_blocks):
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
num_branches, len(num_blocks))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
num_branches, len(num_channels))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_inchannels):
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
num_branches, len(num_inchannels))
logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.num_inchannels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.norm_layer(num_channels[branch_index] * block.expansion),
)
layers = []
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index], stride, downsample, norm_layer=self.norm_layer))
self.num_inchannels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index], norm_layer=self.norm_layer))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
num_inchannels = self.num_inchannels
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_inchannels[i],
1,
1,
0,
bias=False),
self.norm_layer(num_inchannels[i])))
elif j == i:
fuse_layer.append(None)
else:
conv3x3s = []
for k in range(i-j):
if k == i - j - 1:
num_outchannels_conv3x3 = num_inchannels[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
self.norm_layer(num_outchannels_conv3x3)))
else:
num_outchannels_conv3x3 = num_inchannels[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
self.norm_layer(num_outchannels_conv3x3),
nn.ReLU(inplace=True)))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_inchannels(self):
return self.num_inchannels
    def forward(self, x):
        """Run each branch, then fuse features across resolutions.

        Args:
            x: list of per-branch feature tensors (one per resolution).
               NOTE(review): entries are assumed to be NCHW tensors — the
               interpolation below reads shape[-1]/shape[-2] as W/H.

        Returns:
            list of fused feature tensors (one per output branch, or a
            single-element list when there is only one branch).
        """
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        # Run each branch on its own resolution (in place on the input list).
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            # Start the sum with branch 0's contribution to output branch i.
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    # Same resolution: identity contribution.
                    y = y + x[j]
                elif j > i:
                    # Lower-resolution input: project (done in fuse layer)
                    # then upsample to branch i's spatial size.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode='bilinear',
                        align_corners=True
                    )
                else:
                    # Higher-resolution input: strided-conv downsample chain.
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
# Registry mapping the config 'BLOCK' string to the residual block class
# used when building stages (see _make_layer / _make_stage).
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
    """HRNet backbone that maintains parallel multi-resolution streams.

    Built from a ``cfg`` dict with 'STAGE1'..'STAGE4' sections, each
    providing 'NUM_MODULES', 'NUM_BRANCHES', 'NUM_BLOCKS', 'NUM_CHANNELS',
    'BLOCK' and 'FUSE_METHOD' entries (see _make_stage). ``forward``
    returns a dict of features at strides 4/8/16/32 ('res2'..'res5').
    """
    def __init__(self,
                 cfg,
                 norm_layer=None):
        super(HighResolutionNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.norm_layer = norm_layer
        # Stem: two stride-2 3x3 convs => overall stride 4 before stage 1.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = self.norm_layer(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = self.norm_layer(64)
        self.relu = nn.ReLU(inplace=True)
        # stage 1: a single plain residual stage on the stem output.
        self.stage1_cfg = cfg['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion*num_channels
        # stage 2: introduce a second (lower-resolution) branch.
        self.stage2_cfg = cfg['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        # Scale configured widths by the block's channel expansion factor.
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)
        # stage 3: add a third branch.
        self.stage3_cfg = cfg['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)
        # stage 4: add a fourth branch; keep all resolutions in the output.
        self.stage4_cfg = cfg['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Adapt the previous stage's branches to the next stage's layout.

        Existing branches get a 3x3 conv only when the channel count
        changes (None otherwise); each newly added branch is derived from
        the last (lowest-resolution) previous branch via stride-2 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    # Channel adapter for an existing branch.
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        self.norm_layer(num_channels_cur_layer[i]),
                        nn.ReLU(inplace=True)))
                else:
                    # Same width: identity (handled as None in forward).
                    transition_layers.append(None)
            else:
                # New branch: downsample the last previous branch
                # (i + 1 - num_branches_pre) times by stride-2 convs.
                conv3x3s = []
                for j in range(i+1-num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    # Only the final conv in the chain changes channels.
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        self.norm_layer(outchannels),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; first one may downsample/project."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # 1x1 projection shortcut so the residual sum shapes match.
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample, norm_layer=self.norm_layer))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build a stage as a chain of HighResolutionModules.

        Returns the stage (nn.Sequential) and the per-branch output channel
        counts, which feed the next stage's transition layer.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # multi_scale_output only applies to the last module of a stage;
            # all earlier modules always emit every resolution.
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output,
                                     norm_layer=self.norm_layer)
            )
            # Fusion may change channel counts; carry them into the next module.
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels
    def forward(self, x):
        """Compute multi-scale features for an input image batch.

        Args:
            x: input tensor fed to the 3-channel stem conv.

        Returns:
            dict with keys 'res2'..'res5' at strides 4, 8, 16, 32.
        """
        # Stem: stride-4 feature map.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Stage 2: split into branches via transition1.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        # Stage 3: new branches come from the last previous output.
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                if i < self.stage2_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition2[i](y_list[i]))
                else:
                    x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        # Stage 4: same pattern with a fourth branch.
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                if i < self.stage3_cfg['NUM_BRANCHES']:
                    x_list.append(self.transition3[i](y_list[i]))
                else:
                    x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        outputs = {}
        # See note [TorchScript super()]
        outputs['res2'] = x[0]  # stride 4
        outputs['res3'] = x[1]  # stride 8
        outputs['res4'] = x[2]  # stride 16
        outputs['res5'] = x[3]  # stride 32
        return outputs
def _hrnet(arch, pretrained, progress, **kwargs):
    """Instantiate an HRNet variant by config name, optionally pretrained.

    Args:
        arch: key into MODEL_CONFIGS (e.g. 'hrnet18', 'hrnet48').
        pretrained: if truthy, download and load pretrained weights.
        progress: passed to load_state_dict_from_url (progress bar).
        **kwargs: forwarded to HighResolutionNet (e.g. norm_layer).
    """
    # Prefer the package-relative config; fall back to the absolute import
    # when this module is run outside the package layout.
    try:
        from ...config.hrnet_config import MODEL_CONFIGS
    except ImportError:
        from segmentation.config.hrnet_config import MODEL_CONFIGS
    model = HighResolutionNet(MODEL_CONFIGS[arch], **kwargs)
    if pretrained:
        # Env var switch for the Mapillary-pretrained W48 checkpoint.
        if int(os.environ.get("mapillary_pretrain", 0)):
            logger.info("load the mapillary pretrained hrnet-w48 weights.")
            model_url = model_urls['hrnet48_mapillary_pretrain']
        else:
            model_url = model_urls[arch]
        state_dict = load_state_dict_from_url(model_url,
                                              progress=progress)
        # strict=False: tolerate classifier-head keys absent from backbone.
        model.load_state_dict(state_dict, strict=False)
    return model
def hrnet18(pretrained=False, progress=True, **kwargs):
    """Construct an HRNet-18 backbone.

    Args:
        pretrained: load pretrained weights when True.
        progress: show a download progress bar.
        **kwargs: forwarded to the HighResolutionNet constructor.
    """
    return _hrnet('hrnet18', pretrained, progress, **kwargs)
def hrnet32(pretrained=False, progress=True, **kwargs):
    """Construct an HRNet-32 backbone.

    Args:
        pretrained: load pretrained weights when True.
        progress: show a download progress bar.
        **kwargs: forwarded to the HighResolutionNet constructor.
    """
    return _hrnet('hrnet32', pretrained, progress, **kwargs)
def hrnet48(pretrained=False, progress=True, **kwargs):
    """Construct an HRNet-48 backbone.

    Args:
        pretrained: load pretrained weights when True.
        progress: show a download progress bar.
        **kwargs: forwarded to the HighResolutionNet constructor.
    """
    return _hrnet('hrnet48', pretrained, progress, **kwargs)
| {
"pile_set_name": "Github"
} |
---
title: 运算符过程
ms.date: 07/20/2015
helpviewer_keywords:
- Visual Basic code, procedures
- procedures [Visual Basic], operator
- Visual Basic code, operators
- syntax [Visual Basic], Operator procedures
- operators [Visual Basic], overloading
- overloaded operators [Visual Basic]
- operator overloading
- operator procedures
ms.assetid: 8c513d38-246b-4fb7-8b75-29e1364e555b
ms.openlocfilehash: a1dd183570c8aa50efff85bdaebef90bd3b0120f
ms.sourcegitcommit: f8c270376ed905f6a8896ce0fe25b4f4b38ff498
ms.translationtype: MT
ms.contentlocale: zh-CN
ms.lasthandoff: 06/04/2020
ms.locfileid: "84364313"
---
# <a name="operator-procedures-visual-basic"></a>运算符过程 (Visual Basic)
运算符过程是一系列 Visual Basic 语句,用于定义标准运算符(如 `*` 、 `<>` 或 `And` )在已定义的类或结构中的行为。 这也称为*运算符重载*。
## <a name="when-to-define-operator-procedures"></a>何时定义操作员过程
定义了类或结构后,可以将变量声明为该类或结构的类型。 有时,此类变量需要作为表达式的一部分参与操作。 为此,它必须是运算符的操作数。
Visual Basic 仅定义其基本数据类型的运算符。 如果两个操作数或其中一个操作数为类或结构的类型,则可以定义运算符的行为。
有关详细信息,请参阅[Operator 语句](../../../language-reference/statements/operator-statement.md)。
## <a name="types-of-operator-procedure"></a>运算符过程的类型
操作员过程可以是以下类型之一:
- 一元运算符的定义,其中的参数是你的类或结构的类型。
- 二元运算符的定义,其中至少有一个参数是你的类或结构的类型。
- 转换运算符的定义,其中的参数属于你的类或结构的类型。
- 返回类或结构的类型的转换运算符的定义。
转换运算符始终为一元运算符,并且你始终使用 `CType` 作为正在定义的运算符。
## <a name="declaration-syntax"></a>声明语法
声明运算符过程的语法如下所示:
```vb
Public Shared [Widening | Narrowing] Operator operatorsymbol ( operand1 [, operand2 ]) As datatype
' Statements of the operator procedure.
End Operator
```
`Widening` `Narrowing` 仅对类型转换运算符使用或关键字。 对于类型转换运算符,运算符符号始终是[CType 函数](../../../language-reference/functions/ctype-function.md)。
您可以声明两个操作数来定义一个二元运算符,并声明一个操作数来定义一元运算符,包括类型转换运算符。 必须声明所有操作数 `ByVal` 。
声明每个操作数的方式与声明[Sub 过程](./sub-procedures.md)的参数的方式相同。
### <a name="data-type"></a>数据类型
由于你在定义的类或结构上定义运算符,因此至少一个操作数必须是该类或结构的数据类型。 对于类型转换运算符,操作数或返回类型必须是类或结构的数据类型。
有关更多详细信息,请参阅[Operator 语句](../../../language-reference/statements/operator-statement.md)。
## <a name="calling-syntax"></a>调用语法
通过在表达式中使用运算符符号,可以隐式调用运算符过程。 提供操作数的方式与预定义运算符相同。
对运算符过程的隐式调用的语法如下所示:
`Dim testStruct As` *structurename*
`Dim testNewStruct As` *structurename* `= testStruct`*operatorsymbol* `10`
### <a name="illustration-of-declaration-and-call"></a>声明和调用的插图
下面的结构将已签名的128位整数值存储为构成的高序位和低序位部分。 它定义 `+` 运算符以添加两个 `veryLong` 值并生成一个结果 `veryLong` 值。
[!code-vb[VbVbcnProcedures#23](~/samples/snippets/visualbasic/VS_Snippets_VBCSharp/VbVbcnProcedures/VB/Class1.vb#23)]
下面的示例演示对 `+` 上定义的运算符的典型调用 `veryLong` 。
[!code-vb[VbVbcnProcedures#24](~/samples/snippets/visualbasic/VS_Snippets_VBCSharp/VbVbcnProcedures/VB/Class1.vb#24)]
## <a name="see-also"></a>另请参阅
- [过程](./index.md)
- [Sub 过程](./sub-procedures.md)
- [Function 过程](./function-procedures.md)
- [Property 过程](./property-procedures.md)
- [过程形参和实参](./procedure-parameters-and-arguments.md)
- [Operator Statement](../../../language-reference/statements/operator-statement.md)
- [如何:定义运算符](./how-to-define-an-operator.md)
- [如何:定义转换运算符](./how-to-define-a-conversion-operator.md)
- [如何:调用运算符过程](./how-to-call-an-operator-procedure.md)
- [如何:使用定义运算符的类](./how-to-use-a-class-that-defines-operators.md)
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" standalone="no" ?>
<!DOCTYPE pov SYSTEM "/usr/share/cgc-docs/replay.dtd"><pov>
<cbid>CROMU_00011</cbid>
<replay>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>hT7APAY = |"WudXCW","NNuBY","nvUU","d9","q6zHJ","5qelmh","h5bk"|-|"4MiX5","LAGtNS","qc5"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>NwzGc7a = |"dYbc","MrhY","AVJ","zWqmG","yipbRO1","3O17LMl","WK9SECn","ux"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>8CK97fu = |"AVJ","MrhY","ux","dYbc"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>8CK97fu@NwzGc7a\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>TRUE\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>YIL3KOW = |"QY","OsFm","U0BI","yinOIRc"|-|"C","q","QY"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>vRoahM = YIL3KOW ~ NwzGc7a\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>PzGNk = NwzGc7a ~ hT7APAY\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>OHcAP = |"Nj","kPJO8","Zxtb","GY9urY","hwJX","VPG1RRO","zuJasgt"|-|"zuJasgt","Nj","VPG1RRO","visoxde","kPJO8","Zxtb","fenR25"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>dHAXF2 = |"JtV94","HxZiaA","aQbkjD","gTm"|^|"Bc","aQbkjD","tcxANo8","gTm","aq"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>1cXuo = |"ct67E","gDR","mhze","fd2","H4Fn","Bx"|^|"gDR","H4Fn","42Z","XG2Bl","Vsn56P"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>TQopQ = vRoahM ^ PzGNk\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>hgUppYj = vRoahM ~ TQopQ\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>hhOGtN = |"r2XcU","MZ","V","lu","R219T"|~|"QmN7HYz","mDvBHCk","R219T","W9pV","f13ZG9X","MZ","r2XcU"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>Al6aOf = |"a3R","0TMKvk4","ChAHD"|-|"oVBNZCP","ytj","gvN4Mdt"|\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>4CLxBs = OHcAP ~ Al6aOf\n</data></write>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>.p\n</data></write>
<read echo="ascii"><delim>\n</delim><match><data>hT7APAY = |"WudXCW","NNuBY","nvUU","d9","q6zHJ","5qelmh","h5bk"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>NwzGc7a = |"dYbc","MrhY","AVJ","zWqmG","yipbRO1","3O17LMl","WK9SECn","ux"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>8CK97fu = |"AVJ","MrhY","ux","dYbc"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>YIL3KOW = |"OsFm","U0BI","yinOIRc"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>vRoahM = |"OsFm","U0BI","yinOIRc","dYbc","MrhY","AVJ","zWqmG","yipbRO1","3O17LMl","WK9SECn","ux"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>PzGNk = |"dYbc","MrhY","AVJ","zWqmG","yipbRO1","3O17LMl","WK9SECn","ux","WudXCW","NNuBY","nvUU","d9","q6zHJ","5qelmh","h5bk"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>OHcAP = |"GY9urY","hwJX"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>dHAXF2 = |"aQbkjD","gTm"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>1cXuo = |"gDR","H4Fn"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>TQopQ = |"dYbc","MrhY","AVJ","zWqmG","yipbRO1","3O17LMl","WK9SECn","ux"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>hgUppYj = |"OsFm","U0BI","yinOIRc"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>hhOGtN = |"V","lu","QmN7HYz","mDvBHCk","W9pV","f13ZG9X"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>Al6aOf = |"a3R","0TMKvk4","ChAHD"|\n</data></match></read>
<read echo="ascii"><delim>\n</delim><match><data>4CLxBs = |"GY9urY","hwJX","a3R","0TMKvk4","ChAHD"|\n</data></match></read>
<read echo="ascii"><delim> </delim><match><data>\x3E </data></match></read>
<write echo="ascii"><data>.l\n</data></write>
</replay>
</pov>
| {
"pile_set_name": "Github"
} |
// CodeMirror, copyright (c) by Marijn Haverbeke and others
// Distributed under an MIT license: http://codemirror.net/LICENSE
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("ecl", function(config) {
function words(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
function metaHook(stream, state) {
if (!state.startOfLine) return false;
stream.skipToEnd();
return "meta";
}
var indentUnit = config.indentUnit;
var keyword = words("abs acos allnodes ascii asin asstring atan atan2 ave case choose choosen choosesets clustersize combine correlation cos cosh count covariance cron dataset dedup define denormalize distribute distributed distribution ebcdic enth error evaluate event eventextra eventname exists exp failcode failmessage fetch fromunicode getisvalid global graph group hash hash32 hash64 hashcrc hashmd5 having if index intformat isvalid iterate join keyunicode length library limit ln local log loop map matched matchlength matchposition matchtext matchunicode max merge mergejoin min nolocal nonempty normalize parse pipe power preload process project pull random range rank ranked realformat recordof regexfind regexreplace regroup rejected rollup round roundup row rowdiff sample set sin sinh sizeof soapcall sort sorted sqrt stepped stored sum table tan tanh thisnode topn tounicode transfer trim truncate typeof ungroup unicodeorder variance which workunit xmldecode xmlencode xmltext xmlunicode");
var variable = words("apply assert build buildindex evaluate fail keydiff keypatch loadxml nothor notify output parallel sequential soapcall wait");
var variable_2 = words("__compressed__ all and any as atmost before beginc++ best between case const counter csv descend encrypt end endc++ endmacro except exclusive expire export extend false few first flat from full function group header heading hole ifblock import in interface joined keep keyed last left limit load local locale lookup macro many maxcount maxlength min skew module named nocase noroot noscan nosort not of only opt or outer overwrite packed partition penalty physicallength pipe quote record relationship repeat return right scan self separator service shared skew skip sql store terminator thor threshold token transform trim true type unicodeorder unsorted validate virtual whole wild within xml xpath");
var variable_3 = words("ascii big_endian boolean data decimal ebcdic integer pattern qstring real record rule set of string token udecimal unicode unsigned varstring varunicode");
var builtin = words("checkpoint deprecated failcode failmessage failure global independent onwarning persist priority recovery stored success wait when");
var blockKeywords = words("catch class do else finally for if switch try while");
var atoms = words("true false null");
var hooks = {"#": metaHook};
var isOperatorChar = /[+\-*&%=<>!?|\/]/;
var curPunc;
function tokenBase(stream, state) {
var ch = stream.next();
if (hooks[ch]) {
var result = hooks[ch](stream, state);
if (result !== false) return result;
}
if (ch == '"' || ch == "'") {
state.tokenize = tokenString(ch);
return state.tokenize(stream, state);
}
if (/[\[\]{}\(\),;\:\.]/.test(ch)) {
curPunc = ch;
return null;
}
if (/\d/.test(ch)) {
stream.eatWhile(/[\w\.]/);
return "number";
}
if (ch == "/") {
if (stream.eat("*")) {
state.tokenize = tokenComment;
return tokenComment(stream, state);
}
if (stream.eat("/")) {
stream.skipToEnd();
return "comment";
}
}
if (isOperatorChar.test(ch)) {
stream.eatWhile(isOperatorChar);
return "operator";
}
stream.eatWhile(/[\w\$_]/);
var cur = stream.current().toLowerCase();
if (keyword.propertyIsEnumerable(cur)) {
if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
return "keyword";
} else if (variable.propertyIsEnumerable(cur)) {
if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
return "variable";
} else if (variable_2.propertyIsEnumerable(cur)) {
if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
return "variable-2";
} else if (variable_3.propertyIsEnumerable(cur)) {
if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
return "variable-3";
} else if (builtin.propertyIsEnumerable(cur)) {
if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
return "builtin";
} else { //Data types are of from KEYWORD##
var i = cur.length - 1;
while(i >= 0 && (!isNaN(cur[i]) || cur[i] == '_'))
--i;
if (i > 0) {
var cur2 = cur.substr(0, i + 1);
if (variable_3.propertyIsEnumerable(cur2)) {
if (blockKeywords.propertyIsEnumerable(cur2)) curPunc = "newstatement";
return "variable-3";
}
}
}
if (atoms.propertyIsEnumerable(cur)) return "atom";
return null;
}
function tokenString(quote) {
return function(stream, state) {
var escaped = false, next, end = false;
while ((next = stream.next()) != null) {
if (next == quote && !escaped) {end = true; break;}
escaped = !escaped && next == "\\";
}
if (end || !escaped)
state.tokenize = tokenBase;
return "string";
};
}
function tokenComment(stream, state) {
var maybeEnd = false, ch;
while (ch = stream.next()) {
if (ch == "/" && maybeEnd) {
state.tokenize = tokenBase;
break;
}
maybeEnd = (ch == "*");
}
return "comment";
}
function Context(indented, column, type, align, prev) {
this.indented = indented;
this.column = column;
this.type = type;
this.align = align;
this.prev = prev;
}
function pushContext(state, col, type) {
return state.context = new Context(state.indented, col, type, null, state.context);
}
function popContext(state) {
var t = state.context.type;
if (t == ")" || t == "]" || t == "}")
state.indented = state.context.indented;
return state.context = state.context.prev;
}
  // CodeMirror mode interface: startState/token/indent plus electricChars.
  return {
    startState: function(basecolumn) {
      return {
        tokenize: null,                  // null => use tokenBase
        context: new Context((basecolumn || 0) - indentUnit, 0, "top", false),
        indented: 0,
        startOfLine: true
      };
    },
    token: function(stream, state) {
      var ctx = state.context;
      if (stream.sol()) {
        // Alignment is decided lazily: fixed to false if nothing followed
        // the opening token on its line.
        if (ctx.align == null) ctx.align = false;
        state.indented = stream.indentation();
        state.startOfLine = true;
      }
      if (stream.eatSpace()) return null;
      curPunc = null;
      var style = (state.tokenize || tokenBase)(stream, state);
      if (style == "comment" || style == "meta") return style;
      if (ctx.align == null) ctx.align = true;
      // Maintain the context stack from the punctuation just tokenized.
      if ((curPunc == ";" || curPunc == ":") && ctx.type == "statement") popContext(state);
      else if (curPunc == "{") pushContext(state, stream.column(), "}");
      else if (curPunc == "[") pushContext(state, stream.column(), "]");
      else if (curPunc == "(") pushContext(state, stream.column(), ")");
      else if (curPunc == "}") {
        // Unwind any open statements, then the brace context itself.
        while (ctx.type == "statement") ctx = popContext(state);
        if (ctx.type == "}") ctx = popContext(state);
        while (ctx.type == "statement") ctx = popContext(state);
      }
      else if (curPunc == ctx.type) popContext(state);
      else if (ctx.type == "}" || ctx.type == "top" || (ctx.type == "statement" && curPunc == "newstatement"))
        pushContext(state, stream.column(), "statement");
      state.startOfLine = false;
      return style;
    },
    indent: function(state, textAfter) {
      // No auto-indent inside strings/comments.
      if (state.tokenize != tokenBase && state.tokenize != null) return 0;
      var ctx = state.context, firstChar = textAfter && textAfter.charAt(0);
      if (ctx.type == "statement" && firstChar == "}") ctx = ctx.prev;
      var closing = firstChar == ctx.type;
      if (ctx.type == "statement") return ctx.indented + (firstChar == "{" ? 0 : indentUnit);
      else if (ctx.align) return ctx.column + (closing ? 0 : 1);
      else return ctx.indented + (closing ? 0 : indentUnit);
    },
    electricChars: "{}"                  // re-indent when these are typed
  };
});
// Register the MIME type so editors can select this mode by content type.
CodeMirror.defineMIME("text/x-ecl", "ecl");
});
| {
"pile_set_name": "Github"
} |
counter bytes_total {operation=sent} 62793673 2011-02-23T05:54:10Z
counter bytes_total {operation=received} 975017 2011-02-23T05:54:10Z
counter connections_total 52 2011-02-22T21:54:13Z
counter connection-time_total 1181011 2011-02-23T05:54:10Z
counter transfers_total {operation=send,module=module} 2 2011-02-23T05:50:32Z
counter transfers_total {operation=send,module=repo} 25 2011-02-23T05:51:14Z
gauge foo {label=}
counter bar
gauge floaty 37.1 2017-06-15T18:09:37Z
text stringy hi 2018-06-16T18:04:00Z
| {
"pile_set_name": "Github"
} |
# 왜 공부 해야하나요? (for/if만 알아도 되는데)
> 아래 글은 [제 브런치에 올렸던 글](https://brunch.co.kr/@jojoldu/16)을 기록 차원에서 블로그로 옮긴 글입니다.

```
"정기씨 아까 제가 꽃을 버려서 슬펐나요?
그건 신발이 진창에 빠졌을때 만큼 슬펐나요,
아니면 가까운 이가 아플때만큼 슬펐나요?
어떤 슬픔은 어렴풋한 슬픔이고, 어떤 슬픔은 처절한 슬픔이죠.
소소한 슬픔도,
아련한 슬픔도,
잊혀가는 슬픔도,
문득 기억이 떠올라 때때로 가슴이 아파지는 슬픔까지,
같은 슬픔조차도 사실은 전부 달라요.
책을 읽고 풍부한 단어를 알게 된다는건 슬픔의 저 끝에서부터, 기쁨의 저 끝까지.
자신이 가지고 있는 수많은 감정들의 결을 하나하나 구분해내는거에요.
정확히 그만큼의 감정을
정확히 그만큼의 단어로 집어내서
자신의 마음을 선명하게 들여다 보는거죠
내가 얼만큼 슬픈지, 얼만큼 기쁜지.
내가 무엇에 행복하고, 무엇에 불행한지.
자신의 마음이 자신을 위한 목적을 결정하도록.
자신의 마음을 타인에게 정확히 전달하도록.
나무도 바위도 없이 숨을 곳 하나없는 산 한복판에서 매에게 쫓기는 까투리의 마음이,
망망대해 한가운데 배에 곡식 가득 싣고
노도 잃고,
닻도 잃고,
돛줄도 끊어지고,
돛대도 꺾어지고,
바람에 물결치고,
안개는 자욱이 뒤섞이며,
사방은 어두워지고,
풍알 일 노을 뜨는데,
해적을 만난 사공의 마음이
엊그제 임을 잃은 제 마음에 비할수 있을까요.
같은 단어를 안다면 감정의 의미를 공유할 수 있고,
같은 문장을 이해할 수 있다면 감정의 흐름을 공유할 수 있어요.
그리고 그건 서로를 온전히 이해할 수 있게 만들죠."
```
[가담항설 90화에서](https://comic.naver.com/webtoon/detail.nhn?titleId=670144&no=93&weekday=thu)
프로그래밍은 for, if만 알면 된다 라고 얘기하며 공부하지 않는 사람들을 볼때마다 어떻게 표현해야 와닿을수 있을까 종종 생각했었다.
물론 유지보수성, 확장성 등등 변화에 쉽게 대응하기 위해서 라고 대답했지만, 교과서적인 이야기로 받아들이길래 더이상 해줄 수 있는 말은 없었다.
그러다가 웹툰을 보고 "와 이거구나" 라고 생각이 들었다.
내가 얼마만큼 힘든지 타인이 공감하려면 그만큼 단어로, 문장으로 잘 표현할 수 있어야 한다.
아는 단어와 문장이 부족하다면 결국 좋다/싫다/힘들다 외에는 표현할 방법이 없다.
마찬가지로 프로그래밍에서도 좁게는 제네릭, Enum, 예외처리부터 넓게는 자료구조, 객체지향, 디자인패턴까지, 공부를 하면 할수록 **내 의도를, 내 목표를 명확하게 표현할 수 있다**.
확장해서 쓰게 하고 싶었던 곳은 어디인지,
다른 사람이 못쓰게 막고 싶었던 방식은 무엇인지,
무시해도 될 것은 무엇인지 등등
이 코드에서 내가 하고자 하는 이야기가 무엇인지를 명확히 표현할 수 있게 된다.
내가 짠 코드를 다른 사람이 오해하지 않고,
타인의 코드를 보고 얼마만큼 고민했는지 이해하게 되고,
그 사람과 내가 같은 생각을 공유할 수 있다는 것은 너무 즐거운 일 아닐까?
for/if만 알면 된다는건, 내 감정을 좋다/싫다로만 표현하겠다는 것과 같다고 생각한다.
그게 과연 좋을까?
좀 더 자신이 표현할 수 있는 코드의 표현이 다양해지길 바라며. | {
"pile_set_name": "Github"
} |
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Xception_implementation.ipynb",
"provenance": [],
"collapsed_sections": [],
"authorship_tag": "ABX9TyNq9qTKVN05k/mMCHpbG6D2",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/Machine-Learning-Tokyo/CNN-Architectures/blob/master/Implementations/Xception/Xception_implementation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "aBMbYREY0Jt8",
"colab_type": "text"
},
"source": [
"# Implementation of Xception\n",
"\n",
"We will use the [tensorflow.keras Functional API](https://www.tensorflow.org/guide/keras/functional) to build Xception from the original paper: “[Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)” by François Chollet.\n",
"\n",
"[Video tutorial](https://www.youtube.com/watch?v=nMBCSroJ7bY&list=PLaPdEEY26UXyE3UchW0C742xh542yh0yI&index=6)\n",
"\n",
"---\n",
"\n",
"In the paper we can read:\n",
"\n",
">**[i]** “all Convolution and SeparableConvolution layers are followed by batch normalization [7] (not included in the diagram).\"\n",
">\n",
">**[ii]** \"All SeparableConvolution layers use a depth multiplier of 1 (no depth expansion).\"\n",
"\n",
"<br>\n",
"\n",
"We will also use the following Diagram **[iii]**:\n",
"\n",
"<img src=https://raw.githubusercontent.com/Machine-Learning-Tokyo/DL-workshop-series/master/Part%20I%20-%20Convolution%20Operations/images/Xception/Xception.png? width=\"100%\">\n",
"\n",
"<br>\n",
"\n",
"as well the following Table **[iv]** to check the total number of parameters:\n",
"\n",
"<img src=https://raw.githubusercontent.com/Machine-Learning-Tokyo/DL-workshop-series/master/Part%20I%20-%20Convolution%20Operations/images/Xception/Xception_parameters.png? width=\"40%\">\n",
"\n",
"---\n",
"\n",
"## Network architecture\n",
"\n",
"The model is separated in 3 flows as depicted at **[iii]**:\n",
"- Entry flow\n",
"- Middle flow with 8 repetitions of the same block\n",
"- Exit flow\n",
"\n",
"According to **[i]** all Convolution and Separable Convolution layers are followed by batch normalization.\n",
"\n",
"---\n",
"\n",
"## Workflow\n",
"We will:\n",
"1. import the neccesary layers\n",
"2. write one helper function for the Conv-BatchNorm block and one for the SeparableConv-BatchNorm block according to **[i]**\n",
"3. write one function for each one of the 3 flows according to **[iii]**\n",
"4. use these helper functions to build the model.\n",
"\n",
"---\n",
"\n",
"### 1. Imports\n"
]
},
{
"cell_type": "code",
"metadata": {
"id": "Lhb0t5ZX0CWm",
"colab_type": "code",
"colab": {}
},
"source": [
"from tensorflow.keras.layers import Input, Conv2D, SeparableConv2D, \\\n",
" Add, Dense, BatchNormalization, ReLU, MaxPool2D, GlobalAvgPool2D"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "JVBKdFWp0b6J",
"colab_type": "text"
},
"source": [
"### 2.1. Conv-BatchNorm block\n",
"The *Conv-BatchNorm block* will:\n",
"- take as inputs:\n",
" - a tensor (**`x`**)\n",
" - the number of filters of the *Convolution layer* (**`filters`**)\n",
" - the kernel size of the *Convolution layer* (**`kernel_size`**)\n",
" - the strides of the *Convolution layer* (**`strides`**)\n",
"- run:\n",
" - apply a *Convolution layer* to **`x`**\n",
" - apply a *Batch Normalization* layer to this tensor\n",
"- return the tensor"
]
},
{
"cell_type": "code",
"metadata": {
"id": "1EmtXA_00fC6",
"colab_type": "code",
"colab": {}
},
"source": [
"def conv_bn(x, filters, kernel_size, strides=1):\n",
" x = Conv2D(filters=filters,\n",
" kernel_size=kernel_size,\n",
" strides=strides,\n",
" padding='same',\n",
" use_bias=False)(x)\n",
" x = BatchNormalization()(x)\n",
" return x"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "I38TMjXk0i0t",
"colab_type": "text"
},
"source": [
"***Note***: We include *use_bias=False* for the final number of parameters to match the ones written at **[iv]**.\n",
"\n",
"---\n",
"\n",
"### 2.2. SeparableConv-BatchNorm\n",
"The *SeparableConv-BatchNorm block* has similar structure with the *Conv-BatchNorm* one"
]
},
{
"cell_type": "code",
"metadata": {
"id": "ZyOzUA-m0jcF",
"colab_type": "code",
"colab": {}
},
"source": [
"def sep_bn(x, filters, kernel_size, strides=1):\n",
" x = SeparableConv2D(filters=filters,\n",
" kernel_size=kernel_size,\n",
" strides=strides,\n",
" padding='same',\n",
" use_bias=False)(x)\n",
" x = BatchNormalization()(x)\n",
" return x"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "mWLuniSA0nRG",
"colab_type": "text"
},
"source": [
"### 3.1. Entry flow\n",
"<img src=https://raw.githubusercontent.com/Machine-Learning-Tokyo/DL-workshop-series/master/Part%20I%20-%20Convolution%20Operations/images/Xception/entry_flow.png? width=\"300\">"
]
},
{
"cell_type": "code",
"metadata": {
"id": "q3Cx3Fc60qZ2",
"colab_type": "code",
"colab": {}
},
"source": [
"def entry_flow(x):\n",
" x = conv_bn(x, filters=32, kernel_size=3, strides=2)\n",
" x = ReLU()(x)\n",
" x = conv_bn(x, filters=64, kernel_size=3)\n",
" tensor = ReLU()(x)\n",
" \n",
" x = sep_bn(tensor, filters=128, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=128, kernel_size=3)\n",
" x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=128, kernel_size=1, strides=2)\n",
" \n",
" x = Add()([tensor, x])\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=256, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=256, kernel_size=3)\n",
" x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=256, kernel_size=1, strides=2)\n",
" \n",
" x = Add()([tensor, x])\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=728, kernel_size=1, strides=2)\n",
" x = Add()([tensor, x])\n",
" \n",
" return x"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "VdDWkhHO0sAw",
"colab_type": "text"
},
"source": [
"### 3.2. Middle flow\n",
"<img src=https://raw.githubusercontent.com/Machine-Learning-Tokyo/DL-workshop-series/master/Part%20I%20-%20Convolution%20Operations/images/Xception/middle_flow.png? width=\"250\">"
]
},
{
"cell_type": "code",
"metadata": {
"id": "h6pBxNcA0y0I",
"colab_type": "code",
"colab": {}
},
"source": [
"def middle_flow(tensor):\n",
" for _ in range(8):\n",
" x = ReLU()(tensor)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" \n",
" tensor = Add()([tensor, x])\n",
" \n",
" return tensor"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "DNd1B5-002C9",
"colab_type": "text"
},
"source": [
"### 3.3. Exit flow\n",
"<img src=\"https://raw.githubusercontent.com/Machine-Learning-Tokyo/DL-workshop-series/master/Part%20I%20-%20Convolution%20Operations/images/Xception/exit_flow.png\" width=\"300\">"
]
},
{
"cell_type": "code",
"metadata": {
"id": "VsxFWeQu04bW",
"colab_type": "code",
"colab": {}
},
"source": [
"def exit_flow(tensor):\n",
" x = ReLU()(tensor)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=1024, kernel_size=3)\n",
" x = MaxPool2D(3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=1024, kernel_size=1, strides=2)\n",
" \n",
" x = Add()([tensor, x])\n",
" x = sep_bn(x, filters=1536, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=2048, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = GlobalAvgPool2D()(x)\n",
" x = Dense(units=1000, activation='softmax')(x)\n",
" \n",
" return x"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "QrfcSKHq07o3",
"colab_type": "text"
},
"source": [
"### 4. Model code"
]
},
{
"cell_type": "code",
"metadata": {
"id": "9_YCQNXO093K",
"colab_type": "code",
"colab": {}
},
"source": [
"input = Input(shape=[299, 299, 3])\n",
" \n",
"x = entry_flow(input)\n",
"x = middle_flow(x)\n",
"output = exit_flow(x)\n",
" \n",
"from tensorflow.keras import Model \n",
"model = Model(input, output)"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "mNGVPgYd4Z7A",
"colab_type": "code",
"colab": {}
},
"source": [
"from tensorflow.keras.utils import plot_model\n",
"plot_model(model, show_shapes=True)"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "-e5v5OOM1BHL",
"colab_type": "text"
},
"source": [
"### Check number of parameters\n",
"\n",
"We can also check the total number of trainable parameters of the model by calling `count_params()` on each result element of `model.trainable_weights`.\n",
"\n",
"According to **[iv]** there are 22,855,952 trainable parameters in the Xception model."
]
},
{
"cell_type": "code",
"metadata": {
"id": "MvbYU04G1Bjr",
"colab_type": "code",
"colab": {}
},
"source": [
"import numpy as np\n",
"import tensorflow.keras.backend as K\n",
"np.sum([K.count_params(p) for p in model.trainable_weights])"
],
"execution_count": 0,
"outputs": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "AR7hVYiM1Etc",
"colab_type": "text"
},
"source": [
"## Final code\n",
"\n",
"```python\n",
"from tensorflow.keras.layers import Input, Conv2D, SeparableConv2D, \\\n",
" Add, Dense, BatchNormalization, ReLU, MaxPool2D, GlobalAvgPool2D\n",
" \n",
"def conv_bn(x, filters, kernel_size, strides=1):\n",
" x = Conv2D(filters=filters,\n",
" kernel_size=kernel_size,\n",
" strides=strides,\n",
" padding='same',\n",
" use_bias=False)(x)\n",
" x = BatchNormalization()(x)\n",
" return x\n",
" \n",
" \n",
"def sep_bn(x, filters, kernel_size, strides=1):\n",
" x = SeparableConv2D(filters=filters,\n",
" kernel_size=kernel_size,\n",
" strides=strides,\n",
" padding='same',\n",
" use_bias=False)(x)\n",
" x = BatchNormalization()(x)\n",
" return x\n",
" \n",
" \n",
"def entry_flow(x):\n",
" x = conv_bn(x, filters=32, kernel_size=3, strides=2)\n",
" x = ReLU()(x)\n",
" x = conv_bn(x, filters=64, kernel_size=3)\n",
" tensor = ReLU()(x)\n",
" \n",
" x = sep_bn(tensor, filters=128, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=128, kernel_size=3)\n",
" x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=128, kernel_size=1, strides=2)\n",
" \n",
" x = Add()([tensor, x])\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=256, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=256, kernel_size=3)\n",
" x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=256, kernel_size=1, strides=2)\n",
" \n",
" x = Add()([tensor, x])\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=728, kernel_size=1, strides=2)\n",
" x = Add()([tensor, x])\n",
" \n",
" return x\n",
" \n",
" \n",
"def middle_flow(tensor):\n",
" for _ in range(8):\n",
" x = ReLU()(tensor)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" \n",
" tensor = Add()([tensor, x])\n",
" \n",
" return tensor\n",
" \n",
" \n",
"def exit_flow(tensor):\n",
" x = ReLU()(tensor)\n",
" x = sep_bn(x, filters=728, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=1024, kernel_size=3)\n",
" x = MaxPool2D(3, strides=2, padding='same')(x)\n",
" \n",
" tensor = conv_bn(tensor, filters=1024, kernel_size=1, strides=2)\n",
" \n",
" x = Add()([tensor, x])\n",
" x = sep_bn(x, filters=1536, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = sep_bn(x, filters=2048, kernel_size=3)\n",
" x = ReLU()(x)\n",
" x = GlobalAvgPool2D()(x)\n",
" x = Dense(units=1000, activation='softmax')(x)\n",
" \n",
" return x\n",
" \n",
" \n",
"input = Input(shape=[299, 299, 3])\n",
" \n",
"x = entry_flow(input)\n",
"x = middle_flow(x)\n",
"output = exit_flow(x)\n",
" \n",
"from tensorflow.keras import Model \n",
"model = Model(input, output)\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "CUl25aXV0SB0",
"colab_type": "text"
},
"source": [
"## Model diagram\n",
"\n",
"<img src=\"https://raw.githubusercontent.com/Machine-Learning-Tokyo/CNN-Architectures/master/Implementations/Xception/Xception_diagram.svg?sanitize=true\">"
]
}
]
} | {
"pile_set_name": "Github"
} |
[90mUsage:
Example of 'foo_bar' module in `foo_bar.tf`.
- list item 1
- list item 2
Even inline **formatting** in _here_ is possible.
and some [link](https://domain.com/)
* list item 3
* list item 4
```hcl
module "foo_bar" {
source = "github.com/foo/bar"
id = "1234567890"
name = "baz"
zones = ["us-east-1", "us-west-1"]
tags = {
Name = "baz"
Created-By = "first.last@email.com"
Date-Created = "20180101"
}
}
```
Here is some trailing text after code block,
followed by another line of text.
| Name | Description |
|------|-----------------|
| Foo | Foo description |
| Bar | Bar description |[0m
[36mrequirement.terraform[0m (>= 0.12)
[36mrequirement.aws[0m (>= 2.15.0)
[36mrequirement.random[0m (>= 2.2.0)
[36mprovider.tls[0m
[36mprovider.aws[0m (>= 2.15.0)
[36mprovider.aws.ident[0m (>= 2.15.0)
[36mprovider.null[0m
[36minput.unquoted[0m (required)
[90mn/a[0m
[36minput.bool-3[0m (true)
[90mn/a[0m
[36minput.bool-2[0m (false)
[90mIt's bool number two.[0m
[36minput.bool-1[0m (true)
[90mIt's bool number one.[0m
[36minput.string-3[0m ("")
[90mn/a[0m
[36minput.string-2[0m (required)
[90mIt's string number two.[0m
[36minput.string-1[0m ("bar")
[90mIt's string number one.[0m
[36minput.string-special-chars[0m ("\\.<>[]{}_-")
[90mn/a[0m
[36minput.number-3[0m ("19")
[90mn/a[0m
[36minput.number-4[0m (15.75)
[90mn/a[0m
[36minput.number-2[0m (required)
[90mIt's number number two.[0m
[36minput.number-1[0m (42)
[90mIt's number number one.[0m
[36minput.map-3[0m ({})
[90mn/a[0m
[36minput.map-2[0m (required)
[90mIt's map number two.[0m
[36minput.map-1[0m ({
"a": 1,
"b": 2,
"c": 3
})
[90mIt's map number one.[0m
[36minput.list-3[0m ([])
[90mn/a[0m
[36minput.list-2[0m (required)
[90mIt's list number two.[0m
[36minput.list-1[0m ([
"a",
"b",
"c"
])
[90mIt's list number one.[0m
[36minput.input_with_underscores[0m (required)
[90mA variable with underscores.[0m
[36minput.input-with-pipe[0m ("v1")
[90mIt includes v1 | v2 | v3[0m
[36minput.input-with-code-block[0m ([
"name rack:location"
])
[90mThis is a complicated one. We need a newline.
And an example in a code block
```
default = [
"machine rack01:neptune"
]
```[0m
[36minput.long_type[0m ({
"bar": {
"bar": "bar",
"foo": "bar"
},
"buzz": [
"fizz",
"buzz"
],
"fizz": [],
"foo": {
"bar": "foo",
"foo": "foo"
},
"name": "hello"
})
[90mThis description is itself markdown.
It spans over multiple lines.[0m
[36minput.no-escape-default-value[0m ("VALUE_WITH_UNDERSCORE")
[90mThe description contains `something_with_underscore`. Defaults to 'VALUE_WITH_UNDERSCORE'.[0m
[36minput.with-url[0m ("")
[90mThe description contains url. https://www.domain.com/foo/bar_baz.html[0m
[36minput.string_default_empty[0m ("")
[90mn/a[0m
[36minput.string_default_null[0m (null)
[90mn/a[0m
[36minput.string_no_default[0m (required)
[90mn/a[0m
[36minput.number_default_zero[0m (0)
[90mn/a[0m
[36minput.bool_default_false[0m (false)
[90mn/a[0m
[36minput.list_default_empty[0m ([])
[90mn/a[0m
[36minput.object_default_empty[0m ({})
[90mn/a[0m
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 22a20d6a3ade2427b8e500603af8fa4a
TextureImporter:
fileIDToRecycleName: {}
serializedVersion: 2
mipmaps:
mipMapMode: 0
enableMipMap: 1
linearTexture: 0
correctGamma: 0
fadeOut: 0
borderMipMap: 0
mipMapFadeDistanceStart: 1
mipMapFadeDistanceEnd: 3
bumpmap:
convertToNormalMap: 0
externalNormalMap: 0
heightScale: 0.25
normalMapFilter: 0
isReadable: 0
grayScaleToAlpha: 0
generateCubemap: 0
cubemapConvolution: 0
cubemapConvolutionSteps: 7
cubemapConvolutionExponent: 1.5
seamlessCubemap: 0
textureFormat: -1
maxTextureSize: 1024
textureSettings:
filterMode: 1
aniso: 3
mipBias: -1
wrapMode: 1
nPOTScale: 1
lightmap: 1
rGBM: 0
compressionQuality: 50
allowsAlphaSplitting: 0
spriteMode: 0
spriteExtrude: 1
spriteMeshType: 1
alignment: 0
spritePivot: {x: 0.5, y: 0.5}
spriteBorder: {x: 0, y: 0, z: 0, w: 0}
spritePixelsToUnits: 100
alphaIsTransparency: 0
spriteTessellationDetail: -1
textureType: 6
buildTargetSettings: []
spriteSheet:
serializedVersion: 2
sprites: []
outline: []
spritePackingTag:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
34
Neopentane ... Neopentane Dimer
C 0.38252221 -0.07060697 0.76689582
C -1.04063947 0.39681125 1.06093593
H -1.77157460 -0.28150025 0.61833023
H -1.22471777 0.43573509 2.13551890
H -1.21406603 1.39372444 0.65309065
C 0.59084747 -1.46681814 1.34797791
H 1.60291380 -1.82295000 1.15010285
H 0.43896858 -1.46674598 2.42828668
H -0.10991906 -2.17868425 0.90931390
C 1.37826905 0.89843536 1.39914944
H 2.40439397 0.58544074 1.20073365
H 1.24378092 0.94597430 2.48070991
H 1.24837318 1.90502262 0.99895071
C 0.60196094 -0.11103419 -0.74309659
H 0.45921182 0.87703910 -1.18289819
H 1.61369399 -0.44345945 -0.97967210
H -0.09953078 -0.79754982 -1.21922069
C -0.24286035 0.04489996 5.05931405
C 1.17995210 -0.42406604 4.76591451
H 1.36439130 -0.46339519 3.69139404
H 1.91138625 0.25360679 5.20865008
H 1.35221577 -1.42107208 5.17403508
C -0.44925716 1.44146149 4.47848357
H -1.46122026 1.79844694 4.67533110
H 0.25166143 2.15253296 4.91820534
H -0.29614795 1.44165810 3.39834122
C -0.46315484 0.08507618 6.56916768
H -1.47437100 0.41935316 6.80524087
H 0.23935761 0.77002521 7.04600590
H -0.32259175 -0.90344416 7.00861942
C -1.23932307 -0.92287770 4.42616765
H -1.11039706 -1.92985308 4.82574663
H -2.26521217 -0.60910484 4.62455551
H -1.10461871 -0.96985209 3.34459099
| {
"pile_set_name": "Github"
} |
---
extraCss:
- |
inline:.scss:
.image-preview {
text-align: center;
img {
max-width:80%;
}
}
---

{.image-preview}
Usually in a mobile application, the user can navigate from one screen to
another.
The {{ anchor('Navigator') }} interface provides a way to handle
flow through an application.
Instead of {{ anchor('Scenes', 'Scene') }} determining the next destination in
the application by themselves, the Navigator can listen to events a Scene
publishes and determine
the appropriate action to take.
Take for example a simple login screen, where the user enters their username
and password.
When the user presses the 'log in' button, the application verifies the
credentials and navigates to a different screen.
In Acorn, the `LoginScene` provides an `Events` interface with a method
`onLoggedIn(User)`.
The Navigator provides an implementation of this interface to the Scene,
and navigates to the next Scene.
The Navigator in turn has the responsibility to let interested parties know that
the active Scene has changed.
This way the UI layer can react to a Scene change and show the proper UI.
### Lifecycle
Like Scenes, Navigators also have lifecycles.
Most often these are used to control the Scene lifecycle, but a Navigator
implementation can also choose to hook into this lifecycle itself.
The Navigator's lifecycle is similar to that of Scenes: they can be 'started',
'stopped' and 'destroyed':
- 'stopped' : The Navigator is dormant, waiting to become active or to be
destroyed. A change in its Scenes is not propagated to its
listeners.
- 'started' : The Navigator is currently active, and changes in scenery are
propagated to the listeners.
- 'destroyed': The Navigator is destroyed and will not become active anymore.
During the lifetime of a Navigator it can go from 'stopped' to 'started' and
vice versa multiple times, until it reaches the 'destroyed' state.
{% highlight 'kotlin' %}
interface Navigator {
fun onStart()
fun onStop()
fun onDestroy()
}
{% endhighlight %}
### State modelling
Navigator implementations are free to choose how they implement their internal
state.
For example, it could use a structure like a stack to provide functionality
similar to a back stack, or it could use a state machine for the state
representation.
This freedom that the Navigator gets also means that it is free to choose how
the lifecycles of its Scenes behave, as long as it is according to the Scene
specification. The order of the Scene's callback methods must be honored, and the
Navigator's lifecycle state must always outlive that of a Scene.
This means that a Navigator's Scenes may only be active when the Navigator is
active, and no Scenes may be active when the Navigator is inactive.
Finally, the Scenes must always be properly destroyed when the Navigator is
destroyed.
Other than that, the Navigator implementation is free to decide how its Scene's
lifecycle is structured, and often depends on the strategy that is chosen for
modelling the internal state.
A Navigator that uses a stack for its state for example will stop but not
destroy the currently active Scene when a new Scene is pushed on the Stack.
Scenes are only destroyed when they're popped off the stack or when the Navigator
is destroyed.
A Navigator that merely replaces Scenes without any 'back' behavior will
immediately stop and destroy the currently active Scene when a new Scene becomes
active.
### Scene propagation
The Navigator implementation is in control of determining which Scene is active,
and must propagate it to any listeners.
The Navigator interface declares a
{{ anchor('Navigator.Events', 'Events') }} interface that contains
callback methods to trigger interested parties.
The Navigator interface itself has a method to let these interested parties
register themselves:
{% highlight 'kotlin' %}
interface Navigator {
fun addNavigatorEventsListener(listener: Navigator.Events) : DisposableHandle
/* ... */
interface Events {
fun scene(scene: Scene<out Container>, data: TransitionData? = null)
fun finished()
}
}
{% endhighlight %}
If appropriate, the Navigator can invoke the `Navigator.Events.scene` method
when the active Scene changes.
### Reacting to Scene events
As mentioned in {{ anchor('About Scenes', 'pageId=about_scenes') }}, Scenes may accept callback interfaces to push
events to the Navigator.
The Navigator implementation can use these callbacks to make an internal state
change.
For example, assuming there is a base StackNavigator class, we can do the
following:
{% highlight 'kotlin' %}
class MyNavigator : StackNavigator() {
override fun initialStack() = listOf(MyScene(MySceneListener()))
private inner class MySceneListener: MyScene.Events {
override fun onEvent() {
push(MyScene(this))
}
}
}
{% endhighlight %}
### Back presses
When the user presses the back button, this can ultimately be viewed as an
event, much like regular button presses.
Navigators can choose to implement the
{{ anchor('OnBackPressListener') }} interface to indicate
they're interested in handling these back presses.
Since the Activity is the entry point for back presses, it should delegate
this request first to the Navigator if possible.
The Navigator can use this event to make a transition in its internal state.
### Navigator results
A Navigator implementation can also provide a callback interface to publish
results.
This is useful for example when creating a login flow: the user can be guided
through several Scenes, after which the Navigator finishes with an auth token
result.
There are two ways to implement callbacks for Navigator results.
The first is similar to the way this is implemented for Scenes, by passing a
callback to the Navigator constructor:
{% highlight 'kotlin' %}
class MyNavigator(
private val listener: Events
) : Navigator, MyScene.Events {
/* ... */
override fun onAuthToken(authToken: String) {
listener.onResult(authToken)
}
interface Events {
fun onResult(authToken: String)
}
}
{% endhighlight %}
There are cases however where the Activity is interested in the Navigator's
result, to be able to call `Activity.setResult` and finish.
Since the Navigator should outlive the Activity, the Activity must be able to
register itself as a listener to the Navigator.
This can be done by keeping a list of interested listeners:
{% highlight 'kotlin' %}
class MyNavigator : Navigator, MyScene.Events {
private var listeners = listOf<Events>()
fun register(listener: Events) {
listeners += listener
}
fun remove(listener: Events) {
listeners -= listener
}
/* ... */
override fun onAuthToken(authToken: String) {
listeners.forEach { it.onResult(authToken) }
}
interface Events {
fun onResult(authToken: String)
}
}
{% endhighlight %}
### Saving and restoring state
Just like Scenes, Navigator instances need to be able to have their state saved
as well, and must be able to be restored from this saved state.
Navigators that save their state must also save the states of the Scenes they
are hosting.
This means that the Navigator instance is responsible for the restoration of the
Scenes as well.
Depending on the chosen strategy for modelling the internal navigation state,
the Navigator must be able to restore one or more of its Scenes and restore its
internal state as well.
### Navigator composition
The power behind the Navigator interface is that instances can be composed
together.
An application may for example have several flows that make up the entire
application flow.
These flows can all be implemented using their own Navigator implementations,
and then tied together using a composing Navigator.
These composing Navigators can decide their internal state just as the 'normal'
Navigators, which means that you can create a Navigator implementation that can
push and pop other Navigators on and off a stack.
## Advanced topics
Usually you don't need to implement the Navigator interface directly; you can
use the existing base classes to compose the behavior you need.
However if you do choose to implement your own Navigator, you might want to have
a look at {{ anchor('Scene Management', 'pageId=scene_management') }}.
| {
"pile_set_name": "Github"
} |
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const (
// LabelHostname is the well-known label carrying a node's hostname.
LabelHostname = "kubernetes.io/hostname"
// Deprecated beta topology labels (failure-domain.beta.kubernetes.io prefix);
// superseded by the stable topology.kubernetes.io variants below.
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
// Stable (GA) topology labels for zone and region.
LabelZoneFailureDomainStable = "topology.kubernetes.io/zone"
LabelZoneRegionStable = "topology.kubernetes.io/region"
// LabelInstanceType is the beta instance-type label; LabelInstanceTypeStable
// is its GA replacement under node.kubernetes.io.
LabelInstanceType = "beta.kubernetes.io/instance-type"
LabelInstanceTypeStable = "node.kubernetes.io/instance-type"
// Stable labels describing a node's operating system and CPU architecture.
LabelOSStable = "kubernetes.io/os"
LabelArchStable = "kubernetes.io/arch"
// LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0.
// It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763)
LabelWindowsBuild = "node.kubernetes.io/windows-build"
// LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*)
LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io"
// LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*)
LabelNamespaceSuffixNode = "node.kubernetes.io"
// LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled
LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io"
// IsHeadlessService is added by Controller to an Endpoint denoting if its parent
// Service is Headless. The existence of this label can be used further by other
// controllers and kube-proxy to check if the Endpoint objects should be replicated when
// using Headless Services
IsHeadlessService = "service.kubernetes.io/headless"
)
| {
"pile_set_name": "Github"
} |
好奇心原文链接:[科学家成功培养出杂交胚胎,以拯救几近灭绝的北部白犀牛_文化_好奇心日报-Steph Yin](https://www.qdaily.com/articles/54954.html)
WebArchive归档链接:[科学家成功培养出杂交胚胎,以拯救几近灭绝的北部白犀牛_文化_好奇心日报-Steph Yin](http://web.archive.org/web/20181018000850/http://www.qdaily.com:80/articles/54954.html)
 | {
"pile_set_name": "Github"
} |
{
"assets": {
"container": {
"docker": {
"cassandra": "mesosphere/cassandra:1.0.26-3.0.10"
}
},
"uris": {
"jre-tar-gz": "https://downloads.mesosphere.com/java/jre-8u131-linux-x64.tar.gz",
"cassandra-jre-tar-gz": "https://downloads.mesosphere.com/java/jre-8u131-linux-x64.tar.gz",
"libmesos-bundle-tar-gz": "https://downloads.mesosphere.io/libmesos-bundle/libmesos-bundle-1.10-1.4-63e0814.tar.gz",
"scheduler-zip": "https://px-dcos.s3.amazonaws.com/portworx-cassandra/assets/1.1-3.0.14-bf399c5/cassandra-scheduler.zip",
"cassandra-tar-gz": "https://downloads.mesosphere.com/cassandra/assets/apache-cassandra-3.0.14-bin-dcos.tar.gz",
"bootstrap-zip": "https://px-dcos.s3.amazonaws.com/portworx-cassandra/assets/1.1-3.0.14-bf399c5/bootstrap.zip",
"executor-zip": "https://px-dcos.s3.amazonaws.com/portworx-cassandra/assets/1.1-3.0.14-bf399c5/executor.zip"
}
},
"images": {
"icon-small": "https://downloads.mesosphere.com/assets/universe/000/portworx-cassandra-icon-small.png",
"icon-medium": "https://downloads.mesosphere.com/assets/universe/000/portworx-cassandra-icon-medium.png",
"icon-large": "https://downloads.mesosphere.com/assets/universe/000/portworx-cassandra-icon-large.png"
},
"cli": {
"binaries": {
"darwin": {
"x86-64": {
"contentHash": [
{
"algo": "sha256",
"value": "78fe1089743b289ddce4ef81a45dfb015551610fe5bbec10e622dc00b12c2445"
}
],
"kind": "executable",
"url": "https://px-dcos.s3.amazonaws.com/portworx-cassandra/assets/1.1-3.0.14-bf399c5/dcos-portworx-cassandra-darwin"
}
},
"linux": {
"x86-64": {
"contentHash": [
{
"algo": "sha256",
"value": "978c6ed91297e1e821eedc059072963c4b4d31a5aeab15880a14e230fb8fd0d7"
}
],
"kind": "executable",
"url": "https://px-dcos.s3.amazonaws.com/portworx-cassandra/assets/1.1-3.0.14-bf399c5/dcos-portworx-cassandra-linux"
}
},
"windows": {
"x86-64": {
"contentHash": [
{
"algo": "sha256",
"value": "1bee6dab43d56137ed0cd12f6da0e36681520b686e9e1a5a5e5aeda32f6f6bbe"
}
],
"kind": "executable",
"url": "https://px-dcos.s3.amazonaws.com/portworx-cassandra/assets/1.1-3.0.14-bf399c5/dcos-portworx-cassandra.exe"
}
}
}
}
}
| {
"pile_set_name": "Github"
} |
[package]
name = "grin_pool"
version = "4.2.0-alpha.1"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
description = "Transaction pool implementation for grin, a simple, private and scalable cryptocurrency implementation based on the Mimblewimble chain format."
license = "Apache-2.0"
repository = "https://github.com/mimblewimble/grin"
keywords = [ "crypto", "grin", "mimblewimble" ]
workspace = '..'
edition = "2018"
[dependencies]
blake2-rfc = "0.2"
rand = "0.6"
serde = "1"
serde_derive = "1"
log = "0.4"
chrono = "0.4.11"
failure = "0.1"
failure_derive = "0.1"
grin_core = { path = "../core", version = "4.2.0-alpha.1" }
grin_keychain = { path = "../keychain", version = "4.2.0-alpha.1" }
grin_util = { path = "../util", version = "4.2.0-alpha.1" }
[dev-dependencies]
grin_chain = { path = "../chain", version = "4.2.0-alpha.1" }
| {
"pile_set_name": "Github"
} |
// RUN: %clang_cc1 -triple x86_64-apple-darwin -fblocks -emit-llvm -o - %s | FileCheck -check-prefix CHECK -check-prefix CHECK-NOARC %s
// RUN: %clang_cc1 -triple x86_64-apple-darwin -fblocks -emit-llvm -fobjc-arc -o - %s | FileCheck -check-prefix CHECK -check-prefix CHECK-ARC %s
typedef void (^BlockTy)(void);
union U {
int *i;
long long *ll;
} __attribute__((transparent_union));
void escapingFunc0(BlockTy);
void noescapeFunc0(id, __attribute__((noescape)) BlockTy);
void noescapeFunc1(__attribute__((noescape)) int *);
void noescapeFunc2(__attribute__((noescape)) id);
void noescapeFunc3(__attribute__((noescape)) union U);
// Block descriptors of non-escaping blocks don't need pointers to copy/dispose
// helper functions.
// CHECK: %[[STRUCT_BLOCK_DESCRIPTOR:.*]] = type { i64, i64 }
// When the block is non-escaping, copy/dispose helpers aren't generated, so the
// block layout string must include information about __strong captures.
// CHECK-NOARC: %[[STRUCT_BLOCK_BYREF_B0:.*]] = type { i8*, %[[STRUCT_BLOCK_BYREF_B0]]*, i32, i32, i8*, %[[STRUCT_S0:.*]] }
// CHECK-ARC: %[[STRUCT_BLOCK_BYREF_B0:.*]] = type { i8*, %[[STRUCT_BLOCK_BYREF_B0]]*, i32, i32, i8*, i8*, i8*, %[[STRUCT_S0:.*]] }
// CHECK: %[[STRUCT_S0]] = type { i8*, i8* }
// CHECK: @[[BLOCK_DESCIPTOR_TMP_2:.*ls32l8"]] = linkonce_odr hidden unnamed_addr constant { i64, i64, i8*, i64 } { i64 0, i64 40, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @{{.*}}, i32 0, i32 0), i64 256 }, align 8
// CHECK-LABEL: define void @test0(
// CHECK: call void @noescapeFunc0({{.*}}, {{.*}} nocapture {{.*}})
// CHECK: declare void @noescapeFunc0(i8*, {{.*}} nocapture)
void test0(BlockTy b) {
noescapeFunc0(0, b);
}
// CHECK-LABEL: define void @test1(
// CHECK: call void @noescapeFunc1({{.*}} nocapture {{.*}})
// CHECK: declare void @noescapeFunc1({{.*}} nocapture)
void test1(int *i) {
noescapeFunc1(i);
}
// CHECK-LABEL: define void @test2(
// CHECK: call void @noescapeFunc2({{.*}} nocapture {{.*}})
// CHECK: declare void @noescapeFunc2({{.*}} nocapture)
void test2(id i) {
noescapeFunc2(i);
}
// CHECK-LABEL: define void @test3(
// CHECK: call void @noescapeFunc3({{.*}} nocapture {{.*}})
// CHECK: declare void @noescapeFunc3({{.*}} nocapture)
void test3(union U u) {
noescapeFunc3(u);
}
// CHECK: define internal void @"\01-[C0 m0:]"({{.*}}, {{.*}}, {{.*}} nocapture {{.*}})
// CHECK-LABEL: define void @test4(
// CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i32*)*)(i8* {{.*}}, i8* {{.*}}, i32* nocapture {{.*}})
@interface C0
-(void) m0:(int*)__attribute__((noescape)) p0;
@end
@implementation C0
-(void) m0:(int*)__attribute__((noescape)) p0 {
}
@end
void test4(C0 *c0, int *p) {
[c0 m0:p];
}
// CHECK-LABEL: define void @test5(
// CHECK: call void {{.*}}(i8* bitcast ({ i8**, i32, i32, i8*, {{.*}} }* @{{.*}} to i8*), i32* nocapture {{.*}})
// CHECK: call void {{.*}}(i8* {{.*}}, i32* nocapture {{.*}})
// CHECK: define internal void @{{.*}}(i8* {{.*}}, i32* nocapture {{.*}})
// BlockTy2 is a block type whose pointer parameter is itself marked noescape;
// test5 exercises both a directly-invoked block literal and an indirect call
// through the block variable 'b'. (Plain comments are ignored by FileCheck —
// the CHECK lines above remain the authoritative expectations.)
typedef void (^BlockTy2)(__attribute__((noescape)) int *);
void test5(BlockTy2 b, int *p) {
^(int *__attribute__((noescape)) p0){}(p);
b(p);
}
// If the block is non-escaping, set the BLOCK_IS_NOESCAPE and BLOCK_IS_GLOBAL
// bits of field 'flags' and set the 'isa' field to 'NSConcreteGlobalBlock'.
// CHECK: define void @test6(i8* %{{.*}}, i8* %[[B:.*]])
// CHECK: %{{.*}} = alloca i8*, align 8
// CHECK: %[[B_ADDR:.*]] = alloca i8*, align 8
// CHECK: %[[BLOCK:.*]] = alloca <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, align 8
// CHECK-NOARC: store i8* %[[B]], i8** %[[B_ADDR]], align 8
// CHECK-ARC: store i8* null, i8** %[[B_ADDR]], align 8
// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[B_ADDR]], i8* %[[B]])
// CHECK-ARC: %[[V0:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 5
// CHECK: %[[BLOCK_ISA:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 0
// CHECK: store i8* bitcast (i8** @_NSConcreteGlobalBlock to i8*), i8** %[[BLOCK_ISA]], align 8
// CHECK: %[[BLOCK_FLAGS:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 1
// CHECK: store i32 -796917760, i32* %[[BLOCK_FLAGS]], align 8
// CHECK: %[[BLOCK_DESCRIPTOR:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 4
// CHECK: store %[[STRUCT_BLOCK_DESCRIPTOR]]* bitcast ({ i64, i64, i8*, i64 }* @[[BLOCK_DESCIPTOR_TMP_2]] to %[[STRUCT_BLOCK_DESCRIPTOR]]*), %[[STRUCT_BLOCK_DESCRIPTOR]]** %[[BLOCK_DESCRIPTOR]], align 8
// CHECK: %[[BLOCK_CAPTURED:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 5
// CHECK-NOARC: %[[V1:.*]] = load i8*, i8** %[[B_ADDR]], align 8
// CHECK-NOARC: store i8* %[[V1]], i8** %[[BLOCK_CAPTURED]], align 8
// CHECK-ARC: %[[V2:.*]] = load i8*, i8** %[[B_ADDR]], align 8
// CHECK-ARC: %[[V3:.*]] = call i8* @llvm.objc.retain(i8* %[[V2]])
// CHECK-ARC: store i8* %[[V3]], i8** %[[BLOCK_CAPTURED]], align 8
// CHECK: call void @noescapeFunc0(
// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[V0]], i8* null)
// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[B_ADDR]], i8* null)
// Non-escaping blocks don't need copy/dispose helper functions.
// CHECK-NOT: define internal void @__copy_helper_block_
// CHECK-NOT: define internal void @__destroy_helper_block_
void func(id);
void test6(id a, id b) {
noescapeFunc0(a, ^{ func(b); });
}
// We don't need either the byref helper functions or the byref structs for
// __block variables that are not captured by escaping blocks.
// CHECK: define void @test7(
// CHECK: alloca i8*, align 8
// CHECK: %[[B0:.*]] = alloca i8*, align 8
// CHECK: %[[BLOCK:.*]] = alloca <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8** }>, align 8
// CHECK: %[[BLOCK_CAPTURED:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8** }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8** }>* %[[BLOCK]], i32 0, i32 5
// CHECK: store i8** %[[B0]], i8*** %[[BLOCK_CAPTURED]], align 8
// CHECK-ARC-NOT: define internal void @__Block_byref_object_copy_
// CHECK-ARC-NOT: define internal void @__Block_byref_object_dispose_
void test7() {
id a;
__block id b0;
noescapeFunc0(a, ^{ (void)b0; });
}
// __block variables captured by escaping blocks need byref helper functions.
// CHECK: define void @test8(
// CHECK: %[[A:.*]] = alloca i8*, align 8
// CHECK: %[[B0:.*]] = alloca %[[STRUCT_BLOCK_BYREF_B0]], align 8
// CHECK: alloca <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, align 8
// CHECK: %[[BLOCK1:.*]] = alloca <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, align 8
// CHECK: %[[BLOCK_CAPTURED7:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK1]], i32 0, i32 5
// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_BLOCK_BYREF_B0]]* %[[B0]] to i8*
// CHECK: store i8* %[[V3]], i8** %[[BLOCK_CAPTURED7]], align 8
// CHECK-ARC: define internal void @__Block_byref_object_copy_
// CHECK-ARC: define internal void @__Block_byref_object_dispose_
// CHECK: define linkonce_odr hidden void @__copy_helper_block_
// CHECK: define linkonce_odr hidden void @__destroy_helper_block_
// Aggregate of two object pointers; used as the __block variable's type so
// the byref copy/dispose helpers have member objects to manage.
struct S0 {
  id a, b;
};
// test8: b0 is captured by both a non-escaping and an escaping block.  The
// escaping capture forces the full machinery: b0 lives in a
// __Block_byref struct, and byref copy/dispose helpers plus block
// copy/destroy helpers are emitted (verified by the directives above).
void test8() {
  id a;
  __block struct S0 b0;
  noescapeFunc0(a, ^{ (void)b0; });
  escapingFunc0(^{ (void)b0; });
}
| {
"pile_set_name": "Github"
} |
// Variables: define a value once and reuse it.  The '+' operator on two
// colors adds each RGB channel, so #5B83AD + #111 brightens the base color.
@nice-blue: #5B83AD;
@light-blue: @nice-blue + #111;
@font-size: 18px;
#header {
  color: @light-blue;
  font-size: @font-size;
}
// Built-in functions operating on numbers and colors.
@height: 0.5;
@size: 10px+10;
@color: #666 + 111;
#header2 {
  color: @color;
  height: percentage(@height); // converts 0.5 -> 50%
  font-size: saturate(@size,10%); // increase saturation by 10%
  background-color: spin(lighten(@color, 25%), 10); // lighten() raises lightness by 25%, spin() rotates the hue by 10 degrees
}
// A plain class rule doubles as a mixin: it is output as CSS on its own
// AND can be mixed into other rules.
.my-mixin {
  color: black;
}
// A mixin defined with parentheses is NOT output on its own; its rules are
// only emitted where the mixin is invoked.
.my-other-mixin() {
  background: white;
}
.my-font-mixin {
  font-size: 20px;
}
// Grouped selectors; either .cat or #wscat can be used as a mixin below.
.cat,
#wscat {
  background-color: aquamarine;
}
// Demonstrates the different ways of invoking the mixins defined above.
#header3 {
  .my-mixin;
  .my-other-mixin; // the parentheses may be omitted when invoking
  //.my-other-mixin() // invoking with explicit parentheses works too
  .my-font-mixin();
  #wscat;
  //.cat() // either selector of the grouped rule above can be mixed in
}
// Nesting: the three flat rules below and the nested form that follows
// compile to the same CSS; nesting just mirrors the document structure.
#header4 {
  color: black;
}
#header4 .navigation {
  font-size: 12px;
}
#header4 .logo {
  width: 300px;
}
// The same rules written with nesting.
#header4 {
  color: black;
  .navigation {
    font-size: 12px;
  }
  .logo {
    width: 300px;
  }
  // '&' is the parent selector, so this compiles to #header4:before.
  &:before {
    content: "你好";
  }
  // '&' with a suffix concatenates: this compiles to #header4-wsscat.
  &-wsscat {
    background-color: bisque;
  }
}
// Variable scoping: variables are lexically scoped and the definition
// nearest to the point of use wins.
@var: white;
#header5 {
  @var: red;
  #logo {
    color: @var; // red — the local @var in #header5 shadows the global white
  }
}
// Parametric mixin: applies the given background color and font size.
// @color - any color value
// @size  - a font-size length
.bg(@color, @size) {
  background-color: @color;
  font-size: @size;
}
// Fixed: the mixin invocation was missing its terminating semicolon and the
// selector lacked a space before '{' — punctuation/whitespace only, the
// generated CSS is unchanged.
.background {
  .bg(#555555, 16px);
}
| {
"pile_set_name": "Github"
} |
// Ambient type declarations for a Font Awesome icon package ("faGrinAlt").
// The icon is exported both as the generic 'definition' and under its
// camel-cased name, alongside its raw metadata (dimensions, ligatures,
// unicode code point, and SVG path data).
import { IconDefinition, IconPrefix, IconName } from "@fortawesome/fontawesome-common-types";
export const definition: IconDefinition;
export const faGrinAlt: IconDefinition;
export const prefix: IconPrefix;
export const iconName: IconName;
export const width: number;
export const height: number;
export const ligatures: string[];
export const unicode: string;
export const svgPathData: string; | {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_201) on Thu May 07 13:37:39 PDT 2020 -->
<title>Table (iceberg 0.8.0-incubating API)</title>
<meta name="date" content="2020-05-07">
<link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Table (iceberg 0.8.0-incubating API)";
}
}
catch(err) {
}
//-->
var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":18,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":38,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
var activeTableTab = "activeTableTab";
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-all.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../org/apache/iceberg/SystemProperties.html" title="class in org.apache.iceberg"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../org/apache/iceberg/TableMetadata.html" title="class in org.apache.iceberg"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../index.html?org/apache/iceberg/Table.html" target="_top">Frames</a></li>
<li><a href="Table.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.apache.iceberg</div>
<h2 title="Interface Table" class="title">Interface Table</h2>
</div>
<div class="contentContainer">
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Known Implementing Classes:</dt>
<dd><a href="../../../org/apache/iceberg/AllDataFilesTable.html" title="class in org.apache.iceberg">AllDataFilesTable</a>, <a href="../../../org/apache/iceberg/AllEntriesTable.html" title="class in org.apache.iceberg">AllEntriesTable</a>, <a href="../../../org/apache/iceberg/AllManifestsTable.html" title="class in org.apache.iceberg">AllManifestsTable</a>, <a href="../../../org/apache/iceberg/BaseTable.html" title="class in org.apache.iceberg">BaseTable</a>, <a href="../../../org/apache/iceberg/DataFilesTable.html" title="class in org.apache.iceberg">DataFilesTable</a>, <a href="../../../org/apache/iceberg/HistoryTable.html" title="class in org.apache.iceberg">HistoryTable</a>, <a href="../../../org/apache/iceberg/ManifestEntriesTable.html" title="class in org.apache.iceberg">ManifestEntriesTable</a>, <a href="../../../org/apache/iceberg/ManifestsTable.html" title="class in org.apache.iceberg">ManifestsTable</a>, <a href="../../../org/apache/iceberg/PartitionsTable.html" title="class in org.apache.iceberg">PartitionsTable</a>, <a href="../../../org/apache/iceberg/SnapshotsTable.html" title="class in org.apache.iceberg">SnapshotsTable</a></dd>
</dl>
<hr>
<br>
<pre>public interface <span class="typeNameLabel">Table</span></pre>
<div class="block">Represents a table.</div>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd"> </span></span><span id="t3" class="tableTab"><span><a href="javascript:show(4);">Abstract Methods</a></span><span class="tabEnd"> </span></span><span id="t5" class="tableTab"><span><a href="javascript:show(16);">Default Methods</a></span><span class="tabEnd"> </span></span><span id="t6" class="tableTab"><span><a href="javascript:show(32);">Deprecated Methods</a></span><span class="tabEnd"> </span></span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr id="i0" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg">Snapshot</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#currentSnapshot--">currentSnapshot</a></span>()</code>
<div class="block">Get the current <a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg"><code>snapshot</code></a> for this table, or null if there are no snapshots.</div>
</td>
</tr>
<tr id="i1" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/encryption/EncryptionManager.html" title="interface in org.apache.iceberg.encryption">EncryptionManager</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#encryption--">encryption</a></span>()</code> </td>
</tr>
<tr id="i2" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/ExpireSnapshots.html" title="interface in org.apache.iceberg">ExpireSnapshots</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#expireSnapshots--">expireSnapshots</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/ExpireSnapshots.html" title="interface in org.apache.iceberg"><code>expire API</code></a> to manage snapshots in this table and commit.</div>
</td>
</tr>
<tr id="i3" class="rowColor">
<td class="colFirst"><code>java.util.List<<a href="../../../org/apache/iceberg/HistoryEntry.html" title="interface in org.apache.iceberg">HistoryEntry</a>></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#history--">history</a></span>()</code>
<div class="block">Get the snapshot history of this table.</div>
</td>
</tr>
<tr id="i4" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/io/FileIO.html" title="interface in org.apache.iceberg.io">FileIO</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#io--">io</a></span>()</code> </td>
</tr>
<tr id="i5" class="rowColor">
<td class="colFirst"><code>java.lang.String</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#location--">location</a></span>()</code>
<div class="block">Return the table's base location.</div>
</td>
</tr>
<tr id="i6" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/io/LocationProvider.html" title="interface in org.apache.iceberg.io">LocationProvider</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#locationProvider--">locationProvider</a></span>()</code> </td>
</tr>
<tr id="i7" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/ManageSnapshots.html" title="interface in org.apache.iceberg">ManageSnapshots</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#manageSnapshots--">manageSnapshots</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/ManageSnapshots.html" title="interface in org.apache.iceberg"><code>manage snapshots API</code></a> to manage snapshots in this table and commit.</div>
</td>
</tr>
<tr id="i8" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg">AppendFiles</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newAppend--">newAppend</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg"><code>append API</code></a> to add files to this table and commit.</div>
</td>
</tr>
<tr id="i9" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/DeleteFiles.html" title="interface in org.apache.iceberg">DeleteFiles</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newDelete--">newDelete</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/DeleteFiles.html" title="interface in org.apache.iceberg"><code>delete API</code></a> to replace files in this table and commit.</div>
</td>
</tr>
<tr id="i10" class="altColor">
<td class="colFirst"><code>default <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg">AppendFiles</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newFastAppend--">newFastAppend</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg"><code>append API</code></a> to add files to this table and commit.</div>
</td>
</tr>
<tr id="i11" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/OverwriteFiles.html" title="interface in org.apache.iceberg">OverwriteFiles</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newOverwrite--">newOverwrite</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/OverwriteFiles.html" title="interface in org.apache.iceberg"><code>overwrite API</code></a> to overwrite files by a filter expression.</div>
</td>
</tr>
<tr id="i12" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/ReplacePartitions.html" title="interface in org.apache.iceberg">ReplacePartitions</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newReplacePartitions--">newReplacePartitions</a></span>()</code>
<div class="block">Not recommended: Create a new <a href="../../../org/apache/iceberg/ReplacePartitions.html" title="interface in org.apache.iceberg"><code>replace partitions API</code></a> to dynamically
overwrite partitions in the table with new data.</div>
</td>
</tr>
<tr id="i13" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/RewriteFiles.html" title="interface in org.apache.iceberg">RewriteFiles</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newRewrite--">newRewrite</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/RewriteFiles.html" title="interface in org.apache.iceberg"><code>rewrite API</code></a> to replace files in this table and commit.</div>
</td>
</tr>
<tr id="i14" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/TableScan.html" title="interface in org.apache.iceberg">TableScan</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newScan--">newScan</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/TableScan.html" title="interface in org.apache.iceberg"><code>scan</code></a> for this table.</div>
</td>
</tr>
<tr id="i15" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/Transaction.html" title="interface in org.apache.iceberg">Transaction</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#newTransaction--">newTransaction</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/Transaction.html" title="interface in org.apache.iceberg"><code>transaction API</code></a> to commit multiple table operations at once.</div>
</td>
</tr>
<tr id="i16" class="altColor">
<td class="colFirst"><code>java.util.Map<java.lang.String,java.lang.String></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#properties--">properties</a></span>()</code>
<div class="block">Return a map of string properties for this table.</div>
</td>
</tr>
<tr id="i17" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#refresh--">refresh</a></span>()</code>
<div class="block">Refresh the current table metadata.</div>
</td>
</tr>
<tr id="i18" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/RewriteManifests.html" title="interface in org.apache.iceberg">RewriteManifests</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#rewriteManifests--">rewriteManifests</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/RewriteManifests.html" title="interface in org.apache.iceberg"><code>rewrite manifests API</code></a> to replace manifests for this
table and commit.</div>
</td>
</tr>
<tr id="i19" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/Rollback.html" title="interface in org.apache.iceberg">Rollback</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#rollback--">rollback</a></span>()</code>
<div class="block"><span class="deprecatedLabel">Deprecated.</span>
<div class="block"><span class="deprecationComment">Replaced by <a href="../../../org/apache/iceberg/Table.html#manageSnapshots--"><code>manageSnapshots()</code></a></span></div>
</div>
</td>
</tr>
<tr id="i20" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/Schema.html" title="class in org.apache.iceberg">Schema</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#schema--">schema</a></span>()</code>
<div class="block">Return the <a href="../../../org/apache/iceberg/Schema.html" title="class in org.apache.iceberg"><code>schema</code></a> for this table.</div>
</td>
</tr>
<tr id="i21" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg">Snapshot</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#snapshot-long-">snapshot</a></span>(long snapshotId)</code>
<div class="block">Get the <a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg"><code>snapshot</code></a> of this table with the given id, or null if there is no
matching snapshot.</div>
</td>
</tr>
<tr id="i22" class="altColor">
<td class="colFirst"><code>java.lang.Iterable<<a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg">Snapshot</a>></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#snapshots--">snapshots</a></span>()</code>
<div class="block">Get the <a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg"><code>snapshots</code></a> of this table.</div>
</td>
</tr>
<tr id="i23" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg">PartitionSpec</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#spec--">spec</a></span>()</code>
<div class="block">Return the <a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg"><code>partition spec</code></a> for this table.</div>
</td>
</tr>
<tr id="i24" class="altColor">
<td class="colFirst"><code>java.util.Map<java.lang.Integer,<a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg">PartitionSpec</a>></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#specs--">specs</a></span>()</code>
<div class="block">Return a map of <a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg"><code>partition specs</code></a> for this table.</div>
</td>
</tr>
<tr id="i25" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/UpdateLocation.html" title="interface in org.apache.iceberg">UpdateLocation</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#updateLocation--">updateLocation</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/UpdateLocation.html" title="interface in org.apache.iceberg"><code>UpdateLocation</code></a> to update table location and commit the changes.</div>
</td>
</tr>
<tr id="i26" class="altColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/UpdateProperties.html" title="interface in org.apache.iceberg">UpdateProperties</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#updateProperties--">updateProperties</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/UpdateProperties.html" title="interface in org.apache.iceberg"><code>UpdateProperties</code></a> to update table properties and commit the changes.</div>
</td>
</tr>
<tr id="i27" class="rowColor">
<td class="colFirst"><code><a href="../../../org/apache/iceberg/UpdateSchema.html" title="interface in org.apache.iceberg">UpdateSchema</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../org/apache/iceberg/Table.html#updateSchema--">updateSchema</a></span>()</code>
<div class="block">Create a new <a href="../../../org/apache/iceberg/UpdateSchema.html" title="interface in org.apache.iceberg"><code>UpdateSchema</code></a> to alter the columns of this table and commit the change.</div>
</td>
</tr>
</table>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method.detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="refresh--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>refresh</h4>
<pre>void refresh()</pre>
<div class="block">Refresh the current table metadata.</div>
</li>
</ul>
<a name="newScan--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newScan</h4>
<pre><a href="../../../org/apache/iceberg/TableScan.html" title="interface in org.apache.iceberg">TableScan</a> newScan()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/TableScan.html" title="interface in org.apache.iceberg"><code>scan</code></a> for this table.
<p>
Once a table scan is created, it can be refined to project columns and filter data.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a table scan for this table</dd>
</dl>
</li>
</ul>
<a name="schema--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>schema</h4>
<pre><a href="../../../org/apache/iceberg/Schema.html" title="class in org.apache.iceberg">Schema</a> schema()</pre>
<div class="block">Return the <a href="../../../org/apache/iceberg/Schema.html" title="class in org.apache.iceberg"><code>schema</code></a> for this table.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>this table's schema</dd>
</dl>
</li>
</ul>
<a name="spec--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>spec</h4>
<pre><a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg">PartitionSpec</a> spec()</pre>
<div class="block">Return the <a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg"><code>partition spec</code></a> for this table.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>this table's partition spec</dd>
</dl>
</li>
</ul>
<a name="specs--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>specs</h4>
<pre>java.util.Map<java.lang.Integer,<a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg">PartitionSpec</a>> specs()</pre>
<div class="block">Return a map of <a href="../../../org/apache/iceberg/PartitionSpec.html" title="class in org.apache.iceberg"><code>partition specs</code></a> for this table.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>this table's partition specs map</dd>
</dl>
</li>
</ul>
<a name="properties--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>properties</h4>
<pre>java.util.Map<java.lang.String,java.lang.String> properties()</pre>
<div class="block">Return a map of string properties for this table.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>this table's properties map</dd>
</dl>
</li>
</ul>
<a name="location--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>location</h4>
<pre>java.lang.String location()</pre>
<div class="block">Return the table's base location.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>this table's location</dd>
</dl>
</li>
</ul>
<a name="currentSnapshot--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>currentSnapshot</h4>
<pre><a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg">Snapshot</a> currentSnapshot()</pre>
<div class="block">Get the current <a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg"><code>snapshot</code></a> for this table, or null if there are no snapshots.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>the current table Snapshot.</dd>
</dl>
</li>
</ul>
<a name="snapshot-long-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>snapshot</h4>
<pre><a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg">Snapshot</a> snapshot(long snapshotId)</pre>
<div class="block">Get the <a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg"><code>snapshot</code></a> of this table with the given id, or null if there is no
matching snapshot.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>the <a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg"><code>Snapshot</code></a> with the given id.</dd>
</dl>
</li>
</ul>
<a name="snapshots--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>snapshots</h4>
<pre>java.lang.Iterable<<a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg">Snapshot</a>> snapshots()</pre>
<div class="block">Get the <a href="../../../org/apache/iceberg/Snapshot.html" title="interface in org.apache.iceberg"><code>snapshots</code></a> of this table.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>an Iterable of snapshots of this table.</dd>
</dl>
</li>
</ul>
<a name="history--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>history</h4>
<pre>java.util.List<<a href="../../../org/apache/iceberg/HistoryEntry.html" title="interface in org.apache.iceberg">HistoryEntry</a>> history()</pre>
<div class="block">Get the snapshot history of this table.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a list of <a href="../../../org/apache/iceberg/HistoryEntry.html" title="interface in org.apache.iceberg"><code>history entries</code></a></dd>
</dl>
</li>
</ul>
<a name="updateSchema--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>updateSchema</h4>
<pre><a href="../../../org/apache/iceberg/UpdateSchema.html" title="interface in org.apache.iceberg">UpdateSchema</a> updateSchema()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/UpdateSchema.html" title="interface in org.apache.iceberg"><code>UpdateSchema</code></a> to alter the columns of this table and commit the change.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/UpdateSchema.html" title="interface in org.apache.iceberg"><code>UpdateSchema</code></a></dd>
</dl>
</li>
</ul>
<a name="updateProperties--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>updateProperties</h4>
<pre><a href="../../../org/apache/iceberg/UpdateProperties.html" title="interface in org.apache.iceberg">UpdateProperties</a> updateProperties()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/UpdateProperties.html" title="interface in org.apache.iceberg"><code>UpdateProperties</code></a> to update table properties and commit the changes.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/UpdateProperties.html" title="interface in org.apache.iceberg"><code>UpdateProperties</code></a></dd>
</dl>
</li>
</ul>
<a name="updateLocation--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>updateLocation</h4>
<pre><a href="../../../org/apache/iceberg/UpdateLocation.html" title="interface in org.apache.iceberg">UpdateLocation</a> updateLocation()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/UpdateLocation.html" title="interface in org.apache.iceberg"><code>UpdateLocation</code></a> to update table location and commit the changes.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/UpdateLocation.html" title="interface in org.apache.iceberg"><code>UpdateLocation</code></a></dd>
</dl>
</li>
</ul>
<a name="newAppend--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newAppend</h4>
<pre><a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg">AppendFiles</a> newAppend()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg"><code>append API</code></a> to add files to this table and commit.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg"><code>AppendFiles</code></a></dd>
</dl>
</li>
</ul>
<a name="newFastAppend--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newFastAppend</h4>
<pre>default <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg">AppendFiles</a> newFastAppend()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg"><code>append API</code></a> to add files to this table and commit.
<p>
Using this method signals to the underlying implementation that the append should not perform
extra work in order to commit quickly. Fast appends are not recommended for normal writes
because the fast commit may cause split planning to slow down over time.
<p>
Implementations may not support fast appends, in which case this will return the same appender
as <a href="../../../org/apache/iceberg/Table.html#newAppend--"><code>newAppend()</code></a>.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/AppendFiles.html" title="interface in org.apache.iceberg"><code>AppendFiles</code></a></dd>
</dl>
</li>
</ul>
<a name="newRewrite--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newRewrite</h4>
<pre><a href="../../../org/apache/iceberg/RewriteFiles.html" title="interface in org.apache.iceberg">RewriteFiles</a> newRewrite()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/RewriteFiles.html" title="interface in org.apache.iceberg"><code>rewrite API</code></a> to replace files in this table and commit.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/RewriteFiles.html" title="interface in org.apache.iceberg"><code>RewriteFiles</code></a></dd>
</dl>
</li>
</ul>
<a name="rewriteManifests--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>rewriteManifests</h4>
<pre><a href="../../../org/apache/iceberg/RewriteManifests.html" title="interface in org.apache.iceberg">RewriteManifests</a> rewriteManifests()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/RewriteManifests.html" title="interface in org.apache.iceberg"><code>rewrite manifests API</code></a> to replace manifests for this
table and commit.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/RewriteManifests.html" title="interface in org.apache.iceberg"><code>RewriteManifests</code></a></dd>
</dl>
</li>
</ul>
<a name="newOverwrite--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newOverwrite</h4>
<pre><a href="../../../org/apache/iceberg/OverwriteFiles.html" title="interface in org.apache.iceberg">OverwriteFiles</a> newOverwrite()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/OverwriteFiles.html" title="interface in org.apache.iceberg"><code>overwrite API</code></a> to overwrite files by a filter expression.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/OverwriteFiles.html" title="interface in org.apache.iceberg"><code>OverwriteFiles</code></a></dd>
</dl>
</li>
</ul>
<a name="newReplacePartitions--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newReplacePartitions</h4>
<pre><a href="../../../org/apache/iceberg/ReplacePartitions.html" title="interface in org.apache.iceberg">ReplacePartitions</a> newReplacePartitions()</pre>
<div class="block">Not recommended: Create a new <a href="../../../org/apache/iceberg/ReplacePartitions.html" title="interface in org.apache.iceberg"><code>replace partitions API</code></a> to dynamically
overwrite partitions in the table with new data.
<p>
This is provided to implement SQL compatible with Hive table operations but is not recommended.
Instead, use the <a href="../../../org/apache/iceberg/OverwriteFiles.html" title="interface in org.apache.iceberg"><code>overwrite API</code></a> to explicitly overwrite data.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/ReplacePartitions.html" title="interface in org.apache.iceberg"><code>ReplacePartitions</code></a></dd>
</dl>
</li>
</ul>
<a name="newDelete--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newDelete</h4>
<pre><a href="../../../org/apache/iceberg/DeleteFiles.html" title="interface in org.apache.iceberg">DeleteFiles</a> newDelete()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/DeleteFiles.html" title="interface in org.apache.iceberg"><code>delete API</code></a> to delete files from this table and commit.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/DeleteFiles.html" title="interface in org.apache.iceberg"><code>DeleteFiles</code></a></dd>
</dl>
</li>
</ul>
<a name="expireSnapshots--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>expireSnapshots</h4>
<pre><a href="../../../org/apache/iceberg/ExpireSnapshots.html" title="interface in org.apache.iceberg">ExpireSnapshots</a> expireSnapshots()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/ExpireSnapshots.html" title="interface in org.apache.iceberg"><code>expire API</code></a> to manage snapshots in this table and commit.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/ExpireSnapshots.html" title="interface in org.apache.iceberg"><code>ExpireSnapshots</code></a></dd>
</dl>
</li>
</ul>
<a name="rollback--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>rollback</h4>
<pre>@Deprecated
<a href="../../../org/apache/iceberg/Rollback.html" title="interface in org.apache.iceberg">Rollback</a> rollback()</pre>
<div class="block"><span class="deprecatedLabel">Deprecated.</span> <span class="deprecationComment">Replaced by <a href="../../../org/apache/iceberg/Table.html#manageSnapshots--"><code>manageSnapshots()</code></a></span></div>
<div class="block">Create a new <a href="../../../org/apache/iceberg/Rollback.html" title="interface in org.apache.iceberg"><code>rollback API</code></a> to roll back to a previous snapshot and commit.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/Rollback.html" title="interface in org.apache.iceberg"><code>Rollback</code></a></dd>
</dl>
</li>
</ul>
<a name="manageSnapshots--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>manageSnapshots</h4>
<pre><a href="../../../org/apache/iceberg/ManageSnapshots.html" title="interface in org.apache.iceberg">ManageSnapshots</a> manageSnapshots()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/ManageSnapshots.html" title="interface in org.apache.iceberg"><code>manage snapshots API</code></a> to manage snapshots in this table and commit.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/ManageSnapshots.html" title="interface in org.apache.iceberg"><code>ManageSnapshots</code></a></dd>
</dl>
</li>
</ul>
<a name="newTransaction--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>newTransaction</h4>
<pre><a href="../../../org/apache/iceberg/Transaction.html" title="interface in org.apache.iceberg">Transaction</a> newTransaction()</pre>
<div class="block">Create a new <a href="../../../org/apache/iceberg/Transaction.html" title="interface in org.apache.iceberg"><code>transaction API</code></a> to commit multiple table operations at once.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a new <a href="../../../org/apache/iceberg/Transaction.html" title="interface in org.apache.iceberg"><code>Transaction</code></a></dd>
</dl>
</li>
</ul>
<a name="io--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>io</h4>
<pre><a href="../../../org/apache/iceberg/io/FileIO.html" title="interface in org.apache.iceberg.io">FileIO</a> io()</pre>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a <a href="../../../org/apache/iceberg/io/FileIO.html" title="interface in org.apache.iceberg.io"><code>FileIO</code></a> to read and write table data and metadata files</dd>
</dl>
</li>
</ul>
<a name="encryption--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>encryption</h4>
<pre><a href="../../../org/apache/iceberg/encryption/EncryptionManager.html" title="interface in org.apache.iceberg.encryption">EncryptionManager</a> encryption()</pre>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>an <a href="../../../org/apache/iceberg/encryption/EncryptionManager.html" title="interface in org.apache.iceberg.encryption"><code>EncryptionManager</code></a> to encrypt and decrypt
data files.</dd>
</dl>
</li>
</ul>
<a name="locationProvider--">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>locationProvider</h4>
<pre><a href="../../../org/apache/iceberg/io/LocationProvider.html" title="interface in org.apache.iceberg.io">LocationProvider</a> locationProvider()</pre>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>a <a href="../../../org/apache/iceberg/io/LocationProvider.html" title="interface in org.apache.iceberg.io"><code>LocationProvider</code></a> to provide locations for new data files</dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../index-all.html">Index</a></li>
<li><a href="../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../org/apache/iceberg/SystemProperties.html" title="class in org.apache.iceberg"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../org/apache/iceberg/TableMetadata.html" title="class in org.apache.iceberg"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../index.html?org/apache/iceberg/Table.html" target="_top">Frames</a></li>
<li><a href="Table.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.