repo_name
stringlengths 4
116
| path
stringlengths 3
942
| size
stringlengths 1
7
| content
stringlengths 3
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|
trocvuong/izzfeed_us | wp-content/plugins/video-thumbnails/php/providers/class-youku-thumbnails.php | 3248 | <?php
/* Copyright 2014 Sutherland Boswell (email : sutherland.boswell@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2, as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
// Require thumbnail provider class
require_once( VIDEO_THUMBNAILS_PATH . '/php/providers/class-video-thumbnails-provider.php' );
class Youku_Thumbnails extends Video_Thumbnails_Provider {

	// Human-readable name of the video provider
	public $service_name = 'Youku';
	const service_name = 'Youku';
	// Slug for the video provider
	public $service_slug = 'youku';
	const service_slug = 'youku';

	/**
	 * Registers this provider in the plugin's provider array, keyed by slug.
	 *
	 * @param array $providers Map of slug => provider instance.
	 * @return array The provider map with this provider added.
	 */
	public static function register_provider( $providers ) {
		$providers[self::service_slug] = new self;
		return $providers;
	}

	// Regex strings used to find a Youku video ID in post markup
	public $regexes = array(
		'#http://player\.youku\.com/embed/([A-Za-z0-9]+)#', // iFrame
		'#http://player\.youku\.com/player\.php/sid/([A-Za-z0-9]+)/v\.swf#', // Flash
		'#http://v\.youku\.com/v_show/id_([A-Za-z0-9]+)\.html#' // Link
	);

	/**
	 * Fetches the thumbnail URL for a Youku video via the getPlayList API.
	 *
	 * @param string $id Youku video ID extracted by one of the regexes above.
	 * @return mixed Thumbnail URL string on success, the value of
	 *               construct_info_retrieval_error() on HTTP failure, or
	 *               false when the API response cannot be parsed.
	 */
	public function get_thumbnail_url( $id ) {
		$request = "http://v.youku.com/player/getPlayList/VideoIDS/$id/";
		$response = wp_remote_get( $request, array( 'sslverify' => false ) );
		if( is_wp_error( $response ) ) {
			$result = $this->construct_info_retrieval_error( $request, $response );
		} else {
			$result = json_decode( $response['body'] );
			// json_decode() returns null on invalid JSON, and the API may
			// return an empty "data" array for unknown IDs; previously this
			// dereferenced ->data[0]->logo unconditionally and fataled.
			if ( isset( $result->data[0]->logo ) ) {
				$result = $result->data[0]->logo;
			} else {
				$result = false;
			}
		}
		return $result;
	}

	// Test cases used by the plugin's provider self-test screen
	public static function get_test_cases() {
		return array(
			array(
				'markup'        => '<iframe height=498 width=510 src="http://player.youku.com/embed/XMzQyMzk5MzQ4" frameborder=0 allowfullscreen></iframe>',
				'expected'      => 'http://g1.ykimg.com/1100641F464F0FB57407E2053DFCBC802FBBC4-E4C5-7A58-0394-26C366F10493',
				'expected_hash' => 'deac7bb89058a8c46ae2350da9d33ba8',
				'name'          => __( 'iFrame Embed', 'video-thumbnails' )
			),
			array(
				'markup'        => '<embed src="http://player.youku.com/player.php/sid/XMzQyMzk5MzQ4/v.swf" quality="high" width="480" height="400" align="middle" allowScriptAccess="sameDomain" allowFullscreen="true" type="application/x-shockwave-flash"></embed>',
				'expected'      => 'http://g1.ykimg.com/1100641F464F0FB57407E2053DFCBC802FBBC4-E4C5-7A58-0394-26C366F10493',
				'expected_hash' => 'deac7bb89058a8c46ae2350da9d33ba8',
				'name'          => __( 'Flash Embed', 'video-thumbnails' )
			),
			array(
				'markup'        => 'http://v.youku.com/v_show/id_XMzQyMzk5MzQ4.html',
				'expected'      => 'http://g1.ykimg.com/1100641F464F0FB57407E2053DFCBC802FBBC4-E4C5-7A58-0394-26C366F10493',
				'expected_hash' => 'deac7bb89058a8c46ae2350da9d33ba8',
				'name'          => __( 'Video URL', 'video-thumbnails' )
			),
		);
	}
}
?> | gpl-2.0 |
notspiff/xbmc | xbmc/cores/DllLoader/exports/util/EmuFileWrapper.cpp | 5290 | /*
* Copyright (C) 2005-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "EmuFileWrapper.h"
#include "filesystem/File.h"
#include "threads/SingleLock.h"
CEmuFileWrapper g_emuFileWrapper;
namespace
{
// A FILE* handed out by this wrapper is valid iff it is non-null.
constexpr bool isValidFilePtr(FILE* f)
{
  return f != nullptr;
}
}
CEmuFileWrapper::CEmuFileWrapper()
{
  // since we always use dlls we might just initialize it directly:
  // mark every slot in the emulated-file table as free.
  for (auto& file : m_files)
  {
    memset(&file, 0, sizeof(EmuFileObject));
    file.used = false;
    file.fd = -1;
  }
}
CEmuFileWrapper::~CEmuFileWrapper()
{
// Release any emulated files still registered at shutdown.
CleanUp();
}
void CEmuFileWrapper::CleanUp()
{
CSingleLock lock(m_criticalSection);
for (int i = 0; i < MAX_EMULATED_FILES; i++)
{
if (m_files[i].used)
{
m_files[i].file_xbmc->Close();
delete m_files[i].file_xbmc;
if (m_files[i].file_lock)
{
delete m_files[i].file_lock;
m_files[i].file_lock = nullptr;
}
m_files[i].used = false;
m_files[i].fd = -1;
}
}
}
EmuFileObject* CEmuFileWrapper::RegisterFileObject(XFILE::CFile* pFile)
{
  CSingleLock lock(m_criticalSection);

  // Claim the first unused slot in the table, if any.
  for (int i = 0; i < MAX_EMULATED_FILES; i++)
  {
    EmuFileObject& slot = m_files[i];
    if (slot.used)
      continue;

    slot.used = true;
    slot.file_xbmc = pFile;
    slot.fd = i + FILE_WRAPPER_OFFSET;
    slot.file_lock = new CCriticalSection();
    return &slot;
  }

  // Table is full: no slot available.
  return nullptr;
}
void CEmuFileWrapper::UnRegisterFileObjectByDescriptor(int fd)
{
  int i = fd - FILE_WRAPPER_OFFSET;
  if (! (i >= 0 && i < MAX_EMULATED_FILES))
    return;

  // Take the table lock BEFORE inspecting the slot: previously 'used' was
  // read unsynchronized, racing with RegisterFileObject()/CleanUp() which
  // mutate the same slot under m_criticalSection.
  CSingleLock lock(m_criticalSection);

  if (!m_files[i].used)
    return;

  // we assume the emulated function already deleted the CFile object
  if (m_files[i].file_lock)
  {
    delete m_files[i].file_lock;
    m_files[i].file_lock = nullptr;
  }
  m_files[i].used = false;
  m_files[i].fd = -1;
}
void CEmuFileWrapper::UnRegisterFileObjectByStream(FILE* stream)
{
  if (!isValidFilePtr(stream))
    return;

  // The FILE* handed out by this wrapper is really an EmuFileObject*.
  auto* object = reinterpret_cast<EmuFileObject*>(stream);
  UnRegisterFileObjectByDescriptor(object->fd);
}
void CEmuFileWrapper::LockFileObjectByDescriptor(int fd)
{
  // Block until the per-file lock for this descriptor is acquired.
  const int index = fd - FILE_WRAPPER_OFFSET;
  if (index < 0 || index >= MAX_EMULATED_FILES)
    return;
  if (m_files[index].used)
    m_files[index].file_lock->lock();
}
bool CEmuFileWrapper::TryLockFileObjectByDescriptor(int fd)
{
  // Non-blocking variant: true only if the lock was actually acquired.
  const int index = fd - FILE_WRAPPER_OFFSET;
  if (index < 0 || index >= MAX_EMULATED_FILES)
    return false;
  if (!m_files[index].used)
    return false;
  return m_files[index].file_lock->try_lock();
}
void CEmuFileWrapper::UnlockFileObjectByDescriptor(int fd)
{
  // Release the per-file lock previously taken via Lock/TryLock.
  const int index = fd - FILE_WRAPPER_OFFSET;
  if (index < 0 || index >= MAX_EMULATED_FILES)
    return;
  if (m_files[index].used)
    m_files[index].file_lock->unlock();
}
EmuFileObject* CEmuFileWrapper::GetFileObjectByDescriptor(int fd)
{
  // Map a wrapped descriptor back to its table slot, or nullptr.
  const int index = fd - FILE_WRAPPER_OFFSET;
  if (index < 0 || index >= MAX_EMULATED_FILES)
    return nullptr;
  return m_files[index].used ? &m_files[index] : nullptr;
}
EmuFileObject* CEmuFileWrapper::GetFileObjectByStream(FILE* stream)
{
  if (!isValidFilePtr(stream))
    return nullptr;

  // The FILE* is really an EmuFileObject*; validate via its descriptor.
  auto* object = reinterpret_cast<EmuFileObject*>(stream);
  return GetFileObjectByDescriptor(object->fd);
}
XFILE::CFile* CEmuFileWrapper::GetFileXbmcByDescriptor(int fd)
{
  // Return the underlying CFile for an in-use descriptor, else nullptr.
  EmuFileObject* object = GetFileObjectByDescriptor(fd);
  return (object != nullptr && object->used) ? object->file_xbmc : nullptr;
}
XFILE::CFile* CEmuFileWrapper::GetFileXbmcByStream(FILE* stream)
{
  if (!isValidFilePtr(stream))
    return nullptr;

  // The FILE* is really an EmuFileObject*; only hand out the CFile
  // while the slot is marked in use.
  auto* object = reinterpret_cast<EmuFileObject*>(stream);
  if (object != nullptr && object->used)
    return object->file_xbmc;
  return nullptr;
}
int CEmuFileWrapper::GetDescriptorByStream(FILE* stream)
{
  if (isValidFilePtr(stream))
  {
    auto* object = reinterpret_cast<EmuFileObject*>(stream);
    const int index = object->fd - FILE_WRAPPER_OFFSET;
    if (index >= 0 && index < MAX_EMULATED_FILES)
      return object->fd; // same value as index + FILE_WRAPPER_OFFSET
  }
  return -1;
}
FILE* CEmuFileWrapper::GetStreamByDescriptor(int fd)
{
auto object = GetFileObjectByDescriptor(fd);
if (object != nullptr && object->used)
{
return reinterpret_cast<FILE*>(object);
}
return nullptr;
}
bool CEmuFileWrapper::StreamIsEmulatedFile(FILE* stream)
{
  if (!isValidFilePtr(stream))
    return false;

  // Delegate to the descriptor check after unwrapping the stream.
  auto* object = reinterpret_cast<EmuFileObject*>(stream);
  return DescriptorIsEmulatedFile(object->fd);
}
| gpl-2.0 |
qianguozheng/Openswan | testing/kunit/xmit-02/testspi1.sh | 951 | #!/bin/sh
TZ=GMT export TZ
enckey=0x4043434545464649494a4a4c4c4f4f515152525454575758
authkey=0x87658765876587658765876587658765
ipsec spi --del --af inet --edst 205.150.200.252 --spi 0x12345678 --proto esp
ipsec spi --del --af inet --edst 205.150.200.252 --spi 0x12345678 --proto tun
ipsec spi --af inet --edst 205.150.200.252 --spi 0x12345678 --proto esp --src 205.150.200.232 --esp 3des-md5-96 --enckey $enckey --authkey $authkey
ipsec spi --af inet --edst 205.150.200.252 --spi 0x12345678 --proto tun --src 205.150.200.232 --dst 205.150.200.252 --ip4
ipsec spigrp inet 205.150.200.252 0x12345678 tun inet 205.150.200.252 0x12345678 esp
ipsec eroute --del --eraf inet --src 205.150.200.163/32 --dst 205.150.200.252/32
ipsec eroute --add --eraf inet --src 205.150.200.163/32 --dst 205.150.200.252/32 --said tun0x12345678@205.150.200.252
# magic route command
ip route add 205.150.200.252 via 205.150.200.238 src 205.150.200.163 dev ipsec0
| gpl-2.0 |
dmarx/praw | tests/test_internal.py | 1261 | from __future__ import print_function, unicode_literals
from praw.internal import _to_reddit_list
from .helper import PRAWTest, betamax
class InternalTest(PRAWTest):
    """Unit tests for praw.internal._to_reddit_list."""

    def test__to_reddit_list(self):
        self.assertEqual('hello', _to_reddit_list('hello'))

    def test__to_reddit_list_with_list(self):
        self.assertEqual('hello', _to_reddit_list(['hello']))

    def test__to_reddit_list_with_empty_list(self):
        self.assertEqual('', _to_reddit_list([]))

    def test__to_reddit_list_with_big_list(self):
        self.assertEqual('hello,world', _to_reddit_list(['hello', 'world']))

    @betamax()
    def test__to_reddit_list_with_object(self):
        self.assertEqual(self.sr, _to_reddit_list(self.r.get_subreddit(self.sr)))

    def test__to_reddit_list_with_object_in_list(self):
        subreddit = self.r.get_subreddit(self.sr)
        self.assertEqual(self.sr, _to_reddit_list([subreddit]))

    def test__to_reddit_list_with_mix(self):
        subreddit = self.r.get_subreddit(self.sr)
        self.assertEqual('{0},{1}'.format(self.sr, 'hello'),
                         _to_reddit_list([subreddit, 'hello']))
| gpl-3.0 |
blue-eyed-devil/testCMS | externals/highcharts/es-modules/masters/indicators/mfi.src.js | 258 | /**
* @license @product.name@ JS v@product.version@ (@product.date@)
*
* Money Flow Index indicator for Highstock
*
* (c) 2010-2019 Grzegorz Blachliński
*
* License: www.highcharts.com/license
*/
'use strict';
import '../../indicators/mfi.src.js';
| gpl-3.0 |
livecodesebastien/livecode | engine/src/vclip.h | 2340 | /* Copyright (C) 2003-2013 Runtime Revolution Ltd.
This file is part of LiveCode.
LiveCode is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License v3 as published by the Free
Software Foundation.
LiveCode is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with LiveCode. If not see <http://www.gnu.org/licenses/>. */
//
// MCVideoClip class declarations
//
#ifndef VIDEOCLIP_H
#define VIDEOCLIP_H
#include "control.h"
class MCVideoClip : public MCObject
{
// Playback rate multiplier for the clip.
real8 scale;
// Frames per second of the stored video data.
uint2 framerate;
// Raw video data buffer ('size' bytes), owned by this object.
uint1 *frames;
// Size of the 'frames' buffer in bytes.
uint4 size;
public:
MCVideoClip();
MCVideoClip(const MCVideoClip &sref);
// virtual functions from MCObject
virtual ~MCVideoClip();
virtual Chunk_term gettype() const;
virtual const char *gettypestring();
virtual Exec_stat getprop(uint4 parid, Properties which, MCExecPoint &, Boolean effective);
virtual Exec_stat setprop(uint4 parid, Properties which, MCExecPoint &, Boolean effective);
virtual Boolean del();
virtual void paste(void);
// Returns a deep copy of this clip.
MCVideoClip *clone();
// Returns the clip data as a newly-allocated buffer; caller takes ownership.
char *getfile();
real8 getscale()
{
return scale;
}
// Reads clip data from 'stream' (file 'fname'); returns success.
Boolean import(const char *fname, IO_handle stream);
// Serialization to/from object streams (see MCObject counterparts).
IO_stat load(IO_handle stream, const char *version);
IO_stat extendedload(MCObjectInputStream& p_stream, const char *p_version, uint4 p_length);
IO_stat save(IO_handle stream, uint4 p_part, bool p_force_ext);
IO_stat extendedsave(MCObjectOutputStream& p_stream, uint4 p_part);
// Doubly-linked-list plumbing inherited from MCDLlist, downcast to
// MCVideoClip for convenience at call sites.
MCVideoClip *next()
{
return (MCVideoClip *)MCDLlist::next();
}
MCVideoClip *prev()
{
return (MCVideoClip *)MCDLlist::prev();
}
void totop(MCVideoClip *&list)
{
MCDLlist::totop((MCDLlist *&)list);
}
void insertto(MCVideoClip *&list)
{
MCDLlist::insertto((MCDLlist *&)list);
}
void appendto(MCVideoClip *&list)
{
MCDLlist::appendto((MCDLlist *&)list);
}
void append(MCVideoClip *node)
{
MCDLlist::append((MCDLlist *)node);
}
void splitat(MCVideoClip *node)
{
MCDLlist::splitat((MCDLlist *)node) ;
}
MCVideoClip *remove
(MCVideoClip *&list)
{
return (MCVideoClip *)MCDLlist::remove
((MCDLlist *&)list);
}
};
#endif
| gpl-3.0 |
Whitechaser/darkstar | scripts/zones/Misareaux_Coast/mobs/Gigas_Catapulter.lua | 291 | -----------------------------------
-- Area: Misareaux_Coast
-- MOB: Gigas Catapulter
-----------------------------------
mixins = {require("scripts/mixins/fomor_hate")}

function onMobSpawn(mob)
    -- Opt this mob out of the fomor-hate adjustment applied by the mixin.
    mob:setLocalVar("fomorHateAdj", -1)
end

function onMobDeath(mob, player, isKiller)
end
selmentdev/selment-toolchain | source/gcc-latest/libstdc++-v3/testsuite/experimental/memory/observer_ptr/requirements.cc | 2725 | // { dg-options "-std=gnu++14" }
// { dg-do compile }
// Copyright (C) 2015-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <experimental/memory>
using std::experimental::observer_ptr;
// Helper types used to exercise observer_ptr's conversion rules:
// a non-trivially-constructible payload, an unrelated type, and a
// base/derived pair for pointer-convertibility checks.
struct nontrivial {nontrivial() {}};
struct other {};
struct base {};
struct derived : base {};
// observer_ptr itself must stay trivially copyable/destructible even for
// a non-trivial pointee, but default construction is not trivial.
static_assert(!std::is_trivially_constructible<
observer_ptr<nontrivial>>::value, "");
static_assert(std::is_trivially_copyable<
observer_ptr<nontrivial>>::value, "");
static_assert(std::is_trivially_destructible<
observer_ptr<nontrivial>>::value, "");
// Construction: from T*, from a derived pointer, never from unrelated types.
static_assert(std::is_constructible<
observer_ptr<nontrivial>, nontrivial*>::value,
"");
static_assert(std::is_constructible<observer_ptr<base>, base*>::value, "");
static_assert(std::is_constructible<observer_ptr<base>, derived*>::value, "");
static_assert(!std::is_constructible<observer_ptr<base>, other*>::value, "");
// Construction from other observer_ptr specializations follows the same
// pointer-convertibility rules.
static_assert(std::is_constructible<
observer_ptr<base>, observer_ptr<base>>::value, "");
static_assert(std::is_constructible<
observer_ptr<base>, observer_ptr<derived>>::value, "");
static_assert(!std::is_constructible<
observer_ptr<base>, observer_ptr<other>>::value, "");
// Assignment: from observer_ptr only (raw-pointer assignment requires the
// explicit constructor), with derived-to-base and T-to-const-T allowed.
static_assert(!std::is_assignable<
observer_ptr<nontrivial>, nontrivial*>::value,
"");
static_assert(std::is_assignable<
observer_ptr<nontrivial>, observer_ptr<nontrivial>>::value,
"");
static_assert(std::is_assignable<observer_ptr<base>,
observer_ptr<base>>::value, "");
static_assert(std::is_assignable<observer_ptr<base>,
observer_ptr<derived>>::value, "");
static_assert(!std::is_assignable<
observer_ptr<base>, observer_ptr<other>>::value, "");
static_assert(std::is_assignable<observer_ptr<const int>,
observer_ptr<int>>::value, "");
static_assert(!std::is_assignable<observer_ptr<int>,
observer_ptr<const int>>::value, "");
| gpl-3.0 |
dacrybabysuck/darkstar | scripts/zones/Arrapago_Remnants/mobs/Vile_Wahzil.lua | 686 | -----------------------------------
-- Area: Arrapago Remnants
-- Mob: Vile Wahzil
-----------------------------------
local ID = require("scripts/zones/Arrapago_Remnants/IDs")
require("scripts/globals/status")
-----------------------------------
function onMobSpawn(mob)
    -- Hide the socket NPC for this instance while the NM is up.
    local instance = mob:getInstance()
    local socketId = bit.band(ID.npc[2][2].SOCKET, 0xFFF)
    instance:getEntity(socketId, dsp.objType.NPC):setStatus(dsp.status.DISAPPEAR)
end

function onMobDeath(mob, player, isKiller)
    -- Drop 2x 'Qnt' copies of the cell item recorded in local vars.
    local cell = mob:getLocalVar("Cell")
    local copies = mob:getLocalVar("Qnt") * 2
    for _ = 1, copies do
        player:addTreasure(cell)
    end
end

function onMobDespawn(mob)
end
jeffreytinkess/OS_A2 | src/extern/acpica/source/components/executer/exstorob.c | 12066 | /******************************************************************************
*
* Module Name: exstorob - AML Interpreter object store support, store to object
*
*****************************************************************************/
/******************************************************************************
*
* 1. Copyright Notice
*
* Some or all of this work - Copyright (c) 1999 - 2015, Intel Corp.
* All rights reserved.
*
* 2. License
*
* 2.1. This is your license from Intel Corp. under its intellectual property
* rights. You may have additional license terms from the party that provided
* you this software, covering your right to use that party's intellectual
* property rights.
*
* 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a
* copy of the source code appearing in this file ("Covered Code") an
* irrevocable, perpetual, worldwide license under Intel's copyrights in the
* base code distributed originally by Intel ("Original Intel Code") to copy,
* make derivatives, distribute, use and display any portion of the Covered
* Code in any form, with the right to sublicense such rights; and
*
* 2.3. Intel grants Licensee a non-exclusive and non-transferable patent
* license (with the right to sublicense), under only those claims of Intel
* patents that are infringed by the Original Intel Code, to make, use, sell,
* offer to sell, and import the Covered Code and derivative works thereof
* solely to the minimum extent necessary to exercise the above copyright
* license, and in no event shall the patent license extend to any additions
* to or modifications of the Original Intel Code. No other license or right
* is granted directly or by implication, estoppel or otherwise;
*
* The above copyright and patent license is granted only if the following
* conditions are met:
*
* 3. Conditions
*
* 3.1. Redistribution of Source with Rights to Further Distribute Source.
* Redistribution of source code of any substantial portion of the Covered
* Code or modification with rights to further distribute source must include
* the above Copyright Notice, the above License, this list of Conditions,
* and the following Disclaimer and Export Compliance provision. In addition,
* Licensee must cause all Covered Code to which Licensee contributes to
* contain a file documenting the changes Licensee made to create that Covered
* Code and the date of any change. Licensee must include in that file the
* documentation of any changes made by any predecessor Licensee. Licensee
* must include a prominent statement that the modification is derived,
* directly or indirectly, from Original Intel Code.
*
* 3.2. Redistribution of Source with no Rights to Further Distribute Source.
* Redistribution of source code of any substantial portion of the Covered
* Code or modification without rights to further distribute source must
* include the following Disclaimer and Export Compliance provision in the
* documentation and/or other materials provided with distribution. In
* addition, Licensee may not authorize further sublicense of source of any
* portion of the Covered Code, and must include terms to the effect that the
* license from Licensee to its licensee is limited to the intellectual
* property embodied in the software Licensee provides to its licensee, and
* not to intellectual property embodied in modifications its licensee may
* make.
*
* 3.3. Redistribution of Executable. Redistribution in executable form of any
* substantial portion of the Covered Code or modification must reproduce the
* above Copyright Notice, and the following Disclaimer and Export Compliance
* provision in the documentation and/or other materials provided with the
* distribution.
*
* 3.4. Intel retains all right, title, and interest in and to the Original
* Intel Code.
*
* 3.5. Neither the name Intel nor any other trademark owned or controlled by
* Intel shall be used in advertising or otherwise to promote the sale, use or
* other dealings in products derived from or relating to the Covered Code
* without prior written authorization from Intel.
*
* 4. Disclaimer and Export Compliance
*
* 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED
* HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE
* IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE,
* INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY
* UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY
* IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES
* OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR
* COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT,
* SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY
* CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL
* HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS
* SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY
* LIMITED REMEDY.
*
* 4.3. Licensee shall not export, either directly or indirectly, any of this
* software or system incorporating such software without first obtaining any
* required license or other approval from the U. S. Department of Commerce or
* any other agency or department of the United States Government. In the
* event Licensee exports any such software from the United States or
* re-exports any such software from a foreign destination, Licensee shall
* ensure that the distribution and export/re-export of the software is in
* compliance with all laws, regulations, orders, or other restrictions of the
* U.S. Export Administration Regulations. Licensee agrees that neither it nor
* any of its subsidiaries will export/re-export any technical data, process,
* software, or service, directly or indirectly, to any country for which the
* United States government or any agency thereof requires an export license,
* other governmental approval, or letter of assurance, without first obtaining
* such license, approval or letter.
*
*****************************************************************************/
#include "acpi.h"
#include "accommon.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME ("exstorob")
/*******************************************************************************
*
* FUNCTION: AcpiExStoreBufferToBuffer
*
* PARAMETERS: SourceDesc - Source object to copy
* TargetDesc - Destination object of the copy
*
* RETURN: Status
*
* DESCRIPTION: Copy a buffer object to another buffer object.
*
******************************************************************************/
ACPI_STATUS
AcpiExStoreBufferToBuffer (
ACPI_OPERAND_OBJECT *SourceDesc,
ACPI_OPERAND_OBJECT *TargetDesc)
{
UINT32 Length;
UINT8 *Buffer;
ACPI_FUNCTION_TRACE_PTR (ExStoreBufferToBuffer, SourceDesc);
/* If Source and Target are the same, just return */
if (SourceDesc == TargetDesc)
{
return_ACPI_STATUS (AE_OK);
}
/* We know that SourceDesc is a buffer by now */
Buffer = ACPI_CAST_PTR (UINT8, SourceDesc->Buffer.Pointer);
Length = SourceDesc->Buffer.Length;
/*
* If target is a buffer of length zero or is a static buffer,
* allocate a new buffer of the proper length
*/
/* NOTE(review): a previous static pointer is deliberately not freed here —
* presumably it points into an ACPI table, not heap memory; confirm. */
if ((TargetDesc->Buffer.Length == 0) ||
(TargetDesc->Common.Flags & AOPOBJ_STATIC_POINTER))
{
TargetDesc->Buffer.Pointer = ACPI_ALLOCATE (Length);
if (!TargetDesc->Buffer.Pointer)
{
return_ACPI_STATUS (AE_NO_MEMORY);
}
TargetDesc->Buffer.Length = Length;
}
/* Copy source buffer to target buffer */
if (Length <= TargetDesc->Buffer.Length)
{
/* Clear existing buffer and copy in the new one */
ACPI_MEMSET (TargetDesc->Buffer.Pointer, 0, TargetDesc->Buffer.Length);
ACPI_MEMCPY (TargetDesc->Buffer.Pointer, Buffer, Length);
#ifdef ACPI_OBSOLETE_BEHAVIOR
/*
* NOTE: ACPI versions up to 3.0 specified that the buffer must be
* truncated if the string is smaller than the buffer. However, "other"
* implementations of ACPI never did this and thus became the defacto
* standard. ACPI 3.0A changes this behavior such that the buffer
* is no longer truncated.
*/
/*
* OBSOLETE BEHAVIOR:
* If the original source was a string, we must truncate the buffer,
* according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
* copy must not truncate the original buffer.
*/
if (OriginalSrcType == ACPI_TYPE_STRING)
{
/* Set the new length of the target */
TargetDesc->Buffer.Length = Length;
}
#endif
}
else
{
/* Truncate the source, copy only what will fit */
ACPI_MEMCPY (TargetDesc->Buffer.Pointer, Buffer,
TargetDesc->Buffer.Length);
ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
"Truncating source buffer from %X to %X\n",
Length, TargetDesc->Buffer.Length));
}
/* Copy flags */
/* Target now owns (possibly new) heap memory, so clear the static flag */
TargetDesc->Buffer.Flags = SourceDesc->Buffer.Flags;
TargetDesc->Common.Flags &= ~AOPOBJ_STATIC_POINTER;
return_ACPI_STATUS (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: AcpiExStoreStringToString
*
* PARAMETERS: SourceDesc - Source object to copy
* TargetDesc - Destination object of the copy
*
* RETURN: Status
*
* DESCRIPTION: Copy a String object to another String object
*
******************************************************************************/
ACPI_STATUS
AcpiExStoreStringToString (
ACPI_OPERAND_OBJECT *SourceDesc,
ACPI_OPERAND_OBJECT *TargetDesc)
{
UINT32 Length;
UINT8 *Buffer;
ACPI_FUNCTION_TRACE_PTR (ExStoreStringToString, SourceDesc);
/* If Source and Target are the same, just return */
if (SourceDesc == TargetDesc)
{
return_ACPI_STATUS (AE_OK);
}
/* We know that SourceDesc is a string by now */
Buffer = ACPI_CAST_PTR (UINT8, SourceDesc->String.Pointer);
Length = SourceDesc->String.Length;
/*
* Replace existing string value if it will fit and the string
* pointer is not a static pointer (part of an ACPI table)
*/
if ((Length < TargetDesc->String.Length) &&
(!(TargetDesc->Common.Flags & AOPOBJ_STATIC_POINTER)))
{
/*
* String will fit in existing non-static buffer.
* Clear old string and copy in the new one
*/
/* +1 clears through the NUL terminator of the old string */
ACPI_MEMSET (TargetDesc->String.Pointer, 0,
(ACPI_SIZE) TargetDesc->String.Length + 1);
ACPI_MEMCPY (TargetDesc->String.Pointer, Buffer, Length);
}
else
{
/*
* Free the current buffer, then allocate a new buffer
* large enough to hold the value
*/
if (TargetDesc->String.Pointer &&
(!(TargetDesc->Common.Flags & AOPOBJ_STATIC_POINTER)))
{
/* Only free if not a pointer into the DSDT */
ACPI_FREE (TargetDesc->String.Pointer);
}
/* Zeroed allocation with room for the NUL terminator */
TargetDesc->String.Pointer = ACPI_ALLOCATE_ZEROED (
(ACPI_SIZE) Length + 1);
if (!TargetDesc->String.Pointer)
{
return_ACPI_STATUS (AE_NO_MEMORY);
}
TargetDesc->Common.Flags &= ~AOPOBJ_STATIC_POINTER;
ACPI_MEMCPY (TargetDesc->String.Pointer, Buffer, Length);
}
/* Set the new target length */
TargetDesc->String.Length = Length;
return_ACPI_STATUS (AE_OK);
}
| gpl-3.0 |
mkuzmin/terraform | vendor/github.com/hashicorp/nomad/nomad/periodic_endpoint.go | 1256 | package nomad
import (
"fmt"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/nomad/nomad/structs"
)
// Periodic endpoint is used for periodic job interactions
type Periodic struct {
srv *Server
}
// Force is used to force a new instance of a periodic job. It forwards the
// RPC to the leader if needed, validates that the job exists and is
// periodic, then dispatches an immediate run and returns the resulting
// evaluation's ID and create index in the reply.
func (p *Periodic) Force(args *structs.PeriodicForceRequest, reply *structs.PeriodicForceResponse) error {
// Forward to the leader; if 'done' the leader (or this node) handled it.
if done, err := p.srv.forward("Periodic.Force", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "periodic", "force"}, time.Now())
// Validate the arguments
if args.JobID == "" {
return fmt.Errorf("missing job ID for evaluation")
}
// Lookup the job
// Read from a point-in-time snapshot of the state store.
snap, err := p.srv.fsm.State().Snapshot()
if err != nil {
return err
}
job, err := snap.JobByID(args.JobID)
if err != nil {
return err
}
if job == nil {
return fmt.Errorf("job not found")
}
if !job.IsPeriodic() {
return fmt.Errorf("can't force launch non-periodic job")
}
// Force run the job.
eval, err := p.srv.periodicDispatcher.ForceRun(job.ID)
if err != nil {
return fmt.Errorf("force launch for job %q failed: %v", job.ID, err)
}
reply.EvalID = eval.ID
reply.EvalCreateIndex = eval.CreateIndex
reply.Index = eval.CreateIndex
return nil
}
| mpl-2.0 |
ratliff/server | plugins/transcoding/inlet_armada/InletArmadaPlugin.php | 2348 | <?php
/**
*@package plugins.inletArmada
*/
class InletArmadaPlugin extends KalturaPlugin implements IKalturaObjectLoader, IKalturaEnumerator
{
const PLUGIN_NAME = 'inletArmada';
public static function getPluginName()
{
return self::PLUGIN_NAME;
}
/**
* Instantiates plugin-provided implementations for the given base class
* and dynamic-enum value.
*
* NOTE(review): the KOperationEngine branch here compares against
* KalturaConversionEngineType::INLET_ARMADA directly, while
* getObjectClass() compares the same base class against
* self::getApiValue(...) — verify these resolve to the same value.
*
* @param string $baseClass
* @param string $enumValue
* @param array $constructorArgs
* @return object
*/
public static function loadObject($baseClass, $enumValue, array $constructorArgs = null)
{
if($baseClass == 'KOperationEngine' && $enumValue == KalturaConversionEngineType::INLET_ARMADA)
{
if(!isset($constructorArgs['params']) || !isset($constructorArgs['outFilePath']))
return null;
return new KOperationEngineInletArmada("", $constructorArgs['outFilePath']);
}
if($baseClass == 'KDLOperatorBase' && $enumValue == self::getApiValue(InletArmadaConversionEngineType::INLET_ARMADA))
{
return new KDLOperatorInletArmada($enumValue);
}
return null;
}
/**
* Maps a (base class, enum value) pair to the plugin class name that
* implements it, or null when this plugin does not provide one.
*
* NOTE(review): the KDLOperatorBase branch uses
* getConversionEngineCoreValue() here but getApiValue() in loadObject();
* confirm both comparisons are intentional.
*
* @param string $baseClass
* @param string $enumValue
* @return string
*/
public static function getObjectClass($baseClass, $enumValue)
{
if($baseClass == 'KOperationEngine' && $enumValue == self::getApiValue(InletArmadaConversionEngineType::INLET_ARMADA))
return 'KOperationEngineInletArmada';
if($baseClass == 'KDLOperatorBase' && $enumValue == self::getConversionEngineCoreValue(InletArmadaConversionEngineType::INLET_ARMADA))
return 'KDLOperatorInletArmada';
return null;
}
/**
* @return array<string> list of enum classes names that extend the base enum name
*/
public static function getEnums($baseEnumName = null)
{
if(is_null($baseEnumName))
return array('InletArmadaConversionEngineType');
if($baseEnumName == 'conversionEngineType')
return array('InletArmadaConversionEngineType');
return array();
}
/**
* @return int id of dynamic enum in the DB.
*/
public static function getConversionEngineCoreValue($valueName)
{
$value = self::getPluginName() . IKalturaEnumerator::PLUGIN_VALUE_DELIMITER . $valueName;
return kPluginableEnumsManager::apiToCore('conversionEngineType', $value);
}
/**
* @return string external API value of dynamic enum.
*/
public static function getApiValue($valueName)
{
return self::getPluginName() . IKalturaEnumerator::PLUGIN_VALUE_DELIMITER . $valueName;
}
}
| agpl-3.0 |
levent/openfoodnetwork | spec/controllers/spree/admin/overview_controller_spec.rb | 2772 | require 'spec_helper'
describe Spree::Admin::OverviewController do
include AuthenticationWorkflow
context "loading overview" do
let(:user) { create_enterprise_user(enterprise_limit: 2) }
before do
controller.stub spree_current_user: user
end
context "when user owns only one enterprise" do
let!(:enterprise) { create(:distributor_enterprise, owner: user) }
context "when the referer is not an admin page" do
before { @request.env['HTTP_REFERER'] = 'http://test.com/some_other_path' }
context "and the enterprise has sells='unspecified'" do
before do
enterprise.update_attribute(:sells, "unspecified")
end
it "redirects to the welcome page for the enterprise" do
spree_get :index
response.should redirect_to welcome_admin_enterprise_path(enterprise)
end
end
context "and the enterprise does not have sells='unspecified'" do
it "renders the single enterprise dashboard" do
spree_get :index
response.should render_template "single_enterprise_dashboard"
end
end
end
context "when the refer is an admin page" do
before { @request.env['HTTP_REFERER'] = 'http://test.com/admin' }
it "renders the single enterprise dashboard" do
spree_get :index
response.should render_template "single_enterprise_dashboard"
end
end
end
context "when user owns multiple enterprises" do
let!(:enterprise1) { create(:distributor_enterprise, owner: user) }
let!(:enterprise2) { create(:distributor_enterprise, owner: user) }
context "when the referer is not an admin page" do
before { @request.env['HTTP_REFERER'] = 'http://test.com/some_other_path' }
context "and at least one owned enterprise has sells='unspecified'" do
before do
enterprise1.update_attribute(:sells, "unspecified")
end
it "redirects to the enterprises index" do
spree_get :index
response.should redirect_to admin_enterprises_path
end
end
context "and no owned enterprises have sells='unspecified'" do
it "renders the multiple enterprise dashboard" do
spree_get :index
response.should render_template "multi_enterprise_dashboard"
end
end
end
context "when the refer is an admin page" do
before { @request.env['HTTP_REFERER'] = 'http://test.com/admin' }
it "renders the multiple enterprise dashboard" do
spree_get :index
response.should render_template "multi_enterprise_dashboard"
end
end
end
end
end
| agpl-3.0 |
ftrotter/google_health_fail | yui/examples/datatable/dt_tabview_clean.html | 3575 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>TabView Integration</title>
<style type="text/css">
/*margin and padding on body element
can introduce errors in determining
element position and are not recommended;
we turn them off as a foundation for YUI
CSS treatments. */
body {
margin:0;
padding:0;
}
</style>
<link rel="stylesheet" type="text/css" href="../../build/fonts/fonts-min.css" />
<link rel="stylesheet" type="text/css" href="../../build/tabview/assets/skins/sam/tabview.css" />
<link rel="stylesheet" type="text/css" href="../../build/datatable/assets/skins/sam/datatable.css" />
<script type="text/javascript" src="../../build/yahoo-dom-event/yahoo-dom-event.js"></script>
<script type="text/javascript" src="../../build/element/element-min.js"></script>
<script type="text/javascript" src="../../build/tabview/tabview-min.js"></script>
<script type="text/javascript" src="../../build/datasource/datasource-min.js"></script>
<script type="text/javascript" src="../../build/datatable/datatable-min.js"></script>
<!--there is no custom header content for this example-->
</head>
<body class=" yui-skin-sam">
<h1>TabView Integration</h1>
<div class="exampleIntro">
<p>Integrating DataTable with the TabView widget.</p>
</div>
<!--BEGIN SOURCE CODE FOR EXAMPLE =============================== -->
<div id="tvcontainer" class="yui-navset">
<ul class="yui-nav">
<li class="selected"><a href="#tab1"><em>Start Tab</em></a></li>
<li><a href="#tab2"><em>DataTable Tab</em></a></li>
<li><a href="#tab3"><em>Another Tab</em></a></li>
</ul>
<div class="yui-content">
<div><p>Welcome! There is a DataTable in the second Tab.</p></div>
<div id="dtcontainer"></div>
<div><p>This is another Tab.</p></div>
</div>
</div>
<script type="text/javascript" src="assets/js/data.js"></script>
<script type="text/javascript">
YAHOO.util.Event.addListener(window, "load", function() {
YAHOO.example.TabView = function() {
var myColumnDefs = [
{key:"id", sortable:true},
{key:"date", formatter:YAHOO.widget.DataTable.formatDate, sortable:true, sortOptions:{defaultDir:YAHOO.widget.DataTable.CLASS_DESC}},
{key:"quantity", formatter:YAHOO.widget.DataTable.formatNumber, sortable:true},
{key:"amount", formatter:YAHOO.widget.DataTable.formatCurrency, sortable:true},
{key:"title", sortable:true},
{key:"description"}
];
var myDataSource = new YAHOO.util.DataSource(YAHOO.example.Data.bookorders);
myDataSource.responseType = YAHOO.util.DataSource.TYPE_JSARRAY;
myDataSource.responseSchema = {
fields: ["id","date","quantity","amount","title","description"]
};
var myDataTable =
new YAHOO.widget.DataTable("dtcontainer", myColumnDefs, myDataSource,{scrollable:true,width:"100%"});
var myTabView = new YAHOO.widget.TabView("tvcontainer");
myTabView.getTab(1).addListener("click", function() {myDataTable.onShow()});
return {
oDS: myDataSource,
oDT: myDataTable,
oTV: myTabView
};
}();
});
</script>
<!--END SOURCE CODE FOR EXAMPLE =============================== -->
</body>
</html>
<!-- presentbright.corp.yahoo.com uncompressed/chunked Thu Feb 19 10:53:12 PST 2009 -->
| agpl-3.0 |
ixmid/snipe-it | resources/lang/fil/pagination.php | 547 | <?php
return array(
/*
|--------------------------------------------------------------------------
| Pagination Language Lines
|--------------------------------------------------------------------------
|
| The following language lines are used by the paginator library to build
| the simple pagination links. You are free to change them to anything
| you want to customize your views to better match your application.
|
*/
'previous' => '« Nakaraan',
'next' => 'Susunod »',
);
| agpl-3.0 |
sdlBasic/sdlbrt | win32/mingw/i686-w64-mingw32/include/schannel.h | 12166 | /**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#ifndef __SCHANNEL_H__
#define __SCHANNEL_H__
#include <_mingw_unicode.h>
#include <wincrypt.h>
#define UNISP_NAME_A "Microsoft Unified Security Protocol Provider"
#define UNISP_NAME_W L"Microsoft Unified Security Protocol Provider"
#define SSL2SP_NAME_A "Microsoft SSL 2.0"
#define SSL2SP_NAME_W L"Microsoft SSL 2.0"
#define SSL3SP_NAME_A "Microsoft SSL 3.0"
#define SSL3SP_NAME_W L"Microsoft SSL 3.0"
#define TLS1SP_NAME_A "Microsoft TLS 1.0"
#define TLS1SP_NAME_W L"Microsoft TLS 1.0"
#define PCT1SP_NAME_A "Microsoft PCT 1.0"
#define PCT1SP_NAME_W L"Microsoft PCT 1.0"
#define SCHANNEL_NAME_A "Schannel"
#define SCHANNEL_NAME_W L"Schannel"
#define UNISP_NAME __MINGW_NAME_UAW(UNISP_NAME)
#define PCT1SP_NAME __MINGW_NAME_UAW(PCT1SP_NAME)
#define SSL2SP_NAME __MINGW_NAME_UAW(SSL2SP_NAME)
#define SSL3SP_NAME __MINGW_NAME_UAW(SSL3SP_NAME)
#define TLS1SP_NAME __MINGW_NAME_UAW(TLS1SP_NAME)
#define SCHANNEL_NAME __MINGW_NAME_UAW(SCHANNEL_NAME)
#define UNISP_RPC_ID 14
#define SECPKG_ATTR_ISSUER_LIST 0x50
#define SECPKG_ATTR_REMOTE_CRED 0x51
#define SECPKG_ATTR_LOCAL_CRED 0x52
#define SECPKG_ATTR_REMOTE_CERT_CONTEXT 0x53
#define SECPKG_ATTR_LOCAL_CERT_CONTEXT 0x54
#define SECPKG_ATTR_ROOT_STORE 0x55
#define SECPKG_ATTR_SUPPORTED_ALGS 0x56
#define SECPKG_ATTR_CIPHER_STRENGTHS 0x57
#define SECPKG_ATTR_SUPPORTED_PROTOCOLS 0x58
#define SECPKG_ATTR_ISSUER_LIST_EX 0x59
#define SECPKG_ATTR_CONNECTION_INFO 0x5a
#define SECPKG_ATTR_EAP_KEY_BLOCK 0x5b
#define SECPKG_ATTR_MAPPED_CRED_ATTR 0x5c
#define SECPKG_ATTR_SESSION_INFO 0x5d
#define SECPKG_ATTR_APP_DATA 0x5e
typedef struct _SecPkgContext_IssuerListInfo {
DWORD cbIssuerList;
PBYTE pIssuerList;
} SecPkgContext_IssuerListInfo,*PSecPkgContext_IssuerListInfo;
typedef struct _SecPkgContext_RemoteCredentialInfo {
DWORD cbCertificateChain;
PBYTE pbCertificateChain;
DWORD cCertificates;
DWORD fFlags;
DWORD dwBits;
} SecPkgContext_RemoteCredentialInfo,*PSecPkgContext_RemoteCredentialInfo;
typedef SecPkgContext_RemoteCredentialInfo SecPkgContext_RemoteCredenitalInfo,*PSecPkgContext_RemoteCredenitalInfo;
#define RCRED_STATUS_NOCRED 0x00000000
#define RCRED_CRED_EXISTS 0x00000001
#define RCRED_STATUS_UNKNOWN_ISSUER 0x00000002
typedef struct _SecPkgContext_LocalCredentialInfo {
DWORD cbCertificateChain;
PBYTE pbCertificateChain;
DWORD cCertificates;
DWORD fFlags;
DWORD dwBits;
} SecPkgContext_LocalCredentialInfo,*PSecPkgContext_LocalCredentialInfo;
typedef SecPkgContext_LocalCredentialInfo SecPkgContext_LocalCredenitalInfo,*PSecPkgContext_LocalCredenitalInfo;
#define LCRED_STATUS_NOCRED 0x00000000
#define LCRED_CRED_EXISTS 0x00000001
#define LCRED_STATUS_UNKNOWN_ISSUER 0x00000002
typedef struct _SecPkgCred_SupportedAlgs {
DWORD cSupportedAlgs;
ALG_ID *palgSupportedAlgs;
} SecPkgCred_SupportedAlgs,*PSecPkgCred_SupportedAlgs;
typedef struct _SecPkgCred_CipherStrengths {
DWORD dwMinimumCipherStrength;
DWORD dwMaximumCipherStrength;
} SecPkgCred_CipherStrengths,*PSecPkgCred_CipherStrengths;
typedef struct _SecPkgCred_SupportedProtocols {
DWORD grbitProtocol;
} SecPkgCred_SupportedProtocols,*PSecPkgCred_SupportedProtocols;
typedef struct _SecPkgContext_IssuerListInfoEx {
PCERT_NAME_BLOB aIssuers;
DWORD cIssuers;
} SecPkgContext_IssuerListInfoEx,*PSecPkgContext_IssuerListInfoEx;
typedef struct _SecPkgContext_ConnectionInfo {
DWORD dwProtocol;
ALG_ID aiCipher;
DWORD dwCipherStrength;
ALG_ID aiHash;
DWORD dwHashStrength;
ALG_ID aiExch;
DWORD dwExchStrength;
} SecPkgContext_ConnectionInfo,*PSecPkgContext_ConnectionInfo;
typedef struct _SecPkgContext_EapKeyBlock {
BYTE rgbKeys[128];
BYTE rgbIVs[64];
} SecPkgContext_EapKeyBlock,*PSecPkgContext_EapKeyBlock;
typedef struct _SecPkgContext_MappedCredAttr {
DWORD dwAttribute;
PVOID pvBuffer;
} SecPkgContext_MappedCredAttr,*PSecPkgContext_MappedCredAttr;
#define SSL_SESSION_RECONNECT 1
typedef struct _SecPkgContext_SessionInfo {
DWORD dwFlags;
DWORD cbSessionId;
BYTE rgbSessionId[32];
} SecPkgContext_SessionInfo,*PSecPkgContext_SessionInfo;
typedef struct _SecPkgContext_SessionAppData {
DWORD dwFlags;
DWORD cbAppData;
PBYTE pbAppData;
} SecPkgContext_SessionAppData,*PSecPkgContext_SessionAppData;
#define SCH_CRED_V1 0x00000001
#define SCH_CRED_V2 0x00000002
#define SCH_CRED_VERSION 0x00000002
#define SCH_CRED_V3 0x00000003
#define SCHANNEL_CRED_VERSION 0x00000004
struct _HMAPPER;
typedef struct _SCHANNEL_CRED {
DWORD dwVersion;
DWORD cCreds;
PCCERT_CONTEXT *paCred;
HCERTSTORE hRootStore;
DWORD cMappers;
struct _HMAPPER **aphMappers;
DWORD cSupportedAlgs;
ALG_ID *palgSupportedAlgs;
DWORD grbitEnabledProtocols;
DWORD dwMinimumCipherStrength;
DWORD dwMaximumCipherStrength;
DWORD dwSessionLifespan;
DWORD dwFlags;
DWORD dwCredFormat;
} SCHANNEL_CRED,*PSCHANNEL_CRED;
#define SCH_CRED_FORMAT_CERT_HASH 0x00000001
#define SCH_CRED_MAX_SUPPORTED_ALGS 256
#define SCH_CRED_MAX_SUPPORTED_CERTS 100
typedef struct _SCHANNEL_CERT_HASH {
DWORD dwLength;
DWORD dwFlags;
HCRYPTPROV hProv;
BYTE ShaHash[20];
} SCHANNEL_CERT_HASH,*PSCHANNEL_CERT_HASH;
#define SCH_MACHINE_CERT_HASH 0x00000001
#define SCH_CRED_NO_SYSTEM_MAPPER 0x00000002
#define SCH_CRED_NO_SERVERNAME_CHECK 0x00000004
#define SCH_CRED_MANUAL_CRED_VALIDATION 0x00000008
#define SCH_CRED_NO_DEFAULT_CREDS 0x00000010
#define SCH_CRED_AUTO_CRED_VALIDATION 0x00000020
#define SCH_CRED_USE_DEFAULT_CREDS 0x00000040
#define SCH_CRED_DISABLE_RECONNECTS 0x00000080
#define SCH_CRED_REVOCATION_CHECK_END_CERT 0x00000100
#define SCH_CRED_REVOCATION_CHECK_CHAIN 0x00000200
#define SCH_CRED_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT 0x00000400
#define SCH_CRED_IGNORE_NO_REVOCATION_CHECK 0x00000800
#define SCH_CRED_IGNORE_REVOCATION_OFFLINE 0x00001000
#define SCH_CRED_REVOCATION_CHECK_CACHE_ONLY 0x00004000
#define SCH_CRED_CACHE_ONLY_URL_RETRIEVAL 0x00008000
#define SCHANNEL_RENEGOTIATE 0
#define SCHANNEL_SHUTDOWN 1
#define SCHANNEL_ALERT 2
#define SCHANNEL_SESSION 3
typedef struct _SCHANNEL_ALERT_TOKEN {
DWORD dwTokenType;
DWORD dwAlertType;
DWORD dwAlertNumber;
} SCHANNEL_ALERT_TOKEN;
#define TLS1_ALERT_WARNING 1
#define TLS1_ALERT_FATAL 2
#define TLS1_ALERT_CLOSE_NOTIFY 0
#define TLS1_ALERT_UNEXPECTED_MESSAGE 10
#define TLS1_ALERT_BAD_RECORD_MAC 20
#define TLS1_ALERT_DECRYPTION_FAILED 21
#define TLS1_ALERT_RECORD_OVERFLOW 22
#define TLS1_ALERT_DECOMPRESSION_FAIL 30
#define TLS1_ALERT_HANDSHAKE_FAILURE 40
#define TLS1_ALERT_BAD_CERTIFICATE 42
#define TLS1_ALERT_UNSUPPORTED_CERT 43
#define TLS1_ALERT_CERTIFICATE_REVOKED 44
#define TLS1_ALERT_CERTIFICATE_EXPIRED 45
#define TLS1_ALERT_CERTIFICATE_UNKNOWN 46
#define TLS1_ALERT_ILLEGAL_PARAMETER 47
#define TLS1_ALERT_UNKNOWN_CA 48
#define TLS1_ALERT_ACCESS_DENIED 49
#define TLS1_ALERT_DECODE_ERROR 50
#define TLS1_ALERT_DECRYPT_ERROR 51
#define TLS1_ALERT_EXPORT_RESTRICTION 60
#define TLS1_ALERT_PROTOCOL_VERSION 70
#define TLS1_ALERT_INSUFFIENT_SECURITY 71
#define TLS1_ALERT_INTERNAL_ERROR 80
#define TLS1_ALERT_USER_CANCELED 90
#define TLS1_ALERT_NO_RENEGOTIATATION 100
#define SSL_SESSION_ENABLE_RECONNECTS 1
#define SSL_SESSION_DISABLE_RECONNECTS 2
typedef struct _SCHANNEL_SESSION_TOKEN {
DWORD dwTokenType;
DWORD dwFlags;
} SCHANNEL_SESSION_TOKEN;
#define CERT_SCHANNEL_IIS_PRIVATE_KEY_PROP_ID (CERT_FIRST_USER_PROP_ID + 0)
#define CERT_SCHANNEL_IIS_PASSWORD_PROP_ID (CERT_FIRST_USER_PROP_ID + 1)
#define CERT_SCHANNEL_SGC_CERTIFICATE_PROP_ID (CERT_FIRST_USER_PROP_ID + 2)
#define SP_PROT_PCT1_SERVER 0x00000001
#define SP_PROT_PCT1_CLIENT 0x00000002
#define SP_PROT_PCT1 (SP_PROT_PCT1_SERVER | SP_PROT_PCT1_CLIENT)
#define SP_PROT_SSL2_SERVER 0x00000004
#define SP_PROT_SSL2_CLIENT 0x00000008
#define SP_PROT_SSL2 (SP_PROT_SSL2_SERVER | SP_PROT_SSL2_CLIENT)
#define SP_PROT_SSL3_SERVER 0x00000010
#define SP_PROT_SSL3_CLIENT 0x00000020
#define SP_PROT_SSL3 (SP_PROT_SSL3_SERVER | SP_PROT_SSL3_CLIENT)
#define SP_PROT_TLS1_SERVER 0x00000040
#define SP_PROT_TLS1_CLIENT 0x00000080
#define SP_PROT_TLS1 (SP_PROT_TLS1_SERVER | SP_PROT_TLS1_CLIENT)
#define SP_PROT_SSL3TLS1_CLIENTS (SP_PROT_TLS1_CLIENT | SP_PROT_SSL3_CLIENT)
#define SP_PROT_SSL3TLS1_SERVERS (SP_PROT_TLS1_SERVER | SP_PROT_SSL3_SERVER)
#define SP_PROT_SSL3TLS1 (SP_PROT_SSL3 | SP_PROT_TLS1)
#define SP_PROT_UNI_SERVER 0x40000000
#define SP_PROT_UNI_CLIENT 0x80000000
#define SP_PROT_UNI (SP_PROT_UNI_SERVER | SP_PROT_UNI_CLIENT)
#define SP_PROT_ALL 0xffffffff
#define SP_PROT_NONE 0
#define SP_PROT_CLIENTS (SP_PROT_PCT1_CLIENT | SP_PROT_SSL2_CLIENT | SP_PROT_SSL3_CLIENT | SP_PROT_UNI_CLIENT | SP_PROT_TLS1_CLIENT)
#define SP_PROT_SERVERS (SP_PROT_PCT1_SERVER | SP_PROT_SSL2_SERVER | SP_PROT_SSL3_SERVER | SP_PROT_UNI_SERVER | SP_PROT_TLS1_SERVER)
typedef WINBOOL (*SSL_EMPTY_CACHE_FN_A)(LPSTR pszTargetName,DWORD dwFlags);
WINBOOL SslEmptyCacheA(LPSTR pszTargetName,DWORD dwFlags);
typedef WINBOOL (*SSL_EMPTY_CACHE_FN_W)(LPWSTR pszTargetName,DWORD dwFlags);
WINBOOL SslEmptyCacheW(LPWSTR pszTargetName,DWORD dwFlags);
#define SSL_EMPTY_CACHE_FN __MINGW_NAME_UAW(SSL_EMPTY_CACHE_FN)
#define SslEmptyCache __MINGW_NAME_AW(SslEmptyCache)
typedef struct _SSL_CREDENTIAL_CERTIFICATE {
DWORD cbPrivateKey;
PBYTE pPrivateKey;
DWORD cbCertificate;
PBYTE pCertificate;
PSTR pszPassword;
} SSL_CREDENTIAL_CERTIFICATE,*PSSL_CREDENTIAL_CERTIFICATE;
#define SCHANNEL_SECRET_TYPE_CAPI 0x00000001
#define SCHANNEL_SECRET_PRIVKEY 0x00000002
#define SCH_CRED_X509_CERTCHAIN 0x00000001
#define SCH_CRED_X509_CAPI 0x00000002
#define SCH_CRED_CERT_CONTEXT 0x00000003
struct _HMAPPER;
typedef struct _SCH_CRED {
DWORD dwVersion;
DWORD cCreds;
PVOID *paSecret;
PVOID *paPublic;
DWORD cMappers;
struct _HMAPPER **aphMappers;
} SCH_CRED,*PSCH_CRED;
typedef struct _SCH_CRED_SECRET_CAPI {
DWORD dwType;
HCRYPTPROV hProv;
} SCH_CRED_SECRET_CAPI,*PSCH_CRED_SECRET_CAPI;
typedef struct _SCH_CRED_SECRET_PRIVKEY {
DWORD dwType;
PBYTE pPrivateKey;
DWORD cbPrivateKey;
PSTR pszPassword;
} SCH_CRED_SECRET_PRIVKEY,*PSCH_CRED_SECRET_PRIVKEY;
typedef struct _SCH_CRED_PUBLIC_CERTCHAIN {
DWORD dwType;
DWORD cbCertChain;
PBYTE pCertChain;
} SCH_CRED_PUBLIC_CERTCHAIN,*PSCH_CRED_PUBLIC_CERTCHAIN;
typedef struct _SCH_CRED_PUBLIC_CAPI {
DWORD dwType;
HCRYPTPROV hProv;
} SCH_CRED_PUBLIC_CAPI,*PSCH_CRED_PUBLIC_CAPI;
typedef struct _PctPublicKey {
DWORD Type;
DWORD cbKey;
UCHAR pKey[1];
} PctPublicKey;
typedef struct _X509Certificate {
DWORD Version;
DWORD SerialNumber[4];
ALG_ID SignatureAlgorithm;
FILETIME ValidFrom;
FILETIME ValidUntil;
PSTR pszIssuer;
PSTR pszSubject;
PctPublicKey *pPublicKey;
} X509Certificate,*PX509Certificate;
WINBOOL SslGenerateKeyPair(PSSL_CREDENTIAL_CERTIFICATE pCerts,PSTR pszDN,PSTR pszPassword,DWORD Bits);
VOID SslGenerateRandomBits(PUCHAR pRandomData,LONG cRandomData);
WINBOOL SslCrackCertificate(PUCHAR pbCertificate,DWORD cbCertificate,DWORD dwFlags,PX509Certificate *ppCertificate);
VOID SslFreeCertificate(PX509Certificate pCertificate);
DWORD WINAPI SslGetMaximumKeySize(DWORD Reserved);
WINBOOL SslGetDefaultIssuers(PBYTE pbIssuers,DWORD *pcbIssuers);
#define SSL_CRACK_CERTIFICATE_NAME TEXT("SslCrackCertificate")
#define SSL_FREE_CERTIFICATE_NAME TEXT("SslFreeCertificate")
typedef WINBOOL (WINAPI *SSL_CRACK_CERTIFICATE_FN)(PUCHAR pbCertificate,DWORD cbCertificate,WINBOOL VerifySignature,PX509Certificate *ppCertificate);
typedef VOID (WINAPI *SSL_FREE_CERTIFICATE_FN)(PX509Certificate pCertificate);
#if (_WIN32_WINNT >= 0x0600)
typedef struct _SecPkgContext_EapPrfInfo {
DWORD dwVersion;
DWORD cbPrfData;
} SecPkgContext_EapPrfInfo, *PSecPkgContext_EapPrfInfo;
#endif /*(_WIN32_WINNT >= 0x0600)*/
#if (_WIN32_WINNT >= 0x0601)
typedef struct _SecPkgContext_SupportedSignatures {
WORD cSignatureAndHashAlgorithms;
WORD *pSignatureAndHashAlgorithms;
} SecPkgContext_SupportedSignatures, *PSecPkgContext_SupportedSignatures;
#endif /*(_WIN32_WINNT >= 0x0601)*/
#endif
| lgpl-2.1 |
markwatkinson/luminous | style/luminous_print.css | 1195 | /*
* Here we override some CSS to make a Luminous object more print-friendly.
* as well as some layout, we override a few colours of the light theme.
* This is because some colours do not print well (grey comments => green)
* and boldness appears to be dropped from keywords, thereby removing all their
* highlighting (these are changed to a purple colour)
*/
/* body {padding:0px !important; margin:0px !important;} */
div.luminous .metabar_buttons, div.luminous .metabar_fixed
{
display:none !important;
visibility:hidden !important;
}
div.luminous, .luminous .code, .luminous .line_numbers
{
height:auto !important;
}
.luminous pre.code, .luminous pre.line_numbers {
overflow:visible !important;
max-height:none !important;
}
div.luminous .comment{color:green !important;}
div.luminous .keyword {}
div.luminous table.code_container{height:auto !important;}
div.luminous div.code_container{max-height:none !important;
overflow:visible !important;
height:auto !important;
}
table.code_container {padding:0px !important;
border-collapse:collapse;
}
td {padding: 0px !important;}
td.lineno {border-right:1px solid black; }
td.code { padding-left: 1em !important;} | lgpl-2.1 |
AkshitaKukreja30/checkstyle | src/test/resources/com/puppycrawl/tools/checkstyle/checks/whitespace/nowhitespacebefore/InputNoWhitespaceBeforeDefault.java | 5995 | ////////////////////////////////////////////////////////////////////////////////
// Test case file for checkstyle.
// Created: 2001
////////////////////////////////////////////////////////////////////////////////
package com . puppycrawl
.tools.
checkstyle.checks.whitespace.nowhitespacebefore;
/**
* Class for testing whitespace issues.
* error missing author tag
**/
class InputNoWhitespaceBeforeDefault
{
/** ignore assignment **/
private int mVar1=1;
/** ignore assignment **/
private int mVar2 =1;
/** Should be ok **/
private int mVar3 = 1;
/** method **/
void method1()
{
final int a = 1;
int b= 1; // Ignore 1
b=1; // Ignore 1
b+=1; // Ignore 1
b -=- 1 + (+ b); // Ignore 2
b = b ++ + b --; // Ignore 1
b = ++ b - -- b; // Ignore 1
}
/** method **/
void method2()
{
synchronized(this) {
}
try{
}
catch(RuntimeException e){
}
}
/**
skip blank lines between comment and code,
should be ok
**/
private int mVar4 = 1;
/** test WS after void return */
private void fastExit()
{
boolean complicatedStuffNeeded = true;
if( !complicatedStuffNeeded )
{
return; // should not complain about missing WS after return
}
else
{
// do complicated stuff
}
}
/** test WS after non void return
@return 2
*/
private int nonVoid()
{
if ( true )
{
return(2); // should complain about missing WS after return
}
else
{
return 2; // this is ok
}
}
/** test casts **/
private void testCasts()
{
Object o = (Object) new Object(); // ok
o = (Object)o; // error
o = ( Object ) o; // ok
o = (Object)
o; // ok
}
/** test questions **/
private void testQuestions()
{
boolean b = (1 == 2)?true:false;
b = (1==2) ? false : true;
}
/** star test **/
private void starTest()
{
int x = 2 *3* 4;
}
/** boolean test **/
private void boolTest()
{
boolean a = true;
boolean x = ! a;
int z = ~1 + ~ 2;
}
/** division test **/
private void divTest()
{
int a = 4 % 2;
int b = 4% 2;
int c = 4 %2;
int d = 4%2;
int e = 4 / 2;
int f = 4/ 2;
int g = 4 /2;
int h = 4/2;
}
/** @return dot test **/
private java .lang. String dotTest()
{
Object o = new java.lang.Object();
o.
toString();
o
.toString();
o . toString();
return o.toString();
}
/** assert statement test */
public void assertTest()
{
// OK
assert true;
// OK
assert true : "Whups";
// evil colons, should be OK
assert "OK".equals(null) ? false : true : "Whups";
// missing WS around assert
assert(true);
// missing WS around colon
assert true:"Whups";
}
/** another check */
void donBradman(Runnable aRun)
{
donBradman(new Runnable() {
public void run() {
}
});
final Runnable r = new Runnable() {
public void run() {
}
};
}
/** rfe 521323, detect whitespace before ';' */
void rfe521323()
{
doStuff() ;
// ^ whitespace
for (int i = 0 ; i < 5; i++) {
// ^ whitespace
}
}
/** bug 806243 (NoWhitespaceBeforeCheck error for anonymous inner class) */
private int i ;
// ^ whitespace
private int i1, i2, i3 ;
// ^ whitespace
private int i4, i5, i6;
/** bug 806243 (NoWhitespaceBeforeCheck error for anonymous inner class) */
void bug806243()
{
Object o = new InputNoWhitespaceBeforeDefault() {
private int j ;
// ^ whitespace
};
}
void doStuff() {
}
}
/**
* Bug 806242 (NoWhitespaceBeforeCheck error with an interface).
* @author o_sukhodolsky
* @version 1.0
*/
interface IFoo_NoWhitespaceBeforeDefault
{
void foo() ;
// ^ whitespace
}
/**
* Avoid Whitespace errors in for loop.
* @author lkuehne
* @version 1.0
*/
class SpecialCasesInForLoop_NoWhitespaceBeforeDefault
{
void forIterator()
{
// avoid conflict between WhiteSpaceAfter ';' and ParenPad(nospace)
for (int i = 0; i++ < 5;) {
// ^ no whitespace
}
// bug 895072
// avoid confilct between ParenPad(space) and NoWhiteSpace before ';'
int i = 0;
for ( ; i < 5; i++ ) {
// ^ whitespace
}
for (int anInt : getSomeInts()) {
//Should be ignored
}
}
int[] getSomeInts() {
int i = (int) ( 2 / 3 );
return null;
}
public void myMethod() {
new Thread() {
public void run() {
}
}.start();
}
public void foo(java.util.List<? extends String[]> bar, Comparable<? super Object[]> baz) { }
public void mySuperMethod() {
Runnable[] runs = new Runnable[] {new Runnable() {
public void run() {
}
},
new Runnable() {
public void run() {
}
}};
runs[0]
.
run()
;
}
public void testNullSemi() {
return ;
}
public void register(Object obj) { }
public void doSomething(String args[]) {
register(boolean[].class);
register( args );
}
public void parentheses() {
testNullSemi
(
)
;
}
public static void testNoWhitespaceBeforeEllipses(String ... args) {
}
}
| lgpl-2.1 |
harterj/moose | modules/navier_stokes/doc/content/source/fvkernels/CNSFVFluidEnergyHLLC.md | 389 | # CNSFVFluidEnergyHLLC
!syntax description /FVKernels/CNSFVFluidEnergyHLLC
## Overview
This object implements the energy equation inter-cell fluxes for the
Harten-Lax-Van Leer-Contact (HLLC) formulation described in [CNSFVHLLCBase.md].
!syntax parameters /FVKernels/CNSFVFluidEnergyHLLC
!syntax inputs /FVKernels/CNSFVFluidEnergyHLLC
!syntax children /FVKernels/CNSFVFluidEnergyHLLC
| lgpl-2.1 |
abbeyj/sonarqube | server/sonar-server/src/main/java/org/sonar/server/metric/ws/MetricsWs.java | 1515 | /*
* SonarQube, open source software quality management tool.
* Copyright (C) 2008-2014 SonarSource
* mailto:contact AT sonarsource DOT com
*
* SonarQube is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* SonarQube is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package org.sonar.server.metric.ws;
import org.sonar.api.server.ws.WebService;
public class MetricsWs implements WebService {
public static final String ENDPOINT = "api/metrics";
private final MetricsWsAction[] actions;
public MetricsWs(MetricsWsAction... actions) {
this.actions = actions;
}
@Override
public void define(Context context) {
NewController controller = context.createController(ENDPOINT);
controller.setDescription("Metrics management");
controller.setSince("2.6");
for (MetricsWsAction action : actions) {
action.define(controller);
}
controller.done();
}
}
| lgpl-3.0 |
animotron/animos | include/acpi/acdisasm.h | 24792 | /******************************************************************************
*
* Name: acdisasm.h - AML disassembler
*
*****************************************************************************/
/******************************************************************************
*
* 1. Copyright Notice
*
* Some or all of this work - Copyright (c) 1999 - 2011, Intel Corp.
* All rights reserved.
*
* 2. License
*
* 2.1. This is your license from Intel Corp. under its intellectual property
* rights. You may have additional license terms from the party that provided
* you this software, covering your right to use that party's intellectual
* property rights.
*
* 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a
* copy of the source code appearing in this file ("Covered Code") an
* irrevocable, perpetual, worldwide license under Intel's copyrights in the
* base code distributed originally by Intel ("Original Intel Code") to copy,
* make derivatives, distribute, use and display any portion of the Covered
* Code in any form, with the right to sublicense such rights; and
*
* 2.3. Intel grants Licensee a non-exclusive and non-transferable patent
* license (with the right to sublicense), under only those claims of Intel
* patents that are infringed by the Original Intel Code, to make, use, sell,
* offer to sell, and import the Covered Code and derivative works thereof
* solely to the minimum extent necessary to exercise the above copyright
* license, and in no event shall the patent license extend to any additions
* to or modifications of the Original Intel Code. No other license or right
* is granted directly or by implication, estoppel or otherwise;
*
* The above copyright and patent license is granted only if the following
* conditions are met:
*
* 3. Conditions
*
* 3.1. Redistribution of Source with Rights to Further Distribute Source.
* Redistribution of source code of any substantial portion of the Covered
* Code or modification with rights to further distribute source must include
* the above Copyright Notice, the above License, this list of Conditions,
* and the following Disclaimer and Export Compliance provision. In addition,
* Licensee must cause all Covered Code to which Licensee contributes to
* contain a file documenting the changes Licensee made to create that Covered
* Code and the date of any change. Licensee must include in that file the
* documentation of any changes made by any predecessor Licensee. Licensee
* must include a prominent statement that the modification is derived,
* directly or indirectly, from Original Intel Code.
*
* 3.2. Redistribution of Source with no Rights to Further Distribute Source.
* Redistribution of source code of any substantial portion of the Covered
* Code or modification without rights to further distribute source must
* include the following Disclaimer and Export Compliance provision in the
* documentation and/or other materials provided with distribution. In
* addition, Licensee may not authorize further sublicense of source of any
* portion of the Covered Code, and must include terms to the effect that the
* license from Licensee to its licensee is limited to the intellectual
* property embodied in the software Licensee provides to its licensee, and
* not to intellectual property embodied in modifications its licensee may
* make.
*
* 3.3. Redistribution of Executable. Redistribution in executable form of any
* substantial portion of the Covered Code or modification must reproduce the
* above Copyright Notice, and the following Disclaimer and Export Compliance
* provision in the documentation and/or other materials provided with the
* distribution.
*
* 3.4. Intel retains all right, title, and interest in and to the Original
* Intel Code.
*
* 3.5. Neither the name Intel nor any other trademark owned or controlled by
* Intel shall be used in advertising or otherwise to promote the sale, use or
* other dealings in products derived from or relating to the Covered Code
* without prior written authorization from Intel.
*
* 4. Disclaimer and Export Compliance
*
* 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED
* HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE
* IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE,
* INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY
* UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY
* IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES
* OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR
* COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT,
* SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY
* CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL
* HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS
* SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY
* LIMITED REMEDY.
*
* 4.3. Licensee shall not export, either directly or indirectly, any of this
* software or system incorporating such software without first obtaining any
* required license or other approval from the U. S. Department of Commerce or
* any other agency or department of the United States Government. In the
* event Licensee exports any such software from the United States or
* re-exports any such software from a foreign destination, Licensee shall
* ensure that the distribution and export/re-export of the software is in
* compliance with all laws, regulations, orders, or other restrictions of the
* U.S. Export Administration Regulations. Licensee agrees that neither it nor
* any of its subsidiaries will export/re-export any technical data, process,
* software, or service, directly or indirectly, to any country for which the
* United States government or any agency thereof requires an export license,
* other governmental approval, or letter of assurance, without first obtaining
* such license, approval or letter.
*
*****************************************************************************/
#ifndef __ACDISASM_H__
#define __ACDISASM_H__
#include "amlresrc.h"
#define BLOCK_NONE 0
#define BLOCK_PAREN 1
#define BLOCK_BRACE 2
#define BLOCK_COMMA_LIST 4
#define ACPI_DEFAULT_RESNAME *(UINT32 *) "__RD"
/*
* Raw table data header. Used by disassembler and data table compiler.
* Do not change.
*/
#define ACPI_RAW_TABLE_DATA_HEADER "Raw Table Data"
/*
 * Describes one field of a raw ACPI data table. Tables are described by
 * arrays of these entries, which AcpiDmDumpTable() walks to format the
 * raw table bytes (see "dmtable" interfaces below).
 */
typedef const struct acpi_dmtable_info
{
    UINT8                       Opcode;     /* Field type, one of the ACPI_DMT_* values below */
    UINT8                       Offset;     /* Presumably the field's byte offset within the table -- TODO confirm */
    char                        *Name;      /* Field name shown in the disassembly output */
    UINT8                       Flags;      /* DT_* flags (e.g. DT_LENGTH: field is a subtable length) */

} ACPI_DMTABLE_INFO;
#define DT_LENGTH 0x01 /* Field is a subtable length */
#define DT_FLAG 0x02 /* Field is a flag value */
#define DT_NON_ZERO 0x04 /* Field must be non-zero */
/* TBD: Not used at this time */
#define DT_OPTIONAL 0x08
#define DT_COUNT 0x10
/*
* Values for Opcode above.
* Note: 0-7 must not change, used as a flag shift value
*/
#define ACPI_DMT_FLAG0 0
#define ACPI_DMT_FLAG1 1
#define ACPI_DMT_FLAG2 2
#define ACPI_DMT_FLAG3 3
#define ACPI_DMT_FLAG4 4
#define ACPI_DMT_FLAG5 5
#define ACPI_DMT_FLAG6 6
#define ACPI_DMT_FLAG7 7
#define ACPI_DMT_FLAGS0 8
#define ACPI_DMT_FLAGS2 9
#define ACPI_DMT_UINT8 10
#define ACPI_DMT_UINT16 11
#define ACPI_DMT_UINT24 12
#define ACPI_DMT_UINT32 13
#define ACPI_DMT_UINT56 14
#define ACPI_DMT_UINT64 15
#define ACPI_DMT_STRING 16
#define ACPI_DMT_NAME4 17
#define ACPI_DMT_NAME6 18
#define ACPI_DMT_NAME8 19
#define ACPI_DMT_CHKSUM 20
#define ACPI_DMT_SPACEID 21
#define ACPI_DMT_GAS 22
#define ACPI_DMT_ASF 23
#define ACPI_DMT_DMAR 24
#define ACPI_DMT_HEST 25
#define ACPI_DMT_HESTNTFY 26
#define ACPI_DMT_HESTNTYP 27
#define ACPI_DMT_MADT 28
#define ACPI_DMT_SRAT 29
#define ACPI_DMT_EXIT 30
#define ACPI_DMT_SIG 31
#define ACPI_DMT_FADTPM 32
#define ACPI_DMT_BUF16 33
#define ACPI_DMT_IVRS 34
#define ACPI_DMT_BUFFER 35
#define ACPI_DMT_PCI_PATH 36
#define ACPI_DMT_EINJACT 37
#define ACPI_DMT_EINJINST 38
#define ACPI_DMT_ERSTACT 39
#define ACPI_DMT_ERSTINST 40
#define ACPI_DMT_ACCWIDTH 41
#define ACPI_DMT_UNICODE 42
#define ACPI_DMT_UUID 43
#define ACPI_DMT_DEVICE_PATH 44
#define ACPI_DMT_LABEL 45
#define ACPI_DMT_BUF7 46
#define ACPI_DMT_BUF128 47
#define ACPI_DMT_SLIC 48
/* Handler invoked to dump/disassemble one ACPI table */
typedef
void (*ACPI_DMTABLE_HANDLER) (
    ACPI_TABLE_HEADER       *Table);

/* Handler used by the data table compiler; consumes a field list */
typedef
ACPI_STATUS (*ACPI_CMTABLE_HANDLER) (
    void                    **PFieldList);

/*
 * Associates an ACPI table signature with the layout info and handlers
 * used to disassemble or compile that table. Instances live in the
 * AcpiDmTableData[] array and are looked up via AcpiDmGetTableData().
 */
typedef struct acpi_dmtable_data
{
    char                    *Signature;         /* 4-character ACPI table signature */
    ACPI_DMTABLE_INFO       *TableInfo;         /* Field layout for table dumping */
    ACPI_DMTABLE_HANDLER    TableHandler;       /* Custom dump routine, when layout alone is insufficient */
    ACPI_CMTABLE_HANDLER    CmTableHandler;     /* Data table compiler routine */
    const unsigned char     *Template;          /* Presumably a template image for table creation -- TODO confirm */
    char                    *Name;              /* Human-readable table name */

} ACPI_DMTABLE_DATA;
/*
 * State carried through a parse-tree walk by the disassembler
 * (see AcpiDmWalkParseTree / AcpiDmDisassembleOneOp below).
 */
typedef struct acpi_op_walk_info
{
    UINT32                  Level;          /* Current nesting level in the parse tree */
    UINT32                  LastLevel;      /* Level of the previously visited op */
    UINT32                  Count;
    UINT32                  BitOffset;
    UINT32                  Flags;
    ACPI_WALK_STATE         *WalkState;     /* Associated interpreter walk state */

} ACPI_OP_WALK_INFO;
/*
* TBD - another copy of this is in asltypes.h, fix
*/
#ifndef ASL_WALK_CALLBACK_DEFINED
typedef
ACPI_STATUS (*ASL_WALK_CALLBACK) (
ACPI_PARSE_OBJECT *Op,
UINT32 Level,
void *Context);
#define ASL_WALK_CALLBACK_DEFINED
#endif
/*
 * Maps a bit position within a resource descriptor to an ASL resource
 * tag name (used by the "dmrestag" interfaces below).
 */
typedef struct acpi_resource_tag
{
    UINT32                  BitIndex;       /* Bit position the tag refers to */
    char                    *Tag;           /* Resource tag name string */

} ACPI_RESOURCE_TAG;
/* Strings used for decoding flags to ASL keywords */
extern const char *AcpiGbl_WordDecode[];
extern const char *AcpiGbl_IrqDecode[];
extern const char *AcpiGbl_LockRule[];
extern const char *AcpiGbl_AccessTypes[];
extern const char *AcpiGbl_UpdateRules[];
extern const char *AcpiGbl_MatchOps[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsf0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsf1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsf1a[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsf2[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsf2a[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsf3[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsf4[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoAsfHdr[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoBoot[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoBert[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoCpep[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoCpep0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDbgp[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDmar[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDmarHdr[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDmarScope[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDmar0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDmar1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDmar2[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoDmar3[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoEcdt[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoEinj[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoEinj0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoErst[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoErst0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoFacs[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoFadt1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoFadt2[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoFadt3[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoGas[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHeader[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest2[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest6[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest7[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest8[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHest9[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHestNotify[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHestBank[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoHpet[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrs[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrs0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrs1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrs4[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrs8a[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrs8b[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrs8c[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoIvrsHdr[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt2[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt3[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt4[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt5[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt6[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt7[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt8[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt9[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadt10[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMadtHdr[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMcfg[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMcfg0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMchi[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMsct[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoMsct0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoRsdp1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoRsdp2[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSbst[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSlicHdr[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSlic0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSlic1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSlit[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSpcr[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSpmi[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSrat[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSratHdr[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSrat0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSrat1[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoSrat2[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoTcpa[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoUefi[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoWaet[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoWdat[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoWdat0[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoWddt[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoWdrt[];
extern ACPI_DMTABLE_INFO AcpiDmTableInfoGeneric[][2];
/*
* dmtable
*/
extern ACPI_DMTABLE_DATA AcpiDmTableData[];
UINT8
AcpiDmGenerateChecksum (
void *Table,
UINT32 Length,
UINT8 OriginalChecksum);
ACPI_DMTABLE_DATA *
AcpiDmGetTableData (
char *Signature);
void
AcpiDmDumpDataTable (
ACPI_TABLE_HEADER *Table);
ACPI_STATUS
AcpiDmDumpTable (
UINT32 TableLength,
UINT32 TableOffset,
void *Table,
UINT32 SubTableLength,
ACPI_DMTABLE_INFO *Info);
void
AcpiDmLineHeader (
UINT32 Offset,
UINT32 ByteLength,
char *Name);
void
AcpiDmLineHeader2 (
UINT32 Offset,
UINT32 ByteLength,
char *Name,
UINT32 Value);
/*
* dmtbdump
*/
void
AcpiDmDumpAsf (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpCpep (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpDmar (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpEinj (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpErst (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpFadt (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpHest (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpIvrs (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpMcfg (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpMadt (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpMsct (
ACPI_TABLE_HEADER *Table);
UINT32
AcpiDmDumpRsdp (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpRsdt (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpSlic (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpSlit (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpSrat (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpWdat (
ACPI_TABLE_HEADER *Table);
void
AcpiDmDumpXsdt (
ACPI_TABLE_HEADER *Table);
/*
* dmwalk
*/
void
AcpiDmDisassemble (
ACPI_WALK_STATE *WalkState,
ACPI_PARSE_OBJECT *Origin,
UINT32 NumOpcodes);
void
AcpiDmWalkParseTree (
ACPI_PARSE_OBJECT *Op,
ASL_WALK_CALLBACK DescendingCallback,
ASL_WALK_CALLBACK AscendingCallback,
void *Context);
/*
* dmopcode
*/
void
AcpiDmDisassembleOneOp (
ACPI_WALK_STATE *WalkState,
ACPI_OP_WALK_INFO *Info,
ACPI_PARSE_OBJECT *Op);
void
AcpiDmDecodeInternalObject (
ACPI_OPERAND_OBJECT *ObjDesc);
UINT32
AcpiDmListType (
ACPI_PARSE_OBJECT *Op);
void
AcpiDmMethodFlags (
ACPI_PARSE_OBJECT *Op);
void
AcpiDmFieldFlags (
ACPI_PARSE_OBJECT *Op);
void
AcpiDmAddressSpace (
UINT8 SpaceId);
void
AcpiDmRegionFlags (
ACPI_PARSE_OBJECT *Op);
void
AcpiDmMatchOp (
ACPI_PARSE_OBJECT *Op);
/*
* dmnames
*/
UINT32
AcpiDmDumpName (
UINT32 Name);
ACPI_STATUS
AcpiPsDisplayObjectPathname (
ACPI_WALK_STATE *WalkState,
ACPI_PARSE_OBJECT *Op);
void
AcpiDmNamestring (
char *Name);
/*
* dmobject
*/
void
AcpiDmDisplayInternalObject (
ACPI_OPERAND_OBJECT *ObjDesc,
ACPI_WALK_STATE *WalkState);
void
AcpiDmDisplayArguments (
ACPI_WALK_STATE *WalkState);
void
AcpiDmDisplayLocals (
ACPI_WALK_STATE *WalkState);
void
AcpiDmDumpMethodInfo (
ACPI_STATUS Status,
ACPI_WALK_STATE *WalkState,
ACPI_PARSE_OBJECT *Op);
/*
* dmbuffer
*/
void
AcpiDmDisasmByteList (
UINT32 Level,
UINT8 *ByteData,
UINT32 ByteCount);
void
AcpiDmByteList (
ACPI_OP_WALK_INFO *Info,
ACPI_PARSE_OBJECT *Op);
void
AcpiDmIsEisaId (
ACPI_PARSE_OBJECT *Op);
void
AcpiDmEisaId (
UINT32 EncodedId);
BOOLEAN
AcpiDmIsUnicodeBuffer (
ACPI_PARSE_OBJECT *Op);
BOOLEAN
AcpiDmIsStringBuffer (
ACPI_PARSE_OBJECT *Op);
/*
* dmextern
*/
ACPI_STATUS
AcpiDmAddToExternalFileList (
char *PathList);
void
AcpiDmClearExternalFileList (
void);
void
AcpiDmAddToExternalList (
ACPI_PARSE_OBJECT *Op,
char *Path,
UINT8 Type,
UINT32 Value);
void
AcpiDmAddExternalsToNamespace (
void);
UINT32
AcpiDmGetExternalMethodCount (
void);
void
AcpiDmClearExternalList (
void);
void
AcpiDmEmitExternals (
void);
/*
* dmresrc
*/
void
AcpiDmDumpInteger8 (
UINT8 Value,
char *Name);
void
AcpiDmDumpInteger16 (
UINT16 Value,
char *Name);
void
AcpiDmDumpInteger32 (
UINT32 Value,
char *Name);
void
AcpiDmDumpInteger64 (
UINT64 Value,
char *Name);
void
AcpiDmResourceTemplate (
ACPI_OP_WALK_INFO *Info,
ACPI_PARSE_OBJECT *Op,
UINT8 *ByteData,
UINT32 ByteCount);
ACPI_STATUS
AcpiDmIsResourceTemplate (
ACPI_PARSE_OBJECT *Op);
void
AcpiDmBitList (
UINT16 Mask);
void
AcpiDmDescriptorName (
void);
/*
* dmresrcl
*/
void
AcpiDmWordDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmDwordDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmExtendedDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmQwordDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmMemory24Descriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmMemory32Descriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmFixedMemory32Descriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmGenericRegisterDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmInterruptDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmVendorLargeDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmVendorCommon (
char *Name,
UINT8 *ByteData,
UINT32 Length,
UINT32 Level);
/*
* dmresrcs
*/
void
AcpiDmIrqDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmDmaDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmIoDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmFixedIoDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmStartDependentDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmEndDependentDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
void
AcpiDmVendorSmallDescriptor (
AML_RESOURCE *Resource,
UINT32 Length,
UINT32 Level);
/*
* dmutils
*/
void
AcpiDmDecodeAttribute (
UINT8 Attribute);
void
AcpiDmIndent (
UINT32 Level);
BOOLEAN
AcpiDmCommaIfListMember (
ACPI_PARSE_OBJECT *Op);
void
AcpiDmCommaIfFieldMember (
ACPI_PARSE_OBJECT *Op);
/*
* dmrestag
*/
void
AcpiDmFindResources (
ACPI_PARSE_OBJECT *Root);
void
AcpiDmCheckResourceReference (
ACPI_PARSE_OBJECT *Op,
ACPI_WALK_STATE *WalkState);
/*
* acdisasm
*/
void
AdDisassemblerHeader (
char *Filename);
#endif /* __ACDISASM_H__ */
| lgpl-3.0 |
jcoady9/hummingbird | app/controllers/users_controller.rb | 5020 | require_dependency 'user_query'
# Handles user-centric endpoints: follow listings, profile display,
# follow/unfollow actions, avatar and profile updates, Facebook
# disconnection, profile comments, short-URL redirects, and the curated
# "to follow" list.
class UsersController < ApplicationController
  # Three modes, selected by query params:
  #   * followed_by / followers_of - paginated (20/page) follow lists
  #   * to_follow                  - users flagged with to_follow
  #   * (none)                     - legacy recommendations-status path
  def index
    if params[:followed_by] || params[:followers_of]
      if params[:followed_by]
        users = User.find(params[:followed_by]).following
      elsif params[:followers_of]
        users = User.find(params[:followers_of]).followers
      end
      users = users.page(params[:page]).per(20)
      # Batch-load follow state for the current user before serializing.
      UserQuery.load_is_followed(users, current_user)
      # Cursor is simply "next page number" for the client's pagination.
      render json: users, meta: { cursor: 1 + (params[:page] || 1).to_i }
    elsif params[:to_follow]
      render json: User.where(to_follow: true), each_serializer: UserSerializer
    else
      ### OLD CODE PATH BELOW. Used only by the recommendations page.
      authenticate_user!
      status = {
        recommendations_up_to_date: current_user.recommendations_up_to_date
      }
      respond_to do |format|
        format.html { redirect_to '/' }
        format.json { render json: status }
      end
    end
  end

  # Displays a profile. Redirects permanently to the canonical profile
  # path, and clears unseen profile-comment notifications when the owner
  # views their own page.
  def show
    user = User.find(params[:id])

    # Redirect to canonical path
    if request.path != user_path(user)
      return redirect_to user_path(user), status: :moved_permanently
    end

    if user_signed_in? && current_user == user
      # Clear notifications if the current user is viewing his/her feed.
      # TODO: This needs to be moved elsewhere.
      Notification.where(user: user, notification_type: 'profile_comment',
                         seen: false).update_all seen: true
    end

    respond_with_ember user
  end

  ember_action(:ember) { User.find(params[:user_id]) }

  # Toggles the follow relationship between the current user and the
  # target user. Following is capped at 10,000; successful toggles are
  # recorded as a Substory. Self-follow is silently ignored.
  def follow
    authenticate_user!
    user = User.find(params[:user_id])
    if user != current_user
      if user.followers.include? current_user
        user.followers.destroy current_user
        action_type = 'unfollowed'
      else
        if current_user.following_count < 10_000
          user.followers.push current_user
          action_type = 'followed'
        else
          flash[:message] = "Wow! You're following 10,000 people?! You should \
unfollow a few people that no longer interest you \
before following any others."
          # nil action_type suppresses the Substory below.
          action_type = nil
        end
      end
      if action_type
        Substory.from_action(
          user_id: current_user.id,
          action_type: action_type,
          followed_id: user.id
        )
      end
    end
    respond_to do |format|
      format.html { redirect_to :back }
      format.json { render json: true }
    end
  end

  # Replaces the avatar. Only the profile owner may do this; anyone else
  # receives a 403.
  def update_avatar
    authenticate_user!
    user = User.find(params[:user_id])
    if user == current_user
      # Accept the avatar either at the top level or nested under :user.
      user.avatar = params[:avatar] || params[:user][:avatar]
      user.save!
      respond_to do |format|
        format.html { redirect_to :back }
        format.json { render json: user, serializer: CurrentUserSerializer }
      end
    else
      error! 403
    end
  end

  # Unlinks the current user's Facebook account by clearing facebook_id.
  def disconnect_facebook
    authenticate_user!
    current_user.update_attributes(facebook_id: nil)
    redirect_to :back
  end

  # Resolves a /:username style short URL to the canonical user route;
  # raises a routing error (404) for unknown names.
  def redirect_short_url
    @user = User.find_by_name params[:username]
    fail ActionController::RoutingError, 'Not Found' if @user.nil?
    redirect_to @user
  end

  # Posts a profile comment by broadcasting a created_profile_comment
  # action from the current user onto the target user's profile.
  def comment
    authenticate_user!
    # Create the story.
    @user = User.find(params[:user_id])
    Action.broadcast(
      action_type: 'created_profile_comment',
      user: @user,
      poster: current_user,
      comment: params[:comment]
    )
    respond_to do |format|
      format.html { redirect_to :back }
      format.json { render json: true }
    end
  end

  # Updates the current user's profile. Accepts the payload under either
  # :current_user or :user, translates the new_password / new_username /
  # rating_type / cover_image_url aliases into real model attributes,
  # then whitelists the updatable fields via strong parameters.
  def update
    authenticate_user!
    user = User.find(params[:id])
    changes = params[:current_user] || params[:user]
    return error!(401, 'Wrong user') unless current_user == user

    # Finagling things into place
    # Only data-URI cover images are accepted from the URL field.
    changes[:cover_image] =
      changes[:cover_image_url] if changes[:cover_image_url] =~ /^data:/
    changes[:password] =
      changes[:new_password] if changes[:new_password].present?
    changes[:name] = changes[:new_username] if changes[:new_username].present?
    changes[:star_rating] = (changes[:rating_type] == 'advanced')
    %i(new_password new_username rating_type cover_image_url).each do |key|
      changes.delete(key)
    end

    changes = changes.permit(:about, :location, :website, :name, :waifu_char_id,
                             :sfw_filter, :waifu, :bio, :email, :cover_image,
                             :waifu_or_husbando, :title_language_preference,
                             :password, :star_rating)

    # Convert to hash so that we ignore disallowed attributes
    user.assign_attributes(changes.to_h)

    if user.save
      render json: user
    else
      return error!(user.errors, 400)
    end
  end

  # Returns a fixed, hand-picked list of accounts suggested for following.
  def to_follow
    fixed_user_list = %w(
      Gigguk Holden JeanP
      Arkada HappiLeeErin DoctorDazza
      Yokurama dexbonus DEMOLITION_D
    )
    @users = User.where(name: fixed_user_list)

    render json: @users, each_serializer: UserSerializer
  end
end
| apache-2.0 |
nikhilvibhav/camel | core/camel-core/src/test/java/org/apache/camel/component/seda/SedaSizeTest.java | 1588 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.seda;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
import org.junit.jupiter.api.Test;
/**
 * Verifies that a SEDA consumer declared with an explicit queue size
 * ({@code seda:bar?size=5}) still receives messages produced onto the
 * same queue by another route.
 */
public class SedaSizeTest extends ContextTestSupport {

    @Test
    public void testSeda() throws Exception {
        // A single message sent to direct:start should traverse seda:bar
        // and arrive at the mock endpoint.
        getMockEndpoint("mock:bar").expectedMessageCount(1);

        template.sendBody("direct:start", "Hello World");

        assertMockEndpointsSatisfied();
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // Producer route feeds the queue; the consumer route binds
                // to the same queue with a size limit of 5.
                from("direct:start").to("seda:bar");

                from("seda:bar?size=5").to("mock:bar");
            }
        };
    }
}
| apache-2.0 |
zhencui/azure-powershell | src/ResourceManager/Insights/Commands.Insights/OutputClasses/PSLocalizableString.cs | 1909 | // ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using Microsoft.Azure.Insights.Models;
namespace Microsoft.Azure.Commands.Insights.OutputClasses
{
/// <summary>
/// Wraps a LocalizableString object so it can be displayed with indentation
/// </summary>
public class PSLocalizableString : LocalizableString
{
    /// <summary>
    /// Initializes a new instance of the PSLocalizableString class
    /// </summary>
    /// <param name="localizableString">The input LocalizableString object; a null input leaves both values unset</param>
    public PSLocalizableString(LocalizableString localizableString)
    {
        // Guard clause: tolerate a null source and keep the default
        // (null) LocalizedValue/Value, matching the previous behavior.
        if (localizableString == null)
        {
            return;
        }

        this.LocalizedValue = localizableString.LocalizedValue;
        this.Value = localizableString.Value;
    }

    /// <summary>
    /// A string representation of this LocalizableString object including indentation
    /// </summary>
    /// <returns>A string representation of the LocalizableString object including indentation</returns>
    public override string ToString()
    {
        // Delegates to the indentation-aware overload with one tab level.
        return this.ToString(indentationTabs: 1);
    }
}
}
| apache-2.0 |
maxamillion/origin | vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go | 38059 | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"fmt"
"reflect"
"strings"
"time"
"k8s.io/api/admissionregistration/v1beta1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
const (
secretName = "sample-webhook-secret"
deploymentName = "sample-webhook-deployment"
serviceName = "e2e-test-webhook"
roleBindingName = "webhook-auth-reader"
// The webhook configuration names should not be reused between test instances.
crdWebhookConfigName = "e2e-test-webhook-config-crd"
webhookConfigName = "e2e-test-webhook-config"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crdMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-crd"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
allowedConfigMapName = "allowed-configmap"
failNamespaceLabelKey = "fail-closed-webhook"
failNamespaceLabelValue = "yes"
failNamespaceName = "fail-closed-namesapce"
)
// Minimum server version that supports admission webhooks.
var serverWebhookVersion = utilversion.MustParseSemantic("v1.8.0")

// E2E suite for dynamic admission webhooks. Each BeforeEach provisions a
// serving certificate, RBAC for the webhook, and the sample webhook
// deployment/service; each It registers one webhook configuration,
// exercises it, and relies on the returned cleanup func plus AfterEach
// to tear everything down.
var _ = SIGDescribe("AdmissionWebhook", func() {
	var context *certContext
	f := framework.NewDefaultFramework("webhook")
	var client clientset.Interface
	var namespaceName string

	BeforeEach(func() {
		client = f.ClientSet
		namespaceName = f.Namespace.Name

		// Make sure the relevant provider supports admission webhook
		framework.SkipUnlessServerVersionGTE(serverWebhookVersion, f.ClientSet.Discovery())
		framework.SkipUnlessProviderIs("gce", "gke", "local")

		// Probing the API group via a List call; NotFound means the
		// admissionregistration group is disabled on this cluster.
		_, err := f.ClientSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(metav1.ListOptions{})
		if errors.IsNotFound(err) {
			framework.Skipf("dynamic configuration of webhooks requires the admissionregistration.k8s.io group to be enabled")
		}

		By("Setting up server cert")
		context = setupServerCert(namespaceName, serviceName)
		createAuthReaderRoleBinding(f, namespaceName)

		// Note that in 1.9 we will have backwards incompatible change to
		// admission webhooks, so the image will be updated to 1.9 sometime in
		// the development 1.9 cycle.
		deployWebhookAndService(f, imageutils.GetE2EImage(imageutils.AdmissionWebhook), context)
	})
	AfterEach(func() {
		cleanWebhookTest(client, namespaceName)
	})

	It("Should be able to deny pod and configmap creation", func() {
		webhookCleanup := registerWebhook(f, context)
		defer webhookCleanup()
		testWebhook(f)
	})

	It("Should be able to deny custom resource creation", func() {
		// CRD creation may fail on clusters without CRD support; skip quietly.
		testcrd, err := framework.CreateTestCRD(f)
		if err != nil {
			return
		}
		defer testcrd.CleanUp()
		webhookCleanup := registerWebhookForCRD(f, context, testcrd)
		defer webhookCleanup()
		testCRDWebhook(f, testcrd.Crd, testcrd.DynamicClient)
	})

	It("Should unconditionally reject operations on fail closed webhook", func() {
		webhookCleanup := registerFailClosedWebhook(f, context)
		defer webhookCleanup()
		testFailClosedWebhook(f)
	})

	It("Should mutate configmap", func() {
		webhookCleanup := registerMutatingWebhookForConfigMap(f, context)
		defer webhookCleanup()
		testMutatingConfigMapWebhook(f)
	})

	It("Should mutate pod and apply defaults after mutation", func() {
		webhookCleanup := registerMutatingWebhookForPod(f, context)
		defer webhookCleanup()
		testMutatingPodWebhook(f)
	})

	It("Should not be able to prevent deleting validating-webhook-configurations or mutating-webhook-configurations", func() {
		webhookCleanup := registerWebhookForWebhookConfigurations(f, context)
		defer webhookCleanup()
		testWebhookForWebhookConfigurations(f)
	})

	It("Should mutate crd", func() {
		testcrd, err := framework.CreateTestCRD(f)
		if err != nil {
			return
		}
		defer testcrd.CleanUp()
		webhookCleanup := registerMutatingWebhookForCRD(f, context, testcrd)
		defer webhookCleanup()
		testMutatingCRDWebhook(f, testcrd.Crd, testcrd.DynamicClient)
	})

	// TODO: add more e2e tests for mutating webhooks
	// 1. mutating webhook that mutates pod
	// 2. mutating webhook that sends empty patch
	//   2.1 and sets status.allowed=true
	//   2.2 and sets status.allowed=false
	// 3. mutating webhook that sends patch, but also sets status.allowed=false
	// 4. mtuating webhook that fail-open v.s. fail-closed
})
// createAuthReaderRoleBinding grants the webhook's default service account
// (in the given test namespace) read access to the
// extension-apiserver-authentication configmap in kube-system via the
// extension-apiserver-authentication-reader role. An AlreadyExists error
// is tolerated so repeated runs do not fail.
func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
	By("Create role binding to let webhook read extension-apiserver-authentication")
	client := f.ClientSet
	// Create the role binding to allow the webhook read the extension-apiserver-authentication configmap
	_, err := client.RbacV1beta1().RoleBindings("kube-system").Create(&rbacv1beta1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: roleBindingName,
			Annotations: map[string]string{
				// Allow the apiserver's reconciler to keep this binding updated.
				rbacv1beta1.AutoUpdateAnnotationKey: "true",
			},
		},
		RoleRef: rbacv1beta1.RoleRef{
			APIGroup: "",
			Kind:     "Role",
			Name:     "extension-apiserver-authentication-reader",
		},
		// Webhook uses the default service account.
		Subjects: []rbacv1beta1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      "default",
				Namespace: namespace,
			},
		},
	})
	if err != nil && errors.IsAlreadyExists(err) {
		framework.Logf("role binding %s already exists", roleBindingName)
	} else {
		framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
	}
}
// deployWebhookAndService stands up the sample webhook backend in the test
// namespace: a secret holding the TLS serving cert/key from context, a
// one-replica deployment running the given webhook image, and a service on
// port 443 in front of it. It blocks until the deployment rollout completes
// and the service has an endpoint, failing the test on any error.
func deployWebhookAndService(f *framework.Framework, image string, context *certContext) {
	By("Deploying the webhook pod")
	client := f.ClientSet
	// Creating the secret that contains the webhook's cert.
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: secretName,
		},
		Type: v1.SecretTypeOpaque,
		Data: map[string][]byte{
			"tls.crt": context.cert,
			"tls.key": context.key,
		},
	}
	namespace := f.Namespace.Name
	_, err := client.CoreV1().Secrets(namespace).Create(secret)
	framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)

	// Create the deployment of the webhook
	podLabels := map[string]string{"app": "sample-webhook", "webhook": "true"}
	replicas := int32(1)
	zero := int64(0) // immediate pod termination on teardown
	// Mount the TLS secret read-only where the webhook binary expects it.
	mounts := []v1.VolumeMount{
		{
			Name:      "webhook-certs",
			ReadOnly:  true,
			MountPath: "/webhook.local.config/certificates",
		},
	}
	volumes := []v1.Volume{
		{
			Name: "webhook-certs",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{SecretName: secretName},
			},
		},
	}
	containers := []v1.Container{
		{
			Name:         "sample-webhook",
			VolumeMounts: mounts,
			Args: []string{
				"--tls-cert-file=/webhook.local.config/certificates/tls.crt",
				"--tls-private-key-file=/webhook.local.config/certificates/tls.key",
				"--alsologtostderr",
				"-v=4",
				"2>&1",
			},
			Image: image,
		},
	}
	d := &extensions.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: deploymentName,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: &replicas,
			Strategy: extensions.DeploymentStrategy{
				Type: extensions.RollingUpdateDeploymentStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &zero,
					Containers:                    containers,
					Volumes:                       volumes,
				},
			},
		},
	}
	deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d)
	framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
	By("Wait for the deployment to be ready")
	// Wait first for revision 1 with the expected image, then for the
	// rollout itself to complete.
	err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
	framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
	err = framework.WaitForDeploymentComplete(client, deployment)
	framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentName, namespace)

	By("Deploying the webhook service")
	serviceLabels := map[string]string{"webhook": "true"}
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      serviceName,
			Labels:    map[string]string{"test": "webhook"},
		},
		Spec: v1.ServiceSpec{
			Selector: serviceLabels,
			Ports: []v1.ServicePort{
				{
					Protocol:   "TCP",
					Port:       443,
					TargetPort: intstr.FromInt(443),
				},
			},
		},
	}
	_, err = client.CoreV1().Services(namespace).Create(service)
	framework.ExpectNoError(err, "creating service %s in namespace %s", serviceName, namespace)
	By("Verifying the service has paired with the endpoint")
	// One pod replica => expect exactly one endpoint behind the service.
	err = framework.WaitForServiceEndpointsNum(client, namespace, serviceName, 1, 1*time.Second, 30*time.Second)
	framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceName, 1)
}
func strPtr(s string) *string { return &s }
// registerWebhook registers a ValidatingWebhookConfiguration containing three
// webhooks backed by the test service:
//   - one rejecting pod creates with unwanted container names or labels (/pods),
//   - one rejecting configmap creates/updates with unwanted data (/configmaps),
//     skipped for namespaces labeled skipNamespaceLabelKey=skipNamespaceLabelValue,
//   - an unreachable fail-open webhook that must not block admission.
// It returns a cleanup function that deletes the configuration.
func registerWebhook(f *framework.Framework, context *certContext) func() {
	client := f.ClientSet
	By("Registering the webhook via the AdmissionRegistration API")
	namespace := f.Namespace.Name
	configName := webhookConfigName
	// A webhook that cannot talk to server, with fail-open policy
	failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
	policyIgnore := v1beta1.Ignore
	failOpenHook.FailurePolicy = &policyIgnore
	_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: configName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "deny-unwanted-pod-container-name-and-label.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{""},
						APIVersions: []string{"v1"},
						Resources:   []string{"pods"},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/pods"),
					},
					// Signed with the test CA so the apiserver trusts the service.
					CABundle: context.signingCert,
				},
			},
			{
				Name: "deny-unwanted-configmap-data.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update},
					Rule: v1beta1.Rule{
						APIGroups:   []string{""},
						APIVersions: []string{"v1"},
						Resources:   []string{"configmaps"},
					},
				}},
				// The webhook skips the namespace that has label "skip-webhook-admission":"yes"
				NamespaceSelector: &metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{
						{
							Key:      skipNamespaceLabelKey,
							Operator: metav1.LabelSelectorOpNotIn,
							Values:   []string{skipNamespaceLabelValue},
						},
					},
				},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/configmaps"),
					},
					CABundle: context.signingCert,
				},
			},
			// Server cannot talk to this webhook, so it always fails.
			// Because this webhook is configured fail-open, request should be admitted after the call fails.
			failOpenHook,
		},
	})
	framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	return func() {
		client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
	}
}
// registerMutatingWebhookForConfigMap registers a MutatingWebhookConfiguration
// with two webhook stages that both point at the /mutating-configmaps endpoint
// of the test service; each stage adds its own marker to created configmaps,
// letting the test verify that both stages ran. Returns a cleanup function
// that deletes the configuration.
func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
	client := f.ClientSet
	By("Registering the mutating configmap webhook via the AdmissionRegistration API")
	namespace := f.Namespace.Name
	configName := mutatingWebhookConfigName
	_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: configName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "adding-configmap-data-stage-1.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{""},
						APIVersions: []string{"v1"},
						Resources:   []string{"configmaps"},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/mutating-configmaps"),
					},
					CABundle: context.signingCert,
				},
			},
			{
				Name: "adding-configmap-data-stage-2.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{""},
						APIVersions: []string{"v1"},
						Resources:   []string{"configmaps"},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/mutating-configmaps"),
					},
					CABundle: context.signingCert,
				},
			},
		},
	})
	framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
}
// testMutatingConfigMapWebhook creates a configmap and verifies that both
// mutating webhook stages appended their markers to its data.
func testMutatingConfigMapWebhook(f *framework.Framework) {
	By("create a configmap that should be updated by the webhook")
	cm := toBeMutatedConfigMap(f)
	mutated, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
	Expect(err).To(BeNil())
	want := map[string]string{
		"mutation-start":   "yes",
		"mutation-stage-1": "yes",
		"mutation-stage-2": "yes",
	}
	if !reflect.DeepEqual(want, mutated.Data) {
		framework.Failf("\nexpected %#v\n, got %#v\n", want, mutated.Data)
	}
}
// registerMutatingWebhookForPod registers a MutatingWebhookConfiguration with
// a single webhook (served at /mutating-pods) that mutates created pods; the
// test service's handler injects an init container. Returns a cleanup
// function that deletes the configuration.
func registerMutatingWebhookForPod(f *framework.Framework, context *certContext) func() {
	client := f.ClientSet
	By("Registering the mutating pod webhook via the AdmissionRegistration API")
	namespace := f.Namespace.Name
	configName := podMutatingWebhookConfigName
	_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: configName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "adding-init-container.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{""},
						APIVersions: []string{"v1"},
						Resources:   []string{"pods"},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/mutating-pods"),
					},
					CABundle: context.signingCert,
				},
			},
		},
	})
	framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
}
// testMutatingPodWebhook creates a pod and verifies that the mutating pod
// webhook injected exactly one init container named
// "webhook-added-init-container", and that its terminationMessagePolicy was
// defaulted to v1.TerminationMessageReadFile.
func testMutatingPodWebhook(f *framework.Framework) {
	By("create a pod that should be updated by the webhook")
	client := f.ClientSet
	// Renamed from the copy-pasted "configMap": this local holds a Pod.
	pod := toBeMutatedPod(f)
	mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
	Expect(err).To(BeNil())
	if len(mutatedPod.Spec.InitContainers) != 1 {
		framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
	}
	if got, expected := mutatedPod.Spec.InitContainers[0].Name, "webhook-added-init-container"; got != expected {
		framework.Failf("expect the init container name to be %q, got %q", expected, got)
	}
	if got, expected := mutatedPod.Spec.InitContainers[0].TerminationMessagePolicy, v1.TerminationMessageReadFile; got != expected {
		framework.Failf("expect the init terminationMessagePolicy to be default to %q, got %q", expected, got)
	}
}
// toBeMutatedPod returns a minimal single-container pod that the mutating pod
// webhook is expected to modify by injecting an init container.
func toBeMutatedPod(f *framework.Framework) *v1.Pod {
	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "webhook-to-be-mutated"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "example",
				Image: framework.GetPauseImageName(f.ClientSet),
			}},
		},
	}
	return &pod
}
// testWebhook exercises the validating webhooks registered by registerWebhook:
// non-compliant pods and configmaps must be rejected with the webhook's error
// messages (via create, PUT and PATCH), a webhook that hangs must surface a
// timeout error, compliant objects must be admitted, and a namespace carrying
// the skip label must bypass the configmap webhook entirely.
func testWebhook(f *framework.Framework) {
	By("create a pod that should be denied by the webhook")
	client := f.ClientSet
	// Creating the pod, the request should be rejected
	pod := nonCompliantPod(f)
	_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
	Expect(err).NotTo(BeNil())
	// The denial message must mention both offending attributes.
	expectedErrMsg1 := "the pod contains unwanted container name"
	if !strings.Contains(err.Error(), expectedErrMsg1) {
		framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
	}
	expectedErrMsg2 := "the pod contains unwanted label"
	if !strings.Contains(err.Error(), expectedErrMsg2) {
		framework.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
	}
	By("create a pod that causes the webhook to hang")
	client = f.ClientSet
	// Creating the pod, the request should be rejected
	pod = hangingPod(f)
	_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
	Expect(err).NotTo(BeNil())
	expectedTimeoutErr := "request did not complete within allowed duration"
	if !strings.Contains(err.Error(), expectedTimeoutErr) {
		framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
	}
	By("create a configmap that should be denied by the webhook")
	// Creating the configmap, the request should be rejected
	configmap := nonCompliantConfigMap(f)
	_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
	Expect(err).NotTo(BeNil())
	expectedErrMsg := "the configmap contains unwanted key and value"
	if !strings.Contains(err.Error(), expectedErrMsg) {
		framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
	}
	By("create a configmap that should be admitted by the webhook")
	// Creating the configmap, the request should be admitted
	configmap = &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: allowedConfigMapName,
		},
		Data: map[string]string{
			"admit": "this",
		},
	}
	_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
	Expect(err).NotTo(HaveOccurred())
	By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
	toNonCompliantFn := func(cm *v1.ConfigMap) {
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["webhook-e2e-test"] = "webhook-disallow"
	}
	_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
	Expect(err).NotTo(BeNil())
	if !strings.Contains(err.Error(), expectedErrMsg) {
		framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
	}
	By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
	patch := nonCompliantConfigMapPatch()
	_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
	Expect(err).NotTo(BeNil())
	if !strings.Contains(err.Error(), expectedErrMsg) {
		framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
	}
	By("create a namespace that bypass the webhook")
	err = createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
		Name: skippedNamespaceName,
		Labels: map[string]string{
			skipNamespaceLabelKey: skipNamespaceLabelValue,
		},
	}})
	framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName)
	// clean up the namespace
	defer client.CoreV1().Namespaces().Delete(skippedNamespaceName, nil)
	By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
	configmap = nonCompliantConfigMap(f)
	_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
	Expect(err).To(BeNil())
}
// failingWebhook returns a webhook with rule of create configmaps,
// but with an invalid client config so that server cannot communicate with it.
// Callers set FailurePolicy on the returned value to exercise fail-open vs
// fail-closed behavior of an unreachable webhook.
func failingWebhook(namespace, name string) v1beta1.Webhook {
	return v1beta1.Webhook{
		Name: name,
		Rules: []v1beta1.RuleWithOperations{{
			Operations: []v1beta1.OperationType{v1beta1.Create},
			Rule: v1beta1.Rule{
				APIGroups:   []string{""},
				APIVersions: []string{"v1"},
				Resources:   []string{"configmaps"},
			},
		}},
		ClientConfig: v1beta1.WebhookClientConfig{
			Service: &v1beta1.ServiceReference{
				Namespace: namespace,
				Name:      serviceName,
				Path:      strPtr("/configmaps"),
			},
			// Without CA bundle, the call to webhook always fails
			CABundle: nil,
		},
	}
}
// registerFailClosedWebhook registers an unreachable webhook with a Fail
// policy, scoped via a NamespaceSelector to namespaces labeled
// failNamespaceLabelKey=failNamespaceLabelValue, so requests in those
// namespaces must be rejected. Returns a cleanup function that deletes the
// configuration.
func registerFailClosedWebhook(f *framework.Framework, context *certContext) func() {
	client := f.ClientSet
	By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")
	namespace := f.Namespace.Name
	configName := webhookFailClosedConfigName
	// A webhook that cannot talk to server, with fail-closed policy
	policyFail := v1beta1.Fail
	hook := failingWebhook(namespace, "fail-closed.k8s.io")
	hook.FailurePolicy = &policyFail
	hook.NamespaceSelector = &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{
				Key:      failNamespaceLabelKey,
				Operator: metav1.LabelSelectorOpIn,
				Values:   []string{failNamespaceLabelValue},
			},
		},
	}
	_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: configName,
		},
		Webhooks: []v1beta1.Webhook{
			// Server cannot talk to this webhook, so it always fails.
			// Because this webhook is configured fail-closed, request should be rejected after the call fails.
			hook,
		},
	})
	framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	return func() {
		f.ClientSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
	}
}
// testFailClosedWebhook verifies that a fail-closed webhook the apiserver
// cannot reach causes requests in the matching labeled namespace to be
// rejected with an internal error.
func testFailClosedWebhook(f *framework.Framework) {
	client := f.ClientSet
	By("create a namespace for the webhook")
	err := createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
		Name: failNamespaceName,
		Labels: map[string]string{
			failNamespaceLabelKey: failNamespaceLabelValue,
		},
	}})
	framework.ExpectNoError(err, "creating namespace %q", failNamespaceName)
	defer client.CoreV1().Namespaces().Delete(failNamespaceName, nil)
	By("create a configmap should be unconditionally rejected by the webhook")
	configmap := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
	}
	_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
	Expect(err).To(HaveOccurred())
	// The admission failure surfaces as an InternalError, not Forbidden.
	if !errors.IsInternalError(err) {
		framework.Failf("expect an internal error, got %#v", err)
	}
}
// registerWebhookForWebhookConfigurations registers a fail-closed validating
// webhook (served at /always-deny) that denies all Delete requests on
// validating and mutating webhook configuration objects. Returns a cleanup
// function that deletes the configuration and asserts the deletion succeeded.
func registerWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
	var err error
	client := f.ClientSet
	By("Registering a webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
	namespace := f.Namespace.Name
	configName := webhookForWebhooksConfigName
	failurePolicy := v1beta1.Fail
	// This webhook will deny all requests to Delete admissionregistration objects
	_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: configName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "deny-webhook-configuration-deletions.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Delete},
					Rule: v1beta1.Rule{
						APIGroups:   []string{"admissionregistration.k8s.io"},
						APIVersions: []string{"*"},
						Resources: []string{
							"validatingwebhookconfigurations",
							"mutatingwebhookconfigurations",
						},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/always-deny"),
					},
					CABundle: context.signingCert,
				},
				FailurePolicy: &failurePolicy,
			},
		},
	})
	framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	return func() {
		err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
		framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
	}
}
// This test assumes that the deletion-rejecting webhook defined in
// registerWebhookForWebhookConfigurations is in place.
// It creates a validating and a mutating webhook configuration whose own
// webhooks are unreachable but fail-open (so they never block admission), and
// verifies that both configurations can still be deleted — i.e. the
// deny-deletions webhook does not apply to these removable test hooks.
func testWebhookForWebhookConfigurations(f *framework.Framework) {
	var err error
	client := f.ClientSet
	By("Creating a validating-webhook-configuration object")
	namespace := f.Namespace.Name
	failurePolicy := v1beta1.Ignore
	_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: removableValidatingHookName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "should-be-removable-validating-webhook.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{"*"},
						APIVersions: []string{"*"},
						Resources:   []string{"*"},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						// This path not recognized by the webhook service,
						// so the call to this webhook will always fail,
						// but because the failure policy is ignore, it will
						// have no effect on admission requests.
						Path: strPtr(""),
					},
					CABundle: nil,
				},
				FailurePolicy: &failurePolicy,
			},
		},
	})
	framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableValidatingHookName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	By("Deleting the validating-webhook-configuration, which should be possible to remove")
	err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(removableValidatingHookName, nil)
	framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableValidatingHookName, namespace)
	By("Creating a mutating-webhook-configuration object")
	_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: removableMutatingHookName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "should-be-removable-mutating-webhook.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{"*"},
						APIVersions: []string{"*"},
						Resources:   []string{"*"},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						// This path not recognized by the webhook service,
						// so the call to this webhook will always fail,
						// but because the failure policy is ignore, it will
						// have no effect on admission requests.
						Path: strPtr(""),
					},
					CABundle: nil,
				},
				FailurePolicy: &failurePolicy,
			},
		},
	})
	framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableMutatingHookName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	By("Deleting the mutating-webhook-configuration, which should be possible to remove")
	err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(removableMutatingHookName, nil)
	framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableMutatingHookName, namespace)
}
// createNamespace creates ns, retrying while a previous instance of the
// namespace is still being torn down, for up to 30 seconds.
func createNamespace(f *framework.Framework, ns *v1.Namespace) error {
	return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		if _, err := f.ClientSet.CoreV1().Namespaces().Create(ns); err != nil {
			if strings.HasPrefix(err.Error(), "object is being deleted:") {
				// The old namespace is mid-deletion; keep polling.
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
}
// nonCompliantPod builds a pod that the validating webhook rejects: both its
// container name and its "webhook-e2e-test" label carry the disallowed value.
func nonCompliantPod(f *framework.Framework) *v1.Pod {
	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   disallowedPodName,
			Labels: map[string]string{"webhook-e2e-test": "webhook-disallow"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "webhook-disallow",
				Image: framework.GetPauseImageName(f.ClientSet),
			}},
		},
	}
	return &pod
}
// hangingPod builds a pod whose "wait-forever" label makes the test webhook
// service stall the admission request until the apiserver times it out.
func hangingPod(f *framework.Framework) *v1.Pod {
	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   hangingPodName,
			Labels: map[string]string{"webhook-e2e-test": "wait-forever"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "wait-forever",
				Image: framework.GetPauseImageName(f.ClientSet),
			}},
		},
	}
	return &pod
}
// nonCompliantConfigMap builds a configmap carrying the data entry that the
// validating configmap webhook is configured to reject.
func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
	cm := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: disallowedConfigMapName},
		Data:       map[string]string{"webhook-e2e-test": "webhook-disallow"},
	}
	return &cm
}
// toBeMutatedConfigMap builds the seed configmap that the mutating webhook
// stages are expected to extend with their own markers.
func toBeMutatedConfigMap(f *framework.Framework) *v1.ConfigMap {
	cm := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "to-be-mutated"},
		Data:       map[string]string{"mutation-start": "yes"},
	}
	return &cm
}
// nonCompliantConfigMapPatch returns a strategic-merge patch that sets the
// data entry the validating configmap webhook rejects.
// NOTE(review): fmt.Sprint of a single constant string is a no-op and could
// be replaced by returning the literal directly — confirm fmt is still used
// elsewhere in this file before removing, or the import becomes unused.
func nonCompliantConfigMapPatch() string {
	return fmt.Sprint(`{"data":{"webhook-e2e-test":"webhook-disallow"}}`)
}
// updateConfigMapFn mutates a configmap in place before it is written back.
type updateConfigMapFn func(cm *v1.ConfigMap)

// updateConfigMap reads ns/name, applies update to it and writes it back,
// retrying only on update conflicts, for up to one minute. It returns the
// last configmap read together with the poll error (nil on success).
func updateConfigMap(c clientset.Interface, ns, name string, update updateConfigMapFn) (*v1.ConfigMap, error) {
	var cm *v1.ConfigMap
	pollErr := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) {
		var err error
		if cm, err = c.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		update(cm)
		if cm, err = c.CoreV1().ConfigMaps(ns).Update(cm); err == nil {
			return true, nil
		}
		// Only retry update on conflict
		if !errors.IsConflict(err) {
			return false, err
		}
		return false, nil
	})
	return cm, pollErr
}
// cleanWebhookTest best-effort deletes the service, deployment, secret and
// role binding the webhook test created; errors are deliberately ignored
// because this runs as teardown and the objects may already be gone.
func cleanWebhookTest(client clientset.Interface, namespaceName string) {
	_ = client.CoreV1().Services(namespaceName).Delete(serviceName, nil)
	_ = client.ExtensionsV1beta1().Deployments(namespaceName).Delete(deploymentName, nil)
	_ = client.CoreV1().Secrets(namespaceName).Delete(secretName, nil)
	_ = client.RbacV1beta1().RoleBindings("kube-system").Delete(roleBindingName, nil)
}
// registerWebhookForCRD registers a validating webhook (served at /crd) for
// creates of the given test CRD's resources. Returns a cleanup function that
// deletes the configuration.
func registerWebhookForCRD(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
	client := f.ClientSet
	By("Registering the crd webhook via the AdmissionRegistration API")
	namespace := f.Namespace.Name
	configName := crdWebhookConfigName
	_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: configName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "deny-unwanted-crd-data.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{testcrd.ApiGroup},
						APIVersions: []string{testcrd.ApiVersion},
						Resources:   []string{testcrd.GetPluralName()},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/crd"),
					},
					CABundle: context.signingCert,
				},
			},
		},
	})
	framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	return func() {
		client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
	}
}
// registerMutatingWebhookForCRD registers two mutating webhook stages for
// creates of the given test CRD's resources, both served by /mutating-crd;
// each stage adds its own marker to created custom resources. Returns a
// cleanup function that deletes the configuration.
func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
	client := f.ClientSet
	By("Registering the mutating webhook for crd via the AdmissionRegistration API")
	namespace := f.Namespace.Name
	configName := crdMutatingWebhookConfigName
	_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: configName,
		},
		Webhooks: []v1beta1.Webhook{
			{
				Name: "mutate-crd-data-stage-1.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{testcrd.ApiGroup},
						APIVersions: []string{testcrd.ApiVersion},
						Resources:   []string{testcrd.GetPluralName()},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/mutating-crd"),
					},
					CABundle: context.signingCert,
				},
			},
			{
				Name: "mutate-crd-data-stage-2.k8s.io",
				Rules: []v1beta1.RuleWithOperations{{
					Operations: []v1beta1.OperationType{v1beta1.Create},
					Rule: v1beta1.Rule{
						APIGroups:   []string{testcrd.ApiGroup},
						APIVersions: []string{testcrd.ApiVersion},
						Resources:   []string{testcrd.GetPluralName()},
					},
				}},
				ClientConfig: v1beta1.WebhookClientConfig{
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      serviceName,
						Path:      strPtr("/mutating-crd"),
					},
					CABundle: context.signingCert,
				},
			},
		},
	})
	framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace)
	// The webhook configuration is honored in 10s.
	time.Sleep(10 * time.Second)
	return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
}
// testCRDWebhook creates a custom resource carrying disallowed data and
// verifies the validating CRD webhook rejects it with the expected message.
func testCRDWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, crdClient dynamic.ResourceInterface) {
	By("Creating a custom resource that should be denied by the webhook")
	crInstance := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       crd.Spec.Names.Kind,
			"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
			"metadata": map[string]interface{}{
				"name":      "cr-instance-1",
				"namespace": f.Namespace.Name,
			},
			"data": map[string]interface{}{
				"webhook-e2e-test": "webhook-disallow",
			},
		},
	}
	_, err := crdClient.Create(crInstance)
	Expect(err).NotTo(BeNil())
	expectedErrMsg := "the custom resource contains unwanted data"
	if !strings.Contains(err.Error(), expectedErrMsg) {
		framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
	}
}
// testMutatingCRDWebhook creates a custom resource and verifies that both
// mutating CRD webhook stages appended their markers to its data.
func testMutatingCRDWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, crdClient dynamic.ResourceInterface) {
	By("Creating a custom resource that should be mutated by the webhook")
	cr := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       crd.Spec.Names.Kind,
			"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
			"metadata": map[string]interface{}{
				"name":      "cr-instance-1",
				"namespace": f.Namespace.Name,
			},
			"data": map[string]interface{}{
				"mutation-start": "yes",
			},
		},
	}
	mutatedCR, err := crdClient.Create(cr)
	Expect(err).To(BeNil())
	expectedCRData := map[string]interface{}{
		"mutation-start":   "yes",
		"mutation-stage-1": "yes",
		"mutation-stage-2": "yes",
	}
	if !reflect.DeepEqual(expectedCRData, mutatedCR.Object["data"]) {
		framework.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
	}
}
| apache-2.0 |
aconyteds/Esri-Ozone-Map-Widget | vendor/js/esri/arcgis_js_api/library/3.12/3.12compact/dgrid/extensions/nls/zh-cn/columnHider.js | 111 | //>>built
define("dgrid/extensions/nls/zh-cn/columnHider", {
	// zh-cn translation: "Show or hide columns"; kept as escaped Unicode as emitted by the build.
	popupLabel: "\u663e\u793a\u6216\u9690\u85cf\u5217"
});
MungoRae/home-assistant | homeassistant/components/notify/telstra.py | 3404 | """
Telstra API platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.telstra/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.components.notify import (
BaseNotificationService, ATTR_TITLE, PLATFORM_SCHEMA)
from homeassistant.const import CONTENT_TYPE_JSON, HTTP_HEADER_CONTENT_TYPE
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration keys for the Telstra SMS notify platform.
CONF_CONSUMER_KEY = 'consumer_key'
CONF_CONSUMER_SECRET = 'consumer_secret'
CONF_PHONE_NUMBER = 'phone_number'

# All three options are mandatory: the OAuth client credentials and the
# destination phone number for every notification.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_CONSUMER_KEY): cv.string,
    vol.Required(CONF_CONSUMER_SECRET): cv.string,
    vol.Required(CONF_PHONE_NUMBER): cv.string,
})
def get_service(hass, config, discovery_info=None):
    """Get the Telstra SMS API notification service.

    Performs an initial authentication with the configured credentials and
    returns None (aborting platform setup) if it fails.
    """
    consumer_key = config.get(CONF_CONSUMER_KEY)
    consumer_secret = config.get(CONF_CONSUMER_SECRET)
    phone_number = config.get(CONF_PHONE_NUMBER)
    if _authenticate(consumer_key, consumer_secret) is False:
        # No exception is in flight here, so use error() instead of
        # exception(), which would log a bogus "NoneType: None" traceback.
        _LOGGER.error("Error obtaining authorization from Telstra API")
        return None
    return TelstraNotificationService(
        consumer_key, consumer_secret, phone_number)
class TelstraNotificationService(BaseNotificationService):
    """Implementation of a notification service for the Telstra SMS API."""

    def __init__(self, consumer_key, consumer_secret, phone_number):
        """Initialize the service.

        consumer_key/consumer_secret are the Telstra API OAuth client
        credentials; phone_number is the destination for all messages.
        """
        self._consumer_key = consumer_key
        self._consumer_secret = consumer_secret
        self._phone_number = phone_number

    def send_message(self, message="", **kwargs):
        """Send an SMS to the configured phone number.

        An optional title is prepended to the message body. A fresh OAuth
        token is requested for each send.
        """
        title = kwargs.get(ATTR_TITLE)

        # Retrieve authorization first
        token_response = _authenticate(
            self._consumer_key, self._consumer_secret)
        if token_response is False:
            # Not inside an except block: error(), not exception(), avoids
            # logging a spurious "NoneType: None" traceback.
            _LOGGER.error("Error obtaining authorization from Telstra API")
            return

        # Send the SMS
        if title:
            text = '{} {}'.format(title, message)
        else:
            text = message

        message_data = {
            'to': self._phone_number,
            'body': text,
        }
        message_resource = 'https://api.telstra.com/v1/sms/messages'
        message_headers = {
            HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_JSON,
            'Authorization': 'Bearer ' + token_response['access_token'],
        }
        message_response = requests.post(
            message_resource, headers=message_headers, json=message_data,
            timeout=10)

        # The API answers 202 Accepted when the message is queued.
        if message_response.status_code != 202:
            _LOGGER.error("Failed to send SMS. Status code: %d",
                          message_response.status_code)
def _authenticate(consumer_key, consumer_secret):
    """Authenticate with the Telstra API.

    Returns the token response dict on success, or False on any failure —
    matching how callers test the result with ``is False``. Network errors
    and non-JSON bodies previously escaped as exceptions even though both
    callers already handle the False path; treat them as failures instead.
    """
    token_data = {
        'client_id': consumer_key,
        'client_secret': consumer_secret,
        'grant_type': 'client_credentials',
        'scope': 'SMS'
    }
    token_resource = 'https://api.telstra.com/v1/oauth/token'
    try:
        token_response = requests.get(
            token_resource, params=token_data, timeout=10).json()
    except (requests.exceptions.RequestException, ValueError):
        # Connection trouble, timeout, or an unparsable body: report as a
        # failed authentication rather than crashing the caller.
        return False
    if 'error' in token_response:
        return False
    return token_response
| apache-2.0 |
ivan-fedorov/intellij-community | plugins/git4idea/tests/git4idea/repo/GitRepositoryReaderTest.java | 5931 | package git4idea.repo;
import com.intellij.openapi.application.PluginPathManager;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.io.FileUtilRt;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vcs.VcsTestUtil;
import com.intellij.util.Function;
import com.intellij.util.ThrowableRunnable;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.io.ZipUtil;
import com.intellij.vcs.log.Hash;
import com.intellij.vcs.log.impl.HashImpl;
import git4idea.GitBranch;
import git4idea.GitLocalBranch;
import git4idea.test.GitPlatformTest;
import junit.framework.TestCase;
import org.jetbrains.annotations.NotNull;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
@RunWith(Parameterized.class)
public class GitRepositoryReaderTest extends GitPlatformTest {
@NotNull private final File myTestCaseDir;
private File myTempDir;
private GitRepositoryReader myRepositoryReader;
private File myGitDir;
@Parameterized.Parameters(name = "{0}")
public static Collection<Object[]> data() {
File pluginRoot = new File(PluginPathManager.getPluginHomePath("git4idea"));
File dataDir = new File(new File(pluginRoot, "testData"), "repo");
File[] testCases = dataDir.listFiles(FileUtilRt.ALL_DIRECTORIES);
return ContainerUtil.map(testCases, new Function<File, Object[]>() {
@Override
public Object[] fun(File file) {
return new Object[] { file.getName(), file };
}
});
}
@SuppressWarnings({"UnusedParameters", "JUnitTestCaseWithNonTrivialConstructors"})
public GitRepositoryReaderTest(@NotNull String name, @NotNull File testDir) {
myTestCaseDir = testDir;
}
@Override
@Before
public void setUp() throws Exception {
edt(new ThrowableRunnable() {
@Override
public void run() throws Exception {
GitRepositoryReaderTest.super.setUp();
}
});
myTempDir = new File(myProjectRoot.getPath(), "test");
prepareTest(myTestCaseDir);
}
@After
@Override
public void tearDown() throws Exception {
try {
if (myTempDir != null) {
FileUtil.delete(myTempDir);
}
}
finally {
edt(new ThrowableRunnable() {
@Override
public void run() throws Throwable {
GitRepositoryReaderTest.super.tearDown();
}
});
}
}
private void prepareTest(File testDir) throws IOException {
assertTrue("Temp directory was not created", myTempDir.mkdir());
FileUtil.copyDir(testDir, myTempDir);
myGitDir = new File(myTempDir, ".git");
File dotGit = new File(myTempDir, "dot_git");
if (!dotGit.exists()) {
File dotGitZip = new File(myTempDir, "dot_git.zip");
assertTrue("Neither dot_git nor dot_git.zip were found", dotGitZip.exists());
ZipUtil.extract(dotGitZip, myTempDir, null);
}
FileUtil.rename(dotGit, myGitDir);
TestCase.assertTrue(myGitDir.exists());
myRepositoryReader = new GitRepositoryReader(myGitDir);
}
@NotNull
private static String readHead(@NotNull File dir) throws IOException {
return FileUtil.loadFile(new File(dir, "head.txt")).trim();
}
@NotNull
private static Branch readCurrentBranch(@NotNull File resultDir) throws IOException {
String branch = FileUtil.loadFile(new File(resultDir, "current-branch.txt")).trim();
return readBranchFromLine(branch);
}
@NotNull
private static Branch readBranchFromLine(@NotNull String branch) {
List<String> branchAndHash = StringUtil.split(branch, " ");
return new Branch(branchAndHash.get(1), HashImpl.build(branchAndHash.get(0)));
}
// End-to-end check: read the repository state via GitRepositoryReader and compare the
// HEAD revision, current branch, and local/remote branch lists against the expected
// values recorded in the fixture's *.txt files.
@Test
public void testBranches() throws Exception {
Collection<GitRemote> remotes = GitConfig.read(myPlatformFacade, new File(myGitDir, "config")).parseRemotes();
GitBranchState state = myRepositoryReader.readState(remotes);
assertEquals("HEAD revision is incorrect", readHead(myTempDir), state.getCurrentRevision());
assertEqualBranches(readCurrentBranch(myTempDir), state.getCurrentBranch());
assertBranches(state.getLocalBranches(), readBranches(myTempDir, true));
assertBranches(state.getRemoteBranches(), readBranches(myTempDir, false));
}
// Asserts that an actual GitLocalBranch matches the expected fixture Branch
// by both name and hash.
private static void assertEqualBranches(@NotNull Branch expected, @NotNull GitLocalBranch actual) {
assertEquals(expected.name, actual.getName());
assertEquals("Incorrect hash of branch " + actual.getName(), expected.hash, actual.getHash());
}
/**
 * Asserts that the actual branch collection matches the expected fixture branches,
 * pairing elements up via {@link #branchesAreEqual(GitBranch, Branch)}.
 */
private static void assertBranches(Collection<? extends GitBranch> actualBranches, Collection<Branch> expectedBranches) {
  VcsTestUtil.EqualityChecker<GitBranch, Branch> checker = new VcsTestUtil.EqualityChecker<GitBranch, Branch>() {
    @Override
    public boolean areEqual(GitBranch actual, Branch expected) {
      return branchesAreEqual(actual, expected);
    }
  };
  VcsTestUtil.assertEqualCollections(actualBranches, expectedBranches, checker);
}
/**
 * Reads the expected branch list from the fixture — local-branches.txt or
 * remote-branches.txt depending on the flag — one "&lt;hash&gt; &lt;name&gt;" entry per line.
 */
@NotNull
private static Collection<Branch> readBranches(@NotNull File resultDir, boolean local) throws IOException {
  String fileName = local ? "local-branches.txt" : "remote-branches.txt";
  String content = FileUtil.loadFile(new File(resultDir, fileName));
  Collection<Branch> result = ContainerUtil.newArrayList();
  for (String line : StringUtil.splitByLines(content)) {
    result.add(readBranchFromLine(line));
  }
  return result;
}
/**
 * A GitBranch matches a fixture Branch when both the full name and the hash agree.
 */
private static boolean branchesAreEqual(GitBranch actual, Branch expected) {
  if (!actual.getFullName().equals(expected.name)) {
    return false;
  }
  return actual.getHash().equals(expected.hash);
}
/**
 * Immutable value holder for an expected branch: its full name and commit hash.
 * toString() returns just the name so assertion failures read naturally.
 */
private static class Branch {
  final String name;
  final Hash hash;

  private Branch(String branchName, Hash branchHash) {
    name = branchName;
    hash = branchHash;
  }

  @Override
  public String toString() {
    return name;
  }
}
}
| apache-2.0 |
charles-cooper/idylfin | src/org/apache/commons/math3/optim/nonlinear/scalar/ObjectiveFunctionGradient.java | 1648 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.math3.optim.nonlinear.scalar;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.OptimizationData;
/**
* Gradient of the scalar function to be optimized.
*
* @version $Id$
* @since 3.1
*/
public class ObjectiveFunctionGradient implements OptimizationData {
    /** Gradient of the objective function being optimized. */
    private final MultivariateVectorFunction gradientFunction;

    /**
     * Creates a holder that carries the gradient of the objective function
     * into an optimizer as {@link OptimizationData}.
     *
     * @param g Gradient of the function to be optimized.
     */
    public ObjectiveFunctionGradient(MultivariateVectorFunction g) {
        gradientFunction = g;
    }

    /**
     * Gets the gradient of the function to be optimized.
     *
     * @return the objective function gradient.
     */
    public MultivariateVectorFunction getObjectiveFunctionGradient() {
        return gradientFunction;
    }
}
| apache-2.0 |
ThiagoGarciaAlves/intellij-community | platform/testFramework/src/com/intellij/psi/codeStyle/arrangement/AbstractRearrangerTest.java | 11297 | /*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi.codeStyle.arrangement;
import com.intellij.lang.Language;
import com.intellij.openapi.command.CommandProcessor;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.editor.FoldRegion;
import com.intellij.openapi.editor.FoldingModel;
import com.intellij.openapi.fileTypes.FileType;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.codeStyle.CodeStyleSettings;
import com.intellij.psi.codeStyle.CodeStyleSettingsManager;
import com.intellij.psi.codeStyle.CommonCodeStyleSettings;
import com.intellij.psi.codeStyle.arrangement.engine.ArrangementEngine;
import com.intellij.psi.codeStyle.arrangement.group.ArrangementGroupingRule;
import com.intellij.psi.codeStyle.arrangement.match.ArrangementSectionRule;
import com.intellij.psi.codeStyle.arrangement.match.StdArrangementEntryMatcher;
import com.intellij.psi.codeStyle.arrangement.match.StdArrangementMatchRule;
import com.intellij.psi.codeStyle.arrangement.model.ArrangementAtomMatchCondition;
import com.intellij.psi.codeStyle.arrangement.model.ArrangementMatchCondition;
import com.intellij.psi.codeStyle.arrangement.std.*;
import com.intellij.testFramework.fixtures.LightPlatformCodeInsightFixtureTestCase;
import com.intellij.util.Function;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.Order.KEEP;
/**
* @author Denis Zhdanov
* @since 20.07.2012
*/
public abstract class AbstractRearrangerTest extends LightPlatformCodeInsightFixtureTestCase {
// Handlers that recognize the rich-text markup tags embedded in test data
// (selection ranges and folding regions).
private static final RichTextHandler[] RICH_TEXT_HANDLERS = {new RangeHandler(), new FoldingHandler()};
// Matches one key=value attribute inside a rich-text opening tag.
private static final Pattern ATTRIBUTE_PATTERN = Pattern.compile("([^\\s]+)=([^\\s]+)");
// File type used when creating the in-memory fixture file; set by language-specific subclasses.
protected FileType fileType;
// Language whose arrangement settings are exercised; set by language-specific subclasses.
protected Language language;
@Override
protected void setUp() throws Exception {
super.setUp();
// Install fresh temporary code style settings so each test starts from defaults
// and cannot leak settings into other tests.
CodeStyleSettingsManager.getInstance(myFixture.getProject()).setTemporarySettings(new CodeStyleSettings());
}
@Override
protected void tearDown() throws Exception {
// Drop the temporary settings installed in setUp() before the platform teardown.
CodeStyleSettingsManager.getInstance(myFixture.getProject()).dropTemporarySettings();
super.tearDown();
}
// Returns the common code style settings for the language under test.
@NotNull
protected CommonCodeStyleSettings getCommonSettings() {
return CodeStyleSettingsManager.getInstance(myFixture.getProject()).getCurrentSettings().getCommonSettings(language);
}
// Factory: a section rule without start/end comment delimiters.
protected static ArrangementSectionRule section(@NotNull StdArrangementMatchRule... rules) {
return section(null, null, rules);
}
// Factory: a section rule delimited by the given start/end section comments (either may be null).
protected static ArrangementSectionRule section(@Nullable String start, @Nullable String end, @NotNull StdArrangementMatchRule... rules) {
return ArrangementSectionRule.create(start, end, rules);
}
// Factory: a rule alias token whose id doubles as its display name.
protected static StdArrangementRuleAliasToken alias(@NotNull String id, @NotNull StdArrangementMatchRule... rules) {
return new StdArrangementRuleAliasToken(id, id, ContainerUtil.newArrayList(rules));
}
// Factory: a grouping rule with the default KEEP order.
@NotNull
protected static ArrangementGroupingRule group(@NotNull ArrangementSettingsToken type) {
return group(type, KEEP);
}
// Factory: a grouping rule with an explicit order token.
@NotNull
protected static ArrangementGroupingRule group(@NotNull ArrangementSettingsToken type, @NotNull ArrangementSettingsToken order) {
return new ArrangementGroupingRule(type, order);
}
// Factory: a match rule built from a single settings token.
@NotNull
protected static StdArrangementMatchRule rule(@NotNull ArrangementSettingsToken token) {
return new StdArrangementMatchRule(new StdArrangementEntryMatcher(atom(token)));
}
// Factory: a match rule combining a name filter with zero or more additional tokens.
@NotNull
protected static StdArrangementMatchRule nameRule(@NotNull String nameFilter, @NotNull ArrangementSettingsToken... tokens) {
if (tokens.length == 0) {
return new StdArrangementMatchRule(new StdArrangementEntryMatcher(atom(nameFilter)));
}
else {
// First condition is the name filter; the rest come from the supplied tokens.
ArrangementAtomMatchCondition[] conditions = new ArrangementAtomMatchCondition[tokens.length + 1];
conditions[0] = atom(nameFilter);
for (int i = 0; i < tokens.length; i++) conditions[i + 1] = atom(tokens[i]);
ArrangementMatchCondition compositeCondition = ArrangementUtil.combine(conditions);
return new StdArrangementMatchRule(new StdArrangementEntryMatcher(compositeCondition));
}
}
// Factory: a match rule from several settings tokens (each wrapped into an atom condition).
@NotNull
protected static StdArrangementMatchRule rule(@NotNull ArrangementSettingsToken... conditions) {
return rule(ContainerUtil.map(conditions, it -> atom(it)));
}
// Factory: a match rule from a list of atom conditions.
@NotNull
protected static StdArrangementMatchRule rule(@NotNull List<ArrangementAtomMatchCondition> conditions) {
return rule(conditions.toArray(new ArrangementAtomMatchCondition[conditions.size()]));
}
// Factory: a match rule from atom conditions, combined into a composite condition.
@NotNull
protected static StdArrangementMatchRule rule(@NotNull ArrangementAtomMatchCondition... conditions) {
ArrangementMatchCondition compositeCondition = ArrangementUtil.combine(conditions);
return new StdArrangementMatchRule(new StdArrangementEntryMatcher(compositeCondition));
}
// Factory: wraps an existing rule with an explicit sort-order token.
@NotNull
protected static StdArrangementMatchRule ruleWithOrder(@NotNull ArrangementSettingsToken orderType, @NotNull StdArrangementMatchRule rule) {
return new StdArrangementMatchRule(rule.getMatcher(), orderType);
}
// Factory: an atom condition for a plain settings token.
@NotNull
protected static ArrangementAtomMatchCondition atom(@NotNull ArrangementSettingsToken token) {
return new ArrangementAtomMatchCondition(token);
}
// Factory: an atom condition with an explicit inclusion/exclusion flag.
protected static ArrangementAtomMatchCondition atom(@NotNull ArrangementSettingsToken token, boolean included) {
return new ArrangementAtomMatchCondition(token, included);
}
// Factory: an atom condition that matches entries by name (regexp filter).
@NotNull
protected static ArrangementAtomMatchCondition atom(@NotNull String nameFilter) {
return new ArrangementAtomMatchCondition(StdArrangementTokens.Regexp.NAME, nameFilter);
}
/**
 * Main test driver. Expected keys in {@code args}:
 * "initial"/"expected" - document text before/after arrangement (may contain rich-text markup),
 * "ranges" - optional explicit ranges to arrange, "groups" - grouping rules,
 * "rules" - match/section rules, "aliases" - rule alias tokens.
 * Configures the fixture, applies folding from the markup, runs the arrangement engine,
 * and verifies the resulting text and fold regions.
 */
protected void doTest(@NotNull Map<String, ?> args) {
String text = (String)args.get("initial");
String expected = (String)args.get("expected");
@SuppressWarnings("unchecked") List<TextRange> ranges = (List<TextRange>)args.get("ranges");
Info info = parse(text);
// Ranges may come either from the args map or from <range> markup in the text, not both.
if (!isEmpty(ranges) && !isEmpty(info.ranges)) {
fail("Duplicate ranges set: explicit: " + ranges + ", " + "derived: " + info.ranges + ", text:\n" + text);
}
if (isEmpty(info.ranges)) {
// Default to arranging the whole document when no ranges were given anywhere.
info.ranges = !isEmpty(ranges) ? ranges : Arrays.asList(TextRange.from(0, text.length()));
}
myFixture.configureByText(fileType, info.text);
final FoldingModel foldingModel = myFixture.getEditor().getFoldingModel();
// Create collapsed fold regions described by the <fold> markup.
for (final FoldingInfo foldingInfo : info.foldings) {
foldingModel.runBatchFoldingOperation(() -> {
FoldRegion region = foldingModel.addFoldRegion(foldingInfo.start, foldingInfo.end, foldingInfo.placeholder);
if (region != null) region.setExpanded(false);
});
}
@SuppressWarnings("unchecked") List<ArrangementGroupingRule> groupingRules = (List<ArrangementGroupingRule>)args.get("groups");
if (groupingRules == null) groupingRules = Collections.emptyList();
List<?> rules = (List<?>)args.get("rules");
List<ArrangementSectionRule> sectionRules = getSectionRules(rules);
@SuppressWarnings("unchecked")
List<StdArrangementRuleAliasToken> aliases = (List<StdArrangementRuleAliasToken>)args.get("aliases");
CommonCodeStyleSettings settings = CodeStyleSettingsManager.getInstance(myFixture.getProject()).getCurrentSettings().getCommonSettings(language);
// Aliases require the extendable settings flavor; otherwise plain settings suffice.
final StdArrangementSettings arrangementSettings =
aliases == null ?
new StdArrangementSettings(groupingRules, sectionRules) :
new StdArrangementExtendableSettings(groupingRules, sectionRules, aliases);
settings.setArrangementSettings(arrangementSettings);
ArrangementEngine engine = ServiceManager.getService(myFixture.getProject(), ArrangementEngine.class);
// The arrangement mutates the document, so it must run inside a command.
CommandProcessor.getInstance().executeCommand(getProject(), ()-> engine.arrange(myFixture.getEditor(), myFixture.getFile(), info.ranges), null, null);
// Check expectation.
Info after = parse(expected);
assertEquals(after.text, myFixture.getEditor().getDocument().getText());
// Each expected fold region must still exist, collapsed, with the same end offset.
for (FoldingInfo it : after.foldings) {
FoldRegion foldRegion = foldingModel.getCollapsedRegionAtOffset(it.start);
assertNotNull("Expected to find fold region at offset " + it.start, foldRegion);
assertEquals(it.end, foldRegion.getEndOffset());
}
}
// Normalizes the heterogeneous "rules" arg: plain match rules get wrapped into sections.
protected List<ArrangementSectionRule> getSectionRules(List<?> rules) {
List<ArrangementSectionRule> sectionRules = Collections.emptyList();
if (rules != null) sectionRules = ContainerUtil.map(rules, (Function<Object, ArrangementSectionRule>)o -> o instanceof ArrangementSectionRule ? (ArrangementSectionRule)o : ArrangementSectionRule.create((StdArrangementMatchRule)o));
return sectionRules;
}
// Null-safe emptiness check.
private static boolean isEmpty(Collection<?> collection) {
return collection == null || collection.isEmpty();
}
/**
 * Strips rich-text markup (e.g. range/fold tags) from the given text.
 * Each recognized tag pair is removed from the buffer and reported to its handler
 * with the offsets the enclosed region will have in the cleaned text.
 */
@NotNull
private static Info parse(@NotNull String text) {
Info result = new Info();
StringBuilder buffer = new StringBuilder(text);
int offset = 0;
while (offset < buffer.length()) {
// Find the earliest opening tag of any registered handler.
RichTextHandler handler = null;
int richTextMarkStart = -1;
for (RichTextHandler h : RICH_TEXT_HANDLERS) {
int i = buffer.indexOf("<" + h.getMarker(), offset);
if (i >= 0 && (handler == null || i < richTextMarkStart)) {
richTextMarkStart = i;
handler = h;
}
}
if (handler == null) break;
String marker = handler.getMarker();
int attrStart = richTextMarkStart + marker.length() + 1;
int openingTagEnd = buffer.indexOf(">", richTextMarkStart);
int openTagLength = openingTagEnd - richTextMarkStart + 1;
Map<String, String> attributes = parseAttributes(buffer.substring(attrStart, openingTagEnd));
String closingTag = "</" + marker + ">";
int closingTagStart = buffer.indexOf(closingTag);
assert closingTagStart > 0;
// Report offsets as they will be after the opening tag has been removed.
handler.handle(result, attributes, richTextMarkStart, closingTagStart - openTagLength);
// Remove closing tag first so the opening tag's offsets stay valid.
buffer.delete(closingTagStart, closingTagStart + closingTag.length());
buffer.delete(richTextMarkStart, openingTagEnd + 1);
offset = closingTagStart - openTagLength;
}
result.text = buffer.toString();
return result;
}
// Parses "key=value key=value ..." attribute text into an ordered map.
@NotNull
private static Map<String, String> parseAttributes(@NotNull String text) {
if (text.isEmpty()) return Collections.emptyMap();
Matcher matcher = ATTRIBUTE_PATTERN.matcher(text);
Map<String, String> result = ContainerUtil.newLinkedHashMap();
while (matcher.find()) result.put(matcher.group(1), matcher.group(2));
return result;
}
}
| apache-2.0 |
zimmermatt/flink | flink-tests/src/test/java/org/apache/flink/test/classloading/jar/CheckpointingCustomKvStateProgram.java | 7468 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.test.classloading.jar;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ReducingState;
import org.apache.flink.api.common.state.ReducingStateDescriptor;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.TypeSerializerSingleton;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.runtime.state.CheckpointListener;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction;
import org.apache.flink.test.util.SuccessException;
import org.apache.flink.util.Collector;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
/**
* Test class used by the {@link org.apache.flink.test.classloading.ClassLoaderITCase}.
*/
public class CheckpointingCustomKvStateProgram {
/**
 * Entry point. args[0] is the checkpoint directory URI, args[1] the output path.
 * Builds a pipeline: infinite integer source -> random-key assignment -> keyBy ->
 * flatMap holding custom keyed state -> text sink. The flatMap deliberately fails once
 * after the first completed checkpoint so the job restores from state; a successful
 * restore is signalled via SuccessException (caught by the test harness).
 */
public static void main(String[] args) throws Exception {
final String checkpointPath = args[0];
final String outputPath = args[1];
final int parallelism = 1;
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(parallelism);
env.getConfig().disableSysoutLogging();
env.enableCheckpointing(100);
// One restart attempt is enough: fail once, restore once.
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
env.setStateBackend(new FsStateBackend(checkpointPath));
DataStream<Integer> source = env.addSource(new InfiniteIntegerSource());
source
.map(new MapFunction<Integer, Tuple2<Integer, Integer>>() {
private static final long serialVersionUID = 1L;
@Override
public Tuple2<Integer, Integer> map(Integer value) throws Exception {
// Spread records across key groups with a random key in [0, parallelism).
return new Tuple2<>(ThreadLocalRandom.current().nextInt(parallelism), value);
}
})
.keyBy(new KeySelector<Tuple2<Integer, Integer>, Integer>() {
private static final long serialVersionUID = 1L;
@Override
public Integer getKey(Tuple2<Integer, Integer> value) throws Exception {
return value.f0;
}
}).flatMap(new ReducingStateFlatMap()).writeAsText(outputPath, FileSystem.WriteMode.OVERWRITE);
env.execute();
}
/**
 * Source that emits consecutive integers forever (until cancelled), emitting under the
 * checkpoint lock so emission and checkpoints do not interleave.
 */
private static class InfiniteIntegerSource implements ParallelSourceFunction<Integer>, ListCheckpointed<Integer> {
private static final long serialVersionUID = -7517574288730066280L;
private volatile boolean running = true;
@Override
public void run(SourceContext<Integer> ctx) throws Exception {
int counter = 0;
while (running) {
synchronized (ctx.getCheckpointLock()) {
ctx.collect(counter++);
}
}
}
@Override
public void cancel() {
running = false;
}
@Override
public List<Integer> snapshotState(long checkpointId, long timestamp) throws Exception {
// The counter value itself does not matter for this test; snapshot a placeholder.
return Collections.singletonList(0);
}
@Override
public void restoreState(List<Integer> state) throws Exception {
}
}
/**
 * FlatMap that accumulates values into a ReducingState backed by a custom serializer.
 * Life cycle: run until a checkpoint completes, then throw to force a restart;
 * after restoring (restored == true) throw SuccessException to end the test positively.
 */
private static class ReducingStateFlatMap extends RichFlatMapFunction<Tuple2<Integer, Integer>, Integer>
implements ListCheckpointed<ReducingStateFlatMap>, CheckpointListener {
private static final long serialVersionUID = -5939722892793950253L;
private transient ReducingState<Integer> kvState;
// Set once notifyCheckpointComplete() fires (or after a restore).
private boolean atLeastOneSnapshotComplete = false;
// Set only by restoreState(), i.e. only after the induced failure.
private boolean restored = false;
@Override
public void open(Configuration parameters) throws Exception {
// Use a custom serializer so user-code classloading of serializers is exercised.
ReducingStateDescriptor<Integer> stateDescriptor =
new ReducingStateDescriptor<>(
"reducing-state",
new ReduceSum(),
CustomIntSerializer.INSTANCE);
this.kvState = getRuntimeContext().getReducingState(stateDescriptor);
}
@Override
public void flatMap(Tuple2<Integer, Integer> value, Collector<Integer> out) throws Exception {
kvState.add(value.f1);
if (atLeastOneSnapshotComplete) {
if (restored) {
// Second pass, after restore: the test has proven checkpoint + restore works.
throw new SuccessException();
} else {
throw new RuntimeException("Intended failure, to trigger restore");
}
}
}
@Override
public List<ReducingStateFlatMap> snapshotState(long checkpointId, long timestamp) throws Exception {
// Snapshot the whole function instance, flags included.
return Collections.singletonList(this);
}
@Override
public void restoreState(List<ReducingStateFlatMap> state) throws Exception {
restored = true;
atLeastOneSnapshotComplete = true;
}
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
atLeastOneSnapshotComplete = true;
}
// Sum reducer for the ReducingState.
private static class ReduceSum implements ReduceFunction<Integer> {
private static final long serialVersionUID = 1L;
@Override
public Integer reduce(Integer value1, Integer value2) throws Exception {
return value1 + value2;
}
}
}
/**
 * Minimal custom Integer serializer (fixed 4-byte encoding) used so that state
 * snapshots depend on a user-code class rather than a built-in serializer.
 */
private static final class CustomIntSerializer extends TypeSerializerSingleton<Integer> {
private static final long serialVersionUID = 4572452915892737448L;
public static final TypeSerializer<Integer> INSTANCE = new CustomIntSerializer();
@Override
public boolean isImmutableType() {
return true;
}
@Override
public Integer createInstance() {
return 0;
}
@Override
public Integer copy(Integer from) {
return from;
}
@Override
public Integer copy(Integer from, Integer reuse) {
return from;
}
@Override
public int getLength() {
return 4;
}
@Override
public void serialize(Integer record, DataOutputView target) throws IOException {
target.writeInt(record.intValue());
}
@Override
public Integer deserialize(DataInputView source) throws IOException {
return Integer.valueOf(source.readInt());
}
@Override
public Integer deserialize(Integer reuse, DataInputView source) throws IOException {
return Integer.valueOf(source.readInt());
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
target.writeInt(source.readInt());
}
@Override
public boolean canEqual(Object obj) {
return obj instanceof CustomIntSerializer;
}
}
}
| apache-2.0 |
WangTaoTheTonic/flink | flink-java/src/test/java/org/apache/flink/api/common/operators/CollectionExecutionIterationTest.java | 5782 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.common.operators;
//CHECKSTYLE.OFF: AvoidStarImport - Needed for TupleGenerator
import static org.junit.Assert.*;
import java.util.ArrayList;
import java.util.List;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.LocalCollectionOutputFormat;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.operators.IterativeDataSet;
import org.apache.flink.api.java.tuple.Tuple1;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;
import org.junit.Test;
@SuppressWarnings("serial")
public class CollectionExecutionIterationTest implements java.io.Serializable {
// Bulk iteration with a fixed iteration count: starting from 1, add the superstep
// number in each of 10 iterations -> 1 + (1+2+...+10) = 56.
@Test
public void testBulkIteration() {
try {
ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
IterativeDataSet<Integer> iteration = env.fromElements(1).iterate(10);
DataSet<Integer> result = iteration.closeWith(iteration.map(new AddSuperstepNumberMapper()));
List<Integer> collected = new ArrayList<Integer>();
result.output(new LocalCollectionOutputFormat<Integer>(collected));
env.execute();
assertEquals(1, collected.size());
assertEquals(56, collected.get(0).intValue());
}
catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
// Bulk iteration with a termination criterion: iterate while the value is below 50.
// The criterion stops the loop at the same point as 10 fixed iterations, so the
// result is again 56 (first value >= 50).
@Test
public void testBulkIterationWithTerminationCriterion() {
try {
ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
IterativeDataSet<Integer> iteration = env.fromElements(1).iterate(100);
DataSet<Integer> iterationResult = iteration.map(new AddSuperstepNumberMapper());
DataSet<Integer> terminationCriterion = iterationResult.filter(new FilterFunction<Integer>() {
public boolean filter(Integer value) {
return value < 50;
}
});
List<Integer> collected = new ArrayList<Integer>();
iteration.closeWith(iterationResult, terminationCriterion)
.output(new LocalCollectionOutputFormat<Integer>(collected));
env.execute();
assertEquals(1, collected.size());
assertEquals(56, collected.get(0).intValue());
}
catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
// Delta iteration: each round increments the second tuple field of every element still
// in the workset; elements leave the workset once both fields are equal.
@Test
public void testDeltaIteration() {
try {
ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
@SuppressWarnings("unchecked")
DataSet<Tuple2<Integer, Integer>> solInput = env.fromElements(
new Tuple2<Integer, Integer>(1, 0),
new Tuple2<Integer, Integer>(2, 0),
new Tuple2<Integer, Integer>(3, 0),
new Tuple2<Integer, Integer>(4, 0));
@SuppressWarnings("unchecked")
DataSet<Tuple1<Integer>> workInput = env.fromElements(
new Tuple1<Integer>(1),
new Tuple1<Integer>(2),
new Tuple1<Integer>(3),
new Tuple1<Integer>(4));
// Perform a delta iteration where we add those values to the workset where
// the second tuple field is smaller than the first tuple field.
// At the end both tuple fields must be the same.
DeltaIteration<Tuple2<Integer, Integer>, Tuple1<Integer>> iteration =
solInput.iterateDelta(workInput, 10, 0);
DataSet<Tuple2<Integer, Integer>> solDelta = iteration.getSolutionSet().join(
iteration.getWorkset()).where(0).equalTo(0).with(
new JoinFunction<Tuple2<Integer, Integer>, Tuple1<Integer>, Tuple2<Integer, Integer>>() {
@Override
public Tuple2<Integer, Integer> join(Tuple2<Integer, Integer> first,
Tuple1<Integer> second) throws Exception {
return new Tuple2<Integer, Integer>(first.f0, first.f1 + 1);
}
});
DataSet<Tuple1<Integer>> nextWorkset = solDelta.flatMap(
new FlatMapFunction<Tuple2<Integer, Integer>, Tuple1<Integer>>() {
@Override
public void flatMap(Tuple2<Integer, Integer> in, Collector<Tuple1<Integer>>
out) throws Exception {
// Keep iterating only for elements whose counter has not caught up yet.
if (in.f1 < in.f0) {
out.collect(new Tuple1<Integer>(in.f0));
}
}
});
List<Tuple2<Integer, Integer>> collected = new ArrayList<Tuple2<Integer, Integer>>();
iteration.closeWith(solDelta, nextWorkset)
.output(new LocalCollectionOutputFormat<Tuple2<Integer, Integer>>(collected));
env.execute();
// verify that both tuple fields are now the same
for (Tuple2<Integer, Integer> t: collected) {
assertEquals(t.f0, t.f1);
}
}
catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
// Adds the current superstep number (1-based) to each incoming value.
public static class AddSuperstepNumberMapper extends RichMapFunction<Integer, Integer> {
@Override
public Integer map(Integer value) {
int superstep = getIterationRuntimeContext().getSuperstepNumber();
return value + superstep;
}
}
}
| apache-2.0 |
phacility/phabricator | src/applications/people/controller/PhabricatorPeopleProfileEditController.php | 3029 | <?php
final class PhabricatorPeopleProfileEditController
extends PhabricatorPeopleProfileController {
// Renders the "Edit Profile" form for a user and applies the submitted custom-field
// transactions. Requires both CAN_VIEW and CAN_EDIT on the target user; on a
// successful save, redirects back to the user's manage page.
public function handleRequest(AphrontRequest $request) {
$viewer = $this->getViewer();
$id = $request->getURIData('id');
// Load the target user, enforcing view/edit capabilities at query time.
$user = id(new PhabricatorPeopleQuery())
->setViewer($viewer)
->withIDs(array($id))
->needProfileImage(true)
->requireCapabilities(
array(
PhabricatorPolicyCapability::CAN_VIEW,
PhabricatorPolicyCapability::CAN_EDIT,
))
->executeOne();
if (!$user) {
return new Aphront404Response();
}
$this->setUser($user);
$done_uri = $this->getApplicationURI("manage/{$id}/");
// Custom profile fields in "edit" role, populated with stored values.
$field_list = PhabricatorCustomField::getObjectFields(
$user,
PhabricatorCustomField::ROLE_EDIT);
$field_list
->setViewer($viewer)
->readFieldsFromStorage($user);
$validation_exception = null;
if ($request->isFormPost()) {
// Build transactions from the submitted form and apply them; on validation
// failure, fall through and re-render the form with the exception attached.
$xactions = $field_list->buildFieldTransactionsFromRequest(
new PhabricatorUserTransaction(),
$request);
$editor = id(new PhabricatorUserTransactionEditor())
->setActor($viewer)
->setContentSourceFromRequest($request)
->setContinueOnNoEffect(true);
try {
$editor->applyTransactions($user, $xactions);
return id(new AphrontRedirectResponse())->setURI($done_uri);
} catch (PhabricatorApplicationTransactionValidationException $ex) {
$validation_exception = $ex;
}
}
$title = pht('Edit Profile');
$form = id(new AphrontFormView())
->setUser($viewer);
$field_list->appendFieldsToForm($form);
$form
->appendChild(
id(new AphrontFormSubmitControl())
->addCancelButton($done_uri)
->setValue(pht('Save Profile')));
// Warn the editor when the install exposes profiles publicly.
$allow_public = PhabricatorEnv::getEnvConfig('policy.allow-public');
$note = null;
if ($allow_public) {
$note = id(new PHUIInfoView())
->setSeverity(PHUIInfoView::SEVERITY_WARNING)
->appendChild(pht(
'Information on user profiles on this install is publicly '.
'visible.'));
}
$form_box = id(new PHUIObjectBoxView())
->setHeaderText(pht('Profile'))
->setValidationException($validation_exception)
->setBackground(PHUIObjectBoxView::BLUE_PROPERTY)
->setForm($form);
$crumbs = $this->buildApplicationCrumbs();
$crumbs->addTextCrumb(pht('Edit Profile'));
$crumbs->setBorder(true);
$nav = $this->newNavigation(
$user,
PhabricatorPeopleProfileMenuEngine::ITEM_MANAGE);
$header = id(new PHUIHeaderView())
->setHeader(pht('Edit Profile: %s', $user->getFullName()))
->setHeaderIcon('fa-pencil');
$view = id(new PHUITwoColumnView())
->setHeader($header)
->setFooter(array(
$note,
$form_box,
));
return $this->newPage()
->setTitle($title)
->setCrumbs($crumbs)
->setNavigation($nav)
->appendChild($view);
}
}
| apache-2.0 |
jgsqware/clairctl | vendor/github.com/coreos/go-systemd/sdjournal/journal_test.go | 1951 | // Copyright 2015 RedHat, Inc.
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdjournal
import (
"os"
"testing"
"time"
"github.com/coreos/go-systemd/journal"
)
// TestJournalFollow writes test entries to the journal from a background
// goroutine while synchronously following the journal until a timeout,
// expecting Follow to return ErrExpired.
//
// Fixes over the previous version:
//   - t.Fatalf was called from the writer goroutine; testing.T.Fatal/FailNow
//     must only run on the test goroutine, so the goroutine now uses
//     t.Errorf and returns instead.
//   - the goroutine assigned to the outer `err` variable, racing with the
//     test goroutine's use of `err`; it now uses a goroutine-local variable.
func TestJournalFollow(t *testing.T) {
	r, err := NewJournalReader(JournalReaderConfig{
		Since: time.Duration(-15) * time.Second,
		Matches: []Match{
			{
				Field: SD_JOURNAL_FIELD_SYSTEMD_UNIT,
				Value: "NetworkManager.service",
			},
		},
	})
	if err != nil {
		t.Fatalf("Error opening journal: %s", err)
	}
	if r == nil {
		t.Fatal("Got a nil reader")
	}
	defer r.Close()

	// start writing some test entries
	done := make(chan struct{}, 1)
	defer close(done)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				// Fatal/FailNow must not be called off the test goroutine;
				// report the failure and stop writing instead.
				if werr := journal.Print(journal.PriInfo, "test message %s", time.Now()); werr != nil {
					t.Errorf("Error writing to journal: %s", werr)
					return
				}
				time.Sleep(time.Second)
			}
		}
	}()

	// and follow the reader synchronously
	timeout := time.Duration(5) * time.Second
	if err = r.Follow(time.After(timeout), os.Stdout); err != ErrExpired {
		t.Fatalf("Error during follow: %s", err)
	}
}
// TestJournalGetUsage opens the journal and verifies that querying its disk
// usage succeeds.
func TestJournalGetUsage(t *testing.T) {
	j, err := NewJournal()
	if err != nil {
		t.Fatalf("Error opening journal: %s", err)
	}
	if j == nil {
		t.Fatal("Got a nil journal")
	}
	defer j.Close()

	if _, err = j.GetUsage(); err != nil {
		t.Fatalf("Error getting journal size: %s", err)
	}
}
| apache-2.0 |
maxim-ky/libcouchbase | src/mc/mcreq.h | 32622 | /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 2014 Couchbase, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LCB_MCREQ_H
#define LCB_MCREQ_H
#include <libcouchbase/couchbase.h>
#include <libcouchbase/api3.h>
#include <libcouchbase/vbucket.h>
#include <memcached/protocol_binary.h>
#include "netbuf/netbuf.h"
#include "sllist.h"
#include "config.h"
#include "packetutils.h"
#ifdef __cplusplus
extern "C" {
#endif /** __cplusplus */
/**
* @file
* @brief Core memcached client routines
*/
/**
* @defgroup mcreq Memcached Packets
*
* @brief
* This module defines the core routines which are used to construct, handle,
* and enqueue packets. They also handle the retry mechanisms.
*
*
* # Initializing the Queue
*
* Using the mcreq system involves first establishing an mc_CMDQUEUE structure.
* This structure contains several mc_PIPELINE structures. The proper way to
* initialize the mc_CMDQUEUE structure is to call mcreq_queue_init().
*
* Once the queue has been initialized, it must be assigned a
* `lcbvb_CONFIG*` (which it will _not_ own). This is done via the
* mcreq_queue_add_pipelines(). This function takes an array of pipeline pointers,
* and this will typically be a "subclass" (mc_SERVER) allocated via
* mcserver_alloc()
*
* Once the pipelines have been established, operations may be scheduled and
* distributed across the various pipelines.
*
* # Creating a Packet
*
* For each packet sent, the packet should first be reserved via the
* mcreq_basic_packet() call which allocates space for the actual packet
* as well as provides and populates the vbucket fields as needed.
*
* The header size must be the total size of the header plus any extras
* following the header but before the actual key data.
*
* If the command carries a body in addition to the key, it should be provided
* via mcreq_reserve_value().
*
* Once the packet has a key and value it must be assigned a cookie. The
* cookie may either be of a simple embedded type or an extended type. Whatever
* the case the appropriate flags should be set.
*
* # Scheduling Commands
*
* Scheduling commands is performed in an _enter_ and _leave_ sequence.
* mcreq_sched_enter() should be called before one or more commands are added.
* Then for each new command added, mcreq_sched_add() should be invoked with
* the new packet, and finally either mcreq_sched_leave() or mcreq_sched_fail()
* should be invoked to flush the commands to the network or free the resources
* allocated. In both cases the commands affected are scoped by the last call
* to mcreq_sched_enter().
*
* In order for commands to actually be flushed, the mc_PIPELINE::flush_start
* field must be set. This can vary depending on what the state of the underlying
* socket is. In server.c for example, the initial callback just schedules a
* connection. While the connection is in progress this field is set to a no-op
* callback, and finally when the socket is connected this field is set to
* interact with the I/O system which actually writes the buffers.
*
* # Flushing Responses
*
* This module does not do network I/O by design. Its only bridge is the
* mc_PIPELINE::flush_start function which should be set to actually flush
* the data.
*
* # Handling Responses
*
* The I/O system reading the responses should place the response into a
* packet_info structure. Once this is done, the request for the response must
* be found using the opaque. This may be done with mcreq_pipeline_find()
* or mcreq_pipeline_remove() depending on whether this request expects multiple
* responses (such as the 'stat' command). These parameters should be passed
* to the mcreq_dispatch_response() function which will invoke the appropriate
* user-defined handler for it.
*
* If the packet does not expect more responses (as above), the application
* should call mcreq_packet_handled()
*
*
* # Error Handling and Failing Commands
*
* This module offers facilities for failing commands from a pipeline while
* safely allowing for their sharing of user-allocated data.
*
* The mcreq_pipeline_fail() and mcreq_pipeline_timeout() will fail packets
* in a single pipeline (the former failing all packets, the latter failing
* only packets older than a specified threshold).
*
* The mcreq_iterwipe() will clean a pipeline of its packets, invoking a
* callback which allows the user to relocate the packet to another pipeline.
* In this callback the user may invoke the mcreq_renew_packet() function to
* create a copy of the packet, keeping the previous packet in tact, but
* returning a copy of the packet as the 'primary' version.
*
* @addtogroup mcreq
* @{
*/
/**
* @name Core Packet Structure
* @{
*/
/** @brief Constant defining the size of a memcached header */
#define MCREQ_PKT_BASESIZE 24
/** @brief Embedded user data for a simple request. */
typedef struct {
const void *cookie; /**< User pointer to place in callbacks */
hrtime_t start; /**< Time of the initial request. Used for timeouts */
} mc_REQDATA;
struct mc_packet_st;
struct mc_pipeline_st;
/** This structure serves as a kind of 'vtable' for the mc_REQDATAEX structure. */
typedef struct {
/**
* Callback to be invoked for "Extended" packet handling. This is only
* available in the mc_REQDATAEX structure
* @param pipeline the pipeline on which the response was received
* @param pkt the request packet
* @param rc the error code for the response
* @param arg opaque pointer for callback
*/
void (*handler)(struct mc_pipeline_st *pipeline,
struct mc_packet_st *pkt, lcb_error_t rc, const void *res);
/**
* Destructor function called from within mcreq_sched_fail() for packets with
* extended data. This function should suitably free the data for the packet,
* if any.
* @param pkt The packet being unscheduled.
*/
void (*fail_dtor)(struct mc_packet_st *pkt);
} mc_REQDATAPROCS;
/**@brief Allocated user data for an extended request.
*
* @details
* An extended request is typically used by commands which have more complex
* handling requirements, such as mapping a single user API call to multiple
* packets, or when the packet itself is generated internally rather than
* on behalf of an API request.
*/
typedef struct {
const void *cookie; /**< User data */
hrtime_t start; /**< Start time */
mc_REQDATAPROCS *procs; /**< Common routines for the packet */
} mc_REQDATAEX;
/**
* Called when the buffers for a packet have been invoked
* @param pl the pipeline
* @param ucookie the cookie passed to the scheduler
* @param kbuf the pointer to the beginning of the key/header buffer, if
* passed to the scheduler
* @param vbuf the pointer to the beginning of the value buffer or the first
* IOV within the buffer.
*/
typedef void (*mcreq_bufdone_fn)(struct mc_pipeline_st *pl,
const void *ucookie, void *kbuf, void *vbuf);
/**
* Possible values for the mc_PACKET#flags field in the packet structure.
* These provide
* information as to which fields in the various unions are in use, and how
* to allocate/release data buffers for requests.
*/
typedef enum {
/** The key is user-allocated. Do not release to MBLOCK */
MCREQ_F_KEY_NOCOPY = 1 << 0,
/** The value is user-allocated. Do not release to MBLOCK */
MCREQ_F_VALUE_NOCOPY = 1 << 1,
/**
* The value is user allocated and in the form of an IOV.
* Use mc_VALUE#multi
*/
MCREQ_F_VALUE_IOV = 1 << 2,
/** The request has a value. Use mc_VALUE#single unless otherwise noted */
MCREQ_F_HASVALUE = 1 << 3,
/**
* The request is tied to an 'extended' user data structure.
* Use mc_USER#exdata
*/
MCREQ_F_REQEXT = 1 << 4,
/** The request is a one-to-one user forwarded packet */
MCREQ_F_UFWD = 1 << 5,
/**
* Indicates that the entire packet has been flushed. Specifically this
* also indicates that the packet's underlying buffers are no longer needed
* by libcouchbase.
*/
MCREQ_F_FLUSHED = 1 << 6,
/**
* Indicates that the callback should NOT be invoked for the request. This
* is typically because the request is just present in the queue for buffer
* management purposes and has expired or otherwise been invalidated.
*/
MCREQ_F_INVOKED = 1 << 7,
/**
* Indicates that this packet and its constituent data members are not
* part of a nb_MBLOCK but rather point to standalone malloc'd memory. This
* also indicates that the packet is actually an mc_EXPACKET extended
* type. This is set by mcreq_renew_packet()
*/
MCREQ_F_DETACHED = 1 << 8,
/**
* Another way of signalling that the callback has an 'internal' variant.
* Dispatching this command requires a specially formatted cookie object,
* which itself is expected to _contain_ a pointer to the callback, and
* thus be formatted like so:
* @code{.c}
* struct {
* lcb_RESPCALLBACK callback;
* };
* @endcode
*/
MCREQ_F_PRIVCALLBACK = 1 << 9
} mcreq_flags;
/** @brief mask of flags indicating user-allocated buffers */
#define MCREQ_UBUF_FLAGS (MCREQ_F_KEY_NOCOPY|MCREQ_F_VALUE_NOCOPY)
/** @brief mask of flags indicating response state of the packet */
#define MCREQ_STATE_FLAGS (MCREQ_F_INVOKED|MCREQ_F_FLUSHED)
/** Union representing the value within a packet */
union mc_VALUE {
/** For a single contiguous value */
nb_SPAN single;
/** For a set of multiple IOV buffers */
lcb_FRAGBUF multi;
};
/** Union representing application/command data within a packet structure */
union mc_USER {
/** Embedded command info for simple commands; 16 bytes, 48B */
mc_REQDATA reqdata;
/** Pointer to extended data */
mc_REQDATAEX *exdata;
};
/**
 * @brief Packet structure for a single Memcached command
 *
 * A single packet structure is allocated for each request
 * sent to a server. A packet structure may be associated with user data in the
 * u_rdata union field, either by using the embedded structure, or by referencing
 * an allocated chunk of 'extended' user data.
 */
typedef struct mc_packet_st {
    /** Node in the linked list for logical command ordering */
    sllist_node slnode;
    /**
     * Node in the linked list for actual output ordering.
     * @see netbuf_end_flush2(), netbuf_pdu_enqueue()
     */
    sllist_node sl_flushq;
    /** Span for key and header (fixed header, extras, and key bytes) */
    nb_SPAN kh_span;
    /** Extras length (bytes of extras following the fixed header) */
    uint8_t extlen;
    /** Retries (times this packet has been rescheduled) */
    uint8_t retries;
    /** flags for request. @see mcreq_flags */
    uint16_t flags;
    /** Cached opaque value (used to locate this request when a response
     * arrives; see mcreq_pipeline_find()/mcreq_pipeline_remove()) */
    uint32_t opaque;
    /** User/CMDAPI Data; interpretation depends on MCREQ_F_REQEXT */
    union mc_USER u_rdata;
    /** Value data; interpretation depends on MCREQ_F_VALUE_IOV */
    union mc_VALUE u_value;
    /** Allocation data for the PACKET structure itself */
    nb_MBLOCK *alloc_parent;
} mc_PACKET;
/**
* @brief Gets the request data from the packet structure itself
* @return an mc_REQDATA or mc_REQDATAEX pointer
*/
#define MCREQ_PKT_RDATA(pkt) \
(((pkt)->flags & MCREQ_F_REQEXT) \
? ((mc_REQDATA *)(pkt)->u_rdata.exdata) \
: (&(pkt)->u_rdata.reqdata))
/**
* @brief Retrieve the cookie pointer from a packet
* @param pkt
*/
#define MCREQ_PKT_COOKIE(pkt) MCREQ_PKT_RDATA(pkt)->cookie
/**@}*/
/**
* Callback invoked when APIs request that a pipeline start flushing. It
* receives a pipeline object as its sole argument.
*/
typedef void (*mcreq_flushstart_fn)(struct mc_pipeline_st *pipeline);
/**
 * @brief Structure representing a single input/output queue for memcached
 *
 * Memcached request pipeline. This contains the command log for
 * sending/receiving requests. This is basically the non-I/O part of the server
 */
typedef struct mc_pipeline_st {
    /** List of requests. Newer requests are appended at the end */
    sllist_root requests;
    /** Parent command queue */
    struct mc_cmdqueue_st *parent;
    /**
     * Flush handler. This is invoked to schedule a flush operation
     * on the socket
     */
    mcreq_flushstart_fn flush_start;
    /** Index of this server within the configuration map */
    int index;
    /**
     * Intermediate queue where pending packets are placed. Moved to
     * the `requests` list when mcreq_sched_leave() is called
     */
    sllist_root ctxqueued;
    /**
     * Callback invoked for each packet (which has user-defined buffers) when
     * it is no longer required
     */
    mcreq_bufdone_fn buf_done_callback;
    /** Buffer manager for the respective requests (key/header/value data). */
    nb_MGR nbmgr;
    /** Allocator for packet structures */
    nb_MGR reqpool;
} mc_PIPELINE;
/**
 * @brief Command queue: holds all pipelines together with the cluster
 * configuration used to map keys to them.
 */
typedef struct mc_cmdqueue_st {
    /** Indexed pipelines, i.e. server map target */
    mc_PIPELINE **pipelines;
    /**
     * Small array of size npipelines, for mcreq_sched_enter()/mcreq_sched_leave()
     * stuff. See those functions for usage
     */
    char *scheds;
    /** Number of pipelines in the queue */
    unsigned npipelines;
    /** Number of pipelines, with fallback included */
    unsigned _npipelines_ex;
    /** Sequence number for pipeline. Incremented for each new packet */
    uint32_t seq;
    /** Configuration handle for vBucket mapping. Not owned by the queue; the
     * caller must keep it valid (see mcreq_queue_add_pipelines()). */
    lcbvb_CONFIG* config;
    /** Opaque pointer to be used by the application (in this case, lcb core) */
    void* cqdata;
    /**Special pipeline used to contain orphaned packets within a scheduling
     * context. This field is used by mcreq_set_fallback_handler() */
    mc_PIPELINE *fallback;
} mc_CMDQUEUE;
/**
* Allocate a packet belonging to a specific pipeline.
* @param pipeline the pipeline to allocate against
* @return a new packet structure or NULL on error
*/
mc_PACKET *
mcreq_allocate_packet(mc_PIPELINE *pipeline);
/**
* Free the packet structure. This will simply free the skeleton structure.
* The underlying members will not be touched.
* @param pipeline the pipeline which was used to allocate the packet
* @param packet the packet to release
*/
void
mcreq_release_packet(mc_PIPELINE *pipeline, mc_PACKET *packet);
struct mc_epkt_datum;
/**
* Extended packet structure. This is returned by mcreq_renew_packet().
*
* The purpose of this structure is to be able to "tag" extra data to the packet
* (typically for retries, or "special" commands) without weighing down on the
* normal packet structure; thus it should be considered a 'subclass' of the
* normal packet structure.
*/
typedef struct mc_expacket_st {
/** The base packet structure */
mc_PACKET base;
/* Additional data for the packet itself */
sllist_root data;
} mc_EXPACKET;
typedef struct mc_epkt_datum {
sllist_node slnode;
/**Unique string key by which this datum will be identified, as more
* than a single datum can exist for a packet */
const char *key;
/**Free the data structure
* @param datum the datum object */
void (*dtorfn)(struct mc_epkt_datum *datum);
} mc_EPKTDATUM;
/**
* Detaches the packet src belonging to the given pipeline. A detached
* packet has all its data allocated via malloc and does not belong to
* any particular buffer. This is typically used for relocation or retries
* where it is impractical to affect the in-order netbuf allocator.
*
* @param src the source packet to copy
* @return a new packet structure. You should still clear the packet's data
* with wipe_packet/release_packet but you may pass NULL as the pipeline
* parameter.
*
* @attention
* Any 'Extended' packet data is **MOVED** from the source to the destination
* packet. This goes well with the typical use case of this function, which is
* not to actually duplicate the packet, but rather to provide a fresh copy
* which may be re-used.
*
* @attention
* This function attempts to be "dumb" in the sense of trying to make an
* exact effective clone of the original packet (the main goal of this function
* is to move the resources of the packet over to a new block of memory). This
* means things like non-buffer-related flags (i.e. the ones not specifying
* the layout of the buffer) are _preserved_, including the so-called
* "state flags" which indicate if a packet has been flushed and/or handled. If
* calling this function to retry a packet, ensure to clear these state flags.
*/
mc_PACKET *
mcreq_renew_packet(const mc_PACKET *src);
/**
* Associates a datum with the packet. The packet must be a standalone packet,
* indicated by the MCREQ_F_DETACHED flag in the mc_PACKET::flags field.
* @param ep The packet to which the data should be added
* @param datum The datum object to add. The object is not copied and should
* not be freed until the `dtorfn` or `copyfn` functions have been called
* @return 0 on success, nonzero on failure (i.e. if packet is not detached).
*/
int
mcreq_epkt_insert(mc_EXPACKET *ep, mc_EPKTDATUM *datum);
/**
* Locate the datum associated with the given key for the packet.
* @param ep The packet in which to search
* @param key A NUL-terminated string matching the mc_EPKTDATUM::key field
* @return The datum, or NULL if it does not exist.
*/
mc_EPKTDATUM *
mcreq_epkt_find(mc_EXPACKET *ep, const char *key);
/**
* Reserve the packet's basic header structure, this is for use for frames
* which do not contain keys, or contain fixed size data which does not
* need to be returned via get_key
* @param pipeline the pipeline to use
* @param packet the packet which should contain the header
* @param hdrsize the total size of the header+extras+key
*/
lcb_error_t
mcreq_reserve_header(
mc_PIPELINE *pipeline, mc_PACKET *packet, uint8_t hdrsize);
/**
* Initialize the given packet's key structure
* @param pipeline the pipeline used to allocate the packet
* @param packet the packet which should have its key field initialized
* @param hdrsize the size of the header before the key. This should contain
* the header size (i.e. 24 bytes) PLUS any extras therein.
* @param kreq the user-provided key structure
* @return LCB_SUCCESS on success, LCB_CLIENT_ENOMEM on allocation failure
*/
lcb_error_t
mcreq_reserve_key(
mc_PIPELINE *pipeline, mc_PACKET *packet,
uint8_t hdrsize, const lcb_KEYBUF *kreq);
/**
* Initialize the given packet's value structure. Only applicable for storage
* operations.
* @param pipeline the pipeline used to allocate the packet
* @param packet the packet whose value field should be initialized
* @param vreq the user-provided structure containing the value parameters
* @return LCB_SUCCESS on success, LCB_CLIENT_ENOMEM on allocation failure
*/
lcb_error_t
mcreq_reserve_value(mc_PIPELINE *pipeline, mc_PACKET *packet,
const lcb_VALBUF *vreq);
/**
* Reserves value/body space, but doesn't actually copy the contents over
* @param pipeline the pipeline to use
* @param packet the packet to host the value
* @param n the number of bytes to reserve
*/
lcb_error_t
mcreq_reserve_value2(mc_PIPELINE *pipeline, mc_PACKET *packet, lcb_size_t n);
/**
* Enqueue the packet to the pipeline. This packet should have fully been
* initialized. Specifically, the packet's data buffer contents (i.e. key,
* header, and value) must not be modified once this function is called
*
* @param pipeline the target pipeline that the packet will be queued in
* @param packet the packet to enqueue.
* This function always succeeds.
*/
void
mcreq_enqueue_packet(mc_PIPELINE *pipeline, mc_PACKET *packet);
/**
* Like enqueue packet, except it will also inspect the packet's timeout field
* and if necessary, restructure the command inside the request list so that
* it appears before newer commands.
*
* The default enqueue_packet() just appends the command to the end of the
* queue while this will perform an additional check (and is less efficient)
*/
void
mcreq_reenqueue_packet(mc_PIPELINE *pipeline, mc_PACKET *packet);
/**
* Wipe the packet's internal buffers, releasing them. This should be called
* when the underlying data buffer fields are no longer needed, usually this
* is called directly before release_packet.
* Note that release_packet should be called to free the storage for the packet
* structure itself.
* @param pipeline the pipeline structure used to allocate this packet
* @param packet the packet to wipe.
*/
void
mcreq_wipe_packet(mc_PIPELINE *pipeline, mc_PACKET *packet);
/**
* Function to extract mapping information given a key and a hashkey
* @param queue The command queue
* @param key The structure for the key
* @param hashkey The optional hashkey structure
* @param nhdr The size of the header (for KV_CONTIG)
* @param[out] vbid The vBucket ID
* @param[out] srvix The master server's index
*/
void
mcreq_map_key(mc_CMDQUEUE *queue,
const lcb_KEYBUF *key, const lcb_KEYBUF *hashkey,
unsigned nhdr, int *vbid, int *srvix);
/**If the packet's vbucket does not have a master node, use the fallback pipeline
* and let it be handled by the handler installed via mcreq_set_fallback_handler()
*/
#define MCREQ_BASICPACKET_F_FALLBACKOK 0x01
/**
* Handle the basic requirements of a packet common to all commands
* @param queue the queue
* @param cmd the command base structure
*
* @param[out] req the request header which will be set with key, vbucket, and extlen
* fields. In other words, you do not need to initialize them once this
* function has completed.
*
* @param extlen the size of extras for this command
* @param[out] packet a pointer set to the address of the allocated packet
* @param[out] pipeline a pointer set to the target pipeline
* @param options a set of options to control creation behavior. Currently the
* only recognized options are `0` (i.e. default options), or @ref
* MCREQ_BASICPACKET_F_FALLBACKOK
*/
lcb_error_t
mcreq_basic_packet(
mc_CMDQUEUE *queue, const lcb_CMDBASE *cmd,
protocol_binary_request_header *req, uint8_t extlen,
mc_PACKET **packet, mc_PIPELINE **pipeline, int options);
/**
* @brief Get the key from a packet
* @param[in] packet The packet from which to retrieve the key
* @param[out] key
* @param[out] nkey
*/
void
mcreq_get_key(const mc_PACKET *packet, const void **key, lcb_size_t *nkey);
/** @brief Returns the size of the entire packet, in bytes */
uint32_t
mcreq_get_bodysize(const mc_PACKET *packet);
/**
* @brief get the total packet size (header+body)
* @param pkt the packet
* @return the total size
*/
uint32_t
mcreq_get_size(const mc_PACKET *packet);
/**
* @brief Get the vBucket for the request
* @param packet The packet
* @return The vBucket ID from the packet.
*/
uint16_t
mcreq_get_vbucket(const mc_PACKET *packet);
/** Initializes a single pipeline object */
int
mcreq_pipeline_init(mc_PIPELINE *pipeline);
/** Cleans up any initialization from pipeline_init */
void
mcreq_pipeline_cleanup(mc_PIPELINE *pipeline);
/**
* Set the pipelines that this queue will manage
* @param queue the queue to take the pipelines
* @param pipelines an array of pipeline pointers. The array is copied
* @param npipelines number of pipelines in the queue
* @param config the configuration handle. The configuration is _not_ owned
* and _not_ copied and the caller must ensure it remains valid
* until it is replaced.
*/
void
mcreq_queue_add_pipelines(
mc_CMDQUEUE *queue, mc_PIPELINE * const *pipelines,
unsigned npipelines, lcbvb_CONFIG* config);
/**
* Take the pipeline array away from the queue.
* @param queue the queue
* @param count a pointer to the number of pipelines within the queue
* @return the pipeline array.
*
* When this function completes another call to add_pipelines must be performed
* in order for the queue to function properly.
*/
mc_PIPELINE **
mcreq_queue_take_pipelines(mc_CMDQUEUE *queue, unsigned *count);
int
mcreq_queue_init(mc_CMDQUEUE *queue);
void
mcreq_queue_cleanup(mc_CMDQUEUE *queue);
/**
* @brief Add a packet to the current scheduling context
* @param pipeline
* @param pkt
* @see mcreq_sched_enter()
*/
void
mcreq_sched_add(mc_PIPELINE *pipeline, mc_PACKET *pkt);
/**
* @brief enter a scheduling scope
* @param queue
* @attention It is not safe to call this function twice
* @volatile
*/
void
mcreq_sched_enter(struct mc_cmdqueue_st *queue);
/**
* @brief successfully exit a scheduling scope
*
* All operations enqueued since the last call to mcreq_sched_enter() will be
* placed in their respective pipelines' operation queue.
*
* @param queue
* @param do_flush Whether the items in the queue should be flushed
* @volatile
*/
void
mcreq_sched_leave(struct mc_cmdqueue_st *queue, int do_flush);
/**
* @brief destroy all operations within the scheduling scope
* All operations enqueued since the last call to mcreq_sched_enter() will
* be destroyed
* @param queue
*/
void
mcreq_sched_fail(struct mc_cmdqueue_st *queue);
/**
* Find a packet with the given opaque value
*/
mc_PACKET *
mcreq_pipeline_find(mc_PIPELINE *pipeline, uint32_t opaque);
/**
* Find and remove the packet with the given opaque value
*/
mc_PACKET *
mcreq_pipeline_remove(mc_PIPELINE *pipeline, uint32_t opaque);
/**
* Handles a received packet in response to a command
* @param pipeline the pipeline upon which the request was received
* @param request the original request
* @param response the packet received in the response
* @param immerr an immediate error message. If this is not LCB_SUCCESS then
* the packet in `response` shall contain only a header and the request itself
* should be analyzed
*
* @return 0 on success, nonzero if the handler could not be found for the
* command.
*/
int
mcreq_dispatch_response(mc_PIPELINE *pipeline, mc_PACKET *request,
packet_info *response, lcb_error_t immerr);
#define MCREQ_KEEP_PACKET 1
#define MCREQ_REMOVE_PACKET 2
/**
* Callback used for packet iteration wiping
*
* @param queue the queue
* @param srcpl the source pipeline which is being cleaned
* @param pkt the packet which is being cleaned
* @param cbarg the argument passed to the iterwipe
*
* @return one of MCREQ_KEEP_PACKET (if the packet should be kept inside the
* pipeline) or MCREQ_REMOVE_PACKET (if the packet should not be kept)
*/
typedef int (*mcreq_iterwipe_fn)
(mc_CMDQUEUE *queue, mc_PIPELINE *srcpl, mc_PACKET *pkt, void *cbarg);
/**
* Wipe a single pipeline. This may be used to move and/or relocate
* existing commands to other pipelines.
* @param queue the queue to use
* @param src the pipeline to wipe
* @param callback the callback to invoke for each packet
* @param arg the argument passed to the callback
*/
void
mcreq_iterwipe(mc_CMDQUEUE *queue, mc_PIPELINE *src,
mcreq_iterwipe_fn callback, void *arg);
/**
* Called when a packet does not need to have any more references to it
* remaining. A packet by itself only has two implicit references; one is
* a flush reference and the other is a handler reference.
*
* The flush reference is unset once the packet has been flushed and the
* handler reference is unset once the packet's handler callback has been
* invoked and the relevant data relayed to the user.
*
* Once this function is called, the packet passed will no longer be valid
* and thus should not be used.
*/
void
mcreq_packet_done(mc_PIPELINE *pipeline, mc_PACKET *pkt);
/**
 * @brief Indicate that the packet was handled
 * @param pipeline the pipeline
 * @param pkt the packet which was handled
 * If the packet has also been flushed, the packet's storage will be released
 * and `pkt` will no longer point to valid memory.
 *
 * Implemented as a do { } while (0) statement macro with no trailing
 * semicolon, so that `mcreq_packet_handled(pl, pkt);` composes safely with
 * a surrounding `if ... else` (the previous trailing `;` produced an extra
 * empty statement that broke such call sites).
 */
#define mcreq_packet_handled(pipeline, pkt) do { \
        (pkt)->flags |= MCREQ_F_INVOKED; \
        if ((pkt)->flags & MCREQ_F_FLUSHED) { \
            mcreq_packet_done(pipeline, pkt); \
        } \
    } while (0)
/**
* Reset the timeout (or rather, the start time) on all pending packets
* to the time specified.
*
* @param pl The pipeline
* @param nstime The new timestamp to use.
*/
void
mcreq_reset_timeouts(mc_PIPELINE *pl, lcb_U64 nstime);
/**
* Callback to be invoked when a packet is about to be failed out from the
* request queue. This should be used to possibly invoke handlers. The packet
* will then be removed from the queue.
* @param pipeline the pipeline which has been errored
* @param packet the current packet
* @param err the error received
* @param arg an opaque pointer
*/
typedef void (*mcreq_pktfail_fn)
(mc_PIPELINE *pipeline, mc_PACKET *packet, lcb_error_t err, void *arg);
/**
* Fail out a given pipeline. All commands in the pipeline will be removed
* from the pipeline (though they may still not be freed if they are pending
* a flush).
*
* @param pipeline the pipeline to fail out
* @param err the error which caused the failure
* @param failcb a callback invoked to handle each failed packet
* @param cbarg a pointer passed as the last parameter to the callback
*
* @return the number of items actually failed.
*/
unsigned
mcreq_pipeline_fail(
mc_PIPELINE *pipeline, lcb_error_t err,
mcreq_pktfail_fn failcb, void *cbarg);
/**
* Fail out all commands in the pipeline which are older than a specified
* interval. This is similar to the pipeline_fail() function except that commands
* which are newer than the threshold are still kept
*
* @param pipeline the pipeline to fail out
* @param err the error to provide to the handlers (usually LCB_ETIMEDOUT)
* @param failcb the callback to invoke
* @param cbarg the last argument to the callback
* @param oldest_valid the _oldest_ time for a command to still be valid
* @param oldest_start set to the start time of the _oldest_ command which is
* still valid.
*
* @return the number of commands actually failed.
*/
unsigned
mcreq_pipeline_timeout(
mc_PIPELINE *pipeline, lcb_error_t err,
mcreq_pktfail_fn failcb, void *cbarg,
hrtime_t oldest_valid,
hrtime_t *oldest_start);
/**
* This function is called when a packet could not be properly mapped to a real
* pipeline
* @param cq the command queue
* @param pkt the packet which needs to be relocated. The packet needs to be
* properly copied via mcreq_renew_packet()
*/
typedef void (*mcreq_fallback_cb)(mc_CMDQUEUE *cq, mc_PACKET *pkt);
/**
* Set the callback function to be invoked when a packet could not be properly
* mapped to a node. The callback function is invoked from within the
* mcreq_sched_leave() function.
*
* The handler should be assigned only once, during initialization
*
* @param cq The command queue
* @param handler The handler to invoke
*/
void
mcreq_set_fallback_handler(mc_CMDQUEUE *cq, mcreq_fallback_cb handler);
/**
* Callback used by mcreq_dump_packet() and mcreq_dump_chain() to format the
* packet's payload
* @param data the data to dump
* @param size the size of the data
* @param fp the file to write the output to
*/
typedef void (*mcreq_payload_dump_fn)
(const void *data, unsigned size, FILE *fp);
/**
* Dumps a single packet to the file indicated by `fp`
* @param pkt the packet to dump
* @param fp The file to write to
* @param dumpfn If specified, this function is called to handle the packet's
* header and payload body
*/
void
mcreq_dump_packet(const mc_PACKET *pkt, FILE *fp, mcreq_payload_dump_fn dumpfn);
void
mcreq_dump_chain(const mc_PIPELINE *pipeline, FILE *fp, mcreq_payload_dump_fn dumpfn);
#define mcreq_write_hdr(pkt, hdr) \
memcpy( SPAN_BUFFER(&(pkt)->kh_span), (hdr)->bytes, sizeof((hdr)->bytes) )
/* Copy the first `n` bytes of `hdr` (header + extras) into the packet's
 * key/header span. Fixed parenthesization: the address-of must apply to the
 * member, `&(pkt)->kh_span`, matching mcreq_write_hdr()/mcreq_read_hdr();
 * the previous `(&pkt)->kh_span` took the address of the macro argument and
 * failed to compile for pointer arguments. */
#define mcreq_write_exhdr(pkt, hdr, n) \
    memcpy(SPAN_BUFFER(&(pkt)->kh_span), (hdr)->bytes, n)
#define mcreq_read_hdr(pkt, hdr) \
memcpy( (hdr)->bytes, SPAN_BUFFER(&(pkt)->kh_span), sizeof((hdr)->bytes) )
/* Return the oldest (first) packet in the pipeline's request list, or NULL
 * if the list is empty. The expansion is wrapped in parentheses so the
 * ternary parses as a single expression when the macro is used inside a
 * larger expression (e.g. as an operand of `?:`, `==`, or a cast). */
#define mcreq_first_packet(pipeline) \
    (SLLIST_IS_EMPTY(&(pipeline)->requests) ? NULL : \
            SLLIST_ITEM(SLLIST_FIRST(&(pipeline)->requests), mc_PACKET, slnode))
/**@}*/
#ifdef __cplusplus
}
#endif /** __cplusplus */
#endif /* LCB_MCREQ_H */
| apache-2.0 |
pweil-/origin | vendor/gonum.org/v1/gonum/unit/capacitance_test.go | 916 | // Code generated by "go generate gonum.org/v1/gonum/unit; DO NOT EDIT.
// Copyright ©2019 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unit
import (
"fmt"
"testing"
)
// TestCapacitanceFormat verifies that Capacitance values render through fmt
// with the farad ("F") unit suffix, covering the %v verb with width and
// precision modifiers, the %#v Go-syntax form, and the %!s bad-verb output.
func TestCapacitanceFormat(t *testing.T) {
	for _, test := range []struct {
		value  Capacitance // input value, in farads
		format string      // format string under test
		want   string      // exact expected rendering
	}{
		{1.23456789, "%v", "1.23456789 F"},
		{1.23456789, "%.1v", "1 F"},
		{1.23456789, "%20.1v", " 1 F"},
		{1.23456789, "%20v", " 1.23456789 F"},
		{1.23456789, "%1v", "1.23456789 F"},
		{1.23456789, "%#v", "unit.Capacitance(1.23456789)"},
		{1.23456789, "%s", "%!s(unit.Capacitance=1.23456789 F)"},
	} {
		got := fmt.Sprintf(test.format, test.value)
		if got != test.want {
			t.Errorf("Format %q %v: got: %q want: %q", test.format, float64(test.value), got, test.want)
		}
	}
}
| apache-2.0 |
yugangw-msft/azure-sdk-for-net | sdk/securitycenter/Microsoft.Azure.Management.SecurityCenter/src/Generated/Models/TagsResource.cs | 1610 | // <auto-generated>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
// </auto-generated>
namespace Microsoft.Azure.Management.Security.Models
{
using Newtonsoft.Json;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
/// <summary>
/// A container holding only the Tags for a resource, allowing the user to
/// update the tags.
/// </summary>
/// <summary>
/// A container holding only the Tags for a resource, allowing the user to
/// update the tags.
/// </summary>
public partial class TagsResource
{
    /// <summary>
    /// Gets or sets resource tags
    /// </summary>
    [JsonProperty(PropertyName = "tags")]
    public IDictionary<string, string> Tags { get; set; }

    /// <summary>
    /// Initializes a new instance of the TagsResource class with no tags set.
    /// </summary>
    public TagsResource()
    {
        CustomInit();
    }

    /// <summary>
    /// Initializes a new instance of the TagsResource class.
    /// </summary>
    /// <param name="tags">Resource tags</param>
    public TagsResource(IDictionary<string, string> tags = default(IDictionary<string, string>))
    {
        Tags = tags;
        CustomInit();
    }

    /// <summary>
    /// An initialization method that performs custom operations like setting defaults
    /// </summary>
    partial void CustomInit();
}
| apache-2.0 |
NixaSoftware/CVis | venv/bin/libs/regex/doc/html/boost_regex/background_information.html | 4206 | <html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>Background Information</title>
<link rel="stylesheet" href="../../../../../doc/src/boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.78.1">
<link rel="home" href="../index.html" title="Boost.Regex">
<link rel="up" href="../index.html" title="Boost.Regex">
<link rel="prev" href="ref/internal_details/uni_iter.html" title="Unicode Iterators">
<link rel="next" href="background_information/headers.html" title="Headers">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr>
<td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../../boost.png"></td>
<td align="center"><a href="../../../../../index.html">Home</a></td>
<td align="center"><a href="../../../../../libs/libraries.htm">Libraries</a></td>
<td align="center"><a href="http://www.boost.org/users/people.html">People</a></td>
<td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td>
<td align="center"><a href="../../../../../more/index.htm">More</a></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="ref/internal_details/uni_iter.html"><img src="../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../index.html"><img src="../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../index.html"><img src="../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="background_information/headers.html"><img src="../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
<div class="section boost_regex_background_information">
<div class="titlepage"><div><div><h2 class="title" style="clear: both">
<a name="boost_regex.background_information"></a><a class="link" href="background_information.html" title="Background Information">Background Information</a>
</h2></div></div></div>
<div class="toc"><dl class="toc">
<dt><span class="section"><a href="background_information/headers.html">Headers</a></span></dt>
<dt><span class="section"><a href="background_information/locale.html">Localization</a></span></dt>
<dt><span class="section"><a href="background_information/thread_safety.html">Thread
Safety</a></span></dt>
<dt><span class="section"><a href="background_information/examples.html">Test and
Example Programs</a></span></dt>
<dt><span class="section"><a href="background_information/futher.html">References
and Further Information</a></span></dt>
<dt><span class="section"><a href="background_information/faq.html">FAQ</a></span></dt>
<dt><span class="section"><a href="background_information/performance.html">Performance</a></span></dt>
<dt><span class="section"><a href="background_information/standards.html">Standards
Conformance</a></span></dt>
<dt><span class="section"><a href="background_information/redist.html">Redistributables</a></span></dt>
<dt><span class="section"><a href="background_information/acknowledgements.html">Acknowledgements</a></span></dt>
<dt><span class="section"><a href="background_information/history.html">History</a></span></dt>
</dl></div>
</div>
<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 1998-2010 John Maddock<p>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
</p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="ref/internal_details/uni_iter.html"><img src="../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../index.html"><img src="../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../index.html"><img src="../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="background_information/headers.html"><img src="../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
</body>
</html>
| apache-2.0 |
klucar/pachyderm | vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go | 48982 | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package extensions
import (
time "time"
api "k8s.io/kubernetes/pkg/api"
resource "k8s.io/kubernetes/pkg/api/resource"
unversioned "k8s.io/kubernetes/pkg/api/unversioned"
conversion "k8s.io/kubernetes/pkg/conversion"
util "k8s.io/kubernetes/pkg/util"
inf "speter.net/go/exp/math/dec/inf"
)
// deepCopy_api_AWSElasticBlockStoreVolumeSource copies in into out field by
// field. Every field is a plain value, so assignment is a full deep copy.
func deepCopy_api_AWSElasticBlockStoreVolumeSource(in api.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.Partition = in.Partition
out.ReadOnly = in.ReadOnly
return nil
}
// deepCopy_api_Capabilities deep-copies the Add and Drop capability lists
// into freshly allocated slices; a nil input slice stays nil in the copy so
// the nil/empty distinction is preserved.
func deepCopy_api_Capabilities(in api.Capabilities, out *api.Capabilities, c *conversion.Cloner) error {
if in.Add != nil {
out.Add = make([]api.Capability, len(in.Add))
for i := range in.Add {
out.Add[i] = in.Add[i]
}
} else {
out.Add = nil
}
if in.Drop != nil {
out.Drop = make([]api.Capability, len(in.Drop))
for i := range in.Drop {
out.Drop[i] = in.Drop[i]
}
} else {
out.Drop = nil
}
return nil
}
// deepCopy_api_CephFSVolumeSource deep-copies a CephFS volume source: the
// Monitors slice is reallocated, the optional SecretRef pointer is cloned
// into a new allocation (nil stays nil), and scalar fields are assigned.
func deepCopy_api_CephFSVolumeSource(in api.CephFSVolumeSource, out *api.CephFSVolumeSource, c *conversion.Cloner) error {
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(api.LocalObjectReference)
if err := deepCopy_api_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
// deepCopy_api_CinderVolumeSource copies in into out; all fields are plain
// values, so assignment is a full deep copy.
func deepCopy_api_CinderVolumeSource(in api.CinderVolumeSource, out *api.CinderVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_Container(in api.Container, out *api.Container, c *conversion.Cloner) error {
out.Name = in.Name
out.Image = in.Image
if in.Command != nil {
out.Command = make([]string, len(in.Command))
for i := range in.Command {
out.Command[i] = in.Command[i]
}
} else {
out.Command = nil
}
if in.Args != nil {
out.Args = make([]string, len(in.Args))
for i := range in.Args {
out.Args[i] = in.Args[i]
}
} else {
out.Args = nil
}
out.WorkingDir = in.WorkingDir
if in.Ports != nil {
out.Ports = make([]api.ContainerPort, len(in.Ports))
for i := range in.Ports {
if err := deepCopy_api_ContainerPort(in.Ports[i], &out.Ports[i], c); err != nil {
return err
}
}
} else {
out.Ports = nil
}
if in.Env != nil {
out.Env = make([]api.EnvVar, len(in.Env))
for i := range in.Env {
if err := deepCopy_api_EnvVar(in.Env[i], &out.Env[i], c); err != nil {
return err
}
}
} else {
out.Env = nil
}
if err := deepCopy_api_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
return err
}
if in.VolumeMounts != nil {
out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
for i := range in.VolumeMounts {
if err := deepCopy_api_VolumeMount(in.VolumeMounts[i], &out.VolumeMounts[i], c); err != nil {
return err
}
}
} else {
out.VolumeMounts = nil
}
if in.LivenessProbe != nil {
out.LivenessProbe = new(api.Probe)
if err := deepCopy_api_Probe(*in.LivenessProbe, out.LivenessProbe, c); err != nil {
return err
}
} else {
out.LivenessProbe = nil
}
if in.ReadinessProbe != nil {
out.ReadinessProbe = new(api.Probe)
if err := deepCopy_api_Probe(*in.ReadinessProbe, out.ReadinessProbe, c); err != nil {
return err
}
} else {
out.ReadinessProbe = nil
}
if in.Lifecycle != nil {
out.Lifecycle = new(api.Lifecycle)
if err := deepCopy_api_Lifecycle(*in.Lifecycle, out.Lifecycle, c); err != nil {
return err
}
} else {
out.Lifecycle = nil
}
out.TerminationMessagePath = in.TerminationMessagePath
out.ImagePullPolicy = in.ImagePullPolicy
if in.SecurityContext != nil {
out.SecurityContext = new(api.SecurityContext)
if err := deepCopy_api_SecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
out.Stdin = in.Stdin
out.StdinOnce = in.StdinOnce
out.TTY = in.TTY
return nil
}
// deepCopy_api_ContainerPort copies in into out; all fields are plain
// values, so assignment is a full deep copy.
func deepCopy_api_ContainerPort(in api.ContainerPort, out *api.ContainerPort, c *conversion.Cloner) error {
out.Name = in.Name
out.HostPort = in.HostPort
out.ContainerPort = in.ContainerPort
out.Protocol = in.Protocol
out.HostIP = in.HostIP
return nil
}
func deepCopy_api_DownwardAPIVolumeFile(in api.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, c *conversion.Cloner) error {
out.Path = in.Path
if err := deepCopy_api_ObjectFieldSelector(in.FieldRef, &out.FieldRef, c); err != nil {
return err
}
return nil
}
func deepCopy_api_DownwardAPIVolumeSource(in api.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, c *conversion.Cloner) error {
if in.Items != nil {
out.Items = make([]api.DownwardAPIVolumeFile, len(in.Items))
for i := range in.Items {
if err := deepCopy_api_DownwardAPIVolumeFile(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// deepCopy_api_EmptyDirVolumeSource copies the single Medium value field.
func deepCopy_api_EmptyDirVolumeSource(in api.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, c *conversion.Cloner) error {
out.Medium = in.Medium
return nil
}
func deepCopy_api_EnvVar(in api.EnvVar, out *api.EnvVar, c *conversion.Cloner) error {
out.Name = in.Name
out.Value = in.Value
if in.ValueFrom != nil {
out.ValueFrom = new(api.EnvVarSource)
if err := deepCopy_api_EnvVarSource(*in.ValueFrom, out.ValueFrom, c); err != nil {
return err
}
} else {
out.ValueFrom = nil
}
return nil
}
func deepCopy_api_EnvVarSource(in api.EnvVarSource, out *api.EnvVarSource, c *conversion.Cloner) error {
if in.FieldRef != nil {
out.FieldRef = new(api.ObjectFieldSelector)
if err := deepCopy_api_ObjectFieldSelector(*in.FieldRef, out.FieldRef, c); err != nil {
return err
}
} else {
out.FieldRef = nil
}
return nil
}
func deepCopy_api_ExecAction(in api.ExecAction, out *api.ExecAction, c *conversion.Cloner) error {
if in.Command != nil {
out.Command = make([]string, len(in.Command))
for i := range in.Command {
out.Command[i] = in.Command[i]
}
} else {
out.Command = nil
}
return nil
}
func deepCopy_api_FCVolumeSource(in api.FCVolumeSource, out *api.FCVolumeSource, c *conversion.Cloner) error {
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_FlockerVolumeSource(in api.FlockerVolumeSource, out *api.FlockerVolumeSource, c *conversion.Cloner) error {
out.DatasetName = in.DatasetName
return nil
}
func deepCopy_api_GCEPersistentDiskVolumeSource(in api.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, c *conversion.Cloner) error {
out.PDName = in.PDName
out.FSType = in.FSType
out.Partition = in.Partition
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_GitRepoVolumeSource(in api.GitRepoVolumeSource, out *api.GitRepoVolumeSource, c *conversion.Cloner) error {
out.Repository = in.Repository
out.Revision = in.Revision
return nil
}
func deepCopy_api_GlusterfsVolumeSource(in api.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, c *conversion.Cloner) error {
out.EndpointsName = in.EndpointsName
out.Path = in.Path
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_HTTPGetAction(in api.HTTPGetAction, out *api.HTTPGetAction, c *conversion.Cloner) error {
out.Path = in.Path
if err := deepCopy_util_IntOrString(in.Port, &out.Port, c); err != nil {
return err
}
out.Host = in.Host
out.Scheme = in.Scheme
return nil
}
func deepCopy_api_Handler(in api.Handler, out *api.Handler, c *conversion.Cloner) error {
if in.Exec != nil {
out.Exec = new(api.ExecAction)
if err := deepCopy_api_ExecAction(*in.Exec, out.Exec, c); err != nil {
return err
}
} else {
out.Exec = nil
}
if in.HTTPGet != nil {
out.HTTPGet = new(api.HTTPGetAction)
if err := deepCopy_api_HTTPGetAction(*in.HTTPGet, out.HTTPGet, c); err != nil {
return err
}
} else {
out.HTTPGet = nil
}
if in.TCPSocket != nil {
out.TCPSocket = new(api.TCPSocketAction)
if err := deepCopy_api_TCPSocketAction(*in.TCPSocket, out.TCPSocket, c); err != nil {
return err
}
} else {
out.TCPSocket = nil
}
return nil
}
func deepCopy_api_HostPathVolumeSource(in api.HostPathVolumeSource, out *api.HostPathVolumeSource, c *conversion.Cloner) error {
out.Path = in.Path
return nil
}
func deepCopy_api_ISCSIVolumeSource(in api.ISCSIVolumeSource, out *api.ISCSIVolumeSource, c *conversion.Cloner) error {
out.TargetPortal = in.TargetPortal
out.IQN = in.IQN
out.Lun = in.Lun
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_Lifecycle(in api.Lifecycle, out *api.Lifecycle, c *conversion.Cloner) error {
if in.PostStart != nil {
out.PostStart = new(api.Handler)
if err := deepCopy_api_Handler(*in.PostStart, out.PostStart, c); err != nil {
return err
}
} else {
out.PostStart = nil
}
if in.PreStop != nil {
out.PreStop = new(api.Handler)
if err := deepCopy_api_Handler(*in.PreStop, out.PreStop, c); err != nil {
return err
}
} else {
out.PreStop = nil
}
return nil
}
func deepCopy_api_LoadBalancerIngress(in api.LoadBalancerIngress, out *api.LoadBalancerIngress, c *conversion.Cloner) error {
out.IP = in.IP
out.Hostname = in.Hostname
return nil
}
func deepCopy_api_LoadBalancerStatus(in api.LoadBalancerStatus, out *api.LoadBalancerStatus, c *conversion.Cloner) error {
if in.Ingress != nil {
out.Ingress = make([]api.LoadBalancerIngress, len(in.Ingress))
for i := range in.Ingress {
if err := deepCopy_api_LoadBalancerIngress(in.Ingress[i], &out.Ingress[i], c); err != nil {
return err
}
}
} else {
out.Ingress = nil
}
return nil
}
// deepCopy_api_LocalObjectReference copies the single Name value field.
func deepCopy_api_LocalObjectReference(in api.LocalObjectReference, out *api.LocalObjectReference, c *conversion.Cloner) error {
out.Name = in.Name
return nil
}
func deepCopy_api_NFSVolumeSource(in api.NFSVolumeSource, out *api.NFSVolumeSource, c *conversion.Cloner) error {
out.Server = in.Server
out.Path = in.Path
out.ReadOnly = in.ReadOnly
return nil
}
// deepCopy_api_ObjectFieldSelector copies in into out; both fields are
// plain strings, so assignment is a full deep copy.
func deepCopy_api_ObjectFieldSelector(in api.ObjectFieldSelector, out *api.ObjectFieldSelector, c *conversion.Cloner) error {
out.APIVersion = in.APIVersion
out.FieldPath = in.FieldPath
return nil
}
func deepCopy_api_ObjectMeta(in api.ObjectMeta, out *api.ObjectMeta, c *conversion.Cloner) error {
out.Name = in.Name
out.GenerateName = in.GenerateName
out.Namespace = in.Namespace
out.SelfLink = in.SelfLink
out.UID = in.UID
out.ResourceVersion = in.ResourceVersion
out.Generation = in.Generation
if err := deepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil {
return err
}
if in.DeletionTimestamp != nil {
out.DeletionTimestamp = new(unversioned.Time)
if err := deepCopy_unversioned_Time(*in.DeletionTimestamp, out.DeletionTimestamp, c); err != nil {
return err
}
} else {
out.DeletionTimestamp = nil
}
if in.DeletionGracePeriodSeconds != nil {
out.DeletionGracePeriodSeconds = new(int64)
*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
} else {
out.DeletionGracePeriodSeconds = nil
}
if in.Labels != nil {
out.Labels = make(map[string]string)
for key, val := range in.Labels {
out.Labels[key] = val
}
} else {
out.Labels = nil
}
if in.Annotations != nil {
out.Annotations = make(map[string]string)
for key, val := range in.Annotations {
out.Annotations[key] = val
}
} else {
out.Annotations = nil
}
return nil
}
func deepCopy_api_PersistentVolumeClaimVolumeSource(in api.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error {
out.ClaimName = in.ClaimName
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_PodSecurityContext(in api.PodSecurityContext, out *api.PodSecurityContext, c *conversion.Cloner) error {
out.HostNetwork = in.HostNetwork
out.HostPID = in.HostPID
out.HostIPC = in.HostIPC
if in.SELinuxOptions != nil {
out.SELinuxOptions = new(api.SELinuxOptions)
if err := deepCopy_api_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil {
return err
}
} else {
out.SELinuxOptions = nil
}
if in.RunAsUser != nil {
out.RunAsUser = new(int64)
*out.RunAsUser = *in.RunAsUser
} else {
out.RunAsUser = nil
}
if in.RunAsNonRoot != nil {
out.RunAsNonRoot = new(bool)
*out.RunAsNonRoot = *in.RunAsNonRoot
} else {
out.RunAsNonRoot = nil
}
if in.SupplementalGroups != nil {
out.SupplementalGroups = make([]int64, len(in.SupplementalGroups))
for i := range in.SupplementalGroups {
out.SupplementalGroups[i] = in.SupplementalGroups[i]
}
} else {
out.SupplementalGroups = nil
}
if in.FSGroup != nil {
out.FSGroup = new(int64)
*out.FSGroup = *in.FSGroup
} else {
out.FSGroup = nil
}
return nil
}
func deepCopy_api_PodSpec(in api.PodSpec, out *api.PodSpec, c *conversion.Cloner) error {
if in.Volumes != nil {
out.Volumes = make([]api.Volume, len(in.Volumes))
for i := range in.Volumes {
if err := deepCopy_api_Volume(in.Volumes[i], &out.Volumes[i], c); err != nil {
return err
}
}
} else {
out.Volumes = nil
}
if in.Containers != nil {
out.Containers = make([]api.Container, len(in.Containers))
for i := range in.Containers {
if err := deepCopy_api_Container(in.Containers[i], &out.Containers[i], c); err != nil {
return err
}
}
} else {
out.Containers = nil
}
out.RestartPolicy = in.RestartPolicy
if in.TerminationGracePeriodSeconds != nil {
out.TerminationGracePeriodSeconds = new(int64)
*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
} else {
out.TerminationGracePeriodSeconds = nil
}
if in.ActiveDeadlineSeconds != nil {
out.ActiveDeadlineSeconds = new(int64)
*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
} else {
out.ActiveDeadlineSeconds = nil
}
out.DNSPolicy = in.DNSPolicy
if in.NodeSelector != nil {
out.NodeSelector = make(map[string]string)
for key, val := range in.NodeSelector {
out.NodeSelector[key] = val
}
} else {
out.NodeSelector = nil
}
out.ServiceAccountName = in.ServiceAccountName
out.NodeName = in.NodeName
if in.SecurityContext != nil {
out.SecurityContext = new(api.PodSecurityContext)
if err := deepCopy_api_PodSecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
if in.ImagePullSecrets != nil {
out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets))
for i := range in.ImagePullSecrets {
if err := deepCopy_api_LocalObjectReference(in.ImagePullSecrets[i], &out.ImagePullSecrets[i], c); err != nil {
return err
}
}
} else {
out.ImagePullSecrets = nil
}
return nil
}
func deepCopy_api_PodTemplateSpec(in api.PodTemplateSpec, out *api.PodTemplateSpec, c *conversion.Cloner) error {
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_api_PodSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
return nil
}
func deepCopy_api_Probe(in api.Probe, out *api.Probe, c *conversion.Cloner) error {
if err := deepCopy_api_Handler(in.Handler, &out.Handler, c); err != nil {
return err
}
out.InitialDelaySeconds = in.InitialDelaySeconds
out.TimeoutSeconds = in.TimeoutSeconds
out.PeriodSeconds = in.PeriodSeconds
out.SuccessThreshold = in.SuccessThreshold
out.FailureThreshold = in.FailureThreshold
return nil
}
func deepCopy_api_RBDVolumeSource(in api.RBDVolumeSource, out *api.RBDVolumeSource, c *conversion.Cloner) error {
if in.CephMonitors != nil {
out.CephMonitors = make([]string, len(in.CephMonitors))
for i := range in.CephMonitors {
out.CephMonitors[i] = in.CephMonitors[i]
}
} else {
out.CephMonitors = nil
}
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
if in.SecretRef != nil {
out.SecretRef = new(api.LocalObjectReference)
if err := deepCopy_api_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_ResourceRequirements(in api.ResourceRequirements, out *api.ResourceRequirements, c *conversion.Cloner) error {
if in.Limits != nil {
out.Limits = make(api.ResourceList)
for key, val := range in.Limits {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
return err
}
out.Limits[key] = *newVal
}
} else {
out.Limits = nil
}
if in.Requests != nil {
out.Requests = make(api.ResourceList)
for key, val := range in.Requests {
newVal := new(resource.Quantity)
if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
return err
}
out.Requests[key] = *newVal
}
} else {
out.Requests = nil
}
return nil
}
func deepCopy_api_SELinuxOptions(in api.SELinuxOptions, out *api.SELinuxOptions, c *conversion.Cloner) error {
out.User = in.User
out.Role = in.Role
out.Type = in.Type
out.Level = in.Level
return nil
}
func deepCopy_api_SecretVolumeSource(in api.SecretVolumeSource, out *api.SecretVolumeSource, c *conversion.Cloner) error {
out.SecretName = in.SecretName
return nil
}
func deepCopy_api_SecurityContext(in api.SecurityContext, out *api.SecurityContext, c *conversion.Cloner) error {
if in.Capabilities != nil {
out.Capabilities = new(api.Capabilities)
if err := deepCopy_api_Capabilities(*in.Capabilities, out.Capabilities, c); err != nil {
return err
}
} else {
out.Capabilities = nil
}
if in.Privileged != nil {
out.Privileged = new(bool)
*out.Privileged = *in.Privileged
} else {
out.Privileged = nil
}
if in.SELinuxOptions != nil {
out.SELinuxOptions = new(api.SELinuxOptions)
if err := deepCopy_api_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil {
return err
}
} else {
out.SELinuxOptions = nil
}
if in.RunAsUser != nil {
out.RunAsUser = new(int64)
*out.RunAsUser = *in.RunAsUser
} else {
out.RunAsUser = nil
}
if in.RunAsNonRoot != nil {
out.RunAsNonRoot = new(bool)
*out.RunAsNonRoot = *in.RunAsNonRoot
} else {
out.RunAsNonRoot = nil
}
return nil
}
func deepCopy_api_TCPSocketAction(in api.TCPSocketAction, out *api.TCPSocketAction, c *conversion.Cloner) error {
if err := deepCopy_util_IntOrString(in.Port, &out.Port, c); err != nil {
return err
}
return nil
}
func deepCopy_api_Volume(in api.Volume, out *api.Volume, c *conversion.Cloner) error {
out.Name = in.Name
if err := deepCopy_api_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil {
return err
}
return nil
}
func deepCopy_api_VolumeMount(in api.VolumeMount, out *api.VolumeMount, c *conversion.Cloner) error {
out.Name = in.Name
out.ReadOnly = in.ReadOnly
out.MountPath = in.MountPath
return nil
}
func deepCopy_api_VolumeSource(in api.VolumeSource, out *api.VolumeSource, c *conversion.Cloner) error {
if in.HostPath != nil {
out.HostPath = new(api.HostPathVolumeSource)
if err := deepCopy_api_HostPathVolumeSource(*in.HostPath, out.HostPath, c); err != nil {
return err
}
} else {
out.HostPath = nil
}
if in.EmptyDir != nil {
out.EmptyDir = new(api.EmptyDirVolumeSource)
if err := deepCopy_api_EmptyDirVolumeSource(*in.EmptyDir, out.EmptyDir, c); err != nil {
return err
}
} else {
out.EmptyDir = nil
}
if in.GCEPersistentDisk != nil {
out.GCEPersistentDisk = new(api.GCEPersistentDiskVolumeSource)
if err := deepCopy_api_GCEPersistentDiskVolumeSource(*in.GCEPersistentDisk, out.GCEPersistentDisk, c); err != nil {
return err
}
} else {
out.GCEPersistentDisk = nil
}
if in.AWSElasticBlockStore != nil {
out.AWSElasticBlockStore = new(api.AWSElasticBlockStoreVolumeSource)
if err := deepCopy_api_AWSElasticBlockStoreVolumeSource(*in.AWSElasticBlockStore, out.AWSElasticBlockStore, c); err != nil {
return err
}
} else {
out.AWSElasticBlockStore = nil
}
if in.GitRepo != nil {
out.GitRepo = new(api.GitRepoVolumeSource)
if err := deepCopy_api_GitRepoVolumeSource(*in.GitRepo, out.GitRepo, c); err != nil {
return err
}
} else {
out.GitRepo = nil
}
if in.Secret != nil {
out.Secret = new(api.SecretVolumeSource)
if err := deepCopy_api_SecretVolumeSource(*in.Secret, out.Secret, c); err != nil {
return err
}
} else {
out.Secret = nil
}
if in.NFS != nil {
out.NFS = new(api.NFSVolumeSource)
if err := deepCopy_api_NFSVolumeSource(*in.NFS, out.NFS, c); err != nil {
return err
}
} else {
out.NFS = nil
}
if in.ISCSI != nil {
out.ISCSI = new(api.ISCSIVolumeSource)
if err := deepCopy_api_ISCSIVolumeSource(*in.ISCSI, out.ISCSI, c); err != nil {
return err
}
} else {
out.ISCSI = nil
}
if in.Glusterfs != nil {
out.Glusterfs = new(api.GlusterfsVolumeSource)
if err := deepCopy_api_GlusterfsVolumeSource(*in.Glusterfs, out.Glusterfs, c); err != nil {
return err
}
} else {
out.Glusterfs = nil
}
if in.PersistentVolumeClaim != nil {
out.PersistentVolumeClaim = new(api.PersistentVolumeClaimVolumeSource)
if err := deepCopy_api_PersistentVolumeClaimVolumeSource(*in.PersistentVolumeClaim, out.PersistentVolumeClaim, c); err != nil {
return err
}
} else {
out.PersistentVolumeClaim = nil
}
if in.RBD != nil {
out.RBD = new(api.RBDVolumeSource)
if err := deepCopy_api_RBDVolumeSource(*in.RBD, out.RBD, c); err != nil {
return err
}
} else {
out.RBD = nil
}
if in.Cinder != nil {
out.Cinder = new(api.CinderVolumeSource)
if err := deepCopy_api_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
return err
}
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(api.CephFSVolumeSource)
if err := deepCopy_api_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil {
return err
}
} else {
out.CephFS = nil
}
if in.Flocker != nil {
out.Flocker = new(api.FlockerVolumeSource)
if err := deepCopy_api_FlockerVolumeSource(*in.Flocker, out.Flocker, c); err != nil {
return err
}
} else {
out.Flocker = nil
}
if in.DownwardAPI != nil {
out.DownwardAPI = new(api.DownwardAPIVolumeSource)
if err := deepCopy_api_DownwardAPIVolumeSource(*in.DownwardAPI, out.DownwardAPI, c); err != nil {
return err
}
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(api.FCVolumeSource)
if err := deepCopy_api_FCVolumeSource(*in.FC, out.FC, c); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
// deepCopy_resource_Quantity deep-copies a Quantity. The arbitrary-precision
// Amount pointer (*inf.Dec) is cloned through the generic Cloner so the copy
// does not share the underlying value; a nil Amount stays nil. Format is a
// plain value.
func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error {
if in.Amount != nil {
if newVal, err := c.DeepCopy(in.Amount); err != nil {
return err
} else {
out.Amount = newVal.(*inf.Dec)
}
} else {
out.Amount = nil
}
out.Format = in.Format
return nil
}
// deepCopy_unversioned_ListMeta copies in into out; both fields are plain
// strings, so assignment is a full deep copy.
func deepCopy_unversioned_ListMeta(in unversioned.ListMeta, out *unversioned.ListMeta, c *conversion.Cloner) error {
out.SelfLink = in.SelfLink
out.ResourceVersion = in.ResourceVersion
return nil
}
// deepCopy_unversioned_Time clones the embedded time.Time through the
// generic Cloner and stores the result in out.
func deepCopy_unversioned_Time(in unversioned.Time, out *unversioned.Time, c *conversion.Cloner) error {
if newVal, err := c.DeepCopy(in.Time); err != nil {
return err
} else {
out.Time = newVal.(time.Time)
}
return nil
}
// deepCopy_unversioned_TypeMeta copies in into out; both fields are plain
// strings, so assignment is a full deep copy.
func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error {
out.Kind = in.Kind
out.APIVersion = in.APIVersion
return nil
}
// deepCopy_extensions_APIVersion copies in into out; both fields are plain
// strings, so assignment is a full deep copy.
func deepCopy_extensions_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
out.Name = in.Name
out.APIGroup = in.APIGroup
return nil
}
// deepCopy_extensions_CPUTargetUtilization copies the single
// TargetPercentage value field.
func deepCopy_extensions_CPUTargetUtilization(in CPUTargetUtilization, out *CPUTargetUtilization, c *conversion.Cloner) error {
out.TargetPercentage = in.TargetPercentage
return nil
}
func deepCopy_extensions_ClusterAutoscaler(in ClusterAutoscaler, out *ClusterAutoscaler, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_extensions_ClusterAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
return nil
}
func deepCopy_extensions_ClusterAutoscalerList(in ClusterAutoscalerList, out *ClusterAutoscalerList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
return err
}
if in.Items != nil {
out.Items = make([]ClusterAutoscaler, len(in.Items))
for i := range in.Items {
if err := deepCopy_extensions_ClusterAutoscaler(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
func deepCopy_extensions_ClusterAutoscalerSpec(in ClusterAutoscalerSpec, out *ClusterAutoscalerSpec, c *conversion.Cloner) error {
out.MinNodes = in.MinNodes
out.MaxNodes = in.MaxNodes
if in.TargetUtilization != nil {
out.TargetUtilization = make([]NodeUtilization, len(in.TargetUtilization))
for i := range in.TargetUtilization {
if err := deepCopy_extensions_NodeUtilization(in.TargetUtilization[i], &out.TargetUtilization[i], c); err != nil {
return err
}
}
} else {
out.TargetUtilization = nil
}
return nil
}
// deepCopy_extensions_DaemonSet deep-copies a DaemonSet (metadata, spec, status).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_DaemonSetStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_DaemonSetList deep-copies a DaemonSetList, cloning each item.
// A nil Items slice is preserved as nil.
func deepCopy_extensions_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]DaemonSet, len(in.Items))
		for i := range in.Items {
			if err := deepCopy_extensions_DaemonSet(in.Items[i], &out.Items[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// deepCopy_extensions_DaemonSetSpec deep-copies a DaemonSetSpec: the Selector map is
// copied key by key and the Template pointer (if non-nil) gets a freshly allocated copy.
func deepCopy_extensions_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error {
	if in.Selector != nil {
		out.Selector = make(map[string]string)
		for key, val := range in.Selector {
			out.Selector[key] = val
		}
	} else {
		out.Selector = nil
	}
	if in.Template != nil {
		out.Template = new(api.PodTemplateSpec)
		if err := deepCopy_api_PodTemplateSpec(*in.Template, out.Template, c); err != nil {
			return err
		}
	} else {
		out.Template = nil
	}
	return nil
}

// deepCopy_extensions_DaemonSetStatus copies the three scalar counters of a DaemonSetStatus.
func deepCopy_extensions_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error {
	out.CurrentNumberScheduled = in.CurrentNumberScheduled
	out.NumberMisscheduled = in.NumberMisscheduled
	out.DesiredNumberScheduled = in.DesiredNumberScheduled
	return nil
}
// deepCopy_extensions_Deployment deep-copies a Deployment (metadata, spec, status).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_DeploymentSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_DeploymentStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_DeploymentList deep-copies a DeploymentList, cloning each item.
// A nil Items slice is preserved as nil.
func deepCopy_extensions_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]Deployment, len(in.Items))
		for i := range in.Items {
			if err := deepCopy_extensions_Deployment(in.Items[i], &out.Items[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// deepCopy_extensions_DeploymentSpec deep-copies a DeploymentSpec: scalars are assigned,
// the Selector map is copied key by key, and Template/Strategy are cloned recursively.
func deepCopy_extensions_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error {
	out.Replicas = in.Replicas
	if in.Selector != nil {
		out.Selector = make(map[string]string)
		for key, val := range in.Selector {
			out.Selector[key] = val
		}
	} else {
		out.Selector = nil
	}
	if err := deepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
		return err
	}
	out.UniqueLabelKey = in.UniqueLabelKey
	return nil
}

// deepCopy_extensions_DeploymentStatus copies the scalar replica counters.
func deepCopy_extensions_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error {
	out.Replicas = in.Replicas
	out.UpdatedReplicas = in.UpdatedReplicas
	return nil
}

// deepCopy_extensions_DeploymentStrategy deep-copies a DeploymentStrategy; the optional
// RollingUpdate pointer gets a freshly allocated copy when non-nil.
func deepCopy_extensions_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
	out.Type = in.Type
	if in.RollingUpdate != nil {
		out.RollingUpdate = new(RollingUpdateDeployment)
		if err := deepCopy_extensions_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil {
			return err
		}
	} else {
		out.RollingUpdate = nil
	}
	return nil
}
// deepCopy_extensions_HTTPIngressPath deep-copies an HTTPIngressPath (path string plus backend).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error {
	out.Path = in.Path
	if err := deepCopy_extensions_IngressBackend(in.Backend, &out.Backend, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_HTTPIngressRuleValue deep-copies an HTTPIngressRuleValue,
// cloning each path entry; a nil Paths slice is preserved as nil.
func deepCopy_extensions_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error {
	if in.Paths != nil {
		out.Paths = make([]HTTPIngressPath, len(in.Paths))
		for i := range in.Paths {
			if err := deepCopy_extensions_HTTPIngressPath(in.Paths[i], &out.Paths[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Paths = nil
	}
	return nil
}
// deepCopy_extensions_HorizontalPodAutoscaler deep-copies an HPA (metadata, spec, status).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_HorizontalPodAutoscalerList deep-copies an HPA list, cloning each item.
// A nil Items slice is preserved as nil.
func deepCopy_extensions_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]HorizontalPodAutoscaler, len(in.Items))
		for i := range in.Items {
			if err := deepCopy_extensions_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// deepCopy_extensions_HorizontalPodAutoscalerSpec deep-copies an HPA spec. The optional
// MinReplicas int and CPUUtilization struct are re-allocated so out shares no pointers with in.
func deepCopy_extensions_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
	if err := deepCopy_extensions_SubresourceReference(in.ScaleRef, &out.ScaleRef, c); err != nil {
		return err
	}
	if in.MinReplicas != nil {
		out.MinReplicas = new(int)
		*out.MinReplicas = *in.MinReplicas
	} else {
		out.MinReplicas = nil
	}
	out.MaxReplicas = in.MaxReplicas
	if in.CPUUtilization != nil {
		out.CPUUtilization = new(CPUTargetUtilization)
		if err := deepCopy_extensions_CPUTargetUtilization(*in.CPUUtilization, out.CPUUtilization, c); err != nil {
			return err
		}
	} else {
		out.CPUUtilization = nil
	}
	return nil
}

// deepCopy_extensions_HorizontalPodAutoscalerStatus deep-copies an HPA status; all optional
// pointer fields (ObservedGeneration, LastScaleTime, CurrentCPUUtilizationPercentage)
// are re-allocated, and nil pointers stay nil.
func deepCopy_extensions_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
	if in.ObservedGeneration != nil {
		out.ObservedGeneration = new(int64)
		*out.ObservedGeneration = *in.ObservedGeneration
	} else {
		out.ObservedGeneration = nil
	}
	if in.LastScaleTime != nil {
		out.LastScaleTime = new(unversioned.Time)
		if err := deepCopy_unversioned_Time(*in.LastScaleTime, out.LastScaleTime, c); err != nil {
			return err
		}
	} else {
		out.LastScaleTime = nil
	}
	out.CurrentReplicas = in.CurrentReplicas
	out.DesiredReplicas = in.DesiredReplicas
	if in.CurrentCPUUtilizationPercentage != nil {
		out.CurrentCPUUtilizationPercentage = new(int)
		*out.CurrentCPUUtilizationPercentage = *in.CurrentCPUUtilizationPercentage
	} else {
		out.CurrentCPUUtilizationPercentage = nil
	}
	return nil
}
// deepCopy_extensions_Ingress deep-copies an Ingress (metadata, spec, status).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_IngressSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_IngressStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_IngressBackend deep-copies an IngressBackend (service name + port).
func deepCopy_extensions_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error {
	out.ServiceName = in.ServiceName
	if err := deepCopy_util_IntOrString(in.ServicePort, &out.ServicePort, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_IngressList deep-copies an IngressList, cloning each item.
// A nil Items slice is preserved as nil.
func deepCopy_extensions_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]Ingress, len(in.Items))
		for i := range in.Items {
			if err := deepCopy_extensions_Ingress(in.Items[i], &out.Items[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// deepCopy_extensions_IngressRule deep-copies an IngressRule (host + embedded rule value).
func deepCopy_extensions_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error {
	out.Host = in.Host
	if err := deepCopy_extensions_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_IngressRuleValue deep-copies an IngressRuleValue; the optional
// HTTP pointer gets a freshly allocated copy when non-nil.
func deepCopy_extensions_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error {
	if in.HTTP != nil {
		out.HTTP = new(HTTPIngressRuleValue)
		if err := deepCopy_extensions_HTTPIngressRuleValue(*in.HTTP, out.HTTP, c); err != nil {
			return err
		}
	} else {
		out.HTTP = nil
	}
	return nil
}

// deepCopy_extensions_IngressSpec deep-copies an IngressSpec: the optional default
// Backend pointer is re-allocated, and the Rules slice is cloned element-wise.
func deepCopy_extensions_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error {
	if in.Backend != nil {
		out.Backend = new(IngressBackend)
		if err := deepCopy_extensions_IngressBackend(*in.Backend, out.Backend, c); err != nil {
			return err
		}
	} else {
		out.Backend = nil
	}
	if in.Rules != nil {
		out.Rules = make([]IngressRule, len(in.Rules))
		for i := range in.Rules {
			if err := deepCopy_extensions_IngressRule(in.Rules[i], &out.Rules[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Rules = nil
	}
	return nil
}

// deepCopy_extensions_IngressStatus deep-copies an IngressStatus (load-balancer status only).
func deepCopy_extensions_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error {
	if err := deepCopy_api_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil {
		return err
	}
	return nil
}
// deepCopy_extensions_Job deep-copies a Job (metadata, spec, status).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_Job(in Job, out *Job, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_JobSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_JobStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_JobCondition deep-copies a JobCondition, cloning both timestamps.
func deepCopy_extensions_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
	out.Type = in.Type
	out.Status = in.Status
	if err := deepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil {
		return err
	}
	out.Reason = in.Reason
	out.Message = in.Message
	return nil
}

// deepCopy_extensions_JobList deep-copies a JobList, cloning each item.
// A nil Items slice is preserved as nil.
func deepCopy_extensions_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]Job, len(in.Items))
		for i := range in.Items {
			if err := deepCopy_extensions_Job(in.Items[i], &out.Items[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// deepCopy_extensions_JobSpec deep-copies a JobSpec. Optional pointers (Parallelism,
// Completions, Selector) are re-allocated; nil pointers stay nil.
func deepCopy_extensions_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
	if in.Parallelism != nil {
		out.Parallelism = new(int)
		*out.Parallelism = *in.Parallelism
	} else {
		out.Parallelism = nil
	}
	if in.Completions != nil {
		out.Completions = new(int)
		*out.Completions = *in.Completions
	} else {
		out.Completions = nil
	}
	if in.Selector != nil {
		out.Selector = new(PodSelector)
		if err := deepCopy_extensions_PodSelector(*in.Selector, out.Selector, c); err != nil {
			return err
		}
	} else {
		out.Selector = nil
	}
	if err := deepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_JobStatus deep-copies a JobStatus: the Conditions slice is cloned
// element-wise, the optional Start/CompletionTime pointers are re-allocated, and the
// scalar counters are assigned directly.
func deepCopy_extensions_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
	if in.Conditions != nil {
		out.Conditions = make([]JobCondition, len(in.Conditions))
		for i := range in.Conditions {
			if err := deepCopy_extensions_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Conditions = nil
	}
	if in.StartTime != nil {
		out.StartTime = new(unversioned.Time)
		if err := deepCopy_unversioned_Time(*in.StartTime, out.StartTime, c); err != nil {
			return err
		}
	} else {
		out.StartTime = nil
	}
	if in.CompletionTime != nil {
		out.CompletionTime = new(unversioned.Time)
		if err := deepCopy_unversioned_Time(*in.CompletionTime, out.CompletionTime, c); err != nil {
			return err
		}
	} else {
		out.CompletionTime = nil
	}
	out.Active = in.Active
	out.Succeeded = in.Succeeded
	out.Failed = in.Failed
	return nil
}
// deepCopy_extensions_NodeUtilization copies the two scalar fields of a NodeUtilization.
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_NodeUtilization(in NodeUtilization, out *NodeUtilization, c *conversion.Cloner) error {
	out.Resource = in.Resource
	out.Value = in.Value
	return nil
}

// deepCopy_extensions_PodSelector deep-copies a PodSelector: the MatchLabels map is copied
// key by key and the MatchExpressions slice is cloned element-wise; nil stays nil.
func deepCopy_extensions_PodSelector(in PodSelector, out *PodSelector, c *conversion.Cloner) error {
	if in.MatchLabels != nil {
		out.MatchLabels = make(map[string]string)
		for key, val := range in.MatchLabels {
			out.MatchLabels[key] = val
		}
	} else {
		out.MatchLabels = nil
	}
	if in.MatchExpressions != nil {
		out.MatchExpressions = make([]PodSelectorRequirement, len(in.MatchExpressions))
		for i := range in.MatchExpressions {
			if err := deepCopy_extensions_PodSelectorRequirement(in.MatchExpressions[i], &out.MatchExpressions[i], c); err != nil {
				return err
			}
		}
	} else {
		out.MatchExpressions = nil
	}
	return nil
}

// deepCopy_extensions_PodSelectorRequirement deep-copies a PodSelectorRequirement;
// the Values string slice gets its own backing array (nil is preserved as nil).
func deepCopy_extensions_PodSelectorRequirement(in PodSelectorRequirement, out *PodSelectorRequirement, c *conversion.Cloner) error {
	out.Key = in.Key
	out.Operator = in.Operator
	if in.Values != nil {
		out.Values = make([]string, len(in.Values))
		for i := range in.Values {
			out.Values[i] = in.Values[i]
		}
	} else {
		out.Values = nil
	}
	return nil
}
// deepCopy_extensions_ReplicationControllerDummy deep-copies the TypeMeta-only dummy type.
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_RollingUpdateDeployment deep-copies a RollingUpdateDeployment:
// both IntOrString fields are cloned, MinReadySeconds is assigned directly.
func deepCopy_extensions_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error {
	if err := deepCopy_util_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, c); err != nil {
		return err
	}
	if err := deepCopy_util_IntOrString(in.MaxSurge, &out.MaxSurge, c); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	return nil
}
// deepCopy_extensions_Scale deep-copies a Scale (metadata, spec, status).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_ScaleSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := deepCopy_extensions_ScaleStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// deepCopy_extensions_ScaleSpec copies the single Replicas scalar.
func deepCopy_extensions_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
	out.Replicas = in.Replicas
	return nil
}

// deepCopy_extensions_ScaleStatus deep-copies a ScaleStatus; the Selector map is copied
// key by key (nil is preserved as nil).
func deepCopy_extensions_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
	out.Replicas = in.Replicas
	if in.Selector != nil {
		out.Selector = make(map[string]string)
		for key, val := range in.Selector {
			out.Selector[key] = val
		}
	} else {
		out.Selector = nil
	}
	return nil
}

// deepCopy_extensions_SubresourceReference copies the four string fields of a SubresourceReference.
func deepCopy_extensions_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error {
	out.Kind = in.Kind
	out.Name = in.Name
	out.APIVersion = in.APIVersion
	out.Subresource = in.Subresource
	return nil
}
// deepCopy_extensions_ThirdPartyResource deep-copies a ThirdPartyResource, cloning the
// Versions slice element-wise (nil stays nil).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_extensions_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	out.Description = in.Description
	if in.Versions != nil {
		out.Versions = make([]APIVersion, len(in.Versions))
		for i := range in.Versions {
			if err := deepCopy_extensions_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Versions = nil
	}
	return nil
}

// deepCopy_extensions_ThirdPartyResourceData deep-copies a ThirdPartyResourceData;
// the raw Data byte slice gets its own backing array (nil is preserved as nil).
func deepCopy_extensions_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if in.Data != nil {
		out.Data = make([]uint8, len(in.Data))
		for i := range in.Data {
			out.Data[i] = in.Data[i]
		}
	} else {
		out.Data = nil
	}
	return nil
}

// deepCopy_extensions_ThirdPartyResourceDataList deep-copies the list type, cloning each item.
// A nil Items slice is preserved as nil.
func deepCopy_extensions_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]ThirdPartyResourceData, len(in.Items))
		for i := range in.Items {
			if err := deepCopy_extensions_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// deepCopy_extensions_ThirdPartyResourceList deep-copies the list type, cloning each item.
// A nil Items slice is preserved as nil.
func deepCopy_extensions_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]ThirdPartyResource, len(in.Items))
		for i := range in.Items {
			if err := deepCopy_extensions_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
// deepCopy_util_IntOrString copies the three scalar fields of a util.IntOrString
// (discriminator Kind plus both value variants).
// Auto-generated deep-copy helper; do not edit by hand.
func deepCopy_util_IntOrString(in util.IntOrString, out *util.IntOrString, c *conversion.Cloner) error {
	out.Kind = in.Kind
	out.IntVal = in.IntVal
	out.StrVal = in.StrVal
	return nil
}
// init registers every generated deep-copy function in this file with the global
// API scheme at package load time. Registration is fail-fast: a malformed function
// signature panics immediately rather than surfacing later at copy time.
func init() {
	err := api.Scheme.AddGeneratedDeepCopyFuncs(
		deepCopy_api_AWSElasticBlockStoreVolumeSource,
		deepCopy_api_Capabilities,
		deepCopy_api_CephFSVolumeSource,
		deepCopy_api_CinderVolumeSource,
		deepCopy_api_Container,
		deepCopy_api_ContainerPort,
		deepCopy_api_DownwardAPIVolumeFile,
		deepCopy_api_DownwardAPIVolumeSource,
		deepCopy_api_EmptyDirVolumeSource,
		deepCopy_api_EnvVar,
		deepCopy_api_EnvVarSource,
		deepCopy_api_ExecAction,
		deepCopy_api_FCVolumeSource,
		deepCopy_api_FlockerVolumeSource,
		deepCopy_api_GCEPersistentDiskVolumeSource,
		deepCopy_api_GitRepoVolumeSource,
		deepCopy_api_GlusterfsVolumeSource,
		deepCopy_api_HTTPGetAction,
		deepCopy_api_Handler,
		deepCopy_api_HostPathVolumeSource,
		deepCopy_api_ISCSIVolumeSource,
		deepCopy_api_Lifecycle,
		deepCopy_api_LoadBalancerIngress,
		deepCopy_api_LoadBalancerStatus,
		deepCopy_api_LocalObjectReference,
		deepCopy_api_NFSVolumeSource,
		deepCopy_api_ObjectFieldSelector,
		deepCopy_api_ObjectMeta,
		deepCopy_api_PersistentVolumeClaimVolumeSource,
		deepCopy_api_PodSecurityContext,
		deepCopy_api_PodSpec,
		deepCopy_api_PodTemplateSpec,
		deepCopy_api_Probe,
		deepCopy_api_RBDVolumeSource,
		deepCopy_api_ResourceRequirements,
		deepCopy_api_SELinuxOptions,
		deepCopy_api_SecretVolumeSource,
		deepCopy_api_SecurityContext,
		deepCopy_api_TCPSocketAction,
		deepCopy_api_Volume,
		deepCopy_api_VolumeMount,
		deepCopy_api_VolumeSource,
		deepCopy_resource_Quantity,
		deepCopy_unversioned_ListMeta,
		deepCopy_unversioned_Time,
		deepCopy_unversioned_TypeMeta,
		deepCopy_extensions_APIVersion,
		deepCopy_extensions_CPUTargetUtilization,
		deepCopy_extensions_ClusterAutoscaler,
		deepCopy_extensions_ClusterAutoscalerList,
		deepCopy_extensions_ClusterAutoscalerSpec,
		deepCopy_extensions_DaemonSet,
		deepCopy_extensions_DaemonSetList,
		deepCopy_extensions_DaemonSetSpec,
		deepCopy_extensions_DaemonSetStatus,
		deepCopy_extensions_Deployment,
		deepCopy_extensions_DeploymentList,
		deepCopy_extensions_DeploymentSpec,
		deepCopy_extensions_DeploymentStatus,
		deepCopy_extensions_DeploymentStrategy,
		deepCopy_extensions_HTTPIngressPath,
		deepCopy_extensions_HTTPIngressRuleValue,
		deepCopy_extensions_HorizontalPodAutoscaler,
		deepCopy_extensions_HorizontalPodAutoscalerList,
		deepCopy_extensions_HorizontalPodAutoscalerSpec,
		deepCopy_extensions_HorizontalPodAutoscalerStatus,
		deepCopy_extensions_Ingress,
		deepCopy_extensions_IngressBackend,
		deepCopy_extensions_IngressList,
		deepCopy_extensions_IngressRule,
		deepCopy_extensions_IngressRuleValue,
		deepCopy_extensions_IngressSpec,
		deepCopy_extensions_IngressStatus,
		deepCopy_extensions_Job,
		deepCopy_extensions_JobCondition,
		deepCopy_extensions_JobList,
		deepCopy_extensions_JobSpec,
		deepCopy_extensions_JobStatus,
		deepCopy_extensions_NodeUtilization,
		deepCopy_extensions_PodSelector,
		deepCopy_extensions_PodSelectorRequirement,
		deepCopy_extensions_ReplicationControllerDummy,
		deepCopy_extensions_RollingUpdateDeployment,
		deepCopy_extensions_Scale,
		deepCopy_extensions_ScaleSpec,
		deepCopy_extensions_ScaleStatus,
		deepCopy_extensions_SubresourceReference,
		deepCopy_extensions_ThirdPartyResource,
		deepCopy_extensions_ThirdPartyResourceData,
		deepCopy_extensions_ThirdPartyResourceDataList,
		deepCopy_extensions_ThirdPartyResourceList,
		deepCopy_util_IntOrString,
	)
	if err != nil {
		// if one of the deep copy functions is malformed, detect it immediately.
		panic(err)
	}
}
| apache-2.0 |
Mirantis/mos-horizon | openstack_dashboard/dashboards/project/volumes/templates/volumes/volumes/_create_snapshot.html | 653 | {% extends "horizon/common/_modal_form.html" %}
{% load i18n %}
{# Modal form for creating a volume snapshot; extends Horizon's common modal form template. #}
{% block modal-body-right %}
{# Show snapshot quota usage next to the form so the user sees remaining capacity. #}
<div class="quota-dynamic">
  {% include "project/volumes/volumes/_snapshot_limits.html" with usages=usages snapshot_quota=True %}
</div>
{% endblock %}
{% block modal-footer %}
<a href="{% url 'horizon:project:volumes:index' %}" class="btn btn-default cancel">{% trans "Cancel" %}</a>
{# An attached (in-use) volume requires a forced snapshot, styled as a warning action. #}
{% if attached %}
<input class="btn btn-warning" type="submit" value="{% trans "Create Volume Snapshot (Force)" %}" />
{% else %}
<input class="btn btn-primary" type="submit" value="{% trans "Create Volume Snapshot" %}" />
{% endif %}
{% endblock %}
| apache-2.0 |
SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/concurrent/FuturesSpec.scala | 25023 | /*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.concurrent
import org.scalatest.SharedHelpers.thisLineNumber
import org.scalatest.OptionValues
import org.scalatest.FunSpec
import java.util.concurrent.{Future => FutureOfJava}
import java.util.concurrent.TimeUnit
import org.scalatest._
import time._
import exceptions.{TestCanceledException, TestFailedException, TestPendingException}
class FuturesSpec extends FunSpec with Matchers with OptionValues with Futures with SeveredStackTraces {
import scala.language.implicitConversions
  /**
   * Implicitly adapts a `java.util.concurrent.Future` to ScalaTest's `FutureConcept`
   * so the Java future can be used with `whenReady`, `futureValue`, etc.
   *
   * `eitherValue` returns `Some(Right(result))` once the Java future reports done, and
   * `None` while it is still running. `isExpired` is always false because Java futures
   * have no built-in timeout notion. `futureResult` is deliberately NOT overridden here
   * so that the polling code path is exercised by this spec.
   */
  implicit def convertJavaFuture[T](javaFuture: FutureOfJava[T]): FutureConcept[T] =
    new FutureConcept[T] {
      def eitherValue: Option[Either[Throwable, T]] =
        if (javaFuture.isDone())
          Some(Right(javaFuture.get))
        else
          None
      def isExpired: Boolean = false // Java Futures don't support the notion of a timeout
      def isCanceled: Boolean = javaFuture.isCancelled // Two ll's in Canceled. The verbosity of Java strikes again!
      // This one doesn't override futureResult, so that I can test the polling code
    }
describe("A FutureConcept") {
class SuperFutureOfJava extends FutureOfJava[String] {
def cancel(mayInterruptIfRunning: Boolean): Boolean = false
def get: String = "hi"
def get(timeout: Long, unit: TimeUnit): String = "hi"
def isCancelled: Boolean = false
def isDone: Boolean = true
}
describe("when using the isReadyWithin method") {
it("should just return the result if the future completes normally") {
val futureIsNow = new SuperFutureOfJava
futureIsNow.isReadyWithin(Span(1, Second)) should be (true)
}
it("should throw TFE with appropriate detail message if the future is canceled") {
val canceledFuture =
new SuperFutureOfJava {
override def isCancelled = true
}
val caught = the [TestFailedException] thrownBy {
canceledFuture.isReadyWithin(Span(1, Second))
}
caught.message.value should be (Resources.futureWasCanceled)
withClue(caught.getStackTraceString) {
caught.failedCodeLineNumber.value should equal (thisLineNumber - 4)
}
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should throw TFE with appropriate detail message if the future expires") {
val expiredFuture =
new FutureConcept[Int] {
def eitherValue = Some(Right(99))
def isCanceled = false
def isExpired = true
def awaitAtMost(span: Span) = 99
}
val caught = the [TestFailedException] thrownBy {
expiredFuture.isReadyWithin(Span(1, Second))
}
caught.message.value should be (Resources.futureExpired("1", "15 milliseconds"))
caught.failedCodeLineNumber.value should equal (thisLineNumber - 3)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
val neverReadyFuture =
new SuperFutureOfJava {
override def isDone = false
}
it("should query a never-ready future by at least the specified timeout") {
var startTime = System.currentTimeMillis
neverReadyFuture.isReadyWithin(Span(1250, Milliseconds)) should be (false)
(System.currentTimeMillis - startTime).toInt should be >= (1250)
}
it("should wrap any exception that normally causes a test to fail to propagate back wrapped in a TFE") {
val vmeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new RuntimeException("oops")))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new RuntimeException("oops")
}
val caught =
intercept[TestFailedException] {
vmeFuture.isReadyWithin(Span(1, Millisecond))
}
caught.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
assert(caught.cause.value.isInstanceOf[RuntimeException])
caught.cause.value.getMessage should be ("oops")
}
it("should allow errors that do not normally cause a test to fail to propagate back without being wrapped in a TFE") {
val vmeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new VirtualMachineError {}))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new VirtualMachineError {}
}
intercept[VirtualMachineError] {
vmeFuture.isReadyWithin(Span(1, Millisecond))
}
}
it("should allow TestPendingException, which does not normally cause a test to fail, through immediately when thrown") {
val tpeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new TestPendingException))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new TestPendingException
}
intercept[TestPendingException] {
tpeFuture.isReadyWithin(Span(1, Millisecond))
}
}
it("should allow TestCanceledException, which does not normally cause a test to fail, through immediately when thrown") {
val tpeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new TestCanceledException(0)))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new TestCanceledException(0)
}
intercept[TestCanceledException] {
tpeFuture.isReadyWithin(Span(1, Millisecond))
}
}
}
describe("when using the futureValue method") {
it("should just return the result if the future completes normally") {
val futureIsNow = new SuperFutureOfJava
futureIsNow.futureValue should equal ("hi")
}
it("should throw TFE with appropriate detail message if the future is canceled") {
val canceledFuture =
new SuperFutureOfJava {
override def isCancelled = true
}
val caught = the [TestFailedException] thrownBy {
canceledFuture.futureValue
}
caught.message.value should be (Resources.futureWasCanceled)
withClue(caught.getStackTraceString) {
caught.failedCodeLineNumber.value should equal (thisLineNumber - 4)
}
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should throw TFE with appropriate detail message if the future expires") {
val expiredFuture =
new FutureConcept[Int] {
def eitherValue = Some(Right(99))
def isCanceled = false
def isExpired = true
def awaitAtMost(span: Span) = 99
}
val caught = the [TestFailedException] thrownBy {
expiredFuture.futureValue
}
caught.message.value should be (Resources.futureExpired("1", "15 milliseconds"))
caught.failedCodeLineNumber.value should equal (thisLineNumber - 3)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should eventually blow up with a TFE if the future is never ready") {
var count = 0
val neverReadyCountingFuture =
new SuperFutureOfJava {
override def isDone = {
count += 1
false
}
}
val caught = the [TestFailedException] thrownBy {
neverReadyCountingFuture.futureValue
}
caught.message.value should be (Resources.wasNeverReady(count.toString, "15 milliseconds"))
caught.failedCodeLineNumber.value should equal (thisLineNumber - 4)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
val neverReadyFuture =
new SuperFutureOfJava {
override def isDone = false
}
it("should provides correct stack depth") {
val caught1 = the [TestFailedException] thrownBy {
neverReadyFuture.futureValue(timeout(Span(100, Millis)), interval(Span(1, Millisecond)))
}
caught1.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught1.failedCodeFileName.value should be ("FuturesSpec.scala")
val caught3 = the [TestFailedException] thrownBy {
neverReadyFuture.futureValue(timeout(Span(100, Millis)))
}
caught3.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught3.failedCodeFileName.value should be ("FuturesSpec.scala")
val caught4 = the [TestFailedException] thrownBy {
neverReadyFuture.futureValue(interval(Span(1, Millisecond)))
}
caught4.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught4.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should by default query a never-ready future for at least 1 second") {
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
neverReadyFuture.futureValue
}
(System.currentTimeMillis - startTime).toInt should be >= (150)
}
it("should, if an alternate implicit Timeout is provided, query a never-ready by at least the specified timeout") {
implicit val patienceConfig = PatienceConfig(timeout = Span(1500, Millis))
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
neverReadyFuture.futureValue
}
(System.currentTimeMillis - startTime).toInt should be >= (1500)
}
it("should, if an alternate explicit timeout is provided, query a never-ready future by at least the specified timeout") {
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
neverReadyFuture.futureValue(timeout(Span(1250, Milliseconds)))
}
(System.currentTimeMillis - startTime).toInt should be >= (1250)
}
it("should, if an alternate explicit timeout is provided along with an explicit interval, query a never-ready future by at least the specified timeout, even if a different implicit is provided") {
implicit val patienceConfig = PatienceConfig(timeout = Span(500, Millis), interval = Span(2, Millis))
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
neverReadyFuture.futureValue(timeout(Span(1388, Millis)), interval(Span(1, Millisecond)))
}
(System.currentTimeMillis - startTime).toInt should be >= (1388)
}
it("should wrap any exception that normally causes a test to fail to propagate back wrapped in a TFE") {
val vmeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new RuntimeException("oops")))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new RuntimeException("oops")
}
val caught =
intercept[TestFailedException] {
vmeFuture.futureValue
}
caught.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
assert(caught.cause.value.isInstanceOf[RuntimeException])
caught.cause.value.getMessage should be ("oops")
}
it("should allow errors that do not normally cause a test to fail to propagate back without being wrapped in a TFE") {
val vmeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new VirtualMachineError {}))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new VirtualMachineError {}
}
intercept[VirtualMachineError] {
vmeFuture.futureValue
}
}
it("should allow TestPendingException, which does not normally cause a test to fail, through immediately when thrown") {
val tpeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new TestPendingException))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new TestPendingException
}
intercept[TestPendingException] {
tpeFuture.futureValue
}
}
it("should allow TestCanceledException, which does not normally cause a test to fail, through immediately when thrown") {
val tpeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new TestCanceledException(0)))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new TestCanceledException(0)
}
intercept[TestCanceledException] {
tpeFuture.futureValue
}
}
}
describe("when using the whenReady construct") {
class SuperFutureOfJava extends FutureOfJava[String] {
def cancel(mayInterruptIfRunning: Boolean): Boolean = false
def get: String = "hi"
def get(timeout: Long, unit: TimeUnit): String = "hi"
def isCancelled: Boolean = false
def isDone: Boolean = true
}
it("should just return if the function arg returns normally") {
val futureIsNow = new SuperFutureOfJava
whenReady(futureIsNow) { s =>
s should equal ("hi")
}
}
it("should return the last value if the function arg returns normally") {
val futureIsNow = new SuperFutureOfJava
val result =
whenReady(futureIsNow) { s =>
s should equal ("hi")
99
}
result should equal (99)
}
it("should, if the function arg completes abruptly with a TFE, complete abruptly with the same exception") {
val futureIsNow = new SuperFutureOfJava
val caught =
the [TestFailedException] thrownBy {
whenReady(futureIsNow) { s =>
s should equal ("ho")
}
}
caught.message.value should be ("\"h[i]\" did not equal \"h[o]\"")
caught.failedCodeLineNumber.value should equal (thisLineNumber - 4)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should, if the function arg completes abruptly with a non-stack depth exception, complete abruptly with the same exception") {
val futureIsNow = new SuperFutureOfJava
val caught =
the [RuntimeException] thrownBy {
whenReady(futureIsNow) { s =>
s should equal ("hi")
throw new RuntimeException("oops")
}
}
caught.getMessage should be ("oops")
}
it("should query the future just once if the future is ready the first time") {
var count = 0
val countingFuture =
new SuperFutureOfJava {
override def isDone = {
count += 1
true
}
}
whenReady(countingFuture) { s =>
s should equal ("hi")
}
count should equal (1)
}
it("should query the future five times if the future is not ready four times before finally being ready the fifth time") {
var count = 0
val countingFuture =
new SuperFutureOfJava {
override def isDone = {
count += 1
count >= 5
}
}
whenReady(countingFuture) { s =>
s should equal ("hi")
}
count should equal (5)
}
// TODO: tests for isDropped and isExpired
it("should throw TFE with appropriate detail message if the future is canceled") {
val canceledFuture =
new SuperFutureOfJava {
override def isCancelled = true
}
val caught = the [TestFailedException] thrownBy {
whenReady(canceledFuture) { s =>
s should equal ("hi")
}
}
caught.message.value should be (Resources.futureWasCanceled)
withClue(caught.getStackTraceString) {
caught.failedCodeLineNumber.value should equal (thisLineNumber - 6)
}
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should throw TFE with appropriate detail message if the future expires") {
val expiredFuture =
new FutureConcept[Int] {
def eitherValue = Some(Right(99))
def isCanceled = false
def isExpired = true
def awaitAtMost(span: Span) = 99
}
val caught = the [TestFailedException] thrownBy {
whenReady(expiredFuture) { s =>
s should equal (99)
}
}
caught.message.value should be (Resources.futureExpired("1", "15 milliseconds"))
caught.failedCodeLineNumber.value should equal (thisLineNumber - 5)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should eventually blow up with a TFE if the future is never ready") {
var count = 0
val neverReadyCountingFuture =
new SuperFutureOfJava {
override def isDone = {
count += 1
false
}
}
val caught = the [TestFailedException] thrownBy {
whenReady(neverReadyCountingFuture) { s =>
s should equal ("hi")
}
}
caught.message.value should be (Resources.wasNeverReady(count.toString, "15 milliseconds"))
caught.failedCodeLineNumber.value should equal (thisLineNumber - 6)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
}
val neverReadyFuture =
new SuperFutureOfJava {
override def isDone = false
}
it("should provides correct stack depth") {
val caught1 = the [TestFailedException] thrownBy {
whenReady(neverReadyFuture, timeout(Span(100, Millis)), interval(Span(1, Millisecond))) { s => s should equal ("hi") }
}
caught1.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught1.failedCodeFileName.value should be ("FuturesSpec.scala")
val caught3 = the [TestFailedException] thrownBy {
whenReady(neverReadyFuture, timeout(Span(100, Millis))) { s => s should equal ("hi") }
}
caught3.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught3.failedCodeFileName.value should be ("FuturesSpec.scala")
val caught4 = the [TestFailedException] thrownBy {
whenReady(neverReadyFuture, interval(Span(1, Millisecond))) { s => s should equal ("hi") }
}
caught4.failedCodeLineNumber.value should equal (thisLineNumber - 2)
caught4.failedCodeFileName.value should be ("FuturesSpec.scala")
}
it("should by default query a never-ready future for at least 1 second") {
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
whenReady(neverReadyFuture) { s =>
s should equal ("hi")
}
}
(System.currentTimeMillis - startTime).toInt should be >= (150)
}
it("should, if an alternate implicit Timeout is provided, query a never-ready by at least the specified timeout") {
implicit val patienceConfig = PatienceConfig(timeout = Span(1500, Millis))
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
whenReady(neverReadyFuture) { s =>
s should equal ("hi")
}
}
(System.currentTimeMillis - startTime).toInt should be >= (1500)
}
it("should, if an alternate explicit timeout is provided, query a never-ready future by at least the specified timeout") {
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
whenReady(neverReadyFuture, timeout(Span(1250, Milliseconds))) { s =>
s should equal ("hi")
}
}
(System.currentTimeMillis - startTime).toInt should be >= (1250)
}
it("should, if an alternate explicit timeout is provided along with an explicit interval, query a never-ready future by at least the specified timeout, even if a different implicit is provided") {
implicit val patienceConfig = PatienceConfig(timeout = Span(500, Millis), interval = Span(2, Millis))
var startTime = System.currentTimeMillis
a [TestFailedException] should be thrownBy {
whenReady(neverReadyFuture, timeout(Span(1388, Millis)), interval(Span(1, Millisecond))) { s =>
s should equal ("hi")
}
}
(System.currentTimeMillis - startTime).toInt should be >= (1388)
}
it("should wrap any exception that normally causes a test to fail to propagate back wrapped in a TFE") {
val vmeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new RuntimeException("oops")))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new RuntimeException("oops")
}
val caught =
intercept[TestFailedException] {
whenReady(vmeFuture) { s =>
s should equal ("hi")
}
}
caught.failedCodeLineNumber.value should equal (thisLineNumber - 4)
caught.failedCodeFileName.value should be ("FuturesSpec.scala")
assert(caught.cause.value.isInstanceOf[RuntimeException])
caught.cause.value.getMessage should be ("oops")
}
it("should allow errors that do not normally cause a test to fail to propagate back without being wrapped in a TFE") {
val vmeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new VirtualMachineError {}))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new VirtualMachineError {}
}
intercept[VirtualMachineError] {
whenReady(vmeFuture) { s =>
s should equal ("hi")
}
}
}
// Same thing here and in 2.0 need to add a test for TestCanceledException
it("should allow TestPendingException, which does not normally cause a test to fail, through immediately when thrown") {
val tpeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new TestPendingException))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new TestPendingException
}
intercept[TestPendingException] {
whenReady(tpeFuture) { s =>
s should equal ("hi")
}
}
}
it("should allow TestCanceledException, which does not normally cause a test to fail, through immediately when thrown") {
val tpeFuture =
new FutureConcept[String] {
def eitherValue: Option[Either[Throwable, String]] = Some(Left(new TestCanceledException(0)))
def isExpired: Boolean = false
def isCanceled: Boolean = false
def awaitAtMost(span: Span): String = throw new TestCanceledException(0)
}
intercept[TestCanceledException] {
whenReady(tpeFuture) { s =>
s should equal ("hi")
}
}
}
}
}
}
| apache-2.0 |
racker/omnibus | source/ruby-1.9.2-p180/test/rdoc/test_rdoc_ri_store.rb | 7781 | require 'rubygems'
require 'minitest/autorun'
require 'rdoc/ri'
require 'rdoc/markup'
require 'tmpdir'
require 'fileutils'
class TestRDocRIStore < MiniTest::Unit::TestCase
def setup
RDoc::TopLevel.reset
@tmpdir = File.join Dir.tmpdir, "test_rdoc_ri_store_#{$$}"
@s = RDoc::RI::Store.new @tmpdir
@top_level = RDoc::TopLevel.new 'file.rb'
@klass = @top_level.add_class RDoc::NormalClass, 'Object'
@klass.comment = 'original'
@cmeth = RDoc::AnyMethod.new nil, 'cmethod'
@cmeth.singleton = true
@meth = RDoc::AnyMethod.new nil, 'method'
@meth_bang = RDoc::AnyMethod.new nil, 'method!'
@attr = RDoc::Attr.new nil, 'attr', 'RW', ''
@klass.add_method @cmeth
@klass.add_method @meth
@klass.add_method @meth_bang
@klass.add_attribute @attr
@nest_klass = @klass.add_class RDoc::NormalClass, 'SubClass'
@nest_meth = RDoc::AnyMethod.new nil, 'method'
@nest_incl = RDoc::Include.new 'Incl', ''
@nest_klass.add_method @nest_meth
@nest_klass.add_include @nest_incl
@RM = RDoc::Markup
end
def teardown
FileUtils.rm_rf @tmpdir
end
def assert_cache imethods, cmethods, attrs, modules, ancestors = {}
expected = {
:class_methods => cmethods,
:instance_methods => imethods,
:attributes => attrs,
:modules => modules,
:ancestors => ancestors
}
assert_equal expected, @s.cache
end
def assert_directory path
assert File.directory?(path), "#{path} is not a directory"
end
def assert_file path
assert File.file?(path), "#{path} is not a file"
end
def test_attributes
@s.cache[:attributes]['Object'] = %w[attr]
expected = { 'Object' => %w[attr] }
assert_equal expected, @s.attributes
end
def test_class_file
assert_equal File.join(@tmpdir, 'Object', 'cdesc-Object.ri'),
@s.class_file('Object')
assert_equal File.join(@tmpdir, 'Object', 'SubClass', 'cdesc-SubClass.ri'),
@s.class_file('Object::SubClass')
end
def test_class_methods
@s.cache[:class_methods]['Object'] = %w[method]
expected = { 'Object' => %w[method] }
assert_equal expected, @s.class_methods
end
def test_class_path
assert_equal File.join(@tmpdir, 'Object'), @s.class_path('Object')
assert_equal File.join(@tmpdir, 'Object', 'SubClass'),
@s.class_path('Object::SubClass')
end
def test_friendly_path
@s.path = @tmpdir
@s.type = nil
assert_equal @s.path, @s.friendly_path
@s.type = :extra
assert_equal @s.path, @s.friendly_path
@s.type = :system
assert_equal "ruby core", @s.friendly_path
@s.type = :site
assert_equal "ruby site", @s.friendly_path
@s.type = :home
assert_equal "~/.ri", @s.friendly_path
@s.type = :gem
@s.path = "#{@tmpdir}/gem_repository/doc/gem_name-1.0/ri"
assert_equal "gem gem_name-1.0", @s.friendly_path
end
def test_instance_methods
@s.cache[:instance_methods]['Object'] = %w[method]
expected = { 'Object' => %w[method] }
assert_equal expected, @s.instance_methods
end
def test_load_cache
cache = {
:methods => %w[Object#method],
:modules => %w[Object],
}
Dir.mkdir @tmpdir
open File.join(@tmpdir, 'cache.ri'), 'wb' do |io|
Marshal.dump cache, io
end
@s.load_cache
assert_equal cache, @s.cache
end
def test_load_cache_no_cache
cache = {
:ancestors => {},
:attributes => {},
:class_methods => {},
:instance_methods => {},
:modules => [],
}
@s.load_cache
assert_equal cache, @s.cache
end
def test_load_class
@s.save_class @klass
assert_equal @klass, @s.load_class('Object')
end
def test_load_method_bang
@s.save_method @klass, @meth_bang
meth = @s.load_method('Object', '#method!')
assert_equal @meth_bang, meth
end
def test_method_file
assert_equal File.join(@tmpdir, 'Object', 'method-i.ri'),
@s.method_file('Object', 'Object#method')
assert_equal File.join(@tmpdir, 'Object', 'method%21-i.ri'),
@s.method_file('Object', 'Object#method!')
assert_equal File.join(@tmpdir, 'Object', 'SubClass', 'method%21-i.ri'),
@s.method_file('Object::SubClass', 'Object::SubClass#method!')
assert_equal File.join(@tmpdir, 'Object', 'method-c.ri'),
@s.method_file('Object', 'Object::method')
end
def test_save_cache
@s.save_class @klass
@s.save_method @klass, @meth
@s.save_method @klass, @cmeth
@s.save_class @nest_klass
@s.save_cache
assert_file File.join(@tmpdir, 'cache.ri')
expected = {
:attributes => { 'Object' => ['attr_accessor attr'] },
:class_methods => { 'Object' => %w[cmethod] },
:instance_methods => { 'Object' => %w[method] },
:modules => %w[Object Object::SubClass],
:ancestors => {
'Object' => %w[Object],
'Object::SubClass' => %w[Incl Object],
},
}
open File.join(@tmpdir, 'cache.ri'), 'rb' do |io|
cache = Marshal.load io.read
assert_equal expected, cache
end
end
def test_save_cache_duplicate_methods
@s.save_method @klass, @meth
@s.save_method @klass, @meth
@s.save_cache
assert_cache({ 'Object' => %w[method] }, {}, {}, [])
end
def test_save_class
@s.save_class @klass
assert_directory File.join(@tmpdir, 'Object')
assert_file File.join(@tmpdir, 'Object', 'cdesc-Object.ri')
assert_cache({}, {}, { 'Object' => ['attr_accessor attr'] }, %w[Object],
'Object' => %w[Object])
assert_equal @klass, @s.load_class('Object')
end
def test_save_class_basic_object
@klass.instance_variable_set :@superclass, nil
@s.save_class @klass
assert_directory File.join(@tmpdir, 'Object')
assert_file File.join(@tmpdir, 'Object', 'cdesc-Object.ri')
assert_cache({}, {}, { 'Object' => ['attr_accessor attr'] }, %w[Object],
'Object' => %w[])
assert_equal @klass, @s.load_class('Object')
end
def test_save_class_merge
@s.save_class @klass
klass = RDoc::NormalClass.new 'Object'
klass.comment = 'new class'
s = RDoc::RI::Store.new @tmpdir
s.save_class klass
s = RDoc::RI::Store.new @tmpdir
document = @RM::Document.new(
@RM::Paragraph.new('original'),
@RM::Paragraph.new('new class'))
assert_equal document, s.load_class('Object').comment
end
def test_save_class_methods
@s.save_class @klass
assert_directory File.join(@tmpdir, 'Object')
assert_file File.join(@tmpdir, 'Object', 'cdesc-Object.ri')
assert_cache({}, {}, { 'Object' => ['attr_accessor attr'] }, %w[Object],
'Object' => %w[Object])
assert_equal @klass, @s.load_class('Object')
end
def test_save_class_nested
@s.save_class @nest_klass
assert_directory File.join(@tmpdir, 'Object', 'SubClass')
assert_file File.join(@tmpdir, 'Object', 'SubClass', 'cdesc-SubClass.ri')
assert_cache({}, {}, {}, %w[Object::SubClass],
'Object::SubClass' => %w[Incl Object])
end
def test_save_method
@s.save_method @klass, @meth
assert_directory File.join(@tmpdir, 'Object')
assert_file File.join(@tmpdir, 'Object', 'method-i.ri')
assert_cache({ 'Object' => %w[method] }, {}, {}, [])
assert_equal @meth, @s.load_method('Object', '#method')
end
def test_save_method_nested
@s.save_method @nest_klass, @nest_meth
assert_directory File.join(@tmpdir, 'Object', 'SubClass')
assert_file File.join(@tmpdir, 'Object', 'SubClass', 'method-i.ri')
assert_cache({ 'Object::SubClass' => %w[method] }, {}, {}, [])
end
end
| apache-2.0 |
TiVo/kafka | trogdor/src/main/java/org/apache/kafka/trogdor/rest/TaskStopping.java | 1713 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.trogdor.rest;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.kafka.trogdor.task.TaskSpec;
/**
* The state for a task which is being stopped on the coordinator.
*/
public class TaskStopping extends TaskState {
/**
* The time on the agent when the task was received.
*/
private final long startedMs;
@JsonCreator
public TaskStopping(@JsonProperty("spec") TaskSpec spec,
@JsonProperty("startedMs") long startedMs,
@JsonProperty("status") JsonNode status) {
super(spec, status);
this.startedMs = startedMs;
}
@JsonProperty
public long startedMs() {
return startedMs;
}
@Override
public TaskStateType stateType() {
return TaskStateType.STOPPING;
}
}
| apache-2.0 |
pweil-/origin | vendor/github.com/Azure/azure-sdk-for-go/profiles/preview/servicefabric/servicefabric/models.go | 148564 | // +build go1.9
// Copyright 2019 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code was auto-generated by:
// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
package servicefabric
import original "github.com/Azure/azure-sdk-for-go/services/servicefabric/6.5/servicefabric"
const (
DefaultBaseURI = original.DefaultBaseURI
)
type ApplicationDefinitionKind = original.ApplicationDefinitionKind
const (
Compose ApplicationDefinitionKind = original.Compose
Invalid ApplicationDefinitionKind = original.Invalid
ServiceFabricApplicationDescription ApplicationDefinitionKind = original.ServiceFabricApplicationDescription
)
type ApplicationPackageCleanupPolicy = original.ApplicationPackageCleanupPolicy
const (
ApplicationPackageCleanupPolicyAutomatic ApplicationPackageCleanupPolicy = original.ApplicationPackageCleanupPolicyAutomatic
ApplicationPackageCleanupPolicyDefault ApplicationPackageCleanupPolicy = original.ApplicationPackageCleanupPolicyDefault
ApplicationPackageCleanupPolicyInvalid ApplicationPackageCleanupPolicy = original.ApplicationPackageCleanupPolicyInvalid
ApplicationPackageCleanupPolicyManual ApplicationPackageCleanupPolicy = original.ApplicationPackageCleanupPolicyManual
)
type ApplicationScopedVolumeKind = original.ApplicationScopedVolumeKind
const (
ServiceFabricVolumeDisk ApplicationScopedVolumeKind = original.ServiceFabricVolumeDisk
)
type ApplicationStatus = original.ApplicationStatus
const (
ApplicationStatusCreating ApplicationStatus = original.ApplicationStatusCreating
ApplicationStatusDeleting ApplicationStatus = original.ApplicationStatusDeleting
ApplicationStatusFailed ApplicationStatus = original.ApplicationStatusFailed
ApplicationStatusInvalid ApplicationStatus = original.ApplicationStatusInvalid
ApplicationStatusReady ApplicationStatus = original.ApplicationStatusReady
ApplicationStatusUpgrading ApplicationStatus = original.ApplicationStatusUpgrading
)
type ApplicationTypeDefinitionKind = original.ApplicationTypeDefinitionKind
const (
ApplicationTypeDefinitionKindCompose ApplicationTypeDefinitionKind = original.ApplicationTypeDefinitionKindCompose
ApplicationTypeDefinitionKindInvalid ApplicationTypeDefinitionKind = original.ApplicationTypeDefinitionKindInvalid
ApplicationTypeDefinitionKindServiceFabricApplicationPackage ApplicationTypeDefinitionKind = original.ApplicationTypeDefinitionKindServiceFabricApplicationPackage
)
type ApplicationTypeStatus = original.ApplicationTypeStatus
const (
ApplicationTypeStatusAvailable ApplicationTypeStatus = original.ApplicationTypeStatusAvailable
ApplicationTypeStatusFailed ApplicationTypeStatus = original.ApplicationTypeStatusFailed
ApplicationTypeStatusInvalid ApplicationTypeStatus = original.ApplicationTypeStatusInvalid
ApplicationTypeStatusProvisioning ApplicationTypeStatus = original.ApplicationTypeStatusProvisioning
ApplicationTypeStatusUnprovisioning ApplicationTypeStatus = original.ApplicationTypeStatusUnprovisioning
)
type AutoScalingMechanismKind = original.AutoScalingMechanismKind
const (
AddRemoveReplica AutoScalingMechanismKind = original.AddRemoveReplica
)
type AutoScalingMetricKind = original.AutoScalingMetricKind
const (
Resource AutoScalingMetricKind = original.Resource
)
type AutoScalingResourceMetricName = original.AutoScalingResourceMetricName
const (
CPU AutoScalingResourceMetricName = original.CPU
MemoryInGB AutoScalingResourceMetricName = original.MemoryInGB
)
type AutoScalingTriggerKind = original.AutoScalingTriggerKind
const (
AverageLoad AutoScalingTriggerKind = original.AverageLoad
)
type BackupEntityKind = original.BackupEntityKind
const (
BackupEntityKindApplication BackupEntityKind = original.BackupEntityKindApplication
BackupEntityKindInvalid BackupEntityKind = original.BackupEntityKindInvalid
BackupEntityKindPartition BackupEntityKind = original.BackupEntityKindPartition
BackupEntityKindService BackupEntityKind = original.BackupEntityKindService
)
type BackupPolicyScope = original.BackupPolicyScope
const (
BackupPolicyScopeApplication BackupPolicyScope = original.BackupPolicyScopeApplication
BackupPolicyScopeInvalid BackupPolicyScope = original.BackupPolicyScopeInvalid
BackupPolicyScopePartition BackupPolicyScope = original.BackupPolicyScopePartition
BackupPolicyScopeService BackupPolicyScope = original.BackupPolicyScopeService
)
type BackupScheduleFrequencyType = original.BackupScheduleFrequencyType
const (
BackupScheduleFrequencyTypeDaily BackupScheduleFrequencyType = original.BackupScheduleFrequencyTypeDaily
BackupScheduleFrequencyTypeInvalid BackupScheduleFrequencyType = original.BackupScheduleFrequencyTypeInvalid
BackupScheduleFrequencyTypeWeekly BackupScheduleFrequencyType = original.BackupScheduleFrequencyTypeWeekly
)
type BackupScheduleKind = original.BackupScheduleKind
const (
BackupScheduleKindFrequencyBased BackupScheduleKind = original.BackupScheduleKindFrequencyBased
BackupScheduleKindInvalid BackupScheduleKind = original.BackupScheduleKindInvalid
BackupScheduleKindTimeBased BackupScheduleKind = original.BackupScheduleKindTimeBased
)
type BackupState = original.BackupState
const (
BackupStateAccepted BackupState = original.BackupStateAccepted
BackupStateBackupInProgress BackupState = original.BackupStateBackupInProgress
BackupStateFailure BackupState = original.BackupStateFailure
BackupStateInvalid BackupState = original.BackupStateInvalid
BackupStateSuccess BackupState = original.BackupStateSuccess
BackupStateTimeout BackupState = original.BackupStateTimeout
)
type BackupStorageKind = original.BackupStorageKind
const (
BackupStorageKindAzureBlobStore BackupStorageKind = original.BackupStorageKindAzureBlobStore
BackupStorageKindFileShare BackupStorageKind = original.BackupStorageKindFileShare
BackupStorageKindInvalid BackupStorageKind = original.BackupStorageKindInvalid
)
type BackupSuspensionScope = original.BackupSuspensionScope
const (
BackupSuspensionScopeApplication BackupSuspensionScope = original.BackupSuspensionScopeApplication
BackupSuspensionScopeInvalid BackupSuspensionScope = original.BackupSuspensionScopeInvalid
BackupSuspensionScopePartition BackupSuspensionScope = original.BackupSuspensionScopePartition
BackupSuspensionScopeService BackupSuspensionScope = original.BackupSuspensionScopeService
)
type BackupType = original.BackupType
const (
BackupTypeFull BackupType = original.BackupTypeFull
BackupTypeIncremental BackupType = original.BackupTypeIncremental
BackupTypeInvalid BackupType = original.BackupTypeInvalid
)
type ChaosEventKind = original.ChaosEventKind
const (
ChaosEventKindExecutingFaults ChaosEventKind = original.ChaosEventKindExecutingFaults
ChaosEventKindInvalid ChaosEventKind = original.ChaosEventKindInvalid
ChaosEventKindStarted ChaosEventKind = original.ChaosEventKindStarted
ChaosEventKindStopped ChaosEventKind = original.ChaosEventKindStopped
ChaosEventKindTestError ChaosEventKind = original.ChaosEventKindTestError
ChaosEventKindValidationFailed ChaosEventKind = original.ChaosEventKindValidationFailed
ChaosEventKindWaiting ChaosEventKind = original.ChaosEventKindWaiting
)
type ChaosScheduleStatus = original.ChaosScheduleStatus
const (
ChaosScheduleStatusActive ChaosScheduleStatus = original.ChaosScheduleStatusActive
ChaosScheduleStatusExpired ChaosScheduleStatus = original.ChaosScheduleStatusExpired
ChaosScheduleStatusInvalid ChaosScheduleStatus = original.ChaosScheduleStatusInvalid
ChaosScheduleStatusPending ChaosScheduleStatus = original.ChaosScheduleStatusPending
ChaosScheduleStatusStopped ChaosScheduleStatus = original.ChaosScheduleStatusStopped
)
type ChaosStatus = original.ChaosStatus
const (
ChaosStatusInvalid ChaosStatus = original.ChaosStatusInvalid
ChaosStatusRunning ChaosStatus = original.ChaosStatusRunning
ChaosStatusStopped ChaosStatus = original.ChaosStatusStopped
)
type ComposeDeploymentStatus = original.ComposeDeploymentStatus
const (
ComposeDeploymentStatusCreating ComposeDeploymentStatus = original.ComposeDeploymentStatusCreating
ComposeDeploymentStatusDeleting ComposeDeploymentStatus = original.ComposeDeploymentStatusDeleting
ComposeDeploymentStatusFailed ComposeDeploymentStatus = original.ComposeDeploymentStatusFailed
ComposeDeploymentStatusInvalid ComposeDeploymentStatus = original.ComposeDeploymentStatusInvalid
ComposeDeploymentStatusProvisioning ComposeDeploymentStatus = original.ComposeDeploymentStatusProvisioning
ComposeDeploymentStatusReady ComposeDeploymentStatus = original.ComposeDeploymentStatusReady
ComposeDeploymentStatusUnprovisioning ComposeDeploymentStatus = original.ComposeDeploymentStatusUnprovisioning
ComposeDeploymentStatusUpgrading ComposeDeploymentStatus = original.ComposeDeploymentStatusUpgrading
)
type ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeState
const (
ComposeDeploymentUpgradeStateFailed ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateFailed
ComposeDeploymentUpgradeStateInvalid ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateInvalid
ComposeDeploymentUpgradeStateProvisioningTarget ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateProvisioningTarget
ComposeDeploymentUpgradeStateRollingBackCompleted ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateRollingBackCompleted
ComposeDeploymentUpgradeStateRollingBackInProgress ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateRollingBackInProgress
ComposeDeploymentUpgradeStateRollingForwardCompleted ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateRollingForwardCompleted
ComposeDeploymentUpgradeStateRollingForwardInProgress ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateRollingForwardInProgress
ComposeDeploymentUpgradeStateRollingForwardPending ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateRollingForwardPending
ComposeDeploymentUpgradeStateUnprovisioningCurrent ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateUnprovisioningCurrent
ComposeDeploymentUpgradeStateUnprovisioningTarget ComposeDeploymentUpgradeState = original.ComposeDeploymentUpgradeStateUnprovisioningTarget
)
type CreateFabricDump = original.CreateFabricDump
const (
False CreateFabricDump = original.False
True CreateFabricDump = original.True
)
type DataLossMode = original.DataLossMode
const (
DataLossModeFullDataLoss DataLossMode = original.DataLossModeFullDataLoss
DataLossModeInvalid DataLossMode = original.DataLossModeInvalid
DataLossModePartialDataLoss DataLossMode = original.DataLossModePartialDataLoss
)
type DayOfWeek = original.DayOfWeek
const (
Friday DayOfWeek = original.Friday
Monday DayOfWeek = original.Monday
Saturday DayOfWeek = original.Saturday
Sunday DayOfWeek = original.Sunday
Thursday DayOfWeek = original.Thursday
Tuesday DayOfWeek = original.Tuesday
Wednesday DayOfWeek = original.Wednesday
)
type DeactivationIntent = original.DeactivationIntent
const (
Pause DeactivationIntent = original.Pause
RemoveData DeactivationIntent = original.RemoveData
Restart DeactivationIntent = original.Restart
)
type DeployedApplicationStatus = original.DeployedApplicationStatus
const (
DeployedApplicationStatusActivating DeployedApplicationStatus = original.DeployedApplicationStatusActivating
DeployedApplicationStatusActive DeployedApplicationStatus = original.DeployedApplicationStatusActive
DeployedApplicationStatusDeactivating DeployedApplicationStatus = original.DeployedApplicationStatusDeactivating
DeployedApplicationStatusDownloading DeployedApplicationStatus = original.DeployedApplicationStatusDownloading
DeployedApplicationStatusInvalid DeployedApplicationStatus = original.DeployedApplicationStatusInvalid
DeployedApplicationStatusUpgrading DeployedApplicationStatus = original.DeployedApplicationStatusUpgrading
)
type DeploymentStatus = original.DeploymentStatus
const (
DeploymentStatusActivating DeploymentStatus = original.DeploymentStatusActivating
DeploymentStatusActive DeploymentStatus = original.DeploymentStatusActive
DeploymentStatusDeactivating DeploymentStatus = original.DeploymentStatusDeactivating
DeploymentStatusDownloading DeploymentStatus = original.DeploymentStatusDownloading
DeploymentStatusInvalid DeploymentStatus = original.DeploymentStatusInvalid
DeploymentStatusUpgrading DeploymentStatus = original.DeploymentStatusUpgrading
)
type DiagnosticsSinkKind = original.DiagnosticsSinkKind
const (
DiagnosticsSinkKindAzureInternalMonitoringPipeline DiagnosticsSinkKind = original.DiagnosticsSinkKindAzureInternalMonitoringPipeline
DiagnosticsSinkKindInvalid DiagnosticsSinkKind = original.DiagnosticsSinkKindInvalid
)
type EntityKind = original.EntityKind
const (
EntityKindApplication EntityKind = original.EntityKindApplication
EntityKindCluster EntityKind = original.EntityKindCluster
EntityKindDeployedApplication EntityKind = original.EntityKindDeployedApplication
EntityKindDeployedServicePackage EntityKind = original.EntityKindDeployedServicePackage
EntityKindInvalid EntityKind = original.EntityKindInvalid
EntityKindNode EntityKind = original.EntityKindNode
EntityKindPartition EntityKind = original.EntityKindPartition
EntityKindReplica EntityKind = original.EntityKindReplica
EntityKindService EntityKind = original.EntityKindService
)
// EntityKindBasicBackupEntity is an alias of original.EntityKindBasicBackupEntity.
type EntityKindBasicBackupEntity = original.EntityKindBasicBackupEntity

// EntityKindBasicBackupEntity values, forwarded unchanged from the wrapped
// package. The "1" suffix disambiguates these from the EntityKind constants
// of the same base name in this flattened namespace.
const (
	EntityKindApplication1 EntityKindBasicBackupEntity = original.EntityKindApplication1
	EntityKindBackupEntity EntityKindBasicBackupEntity = original.EntityKindBackupEntity
	EntityKindPartition1 EntityKindBasicBackupEntity = original.EntityKindPartition1
	EntityKindService1 EntityKindBasicBackupEntity = original.EntityKindService1
)
// EntryPointStatus is an alias of original.EntryPointStatus.
type EntryPointStatus = original.EntryPointStatus

// EntryPointStatus values, forwarded unchanged from the wrapped package.
const (
	EntryPointStatusInvalid EntryPointStatus = original.EntryPointStatusInvalid
	EntryPointStatusPending EntryPointStatus = original.EntryPointStatusPending
	EntryPointStatusStarted EntryPointStatus = original.EntryPointStatusStarted
	EntryPointStatusStarting EntryPointStatus = original.EntryPointStatusStarting
	EntryPointStatusStopped EntryPointStatus = original.EntryPointStatusStopped
	EntryPointStatusStopping EntryPointStatus = original.EntryPointStatusStopping
)
// FabricErrorCodes is an alias of original.FabricErrorCodes (Service Fabric
// error-code identifiers returned by the REST API).
type FabricErrorCodes = original.FabricErrorCodes

// FabricErrorCodes values, forwarded unchanged from the wrapped package.
const (
	EABORT FabricErrorCodes = original.EABORT
	EFAIL FabricErrorCodes = original.EFAIL
	EINVALIDARG FabricErrorCodes = original.EINVALIDARG
	FABRICEAPPLICATIONALREADYEXISTS FabricErrorCodes = original.FABRICEAPPLICATIONALREADYEXISTS
	FABRICEAPPLICATIONALREADYINTARGETVERSION FabricErrorCodes = original.FABRICEAPPLICATIONALREADYINTARGETVERSION
	FABRICEAPPLICATIONNOTFOUND FabricErrorCodes = original.FABRICEAPPLICATIONNOTFOUND
	FABRICEAPPLICATIONNOTUPGRADING FabricErrorCodes = original.FABRICEAPPLICATIONNOTUPGRADING
	FABRICEAPPLICATIONTYPEALREADYEXISTS FabricErrorCodes = original.FABRICEAPPLICATIONTYPEALREADYEXISTS
	FABRICEAPPLICATIONTYPEINUSE FabricErrorCodes = original.FABRICEAPPLICATIONTYPEINUSE
	FABRICEAPPLICATIONTYPENOTFOUND FabricErrorCodes = original.FABRICEAPPLICATIONTYPENOTFOUND
	FABRICEAPPLICATIONTYPEPROVISIONINPROGRESS FabricErrorCodes = original.FABRICEAPPLICATIONTYPEPROVISIONINPROGRESS
	FABRICEAPPLICATIONUPGRADEINPROGRESS FabricErrorCodes = original.FABRICEAPPLICATIONUPGRADEINPROGRESS
	FABRICEAPPLICATIONUPGRADEVALIDATIONERROR FabricErrorCodes = original.FABRICEAPPLICATIONUPGRADEVALIDATIONERROR
	FABRICEBACKUPINPROGRESS FabricErrorCodes = original.FABRICEBACKUPINPROGRESS
	FABRICEBACKUPISENABLED FabricErrorCodes = original.FABRICEBACKUPISENABLED
	FABRICEBACKUPNOTENABLED FabricErrorCodes = original.FABRICEBACKUPNOTENABLED
	FABRICEBACKUPPOLICYALREADYEXISTING FabricErrorCodes = original.FABRICEBACKUPPOLICYALREADYEXISTING
	FABRICEBACKUPPOLICYNOTEXISTING FabricErrorCodes = original.FABRICEBACKUPPOLICYNOTEXISTING
	FABRICECOMMUNICATIONERROR FabricErrorCodes = original.FABRICECOMMUNICATIONERROR
	FABRICECONFIGURATIONPARAMETERNOTFOUND FabricErrorCodes = original.FABRICECONFIGURATIONPARAMETERNOTFOUND
	FABRICECONFIGURATIONSECTIONNOTFOUND FabricErrorCodes = original.FABRICECONFIGURATIONSECTIONNOTFOUND
	FABRICEDIRECTORYNOTFOUND FabricErrorCodes = original.FABRICEDIRECTORYNOTFOUND
	FABRICEENUMERATIONCOMPLETED FabricErrorCodes = original.FABRICEENUMERATIONCOMPLETED
	FABRICEFABRICALREADYINTARGETVERSION FabricErrorCodes = original.FABRICEFABRICALREADYINTARGETVERSION
	FABRICEFABRICNOTUPGRADING FabricErrorCodes = original.FABRICEFABRICNOTUPGRADING
	FABRICEFABRICUPGRADEINPROGRESS FabricErrorCodes = original.FABRICEFABRICUPGRADEINPROGRESS
	FABRICEFABRICUPGRADEVALIDATIONERROR FabricErrorCodes = original.FABRICEFABRICUPGRADEVALIDATIONERROR
	FABRICEFABRICVERSIONALREADYEXISTS FabricErrorCodes = original.FABRICEFABRICVERSIONALREADYEXISTS
	FABRICEFABRICVERSIONINUSE FabricErrorCodes = original.FABRICEFABRICVERSIONINUSE
	FABRICEFABRICVERSIONNOTFOUND FabricErrorCodes = original.FABRICEFABRICVERSIONNOTFOUND
	FABRICEFAULTANALYSISSERVICENOTEXISTING FabricErrorCodes = original.FABRICEFAULTANALYSISSERVICENOTEXISTING
	FABRICEFILENOTFOUND FabricErrorCodes = original.FABRICEFILENOTFOUND
	FABRICEHEALTHENTITYNOTFOUND FabricErrorCodes = original.FABRICEHEALTHENTITYNOTFOUND
	FABRICEHEALTHSTALEREPORT FabricErrorCodes = original.FABRICEHEALTHSTALEREPORT
	FABRICEIMAGEBUILDERRESERVEDDIRECTORYERROR FabricErrorCodes = original.FABRICEIMAGEBUILDERRESERVEDDIRECTORYERROR
	FABRICEIMAGEBUILDERVALIDATIONERROR FabricErrorCodes = original.FABRICEIMAGEBUILDERVALIDATIONERROR
	FABRICEINSTANCEIDMISMATCH FabricErrorCodes = original.FABRICEINSTANCEIDMISMATCH
	FABRICEINVALIDADDRESS FabricErrorCodes = original.FABRICEINVALIDADDRESS
	FABRICEINVALIDATOMICGROUP FabricErrorCodes = original.FABRICEINVALIDATOMICGROUP
	FABRICEINVALIDCONFIGURATION FabricErrorCodes = original.FABRICEINVALIDCONFIGURATION
	FABRICEINVALIDFORSTATELESSSERVICES FabricErrorCodes = original.FABRICEINVALIDFORSTATELESSSERVICES
	FABRICEINVALIDNAMEURI FabricErrorCodes = original.FABRICEINVALIDNAMEURI
	FABRICEINVALIDPARTITIONKEY FabricErrorCodes = original.FABRICEINVALIDPARTITIONKEY
	FABRICEINVALIDSERVICESCALINGPOLICY FabricErrorCodes = original.FABRICEINVALIDSERVICESCALINGPOLICY
	FABRICEKEYNOTFOUND FabricErrorCodes = original.FABRICEKEYNOTFOUND
	FABRICEKEYTOOLARGE FabricErrorCodes = original.FABRICEKEYTOOLARGE
	FABRICENAMEALREADYEXISTS FabricErrorCodes = original.FABRICENAMEALREADYEXISTS
	FABRICENAMEDOESNOTEXIST FabricErrorCodes = original.FABRICENAMEDOESNOTEXIST
	FABRICENAMENOTEMPTY FabricErrorCodes = original.FABRICENAMENOTEMPTY
	FABRICENODEHASNOTSTOPPEDYET FabricErrorCodes = original.FABRICENODEHASNOTSTOPPEDYET
	FABRICENODEISUP FabricErrorCodes = original.FABRICENODEISUP
	FABRICENODENOTFOUND FabricErrorCodes = original.FABRICENODENOTFOUND
	FABRICENOTPRIMARY FabricErrorCodes = original.FABRICENOTPRIMARY
	FABRICENOTREADY FabricErrorCodes = original.FABRICENOTREADY
	FABRICENOWRITEQUORUM FabricErrorCodes = original.FABRICENOWRITEQUORUM
	FABRICEOPERATIONNOTCOMPLETE FabricErrorCodes = original.FABRICEOPERATIONNOTCOMPLETE
	FABRICEPARTITIONNOTFOUND FabricErrorCodes = original.FABRICEPARTITIONNOTFOUND
	FABRICEPATHTOOLONG FabricErrorCodes = original.FABRICEPATHTOOLONG
	FABRICEPROPERTYCHECKFAILED FabricErrorCodes = original.FABRICEPROPERTYCHECKFAILED
	FABRICEPROPERTYDOESNOTEXIST FabricErrorCodes = original.FABRICEPROPERTYDOESNOTEXIST
	FABRICERECONFIGURATIONPENDING FabricErrorCodes = original.FABRICERECONFIGURATIONPENDING
	FABRICEREPLICADOESNOTEXIST FabricErrorCodes = original.FABRICEREPLICADOESNOTEXIST
	FABRICERESTOREINPROGRESS FabricErrorCodes = original.FABRICERESTOREINPROGRESS
	FABRICERESTORESOURCETARGETPARTITIONMISMATCH FabricErrorCodes = original.FABRICERESTORESOURCETARGETPARTITIONMISMATCH
	FABRICESEQUENCENUMBERCHECKFAILED FabricErrorCodes = original.FABRICESEQUENCENUMBERCHECKFAILED
	FABRICESERVICEAFFINITYCHAINNOTSUPPORTED FabricErrorCodes = original.FABRICESERVICEAFFINITYCHAINNOTSUPPORTED
	FABRICESERVICEALREADYEXISTS FabricErrorCodes = original.FABRICESERVICEALREADYEXISTS
	FABRICESERVICEDOESNOTEXIST FabricErrorCodes = original.FABRICESERVICEDOESNOTEXIST
	FABRICESERVICEGROUPALREADYEXISTS FabricErrorCodes = original.FABRICESERVICEGROUPALREADYEXISTS
	FABRICESERVICEGROUPDOESNOTEXIST FabricErrorCodes = original.FABRICESERVICEGROUPDOESNOTEXIST
	FABRICESERVICEMANIFESTNOTFOUND FabricErrorCodes = original.FABRICESERVICEMANIFESTNOTFOUND
	FABRICESERVICEMETADATAMISMATCH FabricErrorCodes = original.FABRICESERVICEMETADATAMISMATCH
	FABRICESERVICEOFFLINE FabricErrorCodes = original.FABRICESERVICEOFFLINE
	FABRICESERVICETYPEMISMATCH FabricErrorCodes = original.FABRICESERVICETYPEMISMATCH
	FABRICESERVICETYPENOTFOUND FabricErrorCodes = original.FABRICESERVICETYPENOTFOUND
	FABRICESERVICETYPETEMPLATENOTFOUND FabricErrorCodes = original.FABRICESERVICETYPETEMPLATENOTFOUND
	FABRICESINGLEINSTANCEAPPLICATIONALREADYEXISTS FabricErrorCodes = original.FABRICESINGLEINSTANCEAPPLICATIONALREADYEXISTS
	FABRICESINGLEINSTANCEAPPLICATIONNOTFOUND FabricErrorCodes = original.FABRICESINGLEINSTANCEAPPLICATIONNOTFOUND
	FABRICETIMEOUT FabricErrorCodes = original.FABRICETIMEOUT
	FABRICEVALUEEMPTY FabricErrorCodes = original.FABRICEVALUEEMPTY
	FABRICEVALUETOOLARGE FabricErrorCodes = original.FABRICEVALUETOOLARGE
	FABRICEVOLUMEALREADYEXISTS FabricErrorCodes = original.FABRICEVOLUMEALREADYEXISTS
	FABRICEVOLUMENOTFOUND FabricErrorCodes = original.FABRICEVOLUMENOTFOUND
	SerializationError FabricErrorCodes = original.SerializationError
)
// FabricEventKind is an alias of original.FabricEventKind (the discriminator
// for cluster/application/node/partition/replica/service event payloads).
type FabricEventKind = original.FabricEventKind

// FabricEventKind values, forwarded unchanged from the wrapped package.
const (
	FabricEventKindApplicationContainerInstanceExited FabricEventKind = original.FabricEventKindApplicationContainerInstanceExited
	FabricEventKindApplicationCreated FabricEventKind = original.FabricEventKindApplicationCreated
	FabricEventKindApplicationDeleted FabricEventKind = original.FabricEventKindApplicationDeleted
	FabricEventKindApplicationEvent FabricEventKind = original.FabricEventKindApplicationEvent
	FabricEventKindApplicationHealthReportExpired FabricEventKind = original.FabricEventKindApplicationHealthReportExpired
	FabricEventKindApplicationNewHealthReport FabricEventKind = original.FabricEventKindApplicationNewHealthReport
	FabricEventKindApplicationProcessExited FabricEventKind = original.FabricEventKindApplicationProcessExited
	FabricEventKindApplicationUpgradeCompleted FabricEventKind = original.FabricEventKindApplicationUpgradeCompleted
	FabricEventKindApplicationUpgradeDomainCompleted FabricEventKind = original.FabricEventKindApplicationUpgradeDomainCompleted
	FabricEventKindApplicationUpgradeRollbackCompleted FabricEventKind = original.FabricEventKindApplicationUpgradeRollbackCompleted
	FabricEventKindApplicationUpgradeRollbackStarted FabricEventKind = original.FabricEventKindApplicationUpgradeRollbackStarted
	FabricEventKindApplicationUpgradeStarted FabricEventKind = original.FabricEventKindApplicationUpgradeStarted
	FabricEventKindChaosCodePackageRestartScheduled FabricEventKind = original.FabricEventKindChaosCodePackageRestartScheduled
	FabricEventKindChaosNodeRestartScheduled FabricEventKind = original.FabricEventKindChaosNodeRestartScheduled
	FabricEventKindChaosPartitionPrimaryMoveScheduled FabricEventKind = original.FabricEventKindChaosPartitionPrimaryMoveScheduled
	FabricEventKindChaosPartitionSecondaryMoveScheduled FabricEventKind = original.FabricEventKindChaosPartitionSecondaryMoveScheduled
	FabricEventKindChaosReplicaRemovalScheduled FabricEventKind = original.FabricEventKindChaosReplicaRemovalScheduled
	FabricEventKindChaosReplicaRestartScheduled FabricEventKind = original.FabricEventKindChaosReplicaRestartScheduled
	FabricEventKindChaosStarted FabricEventKind = original.FabricEventKindChaosStarted
	FabricEventKindChaosStopped FabricEventKind = original.FabricEventKindChaosStopped
	FabricEventKindClusterEvent FabricEventKind = original.FabricEventKindClusterEvent
	FabricEventKindClusterHealthReportExpired FabricEventKind = original.FabricEventKindClusterHealthReportExpired
	FabricEventKindClusterNewHealthReport FabricEventKind = original.FabricEventKindClusterNewHealthReport
	FabricEventKindClusterUpgradeCompleted FabricEventKind = original.FabricEventKindClusterUpgradeCompleted
	FabricEventKindClusterUpgradeDomainCompleted FabricEventKind = original.FabricEventKindClusterUpgradeDomainCompleted
	FabricEventKindClusterUpgradeRollbackCompleted FabricEventKind = original.FabricEventKindClusterUpgradeRollbackCompleted
	FabricEventKindClusterUpgradeRollbackStarted FabricEventKind = original.FabricEventKindClusterUpgradeRollbackStarted
	FabricEventKindClusterUpgradeStarted FabricEventKind = original.FabricEventKindClusterUpgradeStarted
	FabricEventKindContainerInstanceEvent FabricEventKind = original.FabricEventKindContainerInstanceEvent
	FabricEventKindDeployedApplicationHealthReportExpired FabricEventKind = original.FabricEventKindDeployedApplicationHealthReportExpired
	FabricEventKindDeployedApplicationNewHealthReport FabricEventKind = original.FabricEventKindDeployedApplicationNewHealthReport
	FabricEventKindDeployedServicePackageHealthReportExpired FabricEventKind = original.FabricEventKindDeployedServicePackageHealthReportExpired
	FabricEventKindDeployedServicePackageNewHealthReport FabricEventKind = original.FabricEventKindDeployedServicePackageNewHealthReport
	FabricEventKindNodeAborted FabricEventKind = original.FabricEventKindNodeAborted
	FabricEventKindNodeAddedToCluster FabricEventKind = original.FabricEventKindNodeAddedToCluster
	FabricEventKindNodeClosed FabricEventKind = original.FabricEventKindNodeClosed
	FabricEventKindNodeDeactivateCompleted FabricEventKind = original.FabricEventKindNodeDeactivateCompleted
	FabricEventKindNodeDeactivateStarted FabricEventKind = original.FabricEventKindNodeDeactivateStarted
	FabricEventKindNodeDown FabricEventKind = original.FabricEventKindNodeDown
	FabricEventKindNodeEvent FabricEventKind = original.FabricEventKindNodeEvent
	FabricEventKindNodeHealthReportExpired FabricEventKind = original.FabricEventKindNodeHealthReportExpired
	FabricEventKindNodeNewHealthReport FabricEventKind = original.FabricEventKindNodeNewHealthReport
	FabricEventKindNodeOpenFailed FabricEventKind = original.FabricEventKindNodeOpenFailed
	FabricEventKindNodeOpenSucceeded FabricEventKind = original.FabricEventKindNodeOpenSucceeded
	FabricEventKindNodeRemovedFromCluster FabricEventKind = original.FabricEventKindNodeRemovedFromCluster
	FabricEventKindNodeUp FabricEventKind = original.FabricEventKindNodeUp
	FabricEventKindPartitionAnalysisEvent FabricEventKind = original.FabricEventKindPartitionAnalysisEvent
	FabricEventKindPartitionEvent FabricEventKind = original.FabricEventKindPartitionEvent
	FabricEventKindPartitionHealthReportExpired FabricEventKind = original.FabricEventKindPartitionHealthReportExpired
	FabricEventKindPartitionNewHealthReport FabricEventKind = original.FabricEventKindPartitionNewHealthReport
	FabricEventKindPartitionPrimaryMoveAnalysis FabricEventKind = original.FabricEventKindPartitionPrimaryMoveAnalysis
	FabricEventKindPartitionReconfigured FabricEventKind = original.FabricEventKindPartitionReconfigured
	FabricEventKindReplicaEvent FabricEventKind = original.FabricEventKindReplicaEvent
	FabricEventKindServiceCreated FabricEventKind = original.FabricEventKindServiceCreated
	FabricEventKindServiceDeleted FabricEventKind = original.FabricEventKindServiceDeleted
	FabricEventKindServiceEvent FabricEventKind = original.FabricEventKindServiceEvent
	FabricEventKindServiceHealthReportExpired FabricEventKind = original.FabricEventKindServiceHealthReportExpired
	FabricEventKindServiceNewHealthReport FabricEventKind = original.FabricEventKindServiceNewHealthReport
	FabricEventKindStatefulReplicaHealthReportExpired FabricEventKind = original.FabricEventKindStatefulReplicaHealthReportExpired
	FabricEventKindStatefulReplicaNewHealthReport FabricEventKind = original.FabricEventKindStatefulReplicaNewHealthReport
	FabricEventKindStatelessReplicaHealthReportExpired FabricEventKind = original.FabricEventKindStatelessReplicaHealthReportExpired
	FabricEventKindStatelessReplicaNewHealthReport FabricEventKind = original.FabricEventKindStatelessReplicaNewHealthReport
)
// FabricReplicaStatus is an alias of original.FabricReplicaStatus.
type FabricReplicaStatus = original.FabricReplicaStatus

// FabricReplicaStatus values, forwarded unchanged from the wrapped package.
const (
	FabricReplicaStatusDown FabricReplicaStatus = original.FabricReplicaStatusDown
	FabricReplicaStatusInvalid FabricReplicaStatus = original.FabricReplicaStatusInvalid
	FabricReplicaStatusUp FabricReplicaStatus = original.FabricReplicaStatusUp
)
// FailureAction is an alias of original.FailureAction (what to do when a
// monitored upgrade fails).
type FailureAction = original.FailureAction

// FailureAction values, forwarded unchanged from the wrapped package.
const (
	FailureActionInvalid FailureAction = original.FailureActionInvalid
	FailureActionManual FailureAction = original.FailureActionManual
	FailureActionRollback FailureAction = original.FailureActionRollback
)
// FailureReason is an alias of original.FailureReason (why an upgrade failed).
type FailureReason = original.FailureReason

// FailureReason values, forwarded unchanged from the wrapped package.
// NOTE: these constants are unprefixed, so they occupy short, collision-prone
// names (e.g. None) in the flattened profile namespace.
const (
	HealthCheck FailureReason = original.HealthCheck
	Interrupted FailureReason = original.Interrupted
	None FailureReason = original.None
	OverallUpgradeTimeout FailureReason = original.OverallUpgradeTimeout
	UpgradeDomainTimeout FailureReason = original.UpgradeDomainTimeout
)
// HeaderMatchType is an alias of original.HeaderMatchType.
type HeaderMatchType = original.HeaderMatchType

// HeaderMatchType values, forwarded unchanged from the wrapped package.
const (
	Exact HeaderMatchType = original.Exact
)
// HealthEvaluationKind is an alias of original.HealthEvaluationKind (the
// discriminator for health-evaluation payloads).
type HealthEvaluationKind = original.HealthEvaluationKind

// HealthEvaluationKind values, forwarded unchanged from the wrapped package.
const (
	HealthEvaluationKindApplication HealthEvaluationKind = original.HealthEvaluationKindApplication
	HealthEvaluationKindApplications HealthEvaluationKind = original.HealthEvaluationKindApplications
	HealthEvaluationKindApplicationTypeApplications HealthEvaluationKind = original.HealthEvaluationKindApplicationTypeApplications
	HealthEvaluationKindDeltaNodesCheck HealthEvaluationKind = original.HealthEvaluationKindDeltaNodesCheck
	HealthEvaluationKindDeployedApplication HealthEvaluationKind = original.HealthEvaluationKindDeployedApplication
	HealthEvaluationKindDeployedApplications HealthEvaluationKind = original.HealthEvaluationKindDeployedApplications
	HealthEvaluationKindDeployedServicePackage HealthEvaluationKind = original.HealthEvaluationKindDeployedServicePackage
	HealthEvaluationKindDeployedServicePackages HealthEvaluationKind = original.HealthEvaluationKindDeployedServicePackages
	HealthEvaluationKindEvent HealthEvaluationKind = original.HealthEvaluationKindEvent
	HealthEvaluationKindInvalid HealthEvaluationKind = original.HealthEvaluationKindInvalid
	HealthEvaluationKindNode HealthEvaluationKind = original.HealthEvaluationKindNode
	HealthEvaluationKindNodes HealthEvaluationKind = original.HealthEvaluationKindNodes
	HealthEvaluationKindPartition HealthEvaluationKind = original.HealthEvaluationKindPartition
	HealthEvaluationKindPartitions HealthEvaluationKind = original.HealthEvaluationKindPartitions
	HealthEvaluationKindReplica HealthEvaluationKind = original.HealthEvaluationKindReplica
	HealthEvaluationKindReplicas HealthEvaluationKind = original.HealthEvaluationKindReplicas
	HealthEvaluationKindService HealthEvaluationKind = original.HealthEvaluationKindService
	HealthEvaluationKindServices HealthEvaluationKind = original.HealthEvaluationKindServices
	HealthEvaluationKindSystemApplication HealthEvaluationKind = original.HealthEvaluationKindSystemApplication
	HealthEvaluationKindUpgradeDomainDeltaNodesCheck HealthEvaluationKind = original.HealthEvaluationKindUpgradeDomainDeltaNodesCheck
	HealthEvaluationKindUpgradeDomainDeployedApplications HealthEvaluationKind = original.HealthEvaluationKindUpgradeDomainDeployedApplications
	HealthEvaluationKindUpgradeDomainNodes HealthEvaluationKind = original.HealthEvaluationKindUpgradeDomainNodes
)
// HealthState is an alias of original.HealthState (aggregated health of a
// Service Fabric entity).
type HealthState = original.HealthState

// HealthState values, forwarded unchanged from the wrapped package.
const (
	HealthStateError HealthState = original.HealthStateError
	HealthStateInvalid HealthState = original.HealthStateInvalid
	HealthStateOk HealthState = original.HealthStateOk
	HealthStateUnknown HealthState = original.HealthStateUnknown
	HealthStateWarning HealthState = original.HealthStateWarning
)
// HostIsolationMode is an alias of original.HostIsolationMode.
type HostIsolationMode = original.HostIsolationMode

// HostIsolationMode values, forwarded unchanged from the wrapped package.
const (
	HostIsolationModeHyperV HostIsolationMode = original.HostIsolationModeHyperV
	HostIsolationModeNone HostIsolationMode = original.HostIsolationModeNone
	HostIsolationModeProcess HostIsolationMode = original.HostIsolationModeProcess
)
// HostType is an alias of original.HostType (how a code package is hosted).
type HostType = original.HostType

// HostType values, forwarded unchanged from the wrapped package.
const (
	HostTypeContainerHost HostType = original.HostTypeContainerHost
	HostTypeExeHost HostType = original.HostTypeExeHost
	HostTypeInvalid HostType = original.HostTypeInvalid
)
// ImpactLevel is an alias of original.ImpactLevel (repair-task impact on a node).
type ImpactLevel = original.ImpactLevel

// ImpactLevel values, forwarded unchanged from the wrapped package.
const (
	ImpactLevelInvalid ImpactLevel = original.ImpactLevelInvalid
	ImpactLevelNone ImpactLevel = original.ImpactLevelNone
	ImpactLevelRemoveData ImpactLevel = original.ImpactLevelRemoveData
	ImpactLevelRemoveNode ImpactLevel = original.ImpactLevelRemoveNode
	ImpactLevelRestart ImpactLevel = original.ImpactLevelRestart
)
// Kind is an alias of original.Kind (generic discriminator used by
// health-evaluation model types).
type Kind = original.Kind

// Kind values, forwarded unchanged from the wrapped package.
const (
	KindApplication Kind = original.KindApplication
	KindApplications Kind = original.KindApplications
	KindApplicationTypeApplications Kind = original.KindApplicationTypeApplications
	KindDeltaNodesCheck Kind = original.KindDeltaNodesCheck
	KindDeployedApplication Kind = original.KindDeployedApplication
	KindDeployedApplications Kind = original.KindDeployedApplications
	KindDeployedServicePackage Kind = original.KindDeployedServicePackage
	KindDeployedServicePackages Kind = original.KindDeployedServicePackages
	KindEvent Kind = original.KindEvent
	KindHealthEvaluation Kind = original.KindHealthEvaluation
	KindNode Kind = original.KindNode
	KindNodes Kind = original.KindNodes
	KindPartition Kind = original.KindPartition
	KindPartitions Kind = original.KindPartitions
	KindReplica Kind = original.KindReplica
	KindReplicas Kind = original.KindReplicas
	KindService Kind = original.KindService
	KindServices Kind = original.KindServices
	KindSystemApplication Kind = original.KindSystemApplication
	KindUpgradeDomainDeltaNodesCheck Kind = original.KindUpgradeDomainDeltaNodesCheck
	KindUpgradeDomainNodes Kind = original.KindUpgradeDomainNodes
)
// KindBasicApplicationScopedVolumeCreationParameters is an alias of the
// corresponding discriminator type in the wrapped package.
type KindBasicApplicationScopedVolumeCreationParameters = original.KindBasicApplicationScopedVolumeCreationParameters

// KindBasicApplicationScopedVolumeCreationParameters values, forwarded
// unchanged from the wrapped package.
const (
	KindApplicationScopedVolumeCreationParameters KindBasicApplicationScopedVolumeCreationParameters = original.KindApplicationScopedVolumeCreationParameters
	KindServiceFabricVolumeDisk KindBasicApplicationScopedVolumeCreationParameters = original.KindServiceFabricVolumeDisk
)
// KindBasicAutoScalingMechanism is an alias of original.KindBasicAutoScalingMechanism.
type KindBasicAutoScalingMechanism = original.KindBasicAutoScalingMechanism

// KindBasicAutoScalingMechanism values, forwarded unchanged from the wrapped package.
const (
	KindAddRemoveReplica KindBasicAutoScalingMechanism = original.KindAddRemoveReplica
	KindAutoScalingMechanism KindBasicAutoScalingMechanism = original.KindAutoScalingMechanism
)
// KindBasicAutoScalingMetric is an alias of original.KindBasicAutoScalingMetric.
type KindBasicAutoScalingMetric = original.KindBasicAutoScalingMetric

// KindBasicAutoScalingMetric values, forwarded unchanged from the wrapped package.
const (
	KindAutoScalingMetric KindBasicAutoScalingMetric = original.KindAutoScalingMetric
	KindResource KindBasicAutoScalingMetric = original.KindResource
)
// KindBasicAutoScalingTrigger is an alias of original.KindBasicAutoScalingTrigger.
type KindBasicAutoScalingTrigger = original.KindBasicAutoScalingTrigger

// KindBasicAutoScalingTrigger values, forwarded unchanged from the wrapped package.
const (
	KindAutoScalingTrigger KindBasicAutoScalingTrigger = original.KindAutoScalingTrigger
	KindAverageLoad KindBasicAutoScalingTrigger = original.KindAverageLoad
)
// KindBasicBackupConfigurationInfo is an alias of original.KindBasicBackupConfigurationInfo.
type KindBasicBackupConfigurationInfo = original.KindBasicBackupConfigurationInfo

// KindBasicBackupConfigurationInfo values, forwarded unchanged from the
// wrapped package (the long prefix avoids collisions with other Kind* names).
const (
	KindBasicBackupConfigurationInfoKindApplication KindBasicBackupConfigurationInfo = original.KindBasicBackupConfigurationInfoKindApplication
	KindBasicBackupConfigurationInfoKindBackupConfigurationInfo KindBasicBackupConfigurationInfo = original.KindBasicBackupConfigurationInfoKindBackupConfigurationInfo
	KindBasicBackupConfigurationInfoKindPartition KindBasicBackupConfigurationInfo = original.KindBasicBackupConfigurationInfoKindPartition
	KindBasicBackupConfigurationInfoKindService KindBasicBackupConfigurationInfo = original.KindBasicBackupConfigurationInfoKindService
)
// KindBasicChaosEvent is an alias of original.KindBasicChaosEvent.
type KindBasicChaosEvent = original.KindBasicChaosEvent

// KindBasicChaosEvent values, forwarded unchanged from the wrapped package.
const (
	KindChaosEvent KindBasicChaosEvent = original.KindChaosEvent
	KindExecutingFaults KindBasicChaosEvent = original.KindExecutingFaults
	KindStarted KindBasicChaosEvent = original.KindStarted
	KindStopped KindBasicChaosEvent = original.KindStopped
	KindTestError KindBasicChaosEvent = original.KindTestError
	KindValidationFailed KindBasicChaosEvent = original.KindValidationFailed
	KindWaiting KindBasicChaosEvent = original.KindWaiting
)
// KindBasicDiagnosticsSinkProperties is an alias of original.KindBasicDiagnosticsSinkProperties.
type KindBasicDiagnosticsSinkProperties = original.KindBasicDiagnosticsSinkProperties

// KindBasicDiagnosticsSinkProperties values, forwarded unchanged from the wrapped package.
const (
	KindAzureInternalMonitoringPipeline KindBasicDiagnosticsSinkProperties = original.KindAzureInternalMonitoringPipeline
	KindDiagnosticsSinkProperties KindBasicDiagnosticsSinkProperties = original.KindDiagnosticsSinkProperties
)
// KindBasicFabricEvent is an alias of original.KindBasicFabricEvent (the
// polymorphic discriminator for FabricEvent model variants).
type KindBasicFabricEvent = original.KindBasicFabricEvent

// KindBasicFabricEvent values, forwarded unchanged from the wrapped package.
const (
	KindApplicationContainerInstanceExited KindBasicFabricEvent = original.KindApplicationContainerInstanceExited
	KindApplicationCreated KindBasicFabricEvent = original.KindApplicationCreated
	KindApplicationDeleted KindBasicFabricEvent = original.KindApplicationDeleted
	KindApplicationEvent KindBasicFabricEvent = original.KindApplicationEvent
	KindApplicationHealthReportExpired KindBasicFabricEvent = original.KindApplicationHealthReportExpired
	KindApplicationNewHealthReport KindBasicFabricEvent = original.KindApplicationNewHealthReport
	KindApplicationProcessExited KindBasicFabricEvent = original.KindApplicationProcessExited
	KindApplicationUpgradeCompleted KindBasicFabricEvent = original.KindApplicationUpgradeCompleted
	KindApplicationUpgradeDomainCompleted KindBasicFabricEvent = original.KindApplicationUpgradeDomainCompleted
	KindApplicationUpgradeRollbackCompleted KindBasicFabricEvent = original.KindApplicationUpgradeRollbackCompleted
	KindApplicationUpgradeRollbackStarted KindBasicFabricEvent = original.KindApplicationUpgradeRollbackStarted
	KindApplicationUpgradeStarted KindBasicFabricEvent = original.KindApplicationUpgradeStarted
	KindChaosCodePackageRestartScheduled KindBasicFabricEvent = original.KindChaosCodePackageRestartScheduled
	KindChaosNodeRestartScheduled KindBasicFabricEvent = original.KindChaosNodeRestartScheduled
	KindChaosPartitionPrimaryMoveScheduled KindBasicFabricEvent = original.KindChaosPartitionPrimaryMoveScheduled
	KindChaosPartitionSecondaryMoveScheduled KindBasicFabricEvent = original.KindChaosPartitionSecondaryMoveScheduled
	KindChaosReplicaRemovalScheduled KindBasicFabricEvent = original.KindChaosReplicaRemovalScheduled
	KindChaosReplicaRestartScheduled KindBasicFabricEvent = original.KindChaosReplicaRestartScheduled
	KindChaosStarted KindBasicFabricEvent = original.KindChaosStarted
	KindChaosStopped KindBasicFabricEvent = original.KindChaosStopped
	KindClusterEvent KindBasicFabricEvent = original.KindClusterEvent
	KindClusterHealthReportExpired KindBasicFabricEvent = original.KindClusterHealthReportExpired
	KindClusterNewHealthReport KindBasicFabricEvent = original.KindClusterNewHealthReport
	KindClusterUpgradeCompleted KindBasicFabricEvent = original.KindClusterUpgradeCompleted
	KindClusterUpgradeDomainCompleted KindBasicFabricEvent = original.KindClusterUpgradeDomainCompleted
	KindClusterUpgradeRollbackCompleted KindBasicFabricEvent = original.KindClusterUpgradeRollbackCompleted
	KindClusterUpgradeRollbackStarted KindBasicFabricEvent = original.KindClusterUpgradeRollbackStarted
	KindClusterUpgradeStarted KindBasicFabricEvent = original.KindClusterUpgradeStarted
	KindContainerInstanceEvent KindBasicFabricEvent = original.KindContainerInstanceEvent
	KindDeployedApplicationHealthReportExpired KindBasicFabricEvent = original.KindDeployedApplicationHealthReportExpired
	KindDeployedApplicationNewHealthReport KindBasicFabricEvent = original.KindDeployedApplicationNewHealthReport
	KindDeployedServicePackageHealthReportExpired KindBasicFabricEvent = original.KindDeployedServicePackageHealthReportExpired
	KindDeployedServicePackageNewHealthReport KindBasicFabricEvent = original.KindDeployedServicePackageNewHealthReport
	KindFabricEvent KindBasicFabricEvent = original.KindFabricEvent
	KindNodeAborted KindBasicFabricEvent = original.KindNodeAborted
	KindNodeAddedToCluster KindBasicFabricEvent = original.KindNodeAddedToCluster
	KindNodeClosed KindBasicFabricEvent = original.KindNodeClosed
	KindNodeDeactivateCompleted KindBasicFabricEvent = original.KindNodeDeactivateCompleted
	KindNodeDeactivateStarted KindBasicFabricEvent = original.KindNodeDeactivateStarted
	KindNodeDown KindBasicFabricEvent = original.KindNodeDown
	KindNodeEvent KindBasicFabricEvent = original.KindNodeEvent
	KindNodeHealthReportExpired KindBasicFabricEvent = original.KindNodeHealthReportExpired
	KindNodeNewHealthReport KindBasicFabricEvent = original.KindNodeNewHealthReport
	KindNodeOpenFailed KindBasicFabricEvent = original.KindNodeOpenFailed
	KindNodeOpenSucceeded KindBasicFabricEvent = original.KindNodeOpenSucceeded
	KindNodeRemovedFromCluster KindBasicFabricEvent = original.KindNodeRemovedFromCluster
	KindNodeUp KindBasicFabricEvent = original.KindNodeUp
	KindPartitionAnalysisEvent KindBasicFabricEvent = original.KindPartitionAnalysisEvent
	KindPartitionEvent KindBasicFabricEvent = original.KindPartitionEvent
	KindPartitionHealthReportExpired KindBasicFabricEvent = original.KindPartitionHealthReportExpired
	KindPartitionNewHealthReport KindBasicFabricEvent = original.KindPartitionNewHealthReport
	KindPartitionPrimaryMoveAnalysis KindBasicFabricEvent = original.KindPartitionPrimaryMoveAnalysis
	KindPartitionReconfigured KindBasicFabricEvent = original.KindPartitionReconfigured
	KindReplicaEvent KindBasicFabricEvent = original.KindReplicaEvent
	KindServiceCreated KindBasicFabricEvent = original.KindServiceCreated
	KindServiceDeleted KindBasicFabricEvent = original.KindServiceDeleted
	KindServiceEvent KindBasicFabricEvent = original.KindServiceEvent
	KindServiceHealthReportExpired KindBasicFabricEvent = original.KindServiceHealthReportExpired
	KindServiceNewHealthReport KindBasicFabricEvent = original.KindServiceNewHealthReport
	KindStatefulReplicaHealthReportExpired KindBasicFabricEvent = original.KindStatefulReplicaHealthReportExpired
	KindStatefulReplicaNewHealthReport KindBasicFabricEvent = original.KindStatefulReplicaNewHealthReport
	KindStatelessReplicaHealthReportExpired KindBasicFabricEvent = original.KindStatelessReplicaHealthReportExpired
	KindStatelessReplicaNewHealthReport KindBasicFabricEvent = original.KindStatelessReplicaNewHealthReport
)
// KindBasicNetworkResourcePropertiesBase is an alias of the corresponding
// discriminator type in the wrapped package.
type KindBasicNetworkResourcePropertiesBase = original.KindBasicNetworkResourcePropertiesBase

// KindBasicNetworkResourcePropertiesBase values, forwarded unchanged from the wrapped package.
const (
	KindLocal KindBasicNetworkResourcePropertiesBase = original.KindLocal
	KindNetworkResourceProperties KindBasicNetworkResourcePropertiesBase = original.KindNetworkResourceProperties
	KindNetworkResourcePropertiesBase KindBasicNetworkResourcePropertiesBase = original.KindNetworkResourcePropertiesBase
)
// KindBasicPropertyBatchInfo is an alias of original.KindBasicPropertyBatchInfo.
type KindBasicPropertyBatchInfo = original.KindBasicPropertyBatchInfo

// KindBasicPropertyBatchInfo values, forwarded unchanged from the wrapped package.
const (
	KindFailed KindBasicPropertyBatchInfo = original.KindFailed
	KindPropertyBatchInfo KindBasicPropertyBatchInfo = original.KindPropertyBatchInfo
	KindSuccessful KindBasicPropertyBatchInfo = original.KindSuccessful
)
// KindBasicPropertyBatchOperation is an alias of original.KindBasicPropertyBatchOperation.
type KindBasicPropertyBatchOperation = original.KindBasicPropertyBatchOperation

// KindBasicPropertyBatchOperation values, forwarded unchanged from the wrapped package.
const (
	KindCheckExists KindBasicPropertyBatchOperation = original.KindCheckExists
	KindCheckSequence KindBasicPropertyBatchOperation = original.KindCheckSequence
	KindCheckValue KindBasicPropertyBatchOperation = original.KindCheckValue
	KindDelete KindBasicPropertyBatchOperation = original.KindDelete
	KindGet KindBasicPropertyBatchOperation = original.KindGet
	KindPropertyBatchOperation KindBasicPropertyBatchOperation = original.KindPropertyBatchOperation
	KindPut KindBasicPropertyBatchOperation = original.KindPut
)
// KindBasicPropertyValue is an alias of original.KindBasicPropertyValue.
type KindBasicPropertyValue = original.KindBasicPropertyValue

// KindBasicPropertyValue values, forwarded unchanged from the wrapped package.
const (
	KindBinary KindBasicPropertyValue = original.KindBinary
	KindDouble KindBasicPropertyValue = original.KindDouble
	KindGUID KindBasicPropertyValue = original.KindGUID
	KindInt64 KindBasicPropertyValue = original.KindInt64
	KindPropertyValue KindBasicPropertyValue = original.KindPropertyValue
	KindString KindBasicPropertyValue = original.KindString
)
// KindBasicProvisionApplicationTypeDescriptionBase is an alias of the
// corresponding discriminator type in the wrapped package.
type KindBasicProvisionApplicationTypeDescriptionBase = original.KindBasicProvisionApplicationTypeDescriptionBase

// KindBasicProvisionApplicationTypeDescriptionBase values, forwarded unchanged from the wrapped package.
const (
	KindExternalStore KindBasicProvisionApplicationTypeDescriptionBase = original.KindExternalStore
	KindImageStorePath KindBasicProvisionApplicationTypeDescriptionBase = original.KindImageStorePath
	KindProvisionApplicationTypeDescriptionBase KindBasicProvisionApplicationTypeDescriptionBase = original.KindProvisionApplicationTypeDescriptionBase
)
// KindBasicRepairImpactDescriptionBase is an alias of original.KindBasicRepairImpactDescriptionBase.
type KindBasicRepairImpactDescriptionBase = original.KindBasicRepairImpactDescriptionBase

// KindBasicRepairImpactDescriptionBase values, forwarded unchanged from the wrapped package.
const (
	KindBasicRepairImpactDescriptionBaseKindNode KindBasicRepairImpactDescriptionBase = original.KindBasicRepairImpactDescriptionBaseKindNode
	KindBasicRepairImpactDescriptionBaseKindRepairImpactDescriptionBase KindBasicRepairImpactDescriptionBase = original.KindBasicRepairImpactDescriptionBaseKindRepairImpactDescriptionBase
)
// KindBasicRepairTargetDescriptionBase is an alias of original.KindBasicRepairTargetDescriptionBase.
type KindBasicRepairTargetDescriptionBase = original.KindBasicRepairTargetDescriptionBase

// KindBasicRepairTargetDescriptionBase values, forwarded unchanged from the wrapped package.
const (
	KindBasicRepairTargetDescriptionBaseKindNode KindBasicRepairTargetDescriptionBase = original.KindBasicRepairTargetDescriptionBaseKindNode
	KindBasicRepairTargetDescriptionBaseKindRepairTargetDescriptionBase KindBasicRepairTargetDescriptionBase = original.KindBasicRepairTargetDescriptionBaseKindRepairTargetDescriptionBase
)
// KindBasicReplicaStatusBase is an alias of original.KindBasicReplicaStatusBase.
type KindBasicReplicaStatusBase = original.KindBasicReplicaStatusBase

// KindBasicReplicaStatusBase values, forwarded unchanged from the wrapped package.
const (
	KindKeyValueStore KindBasicReplicaStatusBase = original.KindKeyValueStore
	KindReplicaStatusBase KindBasicReplicaStatusBase = original.KindReplicaStatusBase
)
// KindBasicReplicatorStatus is an alias of original.KindBasicReplicatorStatus.
type KindBasicReplicatorStatus = original.KindBasicReplicatorStatus

// KindBasicReplicatorStatus values, forwarded unchanged from the wrapped package.
const (
	KindActiveSecondary KindBasicReplicatorStatus = original.KindActiveSecondary
	KindIdleSecondary KindBasicReplicatorStatus = original.KindIdleSecondary
	KindPrimary KindBasicReplicatorStatus = original.KindPrimary
	KindReplicatorStatus KindBasicReplicatorStatus = original.KindReplicatorStatus
	KindSecondaryReplicatorStatus KindBasicReplicatorStatus = original.KindSecondaryReplicatorStatus
)
// KindBasicSafetyCheck is an alias of original.KindBasicSafetyCheck.
type KindBasicSafetyCheck = original.KindBasicSafetyCheck

// KindBasicSafetyCheck values, forwarded unchanged from the wrapped package.
const (
	KindEnsureAvailability KindBasicSafetyCheck = original.KindEnsureAvailability
	KindEnsurePartitionQuorum KindBasicSafetyCheck = original.KindEnsurePartitionQuorum
	KindEnsureSeedNodeQuorum KindBasicSafetyCheck = original.KindEnsureSeedNodeQuorum
	KindPartitionSafetyCheck KindBasicSafetyCheck = original.KindPartitionSafetyCheck
	KindSafetyCheck KindBasicSafetyCheck = original.KindSafetyCheck
	KindWaitForInbuildReplica KindBasicSafetyCheck = original.KindWaitForInbuildReplica
	KindWaitForPrimaryPlacement KindBasicSafetyCheck = original.KindWaitForPrimaryPlacement
	KindWaitForPrimarySwap KindBasicSafetyCheck = original.KindWaitForPrimarySwap
	KindWaitForReconfiguration KindBasicSafetyCheck = original.KindWaitForReconfiguration
)
// KindBasicScalingMechanismDescription is an alias of original.KindBasicScalingMechanismDescription.
type KindBasicScalingMechanismDescription = original.KindBasicScalingMechanismDescription

// KindBasicScalingMechanismDescription values, forwarded unchanged from the wrapped package.
const (
	KindAddRemoveIncrementalNamedPartition KindBasicScalingMechanismDescription = original.KindAddRemoveIncrementalNamedPartition
	KindPartitionInstanceCount KindBasicScalingMechanismDescription = original.KindPartitionInstanceCount
	KindScalingMechanismDescription KindBasicScalingMechanismDescription = original.KindScalingMechanismDescription
)
// KindBasicScalingTriggerDescription is an alias of original.KindBasicScalingTriggerDescription.
type KindBasicScalingTriggerDescription = original.KindBasicScalingTriggerDescription

// KindBasicScalingTriggerDescription values, forwarded unchanged from the wrapped package.
const (
	KindAveragePartitionLoad KindBasicScalingTriggerDescription = original.KindAveragePartitionLoad
	KindAverageServiceLoad KindBasicScalingTriggerDescription = original.KindAverageServiceLoad
	KindScalingTriggerDescription KindBasicScalingTriggerDescription = original.KindScalingTriggerDescription
)
// KindBasicSecretResourcePropertiesBase is an alias of original.KindBasicSecretResourcePropertiesBase.
type KindBasicSecretResourcePropertiesBase = original.KindBasicSecretResourcePropertiesBase

// KindBasicSecretResourcePropertiesBase values, forwarded unchanged from the wrapped package.
const (
	KindInlinedValue KindBasicSecretResourcePropertiesBase = original.KindInlinedValue
	KindSecretResourceProperties KindBasicSecretResourcePropertiesBase = original.KindSecretResourceProperties
	KindSecretResourcePropertiesBase KindBasicSecretResourcePropertiesBase = original.KindSecretResourcePropertiesBase
)
// KindBasicServiceTypeDescription is an alias of original.KindBasicServiceTypeDescription.
// Its constant values follow past this point in the file.
type KindBasicServiceTypeDescription = original.KindBasicServiceTypeDescription
const (
KindServiceTypeDescription KindBasicServiceTypeDescription = original.KindServiceTypeDescription
KindStateful KindBasicServiceTypeDescription = original.KindStateful
KindStateless KindBasicServiceTypeDescription = original.KindStateless
)
// Generated aliases for cluster/node enums (move cost, network kind,
// node deactivation/status/transition/upgrade, OS type). Each type and
// its constants simply forward to the versioned `original` package.
type MoveCost = original.MoveCost
const (
	High MoveCost = original.High
	Low MoveCost = original.Low
	Medium MoveCost = original.Medium
	Zero MoveCost = original.Zero
)
type NetworkKind = original.NetworkKind
const (
	Local NetworkKind = original.Local
)
type NodeDeactivationIntent = original.NodeDeactivationIntent
const (
	NodeDeactivationIntentInvalid NodeDeactivationIntent = original.NodeDeactivationIntentInvalid
	NodeDeactivationIntentPause NodeDeactivationIntent = original.NodeDeactivationIntentPause
	NodeDeactivationIntentRemoveData NodeDeactivationIntent = original.NodeDeactivationIntentRemoveData
	NodeDeactivationIntentRemoveNode NodeDeactivationIntent = original.NodeDeactivationIntentRemoveNode
	NodeDeactivationIntentRestart NodeDeactivationIntent = original.NodeDeactivationIntentRestart
)
type NodeDeactivationStatus = original.NodeDeactivationStatus
const (
	NodeDeactivationStatusCompleted NodeDeactivationStatus = original.NodeDeactivationStatusCompleted
	NodeDeactivationStatusNone NodeDeactivationStatus = original.NodeDeactivationStatusNone
	NodeDeactivationStatusSafetyCheckComplete NodeDeactivationStatus = original.NodeDeactivationStatusSafetyCheckComplete
	NodeDeactivationStatusSafetyCheckInProgress NodeDeactivationStatus = original.NodeDeactivationStatusSafetyCheckInProgress
)
type NodeDeactivationTaskType = original.NodeDeactivationTaskType
const (
	NodeDeactivationTaskTypeClient NodeDeactivationTaskType = original.NodeDeactivationTaskTypeClient
	NodeDeactivationTaskTypeInfrastructure NodeDeactivationTaskType = original.NodeDeactivationTaskTypeInfrastructure
	NodeDeactivationTaskTypeInvalid NodeDeactivationTaskType = original.NodeDeactivationTaskTypeInvalid
	NodeDeactivationTaskTypeRepair NodeDeactivationTaskType = original.NodeDeactivationTaskTypeRepair
)
type NodeStatus = original.NodeStatus
const (
	NodeStatusDisabled NodeStatus = original.NodeStatusDisabled
	NodeStatusDisabling NodeStatus = original.NodeStatusDisabling
	NodeStatusDown NodeStatus = original.NodeStatusDown
	NodeStatusEnabling NodeStatus = original.NodeStatusEnabling
	NodeStatusInvalid NodeStatus = original.NodeStatusInvalid
	NodeStatusRemoved NodeStatus = original.NodeStatusRemoved
	NodeStatusUnknown NodeStatus = original.NodeStatusUnknown
	NodeStatusUp NodeStatus = original.NodeStatusUp
)
// NodeStatusFilter values use short unprefixed names (All, Down, Up, ...)
// exactly as generated; they are distinct from the NodeStatus* constants.
type NodeStatusFilter = original.NodeStatusFilter
const (
	All NodeStatusFilter = original.All
	Default NodeStatusFilter = original.Default
	Disabled NodeStatusFilter = original.Disabled
	Disabling NodeStatusFilter = original.Disabling
	Down NodeStatusFilter = original.Down
	Enabling NodeStatusFilter = original.Enabling
	Removed NodeStatusFilter = original.Removed
	Unknown NodeStatusFilter = original.Unknown
	Up NodeStatusFilter = original.Up
)
type NodeTransitionType = original.NodeTransitionType
const (
	NodeTransitionTypeInvalid NodeTransitionType = original.NodeTransitionTypeInvalid
	NodeTransitionTypeStart NodeTransitionType = original.NodeTransitionTypeStart
	NodeTransitionTypeStop NodeTransitionType = original.NodeTransitionTypeStop
)
type NodeUpgradePhase = original.NodeUpgradePhase
const (
	NodeUpgradePhaseInvalid NodeUpgradePhase = original.NodeUpgradePhaseInvalid
	NodeUpgradePhasePostUpgradeSafetyCheck NodeUpgradePhase = original.NodeUpgradePhasePostUpgradeSafetyCheck
	NodeUpgradePhasePreUpgradeSafetyCheck NodeUpgradePhase = original.NodeUpgradePhasePreUpgradeSafetyCheck
	NodeUpgradePhaseUpgrading NodeUpgradePhase = original.NodeUpgradePhaseUpgrading
)
type OperatingSystemType = original.OperatingSystemType
const (
	Linux OperatingSystemType = original.Linux
	Windows OperatingSystemType = original.Windows
)
// Generated aliases for fault/chaos operation enums and the property
// store (naming service) enums; all values forward to `original`.
type OperationState = original.OperationState
const (
	OperationStateCancelled OperationState = original.OperationStateCancelled
	OperationStateCompleted OperationState = original.OperationStateCompleted
	OperationStateFaulted OperationState = original.OperationStateFaulted
	OperationStateForceCancelled OperationState = original.OperationStateForceCancelled
	OperationStateInvalid OperationState = original.OperationStateInvalid
	OperationStateRollingBack OperationState = original.OperationStateRollingBack
	OperationStateRunning OperationState = original.OperationStateRunning
)
type OperationType = original.OperationType
const (
	OperationTypeInvalid OperationType = original.OperationTypeInvalid
	OperationTypeNodeTransition OperationType = original.OperationTypeNodeTransition
	OperationTypePartitionDataLoss OperationType = original.OperationTypePartitionDataLoss
	OperationTypePartitionQuorumLoss OperationType = original.OperationTypePartitionQuorumLoss
	OperationTypePartitionRestart OperationType = original.OperationTypePartitionRestart
)
type PackageSharingPolicyScope = original.PackageSharingPolicyScope
const (
	PackageSharingPolicyScopeAll PackageSharingPolicyScope = original.PackageSharingPolicyScopeAll
	PackageSharingPolicyScopeCode PackageSharingPolicyScope = original.PackageSharingPolicyScopeCode
	PackageSharingPolicyScopeConfig PackageSharingPolicyScope = original.PackageSharingPolicyScopeConfig
	PackageSharingPolicyScopeData PackageSharingPolicyScope = original.PackageSharingPolicyScopeData
	PackageSharingPolicyScopeNone PackageSharingPolicyScope = original.PackageSharingPolicyScopeNone
)
type PartitionAccessStatus = original.PartitionAccessStatus
const (
	PartitionAccessStatusGranted PartitionAccessStatus = original.PartitionAccessStatusGranted
	PartitionAccessStatusInvalid PartitionAccessStatus = original.PartitionAccessStatusInvalid
	PartitionAccessStatusNotPrimary PartitionAccessStatus = original.PartitionAccessStatusNotPrimary
	PartitionAccessStatusNoWriteQuorum PartitionAccessStatus = original.PartitionAccessStatusNoWriteQuorum
	PartitionAccessStatusReconfigurationPending PartitionAccessStatus = original.PartitionAccessStatusReconfigurationPending
)
type PartitionScheme = original.PartitionScheme
const (
	PartitionSchemeInvalid PartitionScheme = original.PartitionSchemeInvalid
	PartitionSchemeNamed PartitionScheme = original.PartitionSchemeNamed
	PartitionSchemeSingleton PartitionScheme = original.PartitionSchemeSingleton
	PartitionSchemeUniformInt64Range PartitionScheme = original.PartitionSchemeUniformInt64Range
)
// The "1"-suffixed constants below are the generator's disambiguation for
// the discriminator variant of PartitionScheme used on scheme descriptions.
type PartitionSchemeBasicPartitionSchemeDescription = original.PartitionSchemeBasicPartitionSchemeDescription
const (
	PartitionSchemeNamed1 PartitionSchemeBasicPartitionSchemeDescription = original.PartitionSchemeNamed1
	PartitionSchemePartitionSchemeDescription PartitionSchemeBasicPartitionSchemeDescription = original.PartitionSchemePartitionSchemeDescription
	PartitionSchemeSingleton1 PartitionSchemeBasicPartitionSchemeDescription = original.PartitionSchemeSingleton1
	PartitionSchemeUniformInt64Range1 PartitionSchemeBasicPartitionSchemeDescription = original.PartitionSchemeUniformInt64Range1
)
type PropertyBatchInfoKind = original.PropertyBatchInfoKind
const (
	PropertyBatchInfoKindFailed PropertyBatchInfoKind = original.PropertyBatchInfoKindFailed
	PropertyBatchInfoKindInvalid PropertyBatchInfoKind = original.PropertyBatchInfoKindInvalid
	PropertyBatchInfoKindSuccessful PropertyBatchInfoKind = original.PropertyBatchInfoKindSuccessful
)
type PropertyBatchOperationKind = original.PropertyBatchOperationKind
const (
	PropertyBatchOperationKindCheckExists PropertyBatchOperationKind = original.PropertyBatchOperationKindCheckExists
	PropertyBatchOperationKindCheckSequence PropertyBatchOperationKind = original.PropertyBatchOperationKindCheckSequence
	PropertyBatchOperationKindCheckValue PropertyBatchOperationKind = original.PropertyBatchOperationKindCheckValue
	PropertyBatchOperationKindDelete PropertyBatchOperationKind = original.PropertyBatchOperationKindDelete
	PropertyBatchOperationKindGet PropertyBatchOperationKind = original.PropertyBatchOperationKindGet
	PropertyBatchOperationKindInvalid PropertyBatchOperationKind = original.PropertyBatchOperationKindInvalid
	PropertyBatchOperationKindPut PropertyBatchOperationKind = original.PropertyBatchOperationKindPut
)
type PropertyValueKind = original.PropertyValueKind
const (
	PropertyValueKindBinary PropertyValueKind = original.PropertyValueKindBinary
	PropertyValueKindDouble PropertyValueKind = original.PropertyValueKindDouble
	PropertyValueKindGUID PropertyValueKind = original.PropertyValueKindGUID
	PropertyValueKindInt64 PropertyValueKind = original.PropertyValueKindInt64
	PropertyValueKindInvalid PropertyValueKind = original.PropertyValueKindInvalid
	PropertyValueKindString PropertyValueKind = original.PropertyValueKindString
)
// Generated aliases for provisioning, reconfiguration, repair, and
// replica enums; all values forward to the versioned `original` package.
type ProvisionApplicationTypeKind = original.ProvisionApplicationTypeKind
const (
	ProvisionApplicationTypeKindExternalStore ProvisionApplicationTypeKind = original.ProvisionApplicationTypeKindExternalStore
	ProvisionApplicationTypeKindImageStorePath ProvisionApplicationTypeKind = original.ProvisionApplicationTypeKindImageStorePath
	ProvisionApplicationTypeKindInvalid ProvisionApplicationTypeKind = original.ProvisionApplicationTypeKindInvalid
)
type QuorumLossMode = original.QuorumLossMode
const (
	QuorumLossModeAllReplicas QuorumLossMode = original.QuorumLossModeAllReplicas
	QuorumLossModeInvalid QuorumLossMode = original.QuorumLossModeInvalid
	QuorumLossModeQuorumReplicas QuorumLossMode = original.QuorumLossModeQuorumReplicas
)
type ReconfigurationPhase = original.ReconfigurationPhase
const (
	ReconfigurationPhaseAbortPhaseZero ReconfigurationPhase = original.ReconfigurationPhaseAbortPhaseZero
	ReconfigurationPhaseNone ReconfigurationPhase = original.ReconfigurationPhaseNone
	ReconfigurationPhasePhase0 ReconfigurationPhase = original.ReconfigurationPhasePhase0
	ReconfigurationPhasePhase1 ReconfigurationPhase = original.ReconfigurationPhasePhase1
	ReconfigurationPhasePhase2 ReconfigurationPhase = original.ReconfigurationPhasePhase2
	ReconfigurationPhasePhase3 ReconfigurationPhase = original.ReconfigurationPhasePhase3
	ReconfigurationPhasePhase4 ReconfigurationPhase = original.ReconfigurationPhasePhase4
	ReconfigurationPhaseUnknown ReconfigurationPhase = original.ReconfigurationPhaseUnknown
)
type ReconfigurationType = original.ReconfigurationType
const (
	ReconfigurationTypeFailover ReconfigurationType = original.ReconfigurationTypeFailover
	ReconfigurationTypeOther ReconfigurationType = original.ReconfigurationTypeOther
	ReconfigurationTypeSwapPrimary ReconfigurationType = original.ReconfigurationTypeSwapPrimary
	ReconfigurationTypeUnknown ReconfigurationType = original.ReconfigurationTypeUnknown
)
type RepairImpactKind = original.RepairImpactKind
const (
	RepairImpactKindInvalid RepairImpactKind = original.RepairImpactKindInvalid
	RepairImpactKindNode RepairImpactKind = original.RepairImpactKindNode
)
type RepairTargetKind = original.RepairTargetKind
const (
	RepairTargetKindInvalid RepairTargetKind = original.RepairTargetKindInvalid
	RepairTargetKindNode RepairTargetKind = original.RepairTargetKindNode
)
type RepairTaskHealthCheckState = original.RepairTaskHealthCheckState
const (
	InProgress RepairTaskHealthCheckState = original.InProgress
	NotStarted RepairTaskHealthCheckState = original.NotStarted
	Skipped RepairTaskHealthCheckState = original.Skipped
	Succeeded RepairTaskHealthCheckState = original.Succeeded
	TimedOut RepairTaskHealthCheckState = original.TimedOut
)
type ReplicaHealthReportServiceKind = original.ReplicaHealthReportServiceKind
const (
	Stateful ReplicaHealthReportServiceKind = original.Stateful
	Stateless ReplicaHealthReportServiceKind = original.Stateless
)
type ReplicaKind = original.ReplicaKind
const (
	ReplicaKindInvalid ReplicaKind = original.ReplicaKindInvalid
	ReplicaKindKeyValueStore ReplicaKind = original.ReplicaKindKeyValueStore
)
type ReplicaRole = original.ReplicaRole
const (
	ReplicaRoleActiveSecondary ReplicaRole = original.ReplicaRoleActiveSecondary
	ReplicaRoleIdleSecondary ReplicaRole = original.ReplicaRoleIdleSecondary
	ReplicaRoleNone ReplicaRole = original.ReplicaRoleNone
	ReplicaRolePrimary ReplicaRole = original.ReplicaRolePrimary
	ReplicaRoleUnknown ReplicaRole = original.ReplicaRoleUnknown
)
type ReplicaStatus = original.ReplicaStatus
const (
	ReplicaStatusDown ReplicaStatus = original.ReplicaStatusDown
	ReplicaStatusDropped ReplicaStatus = original.ReplicaStatusDropped
	ReplicaStatusInBuild ReplicaStatus = original.ReplicaStatusInBuild
	ReplicaStatusInvalid ReplicaStatus = original.ReplicaStatusInvalid
	ReplicaStatusReady ReplicaStatus = original.ReplicaStatusReady
	ReplicaStatusStandby ReplicaStatus = original.ReplicaStatusStandby
)
// Generated aliases for replicator, resource, restore/backup, safety
// check, scaling, schedule, and secret enums; values forward to `original`.
type ReplicatorOperationName = original.ReplicatorOperationName
const (
	ReplicatorOperationNameAbort ReplicatorOperationName = original.ReplicatorOperationNameAbort
	ReplicatorOperationNameBuild ReplicatorOperationName = original.ReplicatorOperationNameBuild
	ReplicatorOperationNameChangeRole ReplicatorOperationName = original.ReplicatorOperationNameChangeRole
	ReplicatorOperationNameClose ReplicatorOperationName = original.ReplicatorOperationNameClose
	ReplicatorOperationNameInvalid ReplicatorOperationName = original.ReplicatorOperationNameInvalid
	ReplicatorOperationNameNone ReplicatorOperationName = original.ReplicatorOperationNameNone
	ReplicatorOperationNameOnDataLoss ReplicatorOperationName = original.ReplicatorOperationNameOnDataLoss
	ReplicatorOperationNameOpen ReplicatorOperationName = original.ReplicatorOperationNameOpen
	ReplicatorOperationNameUpdateEpoch ReplicatorOperationName = original.ReplicatorOperationNameUpdateEpoch
	ReplicatorOperationNameWaitForCatchup ReplicatorOperationName = original.ReplicatorOperationNameWaitForCatchup
)
type ResourceStatus = original.ResourceStatus
const (
	ResourceStatusCreating ResourceStatus = original.ResourceStatusCreating
	ResourceStatusDeleting ResourceStatus = original.ResourceStatusDeleting
	ResourceStatusFailed ResourceStatus = original.ResourceStatusFailed
	ResourceStatusReady ResourceStatus = original.ResourceStatusReady
	ResourceStatusUnknown ResourceStatus = original.ResourceStatusUnknown
	ResourceStatusUpgrading ResourceStatus = original.ResourceStatusUpgrading
)
type RestartPartitionMode = original.RestartPartitionMode
const (
	RestartPartitionModeAllReplicasOrInstances RestartPartitionMode = original.RestartPartitionModeAllReplicasOrInstances
	RestartPartitionModeInvalid RestartPartitionMode = original.RestartPartitionModeInvalid
	RestartPartitionModeOnlyActiveSecondaries RestartPartitionMode = original.RestartPartitionModeOnlyActiveSecondaries
)
type RestoreState = original.RestoreState
const (
	RestoreStateAccepted RestoreState = original.RestoreStateAccepted
	RestoreStateFailure RestoreState = original.RestoreStateFailure
	RestoreStateInvalid RestoreState = original.RestoreStateInvalid
	RestoreStateRestoreInProgress RestoreState = original.RestoreStateRestoreInProgress
	RestoreStateSuccess RestoreState = original.RestoreStateSuccess
	RestoreStateTimeout RestoreState = original.RestoreStateTimeout
)
type ResultStatus = original.ResultStatus
const (
	ResultStatusCancelled ResultStatus = original.ResultStatusCancelled
	ResultStatusFailed ResultStatus = original.ResultStatusFailed
	ResultStatusInterrupted ResultStatus = original.ResultStatusInterrupted
	ResultStatusInvalid ResultStatus = original.ResultStatusInvalid
	ResultStatusPending ResultStatus = original.ResultStatusPending
	ResultStatusSucceeded ResultStatus = original.ResultStatusSucceeded
)
type RetentionPolicyType = original.RetentionPolicyType
const (
	RetentionPolicyTypeBasic RetentionPolicyType = original.RetentionPolicyTypeBasic
	RetentionPolicyTypeInvalid RetentionPolicyType = original.RetentionPolicyTypeInvalid
)
// "Basic1" is the generator's disambiguated name for the discriminator
// variant of the retention-policy type.
type RetentionPolicyTypeBasicBasicRetentionPolicyDescription = original.RetentionPolicyTypeBasicBasicRetentionPolicyDescription
const (
	RetentionPolicyTypeBasic1 RetentionPolicyTypeBasicBasicRetentionPolicyDescription = original.RetentionPolicyTypeBasic1
	RetentionPolicyTypeRetentionPolicyDescription RetentionPolicyTypeBasicBasicRetentionPolicyDescription = original.RetentionPolicyTypeRetentionPolicyDescription
)
type SafetyCheckKind = original.SafetyCheckKind
const (
	SafetyCheckKindEnsureAvailability SafetyCheckKind = original.SafetyCheckKindEnsureAvailability
	SafetyCheckKindEnsurePartitionQuorum SafetyCheckKind = original.SafetyCheckKindEnsurePartitionQuorum
	SafetyCheckKindEnsureSeedNodeQuorum SafetyCheckKind = original.SafetyCheckKindEnsureSeedNodeQuorum
	SafetyCheckKindInvalid SafetyCheckKind = original.SafetyCheckKindInvalid
	SafetyCheckKindWaitForInbuildReplica SafetyCheckKind = original.SafetyCheckKindWaitForInbuildReplica
	SafetyCheckKindWaitForPrimaryPlacement SafetyCheckKind = original.SafetyCheckKindWaitForPrimaryPlacement
	SafetyCheckKindWaitForPrimarySwap SafetyCheckKind = original.SafetyCheckKindWaitForPrimarySwap
	SafetyCheckKindWaitForReconfiguration SafetyCheckKind = original.SafetyCheckKindWaitForReconfiguration
)
type ScalingMechanismKind = original.ScalingMechanismKind
const (
	ScalingMechanismKindAddRemoveIncrementalNamedPartition ScalingMechanismKind = original.ScalingMechanismKindAddRemoveIncrementalNamedPartition
	ScalingMechanismKindInvalid ScalingMechanismKind = original.ScalingMechanismKindInvalid
	ScalingMechanismKindPartitionInstanceCount ScalingMechanismKind = original.ScalingMechanismKindPartitionInstanceCount
)
type ScalingTriggerKind = original.ScalingTriggerKind
const (
	ScalingTriggerKindAveragePartitionLoad ScalingTriggerKind = original.ScalingTriggerKindAveragePartitionLoad
	ScalingTriggerKindAverageServiceLoad ScalingTriggerKind = original.ScalingTriggerKindAverageServiceLoad
	ScalingTriggerKindInvalid ScalingTriggerKind = original.ScalingTriggerKindInvalid
)
type ScheduleKind = original.ScheduleKind
const (
	ScheduleKindBackupScheduleDescription ScheduleKind = original.ScheduleKindBackupScheduleDescription
	ScheduleKindFrequencyBased ScheduleKind = original.ScheduleKindFrequencyBased
	ScheduleKindTimeBased ScheduleKind = original.ScheduleKindTimeBased
)
type SecretKind = original.SecretKind
const (
	InlinedValue SecretKind = original.InlinedValue
)
// Generated aliases for service enums. The ServiceKindBasic* family
// re-exports the same Stateful/Stateless discriminator specialized per
// model type; constant names carry the enclosing type as a prefix (or a
// "1" suffix) to keep them unique at package scope.
type ServiceCorrelationScheme = original.ServiceCorrelationScheme
const (
	ServiceCorrelationSchemeAffinity ServiceCorrelationScheme = original.ServiceCorrelationSchemeAffinity
	ServiceCorrelationSchemeAlignedAffinity ServiceCorrelationScheme = original.ServiceCorrelationSchemeAlignedAffinity
	ServiceCorrelationSchemeInvalid ServiceCorrelationScheme = original.ServiceCorrelationSchemeInvalid
	ServiceCorrelationSchemeNonAlignedAffinity ServiceCorrelationScheme = original.ServiceCorrelationSchemeNonAlignedAffinity
)
type ServiceEndpointRole = original.ServiceEndpointRole
const (
	ServiceEndpointRoleInvalid ServiceEndpointRole = original.ServiceEndpointRoleInvalid
	ServiceEndpointRoleStatefulPrimary ServiceEndpointRole = original.ServiceEndpointRoleStatefulPrimary
	ServiceEndpointRoleStatefulSecondary ServiceEndpointRole = original.ServiceEndpointRoleStatefulSecondary
	ServiceEndpointRoleStateless ServiceEndpointRole = original.ServiceEndpointRoleStateless
)
type ServiceKind = original.ServiceKind
const (
	ServiceKindInvalid ServiceKind = original.ServiceKindInvalid
	ServiceKindStateful ServiceKind = original.ServiceKindStateful
	ServiceKindStateless ServiceKind = original.ServiceKindStateless
)
type ServiceKindBasicDeployedServiceReplicaDetailInfo = original.ServiceKindBasicDeployedServiceReplicaDetailInfo
const (
	ServiceKindDeployedServiceReplicaDetailInfo ServiceKindBasicDeployedServiceReplicaDetailInfo = original.ServiceKindDeployedServiceReplicaDetailInfo
	ServiceKindStateful1 ServiceKindBasicDeployedServiceReplicaDetailInfo = original.ServiceKindStateful1
	ServiceKindStateless1 ServiceKindBasicDeployedServiceReplicaDetailInfo = original.ServiceKindStateless1
)
type ServiceKindBasicDeployedServiceReplicaInfo = original.ServiceKindBasicDeployedServiceReplicaInfo
const (
	ServiceKindBasicDeployedServiceReplicaInfoServiceKindDeployedServiceReplicaInfo ServiceKindBasicDeployedServiceReplicaInfo = original.ServiceKindBasicDeployedServiceReplicaInfoServiceKindDeployedServiceReplicaInfo
	ServiceKindBasicDeployedServiceReplicaInfoServiceKindStateful ServiceKindBasicDeployedServiceReplicaInfo = original.ServiceKindBasicDeployedServiceReplicaInfoServiceKindStateful
	ServiceKindBasicDeployedServiceReplicaInfoServiceKindStateless ServiceKindBasicDeployedServiceReplicaInfo = original.ServiceKindBasicDeployedServiceReplicaInfoServiceKindStateless
)
type ServiceKindBasicReplicaHealth = original.ServiceKindBasicReplicaHealth
const (
	ServiceKindBasicReplicaHealthServiceKindReplicaHealth ServiceKindBasicReplicaHealth = original.ServiceKindBasicReplicaHealthServiceKindReplicaHealth
	ServiceKindBasicReplicaHealthServiceKindStateful ServiceKindBasicReplicaHealth = original.ServiceKindBasicReplicaHealthServiceKindStateful
	ServiceKindBasicReplicaHealthServiceKindStateless ServiceKindBasicReplicaHealth = original.ServiceKindBasicReplicaHealthServiceKindStateless
)
type ServiceKindBasicReplicaHealthState = original.ServiceKindBasicReplicaHealthState
const (
	ServiceKindBasicReplicaHealthStateServiceKindReplicaHealthState ServiceKindBasicReplicaHealthState = original.ServiceKindBasicReplicaHealthStateServiceKindReplicaHealthState
	ServiceKindBasicReplicaHealthStateServiceKindStateful ServiceKindBasicReplicaHealthState = original.ServiceKindBasicReplicaHealthStateServiceKindStateful
	ServiceKindBasicReplicaHealthStateServiceKindStateless ServiceKindBasicReplicaHealthState = original.ServiceKindBasicReplicaHealthStateServiceKindStateless
)
type ServiceKindBasicReplicaInfo = original.ServiceKindBasicReplicaInfo
const (
	ServiceKindBasicReplicaInfoServiceKindReplicaInfo ServiceKindBasicReplicaInfo = original.ServiceKindBasicReplicaInfoServiceKindReplicaInfo
	ServiceKindBasicReplicaInfoServiceKindStateful ServiceKindBasicReplicaInfo = original.ServiceKindBasicReplicaInfoServiceKindStateful
	ServiceKindBasicReplicaInfoServiceKindStateless ServiceKindBasicReplicaInfo = original.ServiceKindBasicReplicaInfoServiceKindStateless
)
type ServiceKindBasicServiceDescription = original.ServiceKindBasicServiceDescription
const (
	ServiceKindBasicServiceDescriptionServiceKindServiceDescription ServiceKindBasicServiceDescription = original.ServiceKindBasicServiceDescriptionServiceKindServiceDescription
	ServiceKindBasicServiceDescriptionServiceKindStateful ServiceKindBasicServiceDescription = original.ServiceKindBasicServiceDescriptionServiceKindStateful
	ServiceKindBasicServiceDescriptionServiceKindStateless ServiceKindBasicServiceDescription = original.ServiceKindBasicServiceDescriptionServiceKindStateless
)
type ServiceKindBasicServiceInfo = original.ServiceKindBasicServiceInfo
const (
	ServiceKindBasicServiceInfoServiceKindServiceInfo ServiceKindBasicServiceInfo = original.ServiceKindBasicServiceInfoServiceKindServiceInfo
	ServiceKindBasicServiceInfoServiceKindStateful ServiceKindBasicServiceInfo = original.ServiceKindBasicServiceInfoServiceKindStateful
	ServiceKindBasicServiceInfoServiceKindStateless ServiceKindBasicServiceInfo = original.ServiceKindBasicServiceInfoServiceKindStateless
)
type ServiceKindBasicServicePartitionInfo = original.ServiceKindBasicServicePartitionInfo
const (
	ServiceKindBasicServicePartitionInfoServiceKindServicePartitionInfo ServiceKindBasicServicePartitionInfo = original.ServiceKindBasicServicePartitionInfoServiceKindServicePartitionInfo
	ServiceKindBasicServicePartitionInfoServiceKindStateful ServiceKindBasicServicePartitionInfo = original.ServiceKindBasicServicePartitionInfoServiceKindStateful
	ServiceKindBasicServicePartitionInfoServiceKindStateless ServiceKindBasicServicePartitionInfo = original.ServiceKindBasicServicePartitionInfoServiceKindStateless
)
type ServiceKindBasicServiceUpdateDescription = original.ServiceKindBasicServiceUpdateDescription
const (
	ServiceKindBasicServiceUpdateDescriptionServiceKindServiceUpdateDescription ServiceKindBasicServiceUpdateDescription = original.ServiceKindBasicServiceUpdateDescriptionServiceKindServiceUpdateDescription
	ServiceKindBasicServiceUpdateDescriptionServiceKindStateful ServiceKindBasicServiceUpdateDescription = original.ServiceKindBasicServiceUpdateDescriptionServiceKindStateful
	ServiceKindBasicServiceUpdateDescriptionServiceKindStateless ServiceKindBasicServiceUpdateDescription = original.ServiceKindBasicServiceUpdateDescriptionServiceKindStateless
)
// Generated aliases for service placement/partition/status enums and the
// repair-task State enum; all values forward to `original`.
type ServiceLoadMetricWeight = original.ServiceLoadMetricWeight
const (
	ServiceLoadMetricWeightHigh ServiceLoadMetricWeight = original.ServiceLoadMetricWeightHigh
	ServiceLoadMetricWeightLow ServiceLoadMetricWeight = original.ServiceLoadMetricWeightLow
	ServiceLoadMetricWeightMedium ServiceLoadMetricWeight = original.ServiceLoadMetricWeightMedium
	ServiceLoadMetricWeightZero ServiceLoadMetricWeight = original.ServiceLoadMetricWeightZero
)
type ServiceOperationName = original.ServiceOperationName
const (
	ServiceOperationNameAbort ServiceOperationName = original.ServiceOperationNameAbort
	ServiceOperationNameChangeRole ServiceOperationName = original.ServiceOperationNameChangeRole
	ServiceOperationNameClose ServiceOperationName = original.ServiceOperationNameClose
	ServiceOperationNameNone ServiceOperationName = original.ServiceOperationNameNone
	ServiceOperationNameOpen ServiceOperationName = original.ServiceOperationNameOpen
	ServiceOperationNameUnknown ServiceOperationName = original.ServiceOperationNameUnknown
)
type ServicePackageActivationMode = original.ServicePackageActivationMode
const (
	ExclusiveProcess ServicePackageActivationMode = original.ExclusiveProcess
	SharedProcess ServicePackageActivationMode = original.SharedProcess
)
type ServicePartitionKind = original.ServicePartitionKind
const (
	ServicePartitionKindInt64Range ServicePartitionKind = original.ServicePartitionKindInt64Range
	ServicePartitionKindInvalid ServicePartitionKind = original.ServicePartitionKindInvalid
	ServicePartitionKindNamed ServicePartitionKind = original.ServicePartitionKindNamed
	ServicePartitionKindSingleton ServicePartitionKind = original.ServicePartitionKindSingleton
)
// "1"-suffixed constants: generator disambiguation for the discriminator
// variant used on PartitionInformation.
type ServicePartitionKindBasicPartitionInformation = original.ServicePartitionKindBasicPartitionInformation
const (
	ServicePartitionKindInt64Range1 ServicePartitionKindBasicPartitionInformation = original.ServicePartitionKindInt64Range1
	ServicePartitionKindNamed1 ServicePartitionKindBasicPartitionInformation = original.ServicePartitionKindNamed1
	ServicePartitionKindPartitionInformation ServicePartitionKindBasicPartitionInformation = original.ServicePartitionKindPartitionInformation
	ServicePartitionKindSingleton1 ServicePartitionKindBasicPartitionInformation = original.ServicePartitionKindSingleton1
)
type ServicePartitionStatus = original.ServicePartitionStatus
const (
	ServicePartitionStatusDeleting ServicePartitionStatus = original.ServicePartitionStatusDeleting
	ServicePartitionStatusInQuorumLoss ServicePartitionStatus = original.ServicePartitionStatusInQuorumLoss
	ServicePartitionStatusInvalid ServicePartitionStatus = original.ServicePartitionStatusInvalid
	ServicePartitionStatusNotReady ServicePartitionStatus = original.ServicePartitionStatusNotReady
	ServicePartitionStatusReady ServicePartitionStatus = original.ServicePartitionStatusReady
	ServicePartitionStatusReconfiguring ServicePartitionStatus = original.ServicePartitionStatusReconfiguring
)
type ServicePlacementPolicyType = original.ServicePlacementPolicyType
const (
	ServicePlacementPolicyTypeInvalid ServicePlacementPolicyType = original.ServicePlacementPolicyTypeInvalid
	ServicePlacementPolicyTypeInvalidDomain ServicePlacementPolicyType = original.ServicePlacementPolicyTypeInvalidDomain
	ServicePlacementPolicyTypeNonPartiallyPlaceService ServicePlacementPolicyType = original.ServicePlacementPolicyTypeNonPartiallyPlaceService
	ServicePlacementPolicyTypePreferPrimaryDomain ServicePlacementPolicyType = original.ServicePlacementPolicyTypePreferPrimaryDomain
	ServicePlacementPolicyTypeRequireDomain ServicePlacementPolicyType = original.ServicePlacementPolicyTypeRequireDomain
	ServicePlacementPolicyTypeRequireDomainDistribution ServicePlacementPolicyType = original.ServicePlacementPolicyTypeRequireDomainDistribution
)
type ServiceStatus = original.ServiceStatus
const (
	ServiceStatusActive ServiceStatus = original.ServiceStatusActive
	ServiceStatusCreating ServiceStatus = original.ServiceStatusCreating
	ServiceStatusDeleting ServiceStatus = original.ServiceStatusDeleting
	ServiceStatusFailed ServiceStatus = original.ServiceStatusFailed
	ServiceStatusUnknown ServiceStatus = original.ServiceStatusUnknown
	ServiceStatusUpgrading ServiceStatus = original.ServiceStatusUpgrading
)
type ServiceTypeRegistrationStatus = original.ServiceTypeRegistrationStatus
const (
	ServiceTypeRegistrationStatusDisabled ServiceTypeRegistrationStatus = original.ServiceTypeRegistrationStatusDisabled
	ServiceTypeRegistrationStatusEnabled ServiceTypeRegistrationStatus = original.ServiceTypeRegistrationStatusEnabled
	ServiceTypeRegistrationStatusInvalid ServiceTypeRegistrationStatus = original.ServiceTypeRegistrationStatusInvalid
	ServiceTypeRegistrationStatusRegistered ServiceTypeRegistrationStatus = original.ServiceTypeRegistrationStatusRegistered
)
type SizeTypes = original.SizeTypes
const (
	SizeTypesLarge SizeTypes = original.SizeTypesLarge
	SizeTypesMedium SizeTypes = original.SizeTypesMedium
	SizeTypesSmall SizeTypes = original.SizeTypesSmall
)
type State = original.State
const (
	StateApproved State = original.StateApproved
	StateClaimed State = original.StateClaimed
	StateCompleted State = original.StateCompleted
	StateCreated State = original.StateCreated
	StateExecuting State = original.StateExecuting
	StateInvalid State = original.StateInvalid
	StatePreparing State = original.StatePreparing
	StateRestoring State = original.StateRestoring
)
type StorageKind = original.StorageKind
const (
StorageKindAzureBlobStore StorageKind = original.StorageKindAzureBlobStore
StorageKindBackupStorageDescription StorageKind = original.StorageKindBackupStorageDescription
StorageKindFileShare StorageKind = original.StorageKindFileShare
)
// Type aliases the enum type from the versioned servicefabric package.
type Type = original.Type

// Re-exported Type values.
const TypeInvalidDomain Type = original.TypeInvalidDomain
const TypeNonPartiallyPlaceService Type = original.TypeNonPartiallyPlaceService
const TypePreferPrimaryDomain Type = original.TypePreferPrimaryDomain
const TypeRequireDomain Type = original.TypeRequireDomain
const TypeRequireDomainDistribution Type = original.TypeRequireDomainDistribution
const TypeServicePlacementPolicyDescription Type = original.TypeServicePlacementPolicyDescription
// UpgradeDomainState aliases the enum type from the versioned servicefabric package.
type UpgradeDomainState = original.UpgradeDomainState

// Re-exported UpgradeDomainState values.
const UpgradeDomainStateCompleted UpgradeDomainState = original.UpgradeDomainStateCompleted
const UpgradeDomainStateInProgress UpgradeDomainState = original.UpgradeDomainStateInProgress
const UpgradeDomainStateInvalid UpgradeDomainState = original.UpgradeDomainStateInvalid
const UpgradeDomainStatePending UpgradeDomainState = original.UpgradeDomainStatePending
// UpgradeKind aliases the enum type from the versioned servicefabric package.
type UpgradeKind = original.UpgradeKind

// Re-exported UpgradeKind values.
const UpgradeKindInvalid UpgradeKind = original.UpgradeKindInvalid
const UpgradeKindRolling UpgradeKind = original.UpgradeKindRolling
// UpgradeMode aliases the enum type from the versioned servicefabric package.
type UpgradeMode = original.UpgradeMode

// Re-exported UpgradeMode values.
const UpgradeModeInvalid UpgradeMode = original.UpgradeModeInvalid
const UpgradeModeMonitored UpgradeMode = original.UpgradeModeMonitored
const UpgradeModeUnmonitoredAuto UpgradeMode = original.UpgradeModeUnmonitoredAuto
const UpgradeModeUnmonitoredManual UpgradeMode = original.UpgradeModeUnmonitoredManual
// UpgradeSortOrder aliases the enum type from the versioned servicefabric package.
type UpgradeSortOrder = original.UpgradeSortOrder

// Re-exported UpgradeSortOrder values.
const UpgradeSortOrderDefault UpgradeSortOrder = original.UpgradeSortOrderDefault
const UpgradeSortOrderInvalid UpgradeSortOrder = original.UpgradeSortOrderInvalid
const UpgradeSortOrderLexicographical UpgradeSortOrder = original.UpgradeSortOrderLexicographical
const UpgradeSortOrderNumeric UpgradeSortOrder = original.UpgradeSortOrderNumeric
const UpgradeSortOrderReverseLexicographical UpgradeSortOrder = original.UpgradeSortOrderReverseLexicographical
const UpgradeSortOrderReverseNumeric UpgradeSortOrder = original.UpgradeSortOrderReverseNumeric
// UpgradeState aliases the enum type from the versioned servicefabric package.
type UpgradeState = original.UpgradeState

// Re-exported UpgradeState values.
const UpgradeStateFailed UpgradeState = original.UpgradeStateFailed
const UpgradeStateInvalid UpgradeState = original.UpgradeStateInvalid
const UpgradeStateRollingBackCompleted UpgradeState = original.UpgradeStateRollingBackCompleted
const UpgradeStateRollingBackInProgress UpgradeState = original.UpgradeStateRollingBackInProgress
const UpgradeStateRollingForwardCompleted UpgradeState = original.UpgradeStateRollingForwardCompleted
const UpgradeStateRollingForwardInProgress UpgradeState = original.UpgradeStateRollingForwardInProgress
const UpgradeStateRollingForwardPending UpgradeState = original.UpgradeStateRollingForwardPending
// UpgradeType aliases the enum type from the versioned servicefabric package.
type UpgradeType = original.UpgradeType

// Re-exported UpgradeType values.
const UpgradeTypeInvalid UpgradeType = original.UpgradeTypeInvalid
const UpgradeTypeRolling UpgradeType = original.UpgradeTypeRolling
const UpgradeTypeRollingForceRestart UpgradeType = original.UpgradeTypeRollingForceRestart
// VolumeProvider aliases the enum type from the versioned servicefabric package.
type VolumeProvider = original.VolumeProvider

// Re-exported VolumeProvider value.
const SFAzureFile VolumeProvider = original.SFAzureFile
// Grouped type aliases re-exporting model types (AadMetadata through
// BinaryPropertyValue) from the versioned servicefabric package.
type (
	AadMetadata = original.AadMetadata
	AadMetadataObject = original.AadMetadataObject
	AddRemoveIncrementalNamedPartitionScalingMechanism = original.AddRemoveIncrementalNamedPartitionScalingMechanism
	AddRemoveReplicaScalingMechanism = original.AddRemoveReplicaScalingMechanism
	AnalysisEventMetadata = original.AnalysisEventMetadata
	ApplicationBackupConfigurationInfo = original.ApplicationBackupConfigurationInfo
	ApplicationBackupEntity = original.ApplicationBackupEntity
	ApplicationCapacityDescription = original.ApplicationCapacityDescription
	ApplicationContainerInstanceExitedEvent = original.ApplicationContainerInstanceExitedEvent
	ApplicationCreatedEvent = original.ApplicationCreatedEvent
	ApplicationDeletedEvent = original.ApplicationDeletedEvent
	ApplicationDescription = original.ApplicationDescription
	ApplicationEvent = original.ApplicationEvent
	ApplicationHealth = original.ApplicationHealth
	ApplicationHealthEvaluation = original.ApplicationHealthEvaluation
	ApplicationHealthPolicies = original.ApplicationHealthPolicies
	ApplicationHealthPolicy = original.ApplicationHealthPolicy
	ApplicationHealthPolicyMapItem = original.ApplicationHealthPolicyMapItem
	ApplicationHealthReportExpiredEvent = original.ApplicationHealthReportExpiredEvent
	ApplicationHealthState = original.ApplicationHealthState
	ApplicationHealthStateChunk = original.ApplicationHealthStateChunk
	ApplicationHealthStateChunkList = original.ApplicationHealthStateChunkList
	ApplicationHealthStateFilter = original.ApplicationHealthStateFilter
	ApplicationInfo = original.ApplicationInfo
	ApplicationLoadInfo = original.ApplicationLoadInfo
	ApplicationMetricDescription = original.ApplicationMetricDescription
	ApplicationNameInfo = original.ApplicationNameInfo
	ApplicationNewHealthReportEvent = original.ApplicationNewHealthReportEvent
	ApplicationParameter = original.ApplicationParameter
	ApplicationProcessExitedEvent = original.ApplicationProcessExitedEvent
	ApplicationProperties = original.ApplicationProperties
	ApplicationResourceDescription = original.ApplicationResourceDescription
	ApplicationScopedVolume = original.ApplicationScopedVolume
	ApplicationScopedVolumeCreationParameters = original.ApplicationScopedVolumeCreationParameters
	ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk = original.ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk
	ApplicationTypeApplicationsHealthEvaluation = original.ApplicationTypeApplicationsHealthEvaluation
	ApplicationTypeHealthPolicyMapItem = original.ApplicationTypeHealthPolicyMapItem
	ApplicationTypeImageStorePath = original.ApplicationTypeImageStorePath
	ApplicationTypeInfo = original.ApplicationTypeInfo
	ApplicationTypeManifest = original.ApplicationTypeManifest
	ApplicationUpgradeCompletedEvent = original.ApplicationUpgradeCompletedEvent
	ApplicationUpgradeDescription = original.ApplicationUpgradeDescription
	ApplicationUpgradeDomainCompletedEvent = original.ApplicationUpgradeDomainCompletedEvent
	ApplicationUpgradeProgressInfo = original.ApplicationUpgradeProgressInfo
	ApplicationUpgradeRollbackCompletedEvent = original.ApplicationUpgradeRollbackCompletedEvent
	ApplicationUpgradeRollbackStartedEvent = original.ApplicationUpgradeRollbackStartedEvent
	ApplicationUpgradeStartedEvent = original.ApplicationUpgradeStartedEvent
	ApplicationUpgradeUpdateDescription = original.ApplicationUpgradeUpdateDescription
	ApplicationsHealthEvaluation = original.ApplicationsHealthEvaluation
	AutoScalingMechanism = original.AutoScalingMechanism
	AutoScalingMetric = original.AutoScalingMetric
	AutoScalingPolicy = original.AutoScalingPolicy
	AutoScalingResourceMetric = original.AutoScalingResourceMetric
	AutoScalingTrigger = original.AutoScalingTrigger
	AverageLoadScalingTrigger = original.AverageLoadScalingTrigger
	AveragePartitionLoadScalingTrigger = original.AveragePartitionLoadScalingTrigger
	AverageServiceLoadScalingTrigger = original.AverageServiceLoadScalingTrigger
	AzureBlobBackupStorageDescription = original.AzureBlobBackupStorageDescription
	AzureInternalMonitoringPipelineSinkDescription = original.AzureInternalMonitoringPipelineSinkDescription
	BackupConfigurationInfo = original.BackupConfigurationInfo
	BackupEntity = original.BackupEntity
	BackupInfo = original.BackupInfo
	BackupPartitionDescription = original.BackupPartitionDescription
	BackupPolicyDescription = original.BackupPolicyDescription
	BackupProgressInfo = original.BackupProgressInfo
	BackupScheduleDescription = original.BackupScheduleDescription
	BackupStorageDescription = original.BackupStorageDescription
	BackupSuspensionInfo = original.BackupSuspensionInfo
	BaseClient = original.BaseClient
	BasicApplicationEvent = original.BasicApplicationEvent
	BasicApplicationScopedVolumeCreationParameters = original.BasicApplicationScopedVolumeCreationParameters
	BasicAutoScalingMechanism = original.BasicAutoScalingMechanism
	BasicAutoScalingMetric = original.BasicAutoScalingMetric
	BasicAutoScalingTrigger = original.BasicAutoScalingTrigger
	BasicBackupConfigurationInfo = original.BasicBackupConfigurationInfo
	BasicBackupEntity = original.BasicBackupEntity
	BasicBackupScheduleDescription = original.BasicBackupScheduleDescription
	BasicBackupStorageDescription = original.BasicBackupStorageDescription
	BasicBasicRetentionPolicyDescription = original.BasicBasicRetentionPolicyDescription
	BasicChaosEvent = original.BasicChaosEvent
	BasicClusterEvent = original.BasicClusterEvent
	BasicDeployedServiceReplicaDetailInfo = original.BasicDeployedServiceReplicaDetailInfo
	BasicDeployedServiceReplicaInfo = original.BasicDeployedServiceReplicaInfo
	BasicDiagnosticsSinkProperties = original.BasicDiagnosticsSinkProperties
	BasicFabricEvent = original.BasicFabricEvent
	BasicHealthEvaluation = original.BasicHealthEvaluation
	BasicNetworkResourceProperties = original.BasicNetworkResourceProperties
	BasicNetworkResourcePropertiesBase = original.BasicNetworkResourcePropertiesBase
	BasicNodeEvent = original.BasicNodeEvent
	BasicPartitionAnalysisEvent = original.BasicPartitionAnalysisEvent
	BasicPartitionEvent = original.BasicPartitionEvent
	BasicPartitionInformation = original.BasicPartitionInformation
	BasicPartitionSafetyCheck = original.BasicPartitionSafetyCheck
	BasicPartitionSchemeDescription = original.BasicPartitionSchemeDescription
	BasicPropertyBatchInfo = original.BasicPropertyBatchInfo
	BasicPropertyBatchOperation = original.BasicPropertyBatchOperation
	BasicPropertyValue = original.BasicPropertyValue
	BasicProvisionApplicationTypeDescriptionBase = original.BasicProvisionApplicationTypeDescriptionBase
	BasicRepairImpactDescriptionBase = original.BasicRepairImpactDescriptionBase
	BasicRepairTargetDescriptionBase = original.BasicRepairTargetDescriptionBase
	BasicReplicaEvent = original.BasicReplicaEvent
	BasicReplicaHealth = original.BasicReplicaHealth
	BasicReplicaHealthState = original.BasicReplicaHealthState
	BasicReplicaInfo = original.BasicReplicaInfo
	BasicReplicaStatusBase = original.BasicReplicaStatusBase
	BasicReplicatorStatus = original.BasicReplicatorStatus
	BasicRetentionPolicyDescription = original.BasicRetentionPolicyDescription
	BasicSafetyCheck = original.BasicSafetyCheck
	BasicScalingMechanismDescription = original.BasicScalingMechanismDescription
	BasicScalingTriggerDescription = original.BasicScalingTriggerDescription
	BasicSecondaryReplicatorStatus = original.BasicSecondaryReplicatorStatus
	BasicSecretResourceProperties = original.BasicSecretResourceProperties
	BasicSecretResourcePropertiesBase = original.BasicSecretResourcePropertiesBase
	BasicServiceDescription = original.BasicServiceDescription
	BasicServiceEvent = original.BasicServiceEvent
	BasicServiceInfo = original.BasicServiceInfo
	BasicServicePartitionInfo = original.BasicServicePartitionInfo
	BasicServicePlacementPolicyDescription = original.BasicServicePlacementPolicyDescription
	BasicServiceTypeDescription = original.BasicServiceTypeDescription
	BasicServiceUpdateDescription = original.BasicServiceUpdateDescription
	BinaryPropertyValue = original.BinaryPropertyValue
)
// Grouped type aliases re-exporting model types (Chaos through
// ExternalStoreProvisionApplicationTypeDescription) from the versioned
// servicefabric package.
type (
	Chaos = original.Chaos
	ChaosCodePackageRestartScheduledEvent = original.ChaosCodePackageRestartScheduledEvent
	ChaosContext = original.ChaosContext
	ChaosEvent = original.ChaosEvent
	ChaosEventWrapper = original.ChaosEventWrapper
	ChaosEventsSegment = original.ChaosEventsSegment
	ChaosNodeRestartScheduledEvent = original.ChaosNodeRestartScheduledEvent
	ChaosParameters = original.ChaosParameters
	ChaosParametersDictionaryItem = original.ChaosParametersDictionaryItem
	ChaosPartitionPrimaryMoveScheduledEvent = original.ChaosPartitionPrimaryMoveScheduledEvent
	ChaosPartitionSecondaryMoveScheduledEvent = original.ChaosPartitionSecondaryMoveScheduledEvent
	ChaosReplicaRemovalScheduledEvent = original.ChaosReplicaRemovalScheduledEvent
	ChaosReplicaRestartScheduledEvent = original.ChaosReplicaRestartScheduledEvent
	ChaosSchedule = original.ChaosSchedule
	ChaosScheduleDescription = original.ChaosScheduleDescription
	ChaosScheduleJob = original.ChaosScheduleJob
	ChaosScheduleJobActiveDaysOfWeek = original.ChaosScheduleJobActiveDaysOfWeek
	ChaosStartedEvent = original.ChaosStartedEvent
	ChaosStoppedEvent = original.ChaosStoppedEvent
	ChaosTargetFilter = original.ChaosTargetFilter
	CheckExistsPropertyBatchOperation = original.CheckExistsPropertyBatchOperation
	CheckSequencePropertyBatchOperation = original.CheckSequencePropertyBatchOperation
	CheckValuePropertyBatchOperation = original.CheckValuePropertyBatchOperation
	ClusterConfiguration = original.ClusterConfiguration
	ClusterConfigurationUpgradeDescription = original.ClusterConfigurationUpgradeDescription
	ClusterConfigurationUpgradeStatusInfo = original.ClusterConfigurationUpgradeStatusInfo
	ClusterEvent = original.ClusterEvent
	ClusterHealth = original.ClusterHealth
	ClusterHealthChunk = original.ClusterHealthChunk
	ClusterHealthChunkQueryDescription = original.ClusterHealthChunkQueryDescription
	ClusterHealthPolicies = original.ClusterHealthPolicies
	ClusterHealthPolicy = original.ClusterHealthPolicy
	ClusterHealthReportExpiredEvent = original.ClusterHealthReportExpiredEvent
	ClusterLoadInfo = original.ClusterLoadInfo
	ClusterManifest = original.ClusterManifest
	ClusterNewHealthReportEvent = original.ClusterNewHealthReportEvent
	ClusterUpgradeCompletedEvent = original.ClusterUpgradeCompletedEvent
	ClusterUpgradeDescriptionObject = original.ClusterUpgradeDescriptionObject
	ClusterUpgradeDomainCompletedEvent = original.ClusterUpgradeDomainCompletedEvent
	ClusterUpgradeHealthPolicyObject = original.ClusterUpgradeHealthPolicyObject
	ClusterUpgradeProgressObject = original.ClusterUpgradeProgressObject
	ClusterUpgradeRollbackCompletedEvent = original.ClusterUpgradeRollbackCompletedEvent
	ClusterUpgradeRollbackStartedEvent = original.ClusterUpgradeRollbackStartedEvent
	ClusterUpgradeStartedEvent = original.ClusterUpgradeStartedEvent
	ClusterVersion = original.ClusterVersion
	CodePackageEntryPoint = original.CodePackageEntryPoint
	CodePackageEntryPointStatistics = original.CodePackageEntryPointStatistics
	ComposeDeploymentStatusInfo = original.ComposeDeploymentStatusInfo
	ComposeDeploymentUpgradeDescription = original.ComposeDeploymentUpgradeDescription
	ComposeDeploymentUpgradeProgressInfo = original.ComposeDeploymentUpgradeProgressInfo
	ContainerAPIRequestBody = original.ContainerAPIRequestBody
	ContainerAPIResponse = original.ContainerAPIResponse
	ContainerAPIResult = original.ContainerAPIResult
	ContainerCodePackageProperties = original.ContainerCodePackageProperties
	ContainerEvent = original.ContainerEvent
	ContainerInstanceEvent = original.ContainerInstanceEvent
	ContainerInstanceView = original.ContainerInstanceView
	ContainerLabel = original.ContainerLabel
	ContainerLogs = original.ContainerLogs
	ContainerState = original.ContainerState
	CreateComposeDeploymentDescription = original.CreateComposeDeploymentDescription
	CurrentUpgradeDomainProgressInfo = original.CurrentUpgradeDomainProgressInfo
	DeactivationIntentDescription = original.DeactivationIntentDescription
	DeletePropertyBatchOperation = original.DeletePropertyBatchOperation
	DeltaNodesCheckHealthEvaluation = original.DeltaNodesCheckHealthEvaluation
	DeployServicePackageToNodeDescription = original.DeployServicePackageToNodeDescription
	DeployedApplicationHealth = original.DeployedApplicationHealth
	DeployedApplicationHealthEvaluation = original.DeployedApplicationHealthEvaluation
	DeployedApplicationHealthReportExpiredEvent = original.DeployedApplicationHealthReportExpiredEvent
	DeployedApplicationHealthState = original.DeployedApplicationHealthState
	DeployedApplicationHealthStateChunk = original.DeployedApplicationHealthStateChunk
	DeployedApplicationHealthStateChunkList = original.DeployedApplicationHealthStateChunkList
	DeployedApplicationHealthStateFilter = original.DeployedApplicationHealthStateFilter
	DeployedApplicationInfo = original.DeployedApplicationInfo
	DeployedApplicationNewHealthReportEvent = original.DeployedApplicationNewHealthReportEvent
	DeployedApplicationsHealthEvaluation = original.DeployedApplicationsHealthEvaluation
	DeployedCodePackageInfo = original.DeployedCodePackageInfo
	DeployedServicePackageHealth = original.DeployedServicePackageHealth
	DeployedServicePackageHealthEvaluation = original.DeployedServicePackageHealthEvaluation
	DeployedServicePackageHealthReportExpiredEvent = original.DeployedServicePackageHealthReportExpiredEvent
	DeployedServicePackageHealthState = original.DeployedServicePackageHealthState
	DeployedServicePackageHealthStateChunk = original.DeployedServicePackageHealthStateChunk
	DeployedServicePackageHealthStateChunkList = original.DeployedServicePackageHealthStateChunkList
	DeployedServicePackageHealthStateFilter = original.DeployedServicePackageHealthStateFilter
	DeployedServicePackageInfo = original.DeployedServicePackageInfo
	DeployedServicePackageNewHealthReportEvent = original.DeployedServicePackageNewHealthReportEvent
	DeployedServicePackagesHealthEvaluation = original.DeployedServicePackagesHealthEvaluation
	DeployedServiceReplicaDetailInfo = original.DeployedServiceReplicaDetailInfo
	DeployedServiceReplicaDetailInfoModel = original.DeployedServiceReplicaDetailInfoModel
	DeployedServiceReplicaInfo = original.DeployedServiceReplicaInfo
	DeployedServiceTypeInfo = original.DeployedServiceTypeInfo
	DeployedStatefulServiceReplicaDetailInfo = original.DeployedStatefulServiceReplicaDetailInfo
	DeployedStatefulServiceReplicaInfo = original.DeployedStatefulServiceReplicaInfo
	DeployedStatelessServiceInstanceDetailInfo = original.DeployedStatelessServiceInstanceDetailInfo
	DeployedStatelessServiceInstanceInfo = original.DeployedStatelessServiceInstanceInfo
	DiagnosticsDescription = original.DiagnosticsDescription
	DiagnosticsRef = original.DiagnosticsRef
	DiagnosticsSinkProperties = original.DiagnosticsSinkProperties
	DisableBackupDescription = original.DisableBackupDescription
	DoublePropertyValue = original.DoublePropertyValue
	EnableBackupDescription = original.EnableBackupDescription
	EndpointProperties = original.EndpointProperties
	EndpointRef = original.EndpointRef
	EnsureAvailabilitySafetyCheck = original.EnsureAvailabilitySafetyCheck
	EnsurePartitionQuorumSafetyCheck = original.EnsurePartitionQuorumSafetyCheck
	EntityHealth = original.EntityHealth
	EntityHealthState = original.EntityHealthState
	EntityHealthStateChunk = original.EntityHealthStateChunk
	EntityHealthStateChunkList = original.EntityHealthStateChunkList
	EntityKindHealthStateCount = original.EntityKindHealthStateCount
	EnvironmentVariable = original.EnvironmentVariable
	Epoch = original.Epoch
	EventHealthEvaluation = original.EventHealthEvaluation
	ExecutingFaultsChaosEvent = original.ExecutingFaultsChaosEvent
	ExternalStoreProvisionApplicationTypeDescription = original.ExternalStoreProvisionApplicationTypeDescription
)
// Grouped type aliases re-exporting model types (FabricCodeVersionInfo through
// NodesHealthEvaluation) from the versioned servicefabric package.
type (
	FabricCodeVersionInfo = original.FabricCodeVersionInfo
	FabricConfigVersionInfo = original.FabricConfigVersionInfo
	FabricError = original.FabricError
	FabricErrorError = original.FabricErrorError
	FabricEvent = original.FabricEvent
	FailedPropertyBatchInfo = original.FailedPropertyBatchInfo
	FailedUpgradeDomainProgressObject = original.FailedUpgradeDomainProgressObject
	FailureUpgradeDomainProgressInfo = original.FailureUpgradeDomainProgressInfo
	FileInfo = original.FileInfo
	FileShareBackupStorageDescription = original.FileShareBackupStorageDescription
	FileVersion = original.FileVersion
	FolderInfo = original.FolderInfo
	FolderSizeInfo = original.FolderSizeInfo
	FrequencyBasedBackupScheduleDescription = original.FrequencyBasedBackupScheduleDescription
	GUIDPropertyValue = original.GUIDPropertyValue
	GatewayDestination = original.GatewayDestination
	GatewayProperties = original.GatewayProperties
	GatewayResourceDescription = original.GatewayResourceDescription
	GetBackupByStorageQueryDescription = original.GetBackupByStorageQueryDescription
	GetPropertyBatchOperation = original.GetPropertyBatchOperation
	HTTPConfig = original.HTTPConfig
	HTTPHostConfig = original.HTTPHostConfig
	HTTPRouteConfig = original.HTTPRouteConfig
	HTTPRouteMatchHeader = original.HTTPRouteMatchHeader
	HTTPRouteMatchPath = original.HTTPRouteMatchPath
	HTTPRouteMatchRule = original.HTTPRouteMatchRule
	HealthEvaluation = original.HealthEvaluation
	HealthEvaluationWrapper = original.HealthEvaluationWrapper
	HealthEvent = original.HealthEvent
	HealthInformation = original.HealthInformation
	HealthStateCount = original.HealthStateCount
	HealthStatistics = original.HealthStatistics
	IdentityDescription = original.IdentityDescription
	IdentityItemDescription = original.IdentityItemDescription
	ImageRegistryCredential = original.ImageRegistryCredential
	ImageStoreContent = original.ImageStoreContent
	ImageStoreCopyDescription = original.ImageStoreCopyDescription
	InlinedValueSecretResourceProperties = original.InlinedValueSecretResourceProperties
	Int64PropertyValue = original.Int64PropertyValue
	Int64RangePartitionInformation = original.Int64RangePartitionInformation
	InvokeDataLossResult = original.InvokeDataLossResult
	InvokeQuorumLossResult = original.InvokeQuorumLossResult
	KeyValueStoreReplicaStatus = original.KeyValueStoreReplicaStatus
	ListApplicationEvent = original.ListApplicationEvent
	ListClusterEvent = original.ListClusterEvent
	ListContainerInstanceEvent = original.ListContainerInstanceEvent
	ListDeployedCodePackageInfo = original.ListDeployedCodePackageInfo
	ListDeployedServicePackageInfo = original.ListDeployedServicePackageInfo
	ListDeployedServiceReplicaInfo = original.ListDeployedServiceReplicaInfo
	ListDeployedServiceTypeInfo = original.ListDeployedServiceTypeInfo
	ListFabricCodeVersionInfo = original.ListFabricCodeVersionInfo
	ListFabricConfigVersionInfo = original.ListFabricConfigVersionInfo
	ListFabricEvent = original.ListFabricEvent
	ListNodeEvent = original.ListNodeEvent
	ListOperationStatus = original.ListOperationStatus
	ListPartitionEvent = original.ListPartitionEvent
	ListRepairTask = original.ListRepairTask
	ListReplicaEvent = original.ListReplicaEvent
	ListServiceEvent = original.ListServiceEvent
	ListServiceTypeInfo = original.ListServiceTypeInfo
	LoadMetricInformation = original.LoadMetricInformation
	LoadMetricReport = original.LoadMetricReport
	LoadMetricReportInfo = original.LoadMetricReportInfo
	LocalNetworkResourceProperties = original.LocalNetworkResourceProperties
	ManagedApplicationIdentity = original.ManagedApplicationIdentity
	ManagedApplicationIdentityDescription = original.ManagedApplicationIdentityDescription
	MeshApplicationClient = original.MeshApplicationClient
	MeshCodePackageClient = original.MeshCodePackageClient
	MeshGatewayClient = original.MeshGatewayClient
	MeshNetworkClient = original.MeshNetworkClient
	MeshSecretClient = original.MeshSecretClient
	MeshSecretValueClient = original.MeshSecretValueClient
	MeshServiceClient = original.MeshServiceClient
	MeshServiceReplicaClient = original.MeshServiceReplicaClient
	MeshVolumeClient = original.MeshVolumeClient
	MonitoringPolicyDescription = original.MonitoringPolicyDescription
	NameDescription = original.NameDescription
	NamedPartitionInformation = original.NamedPartitionInformation
	NamedPartitionSchemeDescription = original.NamedPartitionSchemeDescription
	NetworkRef = original.NetworkRef
	NetworkResourceDescription = original.NetworkResourceDescription
	NetworkResourceProperties = original.NetworkResourceProperties
	NetworkResourcePropertiesBase = original.NetworkResourcePropertiesBase
	NodeAbortedEvent = original.NodeAbortedEvent
	NodeAddedToClusterEvent = original.NodeAddedToClusterEvent
	NodeClosedEvent = original.NodeClosedEvent
	NodeDeactivateCompletedEvent = original.NodeDeactivateCompletedEvent
	NodeDeactivateStartedEvent = original.NodeDeactivateStartedEvent
	NodeDeactivationInfo = original.NodeDeactivationInfo
	NodeDeactivationTask = original.NodeDeactivationTask
	NodeDeactivationTaskID = original.NodeDeactivationTaskID
	NodeDownEvent = original.NodeDownEvent
	NodeEvent = original.NodeEvent
	NodeHealth = original.NodeHealth
	NodeHealthEvaluation = original.NodeHealthEvaluation
	NodeHealthReportExpiredEvent = original.NodeHealthReportExpiredEvent
	NodeHealthState = original.NodeHealthState
	NodeHealthStateChunk = original.NodeHealthStateChunk
	NodeHealthStateChunkList = original.NodeHealthStateChunkList
	NodeHealthStateFilter = original.NodeHealthStateFilter
	NodeID = original.NodeID
	NodeImpact = original.NodeImpact
	NodeInfo = original.NodeInfo
	NodeLoadInfo = original.NodeLoadInfo
	NodeLoadMetricInformation = original.NodeLoadMetricInformation
	NodeNewHealthReportEvent = original.NodeNewHealthReportEvent
	NodeOpenFailedEvent = original.NodeOpenFailedEvent
	NodeOpenSucceededEvent = original.NodeOpenSucceededEvent
	NodeRemovedFromClusterEvent = original.NodeRemovedFromClusterEvent
	NodeRepairImpactDescription = original.NodeRepairImpactDescription
	NodeRepairTargetDescription = original.NodeRepairTargetDescription
	NodeResult = original.NodeResult
	NodeTransitionProgress = original.NodeTransitionProgress
	NodeTransitionResult = original.NodeTransitionResult
	NodeUpEvent = original.NodeUpEvent
	NodeUpgradeProgressInfo = original.NodeUpgradeProgressInfo
	NodesHealthEvaluation = original.NodesHealthEvaluation
)
// Grouped type aliases re-exporting model types (OperationStatus through
// SecondaryReplicatorStatus) from the versioned servicefabric package.
type (
	OperationStatus = original.OperationStatus
	PackageSharingPolicyInfo = original.PackageSharingPolicyInfo
	PagedApplicationInfoList = original.PagedApplicationInfoList
	PagedApplicationResourceDescriptionList = original.PagedApplicationResourceDescriptionList
	PagedApplicationTypeInfoList = original.PagedApplicationTypeInfoList
	PagedBackupConfigurationInfoList = original.PagedBackupConfigurationInfoList
	PagedBackupEntityList = original.PagedBackupEntityList
	PagedBackupInfoList = original.PagedBackupInfoList
	PagedBackupPolicyDescriptionList = original.PagedBackupPolicyDescriptionList
	PagedComposeDeploymentStatusInfoList = original.PagedComposeDeploymentStatusInfoList
	PagedDeployedApplicationInfoList = original.PagedDeployedApplicationInfoList
	PagedGatewayResourceDescriptionList = original.PagedGatewayResourceDescriptionList
	PagedNetworkResourceDescriptionList = original.PagedNetworkResourceDescriptionList
	PagedNodeInfoList = original.PagedNodeInfoList
	PagedPropertyInfoList = original.PagedPropertyInfoList
	PagedReplicaInfoList = original.PagedReplicaInfoList
	PagedSecretResourceDescriptionList = original.PagedSecretResourceDescriptionList
	PagedSecretValueResourceDescriptionList = original.PagedSecretValueResourceDescriptionList
	PagedServiceInfoList = original.PagedServiceInfoList
	PagedServicePartitionInfoList = original.PagedServicePartitionInfoList
	PagedServiceReplicaDescriptionList = original.PagedServiceReplicaDescriptionList
	PagedServiceResourceDescriptionList = original.PagedServiceResourceDescriptionList
	PagedSubNameInfoList = original.PagedSubNameInfoList
	PagedVolumeResourceDescriptionList = original.PagedVolumeResourceDescriptionList
	PartitionAnalysisEvent = original.PartitionAnalysisEvent
	PartitionBackupConfigurationInfo = original.PartitionBackupConfigurationInfo
	PartitionBackupEntity = original.PartitionBackupEntity
	PartitionDataLossProgress = original.PartitionDataLossProgress
	PartitionEvent = original.PartitionEvent
	PartitionHealth = original.PartitionHealth
	PartitionHealthEvaluation = original.PartitionHealthEvaluation
	PartitionHealthReportExpiredEvent = original.PartitionHealthReportExpiredEvent
	PartitionHealthState = original.PartitionHealthState
	PartitionHealthStateChunk = original.PartitionHealthStateChunk
	PartitionHealthStateChunkList = original.PartitionHealthStateChunkList
	PartitionHealthStateFilter = original.PartitionHealthStateFilter
	PartitionInformation = original.PartitionInformation
	PartitionInstanceCountScaleMechanism = original.PartitionInstanceCountScaleMechanism
	PartitionLoadInformation = original.PartitionLoadInformation
	PartitionNewHealthReportEvent = original.PartitionNewHealthReportEvent
	PartitionPrimaryMoveAnalysisEvent = original.PartitionPrimaryMoveAnalysisEvent
	PartitionQuorumLossProgress = original.PartitionQuorumLossProgress
	PartitionReconfiguredEvent = original.PartitionReconfiguredEvent
	PartitionRestartProgress = original.PartitionRestartProgress
	PartitionSafetyCheck = original.PartitionSafetyCheck
	PartitionSchemeDescription = original.PartitionSchemeDescription
	PartitionsHealthEvaluation = original.PartitionsHealthEvaluation
	PrimaryReplicatorStatus = original.PrimaryReplicatorStatus
	PropertyBatchDescriptionList = original.PropertyBatchDescriptionList
	PropertyBatchInfo = original.PropertyBatchInfo
	PropertyBatchInfoModel = original.PropertyBatchInfoModel
	PropertyBatchOperation = original.PropertyBatchOperation
	PropertyDescription = original.PropertyDescription
	PropertyInfo = original.PropertyInfo
	PropertyMetadata = original.PropertyMetadata
	PropertyValue = original.PropertyValue
	ProvisionApplicationTypeDescription = original.ProvisionApplicationTypeDescription
	ProvisionApplicationTypeDescriptionBase = original.ProvisionApplicationTypeDescriptionBase
	ProvisionFabricDescription = original.ProvisionFabricDescription
	PutPropertyBatchOperation = original.PutPropertyBatchOperation
	ReconfigurationInformation = original.ReconfigurationInformation
	RegistryCredential = original.RegistryCredential
	ReliableCollectionsRef = original.ReliableCollectionsRef
	RemoteReplicatorAcknowledgementDetail = original.RemoteReplicatorAcknowledgementDetail
	RemoteReplicatorAcknowledgementStatus = original.RemoteReplicatorAcknowledgementStatus
	RemoteReplicatorStatus = original.RemoteReplicatorStatus
	RepairImpactDescriptionBase = original.RepairImpactDescriptionBase
	RepairTargetDescriptionBase = original.RepairTargetDescriptionBase
	RepairTask = original.RepairTask
	RepairTaskApproveDescription = original.RepairTaskApproveDescription
	RepairTaskCancelDescription = original.RepairTaskCancelDescription
	RepairTaskDeleteDescription = original.RepairTaskDeleteDescription
	RepairTaskHistory = original.RepairTaskHistory
	RepairTaskUpdateHealthPolicyDescription = original.RepairTaskUpdateHealthPolicyDescription
	RepairTaskUpdateInfo = original.RepairTaskUpdateInfo
	ReplicaEvent = original.ReplicaEvent
	ReplicaHealth = original.ReplicaHealth
	ReplicaHealthEvaluation = original.ReplicaHealthEvaluation
	ReplicaHealthModel = original.ReplicaHealthModel
	ReplicaHealthState = original.ReplicaHealthState
	ReplicaHealthStateChunk = original.ReplicaHealthStateChunk
	ReplicaHealthStateChunkList = original.ReplicaHealthStateChunkList
	ReplicaHealthStateFilter = original.ReplicaHealthStateFilter
	ReplicaInfo = original.ReplicaInfo
	ReplicaInfoModel = original.ReplicaInfoModel
	ReplicaStatusBase = original.ReplicaStatusBase
	ReplicasHealthEvaluation = original.ReplicasHealthEvaluation
	ReplicatorQueueStatus = original.ReplicatorQueueStatus
	ReplicatorStatus = original.ReplicatorStatus
	ResolvedServiceEndpoint = original.ResolvedServiceEndpoint
	ResolvedServicePartition = original.ResolvedServicePartition
	ResourceLimits = original.ResourceLimits
	ResourceRequests = original.ResourceRequests
	ResourceRequirements = original.ResourceRequirements
	RestartDeployedCodePackageDescription = original.RestartDeployedCodePackageDescription
	RestartNodeDescription = original.RestartNodeDescription
	RestartPartitionResult = original.RestartPartitionResult
	RestorePartitionDescription = original.RestorePartitionDescription
	RestoreProgressInfo = original.RestoreProgressInfo
	ResumeApplicationUpgradeDescription = original.ResumeApplicationUpgradeDescription
	ResumeClusterUpgradeDescription = original.ResumeClusterUpgradeDescription
	RetentionPolicyDescription = original.RetentionPolicyDescription
	RollingUpgradeUpdateDescription = original.RollingUpgradeUpdateDescription
	SafetyCheck = original.SafetyCheck
	SafetyCheckWrapper = original.SafetyCheckWrapper
	ScalingMechanismDescription = original.ScalingMechanismDescription
	ScalingPolicyDescription = original.ScalingPolicyDescription
	ScalingTriggerDescription = original.ScalingTriggerDescription
	SecondaryActiveReplicatorStatus = original.SecondaryActiveReplicatorStatus
	SecondaryIdleReplicatorStatus = original.SecondaryIdleReplicatorStatus
	SecondaryReplicatorStatus = original.SecondaryReplicatorStatus
)
type SecretResourceDescription = original.SecretResourceDescription
type SecretResourceProperties = original.SecretResourceProperties
type SecretResourcePropertiesBase = original.SecretResourcePropertiesBase
type SecretValue = original.SecretValue
type SecretValueProperties = original.SecretValueProperties
type SecretValueResourceDescription = original.SecretValueResourceDescription
type SecretValueResourceProperties = original.SecretValueResourceProperties
type SeedNodeSafetyCheck = original.SeedNodeSafetyCheck
type SelectedPartition = original.SelectedPartition
type ServiceBackupConfigurationInfo = original.ServiceBackupConfigurationInfo
type ServiceBackupEntity = original.ServiceBackupEntity
type ServiceCorrelationDescription = original.ServiceCorrelationDescription
type ServiceCreatedEvent = original.ServiceCreatedEvent
type ServiceDeletedEvent = original.ServiceDeletedEvent
type ServiceDescription = original.ServiceDescription
type ServiceDescriptionModel = original.ServiceDescriptionModel
type ServiceEvent = original.ServiceEvent
type ServiceFromTemplateDescription = original.ServiceFromTemplateDescription
type ServiceHealth = original.ServiceHealth
type ServiceHealthEvaluation = original.ServiceHealthEvaluation
type ServiceHealthReportExpiredEvent = original.ServiceHealthReportExpiredEvent
type ServiceHealthState = original.ServiceHealthState
type ServiceHealthStateChunk = original.ServiceHealthStateChunk
type ServiceHealthStateChunkList = original.ServiceHealthStateChunkList
type ServiceHealthStateFilter = original.ServiceHealthStateFilter
type ServiceIdentity = original.ServiceIdentity
type ServiceInfo = original.ServiceInfo
type ServiceInfoModel = original.ServiceInfoModel
type ServiceLoadMetricDescription = original.ServiceLoadMetricDescription
type ServiceNameInfo = original.ServiceNameInfo
type ServiceNewHealthReportEvent = original.ServiceNewHealthReportEvent
type ServicePartitionInfo = original.ServicePartitionInfo
type ServicePartitionInfoModel = original.ServicePartitionInfoModel
type ServicePlacementInvalidDomainPolicyDescription = original.ServicePlacementInvalidDomainPolicyDescription
type ServicePlacementNonPartiallyPlaceServicePolicyDescription = original.ServicePlacementNonPartiallyPlaceServicePolicyDescription
type ServicePlacementPolicyDescription = original.ServicePlacementPolicyDescription
type ServicePlacementPreferPrimaryDomainPolicyDescription = original.ServicePlacementPreferPrimaryDomainPolicyDescription
type ServicePlacementRequireDomainDistributionPolicyDescription = original.ServicePlacementRequireDomainDistributionPolicyDescription
type ServicePlacementRequiredDomainPolicyDescription = original.ServicePlacementRequiredDomainPolicyDescription
type ServiceProperties = original.ServiceProperties
type ServiceReplicaDescription = original.ServiceReplicaDescription
type ServiceReplicaProperties = original.ServiceReplicaProperties
type ServiceResourceDescription = original.ServiceResourceDescription
type ServiceResourceProperties = original.ServiceResourceProperties
type ServiceTypeDescription = original.ServiceTypeDescription
type ServiceTypeExtensionDescription = original.ServiceTypeExtensionDescription
type ServiceTypeHealthPolicy = original.ServiceTypeHealthPolicy
type ServiceTypeHealthPolicyMapItem = original.ServiceTypeHealthPolicyMapItem
type ServiceTypeInfo = original.ServiceTypeInfo
type ServiceTypeManifest = original.ServiceTypeManifest
type ServiceUpdateDescription = original.ServiceUpdateDescription
type ServicesHealthEvaluation = original.ServicesHealthEvaluation
type Setting = original.Setting
type SingletonPartitionInformation = original.SingletonPartitionInformation
type SingletonPartitionSchemeDescription = original.SingletonPartitionSchemeDescription
type StartClusterUpgradeDescription = original.StartClusterUpgradeDescription
type StartedChaosEvent = original.StartedChaosEvent
type StatefulReplicaHealthReportExpiredEvent = original.StatefulReplicaHealthReportExpiredEvent
type StatefulReplicaNewHealthReportEvent = original.StatefulReplicaNewHealthReportEvent
type StatefulServiceDescription = original.StatefulServiceDescription
type StatefulServiceInfo = original.StatefulServiceInfo
type StatefulServicePartitionInfo = original.StatefulServicePartitionInfo
type StatefulServiceReplicaHealth = original.StatefulServiceReplicaHealth
type StatefulServiceReplicaHealthState = original.StatefulServiceReplicaHealthState
type StatefulServiceReplicaInfo = original.StatefulServiceReplicaInfo
type StatefulServiceTypeDescription = original.StatefulServiceTypeDescription
type StatefulServiceUpdateDescription = original.StatefulServiceUpdateDescription
type StatelessReplicaHealthReportExpiredEvent = original.StatelessReplicaHealthReportExpiredEvent
type StatelessReplicaNewHealthReportEvent = original.StatelessReplicaNewHealthReportEvent
type StatelessServiceDescription = original.StatelessServiceDescription
type StatelessServiceInfo = original.StatelessServiceInfo
type StatelessServiceInstanceHealth = original.StatelessServiceInstanceHealth
type StatelessServiceInstanceHealthState = original.StatelessServiceInstanceHealthState
type StatelessServiceInstanceInfo = original.StatelessServiceInstanceInfo
type StatelessServicePartitionInfo = original.StatelessServicePartitionInfo
type StatelessServiceTypeDescription = original.StatelessServiceTypeDescription
type StatelessServiceUpdateDescription = original.StatelessServiceUpdateDescription
type StoppedChaosEvent = original.StoppedChaosEvent
type String = original.String
type StringPropertyValue = original.StringPropertyValue
type SuccessfulPropertyBatchInfo = original.SuccessfulPropertyBatchInfo
type SystemApplicationHealthEvaluation = original.SystemApplicationHealthEvaluation
type TCPConfig = original.TCPConfig
type TestErrorChaosEvent = original.TestErrorChaosEvent
type TimeBasedBackupScheduleDescription = original.TimeBasedBackupScheduleDescription
type TimeOfDay = original.TimeOfDay
type TimeRange = original.TimeRange
type UniformInt64RangePartitionSchemeDescription = original.UniformInt64RangePartitionSchemeDescription
type UnplacedReplicaInformation = original.UnplacedReplicaInformation
type UnprovisionApplicationTypeDescriptionInfo = original.UnprovisionApplicationTypeDescriptionInfo
type UnprovisionFabricDescription = original.UnprovisionFabricDescription
type UpdateClusterUpgradeDescription = original.UpdateClusterUpgradeDescription
type UpgradeDomainDeltaNodesCheckHealthEvaluation = original.UpgradeDomainDeltaNodesCheckHealthEvaluation
type UpgradeDomainInfo = original.UpgradeDomainInfo
type UpgradeDomainNodesHealthEvaluation = original.UpgradeDomainNodesHealthEvaluation
type UpgradeOrchestrationServiceState = original.UpgradeOrchestrationServiceState
type UpgradeOrchestrationServiceStateSummary = original.UpgradeOrchestrationServiceStateSummary
type UploadChunkRange = original.UploadChunkRange
type UploadSession = original.UploadSession
type UploadSessionInfo = original.UploadSessionInfo
type ValidationFailedChaosEvent = original.ValidationFailedChaosEvent
type VolumeProperties = original.VolumeProperties
type VolumeProviderParametersAzureFile = original.VolumeProviderParametersAzureFile
type VolumeReference = original.VolumeReference
type VolumeResourceDescription = original.VolumeResourceDescription
type WaitForInbuildReplicaSafetyCheck = original.WaitForInbuildReplicaSafetyCheck
type WaitForPrimaryPlacementSafetyCheck = original.WaitForPrimaryPlacementSafetyCheck
type WaitForPrimarySwapSafetyCheck = original.WaitForPrimarySwapSafetyCheck
type WaitForReconfigurationSafetyCheck = original.WaitForReconfigurationSafetyCheck
type WaitingChaosEvent = original.WaitingChaosEvent
// Client constructors: one-line forwarders to the versioned package's
// constructors. Each client type comes in two flavors — a default-endpoint
// constructor and a *WithBaseURI variant that targets a custom base URI.
// NOTE(review): generated forwarding code; keep in sync by regeneration.
func New() BaseClient {
	return original.New()
}
func NewMeshApplicationClient() MeshApplicationClient {
	return original.NewMeshApplicationClient()
}
func NewMeshApplicationClientWithBaseURI(baseURI string) MeshApplicationClient {
	return original.NewMeshApplicationClientWithBaseURI(baseURI)
}
func NewMeshCodePackageClient() MeshCodePackageClient {
	return original.NewMeshCodePackageClient()
}
func NewMeshCodePackageClientWithBaseURI(baseURI string) MeshCodePackageClient {
	return original.NewMeshCodePackageClientWithBaseURI(baseURI)
}
func NewMeshGatewayClient() MeshGatewayClient {
	return original.NewMeshGatewayClient()
}
func NewMeshGatewayClientWithBaseURI(baseURI string) MeshGatewayClient {
	return original.NewMeshGatewayClientWithBaseURI(baseURI)
}
func NewMeshNetworkClient() MeshNetworkClient {
	return original.NewMeshNetworkClient()
}
func NewMeshNetworkClientWithBaseURI(baseURI string) MeshNetworkClient {
	return original.NewMeshNetworkClientWithBaseURI(baseURI)
}
func NewMeshSecretClient() MeshSecretClient {
	return original.NewMeshSecretClient()
}
func NewMeshSecretClientWithBaseURI(baseURI string) MeshSecretClient {
	return original.NewMeshSecretClientWithBaseURI(baseURI)
}
func NewMeshSecretValueClient() MeshSecretValueClient {
	return original.NewMeshSecretValueClient()
}
func NewMeshSecretValueClientWithBaseURI(baseURI string) MeshSecretValueClient {
	return original.NewMeshSecretValueClientWithBaseURI(baseURI)
}
func NewMeshServiceClient() MeshServiceClient {
	return original.NewMeshServiceClient()
}
func NewMeshServiceClientWithBaseURI(baseURI string) MeshServiceClient {
	return original.NewMeshServiceClientWithBaseURI(baseURI)
}
func NewMeshServiceReplicaClient() MeshServiceReplicaClient {
	return original.NewMeshServiceReplicaClient()
}
func NewMeshServiceReplicaClientWithBaseURI(baseURI string) MeshServiceReplicaClient {
	return original.NewMeshServiceReplicaClientWithBaseURI(baseURI)
}
func NewMeshVolumeClient() MeshVolumeClient {
	return original.NewMeshVolumeClient()
}
func NewMeshVolumeClientWithBaseURI(baseURI string) MeshVolumeClient {
	return original.NewMeshVolumeClientWithBaseURI(baseURI)
}
func NewWithBaseURI(baseURI string) BaseClient {
	return original.NewWithBaseURI(baseURI)
}
// Possible*Values helpers: each forwards to the versioned package and
// returns the complete set of allowed values for the corresponding
// string-enum type, so profile consumers can enumerate enums without
// importing the versioned path. NOTE(review): generated forwarding code.
func PossibleApplicationDefinitionKindValues() []ApplicationDefinitionKind {
	return original.PossibleApplicationDefinitionKindValues()
}
func PossibleApplicationPackageCleanupPolicyValues() []ApplicationPackageCleanupPolicy {
	return original.PossibleApplicationPackageCleanupPolicyValues()
}
func PossibleApplicationScopedVolumeKindValues() []ApplicationScopedVolumeKind {
	return original.PossibleApplicationScopedVolumeKindValues()
}
func PossibleApplicationStatusValues() []ApplicationStatus {
	return original.PossibleApplicationStatusValues()
}
func PossibleApplicationTypeDefinitionKindValues() []ApplicationTypeDefinitionKind {
	return original.PossibleApplicationTypeDefinitionKindValues()
}
func PossibleApplicationTypeStatusValues() []ApplicationTypeStatus {
	return original.PossibleApplicationTypeStatusValues()
}
func PossibleAutoScalingMechanismKindValues() []AutoScalingMechanismKind {
	return original.PossibleAutoScalingMechanismKindValues()
}
func PossibleAutoScalingMetricKindValues() []AutoScalingMetricKind {
	return original.PossibleAutoScalingMetricKindValues()
}
func PossibleAutoScalingResourceMetricNameValues() []AutoScalingResourceMetricName {
	return original.PossibleAutoScalingResourceMetricNameValues()
}
func PossibleAutoScalingTriggerKindValues() []AutoScalingTriggerKind {
	return original.PossibleAutoScalingTriggerKindValues()
}
func PossibleBackupEntityKindValues() []BackupEntityKind {
	return original.PossibleBackupEntityKindValues()
}
func PossibleBackupPolicyScopeValues() []BackupPolicyScope {
	return original.PossibleBackupPolicyScopeValues()
}
func PossibleBackupScheduleFrequencyTypeValues() []BackupScheduleFrequencyType {
	return original.PossibleBackupScheduleFrequencyTypeValues()
}
func PossibleBackupScheduleKindValues() []BackupScheduleKind {
	return original.PossibleBackupScheduleKindValues()
}
func PossibleBackupStateValues() []BackupState {
	return original.PossibleBackupStateValues()
}
func PossibleBackupStorageKindValues() []BackupStorageKind {
	return original.PossibleBackupStorageKindValues()
}
func PossibleBackupSuspensionScopeValues() []BackupSuspensionScope {
	return original.PossibleBackupSuspensionScopeValues()
}
func PossibleBackupTypeValues() []BackupType {
	return original.PossibleBackupTypeValues()
}
func PossibleChaosEventKindValues() []ChaosEventKind {
	return original.PossibleChaosEventKindValues()
}
func PossibleChaosScheduleStatusValues() []ChaosScheduleStatus {
	return original.PossibleChaosScheduleStatusValues()
}
func PossibleChaosStatusValues() []ChaosStatus {
	return original.PossibleChaosStatusValues()
}
func PossibleComposeDeploymentStatusValues() []ComposeDeploymentStatus {
	return original.PossibleComposeDeploymentStatusValues()
}
func PossibleComposeDeploymentUpgradeStateValues() []ComposeDeploymentUpgradeState {
	return original.PossibleComposeDeploymentUpgradeStateValues()
}
func PossibleCreateFabricDumpValues() []CreateFabricDump {
	return original.PossibleCreateFabricDumpValues()
}
func PossibleDataLossModeValues() []DataLossMode {
	return original.PossibleDataLossModeValues()
}
func PossibleDayOfWeekValues() []DayOfWeek {
	return original.PossibleDayOfWeekValues()
}
func PossibleDeactivationIntentValues() []DeactivationIntent {
	return original.PossibleDeactivationIntentValues()
}
func PossibleDeployedApplicationStatusValues() []DeployedApplicationStatus {
	return original.PossibleDeployedApplicationStatusValues()
}
func PossibleDeploymentStatusValues() []DeploymentStatus {
	return original.PossibleDeploymentStatusValues()
}
func PossibleDiagnosticsSinkKindValues() []DiagnosticsSinkKind {
	return original.PossibleDiagnosticsSinkKindValues()
}
func PossibleEntityKindBasicBackupEntityValues() []EntityKindBasicBackupEntity {
	return original.PossibleEntityKindBasicBackupEntityValues()
}
func PossibleEntityKindValues() []EntityKind {
	return original.PossibleEntityKindValues()
}
func PossibleEntryPointStatusValues() []EntryPointStatus {
	return original.PossibleEntryPointStatusValues()
}
func PossibleFabricErrorCodesValues() []FabricErrorCodes {
	return original.PossibleFabricErrorCodesValues()
}
func PossibleFabricEventKindValues() []FabricEventKind {
	return original.PossibleFabricEventKindValues()
}
func PossibleFabricReplicaStatusValues() []FabricReplicaStatus {
	return original.PossibleFabricReplicaStatusValues()
}
func PossibleFailureActionValues() []FailureAction {
	return original.PossibleFailureActionValues()
}
func PossibleFailureReasonValues() []FailureReason {
	return original.PossibleFailureReasonValues()
}
func PossibleHeaderMatchTypeValues() []HeaderMatchType {
	return original.PossibleHeaderMatchTypeValues()
}
func PossibleHealthEvaluationKindValues() []HealthEvaluationKind {
	return original.PossibleHealthEvaluationKindValues()
}
func PossibleHealthStateValues() []HealthState {
	return original.PossibleHealthStateValues()
}
func PossibleHostIsolationModeValues() []HostIsolationMode {
	return original.PossibleHostIsolationModeValues()
}
func PossibleHostTypeValues() []HostType {
	return original.PossibleHostTypeValues()
}
func PossibleImpactLevelValues() []ImpactLevel {
	return original.PossibleImpactLevelValues()
}
func PossibleKindBasicApplicationScopedVolumeCreationParametersValues() []KindBasicApplicationScopedVolumeCreationParameters {
	return original.PossibleKindBasicApplicationScopedVolumeCreationParametersValues()
}
func PossibleKindBasicAutoScalingMechanismValues() []KindBasicAutoScalingMechanism {
	return original.PossibleKindBasicAutoScalingMechanismValues()
}
func PossibleKindBasicAutoScalingMetricValues() []KindBasicAutoScalingMetric {
	return original.PossibleKindBasicAutoScalingMetricValues()
}
func PossibleKindBasicAutoScalingTriggerValues() []KindBasicAutoScalingTrigger {
	return original.PossibleKindBasicAutoScalingTriggerValues()
}
func PossibleKindBasicBackupConfigurationInfoValues() []KindBasicBackupConfigurationInfo {
	return original.PossibleKindBasicBackupConfigurationInfoValues()
}
func PossibleKindBasicChaosEventValues() []KindBasicChaosEvent {
	return original.PossibleKindBasicChaosEventValues()
}
func PossibleKindBasicDiagnosticsSinkPropertiesValues() []KindBasicDiagnosticsSinkProperties {
	return original.PossibleKindBasicDiagnosticsSinkPropertiesValues()
}
func PossibleKindBasicFabricEventValues() []KindBasicFabricEvent {
	return original.PossibleKindBasicFabricEventValues()
}
func PossibleKindBasicNetworkResourcePropertiesBaseValues() []KindBasicNetworkResourcePropertiesBase {
	return original.PossibleKindBasicNetworkResourcePropertiesBaseValues()
}
func PossibleKindBasicPropertyBatchInfoValues() []KindBasicPropertyBatchInfo {
	return original.PossibleKindBasicPropertyBatchInfoValues()
}
func PossibleKindBasicPropertyBatchOperationValues() []KindBasicPropertyBatchOperation {
	return original.PossibleKindBasicPropertyBatchOperationValues()
}
func PossibleKindBasicPropertyValueValues() []KindBasicPropertyValue {
	return original.PossibleKindBasicPropertyValueValues()
}
func PossibleKindBasicProvisionApplicationTypeDescriptionBaseValues() []KindBasicProvisionApplicationTypeDescriptionBase {
	return original.PossibleKindBasicProvisionApplicationTypeDescriptionBaseValues()
}
func PossibleKindBasicRepairImpactDescriptionBaseValues() []KindBasicRepairImpactDescriptionBase {
	return original.PossibleKindBasicRepairImpactDescriptionBaseValues()
}
func PossibleKindBasicRepairTargetDescriptionBaseValues() []KindBasicRepairTargetDescriptionBase {
	return original.PossibleKindBasicRepairTargetDescriptionBaseValues()
}
func PossibleKindBasicReplicaStatusBaseValues() []KindBasicReplicaStatusBase {
	return original.PossibleKindBasicReplicaStatusBaseValues()
}
func PossibleKindBasicReplicatorStatusValues() []KindBasicReplicatorStatus {
	return original.PossibleKindBasicReplicatorStatusValues()
}
func PossibleKindBasicSafetyCheckValues() []KindBasicSafetyCheck {
	return original.PossibleKindBasicSafetyCheckValues()
}
func PossibleKindBasicScalingMechanismDescriptionValues() []KindBasicScalingMechanismDescription {
	return original.PossibleKindBasicScalingMechanismDescriptionValues()
}
func PossibleKindBasicScalingTriggerDescriptionValues() []KindBasicScalingTriggerDescription {
	return original.PossibleKindBasicScalingTriggerDescriptionValues()
}
func PossibleKindBasicSecretResourcePropertiesBaseValues() []KindBasicSecretResourcePropertiesBase {
	return original.PossibleKindBasicSecretResourcePropertiesBaseValues()
}
func PossibleKindBasicServiceTypeDescriptionValues() []KindBasicServiceTypeDescription {
	return original.PossibleKindBasicServiceTypeDescriptionValues()
}
func PossibleKindValues() []Kind {
	return original.PossibleKindValues()
}
func PossibleMoveCostValues() []MoveCost {
	return original.PossibleMoveCostValues()
}
func PossibleNetworkKindValues() []NetworkKind {
	return original.PossibleNetworkKindValues()
}
func PossibleNodeDeactivationIntentValues() []NodeDeactivationIntent {
	return original.PossibleNodeDeactivationIntentValues()
}
func PossibleNodeDeactivationStatusValues() []NodeDeactivationStatus {
	return original.PossibleNodeDeactivationStatusValues()
}
func PossibleNodeDeactivationTaskTypeValues() []NodeDeactivationTaskType {
	return original.PossibleNodeDeactivationTaskTypeValues()
}
func PossibleNodeStatusFilterValues() []NodeStatusFilter {
	return original.PossibleNodeStatusFilterValues()
}
func PossibleNodeStatusValues() []NodeStatus {
	return original.PossibleNodeStatusValues()
}
func PossibleNodeTransitionTypeValues() []NodeTransitionType {
	return original.PossibleNodeTransitionTypeValues()
}
func PossibleNodeUpgradePhaseValues() []NodeUpgradePhase {
	return original.PossibleNodeUpgradePhaseValues()
}
func PossibleOperatingSystemTypeValues() []OperatingSystemType {
	return original.PossibleOperatingSystemTypeValues()
}
func PossibleOperationStateValues() []OperationState {
	return original.PossibleOperationStateValues()
}
func PossibleOperationTypeValues() []OperationType {
	return original.PossibleOperationTypeValues()
}
func PossiblePackageSharingPolicyScopeValues() []PackageSharingPolicyScope {
	return original.PossiblePackageSharingPolicyScopeValues()
}
func PossiblePartitionAccessStatusValues() []PartitionAccessStatus {
	return original.PossiblePartitionAccessStatusValues()
}
func PossiblePartitionSchemeBasicPartitionSchemeDescriptionValues() []PartitionSchemeBasicPartitionSchemeDescription {
	return original.PossiblePartitionSchemeBasicPartitionSchemeDescriptionValues()
}
func PossiblePartitionSchemeValues() []PartitionScheme {
	return original.PossiblePartitionSchemeValues()
}
func PossiblePropertyBatchInfoKindValues() []PropertyBatchInfoKind {
	return original.PossiblePropertyBatchInfoKindValues()
}
func PossiblePropertyBatchOperationKindValues() []PropertyBatchOperationKind {
	return original.PossiblePropertyBatchOperationKindValues()
}
func PossiblePropertyValueKindValues() []PropertyValueKind {
	return original.PossiblePropertyValueKindValues()
}
func PossibleProvisionApplicationTypeKindValues() []ProvisionApplicationTypeKind {
	return original.PossibleProvisionApplicationTypeKindValues()
}
func PossibleQuorumLossModeValues() []QuorumLossMode {
	return original.PossibleQuorumLossModeValues()
}
func PossibleReconfigurationPhaseValues() []ReconfigurationPhase {
	return original.PossibleReconfigurationPhaseValues()
}
func PossibleReconfigurationTypeValues() []ReconfigurationType {
	return original.PossibleReconfigurationTypeValues()
}
func PossibleRepairImpactKindValues() []RepairImpactKind {
	return original.PossibleRepairImpactKindValues()
}
func PossibleRepairTargetKindValues() []RepairTargetKind {
	return original.PossibleRepairTargetKindValues()
}
func PossibleRepairTaskHealthCheckStateValues() []RepairTaskHealthCheckState {
	return original.PossibleRepairTaskHealthCheckStateValues()
}
func PossibleReplicaHealthReportServiceKindValues() []ReplicaHealthReportServiceKind {
	return original.PossibleReplicaHealthReportServiceKindValues()
}
func PossibleReplicaKindValues() []ReplicaKind {
	return original.PossibleReplicaKindValues()
}
func PossibleReplicaRoleValues() []ReplicaRole {
	return original.PossibleReplicaRoleValues()
}
func PossibleReplicaStatusValues() []ReplicaStatus {
	return original.PossibleReplicaStatusValues()
}
func PossibleReplicatorOperationNameValues() []ReplicatorOperationName {
	return original.PossibleReplicatorOperationNameValues()
}
func PossibleResourceStatusValues() []ResourceStatus {
	return original.PossibleResourceStatusValues()
}
func PossibleRestartPartitionModeValues() []RestartPartitionMode {
	return original.PossibleRestartPartitionModeValues()
}
func PossibleRestoreStateValues() []RestoreState {
	return original.PossibleRestoreStateValues()
}
func PossibleResultStatusValues() []ResultStatus {
	return original.PossibleResultStatusValues()
}
func PossibleRetentionPolicyTypeBasicBasicRetentionPolicyDescriptionValues() []RetentionPolicyTypeBasicBasicRetentionPolicyDescription {
	return original.PossibleRetentionPolicyTypeBasicBasicRetentionPolicyDescriptionValues()
}
func PossibleRetentionPolicyTypeValues() []RetentionPolicyType {
	return original.PossibleRetentionPolicyTypeValues()
}
func PossibleSafetyCheckKindValues() []SafetyCheckKind {
	return original.PossibleSafetyCheckKindValues()
}
func PossibleScalingMechanismKindValues() []ScalingMechanismKind {
	return original.PossibleScalingMechanismKindValues()
}
func PossibleScalingTriggerKindValues() []ScalingTriggerKind {
	return original.PossibleScalingTriggerKindValues()
}
func PossibleScheduleKindValues() []ScheduleKind {
	return original.PossibleScheduleKindValues()
}
func PossibleSecretKindValues() []SecretKind {
	return original.PossibleSecretKindValues()
}
func PossibleServiceCorrelationSchemeValues() []ServiceCorrelationScheme {
	return original.PossibleServiceCorrelationSchemeValues()
}
func PossibleServiceEndpointRoleValues() []ServiceEndpointRole {
	return original.PossibleServiceEndpointRoleValues()
}
func PossibleServiceKindBasicDeployedServiceReplicaDetailInfoValues() []ServiceKindBasicDeployedServiceReplicaDetailInfo {
	return original.PossibleServiceKindBasicDeployedServiceReplicaDetailInfoValues()
}
func PossibleServiceKindBasicDeployedServiceReplicaInfoValues() []ServiceKindBasicDeployedServiceReplicaInfo {
	return original.PossibleServiceKindBasicDeployedServiceReplicaInfoValues()
}
func PossibleServiceKindBasicReplicaHealthStateValues() []ServiceKindBasicReplicaHealthState {
	return original.PossibleServiceKindBasicReplicaHealthStateValues()
}
func PossibleServiceKindBasicReplicaHealthValues() []ServiceKindBasicReplicaHealth {
	return original.PossibleServiceKindBasicReplicaHealthValues()
}
func PossibleServiceKindBasicReplicaInfoValues() []ServiceKindBasicReplicaInfo {
	return original.PossibleServiceKindBasicReplicaInfoValues()
}
func PossibleServiceKindBasicServiceDescriptionValues() []ServiceKindBasicServiceDescription {
	return original.PossibleServiceKindBasicServiceDescriptionValues()
}
func PossibleServiceKindBasicServiceInfoValues() []ServiceKindBasicServiceInfo {
	return original.PossibleServiceKindBasicServiceInfoValues()
}
func PossibleServiceKindBasicServicePartitionInfoValues() []ServiceKindBasicServicePartitionInfo {
	return original.PossibleServiceKindBasicServicePartitionInfoValues()
}
func PossibleServiceKindBasicServiceUpdateDescriptionValues() []ServiceKindBasicServiceUpdateDescription {
	return original.PossibleServiceKindBasicServiceUpdateDescriptionValues()
}
func PossibleServiceKindValues() []ServiceKind {
	return original.PossibleServiceKindValues()
}
func PossibleServiceLoadMetricWeightValues() []ServiceLoadMetricWeight {
	return original.PossibleServiceLoadMetricWeightValues()
}
func PossibleServiceOperationNameValues() []ServiceOperationName {
	return original.PossibleServiceOperationNameValues()
}
func PossibleServicePackageActivationModeValues() []ServicePackageActivationMode {
	return original.PossibleServicePackageActivationModeValues()
}
func PossibleServicePartitionKindBasicPartitionInformationValues() []ServicePartitionKindBasicPartitionInformation {
	return original.PossibleServicePartitionKindBasicPartitionInformationValues()
}
func PossibleServicePartitionKindValues() []ServicePartitionKind {
	return original.PossibleServicePartitionKindValues()
}
func PossibleServicePartitionStatusValues() []ServicePartitionStatus {
	return original.PossibleServicePartitionStatusValues()
}
func PossibleServicePlacementPolicyTypeValues() []ServicePlacementPolicyType {
	return original.PossibleServicePlacementPolicyTypeValues()
}
func PossibleServiceStatusValues() []ServiceStatus {
	return original.PossibleServiceStatusValues()
}
func PossibleServiceTypeRegistrationStatusValues() []ServiceTypeRegistrationStatus {
	return original.PossibleServiceTypeRegistrationStatusValues()
}
func PossibleSizeTypesValues() []SizeTypes {
	return original.PossibleSizeTypesValues()
}
func PossibleStateValues() []State {
	return original.PossibleStateValues()
}
func PossibleStorageKindValues() []StorageKind {
	return original.PossibleStorageKindValues()
}
func PossibleTypeValues() []Type {
	return original.PossibleTypeValues()
}
func PossibleUpgradeDomainStateValues() []UpgradeDomainState {
	return original.PossibleUpgradeDomainStateValues()
}
func PossibleUpgradeKindValues() []UpgradeKind {
	return original.PossibleUpgradeKindValues()
}
func PossibleUpgradeModeValues() []UpgradeMode {
	return original.PossibleUpgradeModeValues()
}
func PossibleUpgradeSortOrderValues() []UpgradeSortOrder {
	return original.PossibleUpgradeSortOrderValues()
}
func PossibleUpgradeStateValues() []UpgradeState {
	return original.PossibleUpgradeStateValues()
}
func PossibleUpgradeTypeValues() []UpgradeType {
	return original.PossibleUpgradeTypeValues()
}
func PossibleVolumeProviderValues() []VolumeProvider {
	return original.PossibleVolumeProviderValues()
}
// UserAgent returns the versioned package's user-agent string with the
// " profiles/preview" suffix appended, marking requests made through this
// preview profile.
func UserAgent() string {
	return original.UserAgent() + " profiles/preview"
}

// Version forwards to the versioned package's Version.
func Version() string {
	return original.Version()
}
| apache-2.0 |
ollie314/kafka | clients/src/test/java/org/apache/kafka/common/metrics/JmxReporterTest.java | 5954 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Total;
import org.junit.Test;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class JmxReporterTest {
@Test
public void testJmxRegistration() throws Exception {
    Metrics metricsRegistry = new Metrics();
    MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
    try {
        JmxReporter jmxReporter = new JmxReporter();
        metricsRegistry.addReporter(jmxReporter);

        // ObjectName is an immutable value type, so one instance per group suffices.
        ObjectName grp1Bean = new ObjectName(":type=grp1");
        ObjectName grp2Bean = new ObjectName(":type=grp2");

        // Nothing registered yet.
        assertFalse(beanServer.isRegistered(grp1Bean));

        Sensor requestSensor = metricsRegistry.sensor("kafka.requests");
        requestSensor.add(metricsRegistry.metricName("pack.bean1.avg", "grp1"), new Avg());
        requestSensor.add(metricsRegistry.metricName("pack.bean2.total", "grp2"), new Total());

        // Adding metrics registers one MBean per group, each exposing its metric as an attribute.
        assertTrue(beanServer.isRegistered(grp1Bean));
        assertEquals(0.0, beanServer.getAttribute(grp1Bean, "pack.bean1.avg"));
        assertTrue(beanServer.isRegistered(grp2Bean));
        assertEquals(0.0, beanServer.getAttribute(grp2Bean, "pack.bean2.total"));

        MetricName avgMetricName = metricsRegistry.metricName("pack.bean1.avg", "grp1");
        String avgBeanName = JmxReporter.getMBeanName("", avgMetricName);
        assertTrue(jmxReporter.containsMbean(avgBeanName));

        // Removing grp1's only metric unregisters its MBean; grp2 is untouched.
        metricsRegistry.removeMetric(avgMetricName);
        assertFalse(jmxReporter.containsMbean(avgBeanName));
        assertFalse(beanServer.isRegistered(grp1Bean));
        assertTrue(beanServer.isRegistered(grp2Bean));
        assertEquals(0.0, beanServer.getAttribute(grp2Bean, "pack.bean2.total"));

        // Removing the grp2 metric clears the last remaining MBean as well.
        metricsRegistry.removeMetric(metricsRegistry.metricName("pack.bean2.total", "grp2"));
        assertFalse(jmxReporter.containsMbean(avgBeanName));
        assertFalse(beanServer.isRegistered(grp1Bean));
        assertFalse(beanServer.isRegistered(grp2Bean));
    } finally {
        metricsRegistry.close();
    }
}
@Test
public void testJmxRegistrationSanitization() throws Exception {
Metrics metrics = new Metrics();
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
try {
metrics.addReporter(new JmxReporter());
Sensor sensor = metrics.sensor("kafka.requests");
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo*"), new Total());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo+"), new Total());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo?"), new Total());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo:"), new Total());
sensor.add(metrics.metricName("name", "group", "desc", "id", "foo%"), new Total());
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo\\*\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo\\*\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo+\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo+\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo\\?\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo\\?\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=\"foo:\"")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=\"foo:\""), "name"));
assertTrue(server.isRegistered(new ObjectName(":type=group,id=foo%")));
assertEquals(0.0, server.getAttribute(new ObjectName(":type=group,id=foo%"), "name"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo*"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo+"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo?"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo:"));
metrics.removeMetric(metrics.metricName("name", "group", "desc", "id", "foo%"));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=\"foo\\*\"")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=foo+")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=\"foo\\?\"")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=\"foo:\"")));
assertFalse(server.isRegistered(new ObjectName(":type=group,id=foo%")));
} finally {
metrics.close();
}
}
}
| apache-2.0 |
donttrustthisbot/amphtml | ads/inabox/frame-overlay-helper.js | 3755 | /**
* Copyright 2017 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
LayoutRectDef,
layoutRectFromDomRect,
layoutRectLtwh,
} from '../../src/layout-rect';
import {
centerFrameUnderVsyncMutate,
collapseFrameUnderVsyncMutate,
expandFrameUnderVsyncMutate,
} from '../../src/full-overlay-frame-helper';
import {restrictedVsync, timer} from './util';
const CENTER_TRANSITION_TIME_MS = 500;
const CENTER_TRANSITION_END_WAIT_TIME_MS = 200;
/**
* Places the child frame in full overlay mode.
* @param {!Window} win Host window.
* @param {!HTMLIFrameElement} iframe
* @param {function(!LayoutRectDef, !LayoutRectDef)} onFinish
* @private
*/
const expandFrameImpl = function(win, iframe, onFinish) {
  restrictedVsync(win, {
    measure(state) {
      // Snapshot the viewport size and the frame's current rect in the
      // vsync measure phase, before any style mutation occurs.
      state.viewportSize = {
        width: win./*OK*/innerWidth,
        height: win./*OK*/innerHeight,
      };
      state.rect = iframe./*OK*/getBoundingClientRect();
    },
    mutate(state) {
      const collapsedRect = layoutRectFromDomRect(state.rect);
      const expandedRect = layoutRectLtwh(
          0, 0, state.viewportSize.width, state.viewportSize.height);
      // Phase 1: animate the frame toward the viewport center.
      centerFrameUnderVsyncMutate(iframe, state.rect, state.viewportSize,
          CENTER_TRANSITION_TIME_MS);
      // Phase 2: after the centering transition (plus a settle buffer) has
      // elapsed, expand to full overlay and report both rects to the caller.
      timer(() => {
        restrictedVsync(win, {
          mutate() {
            expandFrameUnderVsyncMutate(iframe);
            onFinish(collapsedRect, expandedRect);
          },
        });
      }, CENTER_TRANSITION_TIME_MS + CENTER_TRANSITION_END_WAIT_TIME_MS);
    },
  }, {});
};
/**
* Resets the frame from full overlay mode.
* @param {!Window} win Host window.
* @param {!HTMLIFrameElement} iframe
* @param {function()} onFinish
* @param {function(!LayoutRectDef)} onMeasure
* @private
*/
const collapseFrameImpl = function(win, iframe, onFinish, onMeasure) {
  restrictedVsync(win, {
    mutate() {
      // Restore the frame's pre-overlay styling, then notify the caller.
      collapseFrameUnderVsyncMutate(iframe);
      onFinish();
      // remeasure so client knows about updated dimensions
      restrictedVsync(win, {
        measure() {
          onMeasure(
              layoutRectFromDomRect(iframe./*OK*/getBoundingClientRect()));
        },
      });
    },
  });
};
/**
* Places the child frame in full overlay mode.
* @param {!Window} win Host window.
* @param {!HTMLIFrameElement} iframe
* @param {function(!LayoutRectDef, !LayoutRectDef)} onFinish
*/
export let expandFrame = expandFrameImpl;

/**
 * Replaces the exported expandFrame implementation (test hook).
 * @param {!Function} implFn
 * @visibleForTesting
 */
export function stubExpandFrameForTesting(implFn) {
  expandFrame = implFn;
}

/**
 * Restores expandFrame to the production implementation.
 * @visibleForTesting
 */
export function resetExpandFrameForTesting() {
  expandFrame = expandFrameImpl;
}

/**
 * Resets the frame from full overlay mode. Mutable binding so tests can
 * stub it via the helpers below.
 * @param {!Window} win Host window.
 * @param {!HTMLIFrameElement} iframe
 * @param {function()} onFinish
 * @param {function(!LayoutRectDef)} onMeasure
 */
export let collapseFrame = collapseFrameImpl;

/**
 * Replaces the exported collapseFrame implementation (test hook).
 * @param {!Function} implFn
 * @visibleForTesting
 */
export function stubCollapseFrameForTesting(implFn) {
  collapseFrame = implFn;
}

/**
 * Restores collapseFrame to the production implementation.
 * @visibleForTesting
 */
export function resetCollapseFrameForTesting() {
  collapseFrame = collapseFrameImpl;
}
| apache-2.0 |
mminella/spring-cloud-data | spring-cloud-dataflow-core/src/main/java/org/springframework/cloud/dataflow/core/TaskManifest.java | 2150 | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.dataflow.core;
import org.springframework.cloud.deployer.spi.core.AppDeploymentRequest;
import org.springframework.core.style.ToStringCreator;
/**
* Description of an execution of a task including resource to be executed and how it was configured via Spring Cloud
* Data Flow
*
* @author Mark Pollack
* @author Michael Minella
* @since 2.3
*/
public class TaskManifest {

	private AppDeploymentRequest taskDeploymentRequest;

	private String platformName;

	/**
	 * Name of the platform the related task execution was executed on.
	 *
	 * @return name of the platform
	 */
	public String getPlatformName() {
		return platformName;
	}

	/**
	 * Name of the platform the related task execution was executed on.
	 *
	 * @param platformName platform name
	 */
	public void setPlatformName(String platformName) {
		this.platformName = platformName;
	}

	/**
	 * {@code AppDeploymentRequest} representing the task being executed
	 *
	 * @return {@code AppDeploymentRequest}
	 */
	public AppDeploymentRequest getTaskDeploymentRequest() {
		return taskDeploymentRequest;
	}

	/**
	 * Task deployment
	 *
	 * @param taskDeploymentRequest {@code AppDeploymentRequest}
	 */
	public void setTaskDeploymentRequest(AppDeploymentRequest taskDeploymentRequest) {
		this.taskDeploymentRequest = taskDeploymentRequest;
	}

	/**
	 * Returns a string representation containing the deployment request and
	 * platform name, built with Spring's {@code ToStringCreator}.
	 *
	 * @return string form of this manifest
	 */
	// Fix: this overrides Object.toString() but was missing @Override and javadoc.
	@Override
	public String toString() {
		return (new ToStringCreator(this)).append("taskDeploymentRequest", this.taskDeploymentRequest).append("platformName", this.platformName).toString();
	}
}
| apache-2.0 |
mmaracic/elasticsearch | test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java | 5321 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test.store;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.plugins.Plugin;
import java.util.Collections;
import java.util.EnumSet;
import java.util.IdentityHashMap;
import java.util.Map;
/**
 * Test-only {@code IndexStore} that wraps shard directories in
 * {@code MockFSDirectoryService} and (optionally) runs a Lucene check-index
 * on shards when they are closed.
 */
public class MockFSIndexStore extends IndexStore {

    // When true (the default), shards closing from an active state are
    // check-indexed after close (see Listener below).
    public static final Setting<Boolean> INDEX_CHECK_INDEX_ON_CLOSE_SETTING =
        Setting.boolSetting("index.store.mock.check_index_on_close", true, Property.IndexScope, Property.NodeScope);

    /** Plugin that installs the mock store as the "mock" index store type. */
    public static class TestPlugin extends Plugin {
        @Override
        public String name() {
            return "mock-index-store";
        }
        @Override
        public String description() {
            return "a mock index store for testing";
        }
        @Override
        public Settings additionalSettings() {
            // Default every index to the mock store type.
            return Settings.builder().put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "mock").build();
        }
        public void onModule(SettingsModule module) {
            // Register all settings the mock store and directory service read.
            module.registerSetting(INDEX_CHECK_INDEX_ON_CLOSE_SETTING);
            module.registerSetting(MockFSDirectoryService.CRASH_INDEX_SETTING);
            module.registerSetting(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING);
            module.registerSetting(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE_SETTING);
            module.registerSetting(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE_SETTING);
            module.registerSetting(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING);
        }
        @Override
        public void onIndexModule(IndexModule indexModule) {
            Settings indexSettings = indexModule.getSettings();
            // Only wire the mock store for indices actually configured to use it.
            if ("mock".equals(indexSettings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()))) {
                if (INDEX_CHECK_INDEX_ON_CLOSE_SETTING.get(indexSettings)) {
                    indexModule.addIndexEventListener(new Listener());
                }
                indexModule.addIndexStore("mock", MockFSIndexStore::new);
            }
        }
    }

    MockFSIndexStore(IndexSettings indexSettings,
                     IndexStoreConfig config) {
        super(indexSettings, config);
    }

    // Every shard directory is backed by the fault-injecting mock service.
    public DirectoryService newDirectoryService(ShardPath path) {
        return new MockFSDirectoryService(indexSettings, this, path);
    }

    // States from which a close should trigger a check-index pass.
    private static final EnumSet<IndexShardState> validCheckIndexStates = EnumSet.of(
        IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY
    );

    /**
     * Marks shards that close out of an active state, then runs check-index
     * on them once they are fully closed.
     */
    private static final class Listener implements IndexEventListener {

        // Identity map of shards flagged for a post-close check-index.
        private final Map<IndexShard, Boolean> shardSet = Collections.synchronizedMap(new IdentityHashMap<>());
        @Override
        public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
            if (indexShard != null) {
                Boolean remove = shardSet.remove(indexShard);
                if (remove == Boolean.TRUE) {
                    ESLogger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
                    MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId());
                }
            }
        }
        @Override
        public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {
            // Flag for check-index only when closing from an active state and
            // the shard does not live on a shared filesystem.
            if (currentState == IndexShardState.CLOSED && validCheckIndexStates.contains(previousState) && indexShard.indexSettings().isOnSharedFilesystem() == false) {
                shardSet.put(indexShard, Boolean.TRUE);
            }
        }
    }
}
| apache-2.0 |
jhrcek/kie-wb-common | kie-wb-common-dmn/kie-wb-common-dmn-client/src/main/java/org/kie/workbench/common/dmn/client/resources/DMNSVGViewFactory.java | 2348 | /*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.dmn.client.resources;
import org.kie.workbench.common.stunner.svg.annotation.SVGSource;
import org.kie.workbench.common.stunner.svg.annotation.SVGViewFactory;
import org.kie.workbench.common.stunner.svg.client.shape.view.SVGShapeViewResource;
import static org.kie.workbench.common.dmn.client.resources.DMNSVGViewFactory.PATH_CSS;
@SVGViewFactory(cssPath = PATH_CSS)
public interface DMNSVGViewFactory {

    // Stylesheet shared by all DMN shape SVGs.
    String PATH_CSS = "images/shapes/dmn-shapes.css";

    // SVG resources per DMN node type; the *_PALETTE variants are the
    // palette icons for the same nodes.
    String DIAGRAM = "images/shapes/diagram.svg";
    String BUSINESS_KNOWLEDGE_MODEL = "images/shapes/business-knowledge-model.svg";
    String BUSINESS_KNOWLEDGE_MODEL_PALETTE = "images/shapes/business-knowledge-model-palette.svg";
    String DECISION = "images/shapes/decision.svg";
    String DECISION_PALETTE = "images/shapes/decision-palette.svg";
    String INPUT_DATA = "images/shapes/input-data.svg";
    String INPUT_DATA_PALETTE = "images/shapes/input-data-palette.svg";
    String KNOWLEDGE_SOURCE = "images/shapes/knowledge-source.svg";
    String KNOWLEDGE_SOURCE_PALETTE = "images/shapes/knowledge-source-palette.svg";
    String TEXT_ANNOTATION = "images/shapes/text-annotation.svg";
    String TEXT_ANNOTATION_PALETTE = "images/shapes/text-annotation-palette.svg";

    // One factory method per canvas shape; implementations are generated
    // from the @SVGSource annotations.
    @SVGSource(DIAGRAM)
    SVGShapeViewResource diagram();

    @SVGSource(BUSINESS_KNOWLEDGE_MODEL)
    SVGShapeViewResource businessKnowledgeModel();

    @SVGSource(DECISION)
    SVGShapeViewResource decision();

    @SVGSource(INPUT_DATA)
    SVGShapeViewResource inputData();

    @SVGSource(KNOWLEDGE_SOURCE)
    SVGShapeViewResource knowledgeSource();

    @SVGSource(TEXT_ANNOTATION)
    SVGShapeViewResource textAnnotation();
}
| apache-2.0 |
ncdc/origin | pkg/template/generated/listers/template/internalversion/brokertemplateinstance.go | 1892 | // This file was automatically generated by lister-gen
package internalversion
import (
api "github.com/openshift/origin/pkg/template/api"
"k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// BrokerTemplateInstanceLister helps list BrokerTemplateInstances.
type BrokerTemplateInstanceLister interface {
// List lists all BrokerTemplateInstances in the indexer.
List(selector labels.Selector) (ret []*api.BrokerTemplateInstance, err error)
// Get retrieves the BrokerTemplateInstance from the index for a given name.
Get(name string) (*api.BrokerTemplateInstance, error)
BrokerTemplateInstanceListerExpansion
}
// brokerTemplateInstanceLister implements the BrokerTemplateInstanceLister interface.
type brokerTemplateInstanceLister struct {
indexer cache.Indexer
}
// NewBrokerTemplateInstanceLister returns a new BrokerTemplateInstanceLister.
func NewBrokerTemplateInstanceLister(indexer cache.Indexer) BrokerTemplateInstanceLister {
return &brokerTemplateInstanceLister{indexer: indexer}
}
// List lists all BrokerTemplateInstances in the indexer.
func (s *brokerTemplateInstanceLister) List(selector labels.Selector) (ret []*api.BrokerTemplateInstance, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*api.BrokerTemplateInstance))
})
return ret, err
}
// Get retrieves the BrokerTemplateInstance from the index for a given name.
func (s *brokerTemplateInstanceLister) Get(name string) (*api.BrokerTemplateInstance, error) {
	// The cache indexer is keyed by object, so build a minimal object
	// carrying only the name to perform the lookup.
	key := &api.BrokerTemplateInstance{ObjectMeta: v1.ObjectMeta{Name: name}}
	obj, exists, err := s.indexer.Get(key)
	if err != nil {
		return nil, err
	}
	if !exists {
		// Return a standard apimachinery NotFound error so callers can
		// detect the miss with errors.IsNotFound.
		return nil, errors.NewNotFound(api.Resource("brokertemplateinstance"), name)
	}
	return obj.(*api.BrokerTemplateInstance), nil
}
| apache-2.0 |
coderzh/hugo | utils/utils.go | 1295 | // Copyright 2015 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"os"
jww "github.com/spf13/jwalterweatherman"
)
// CheckErr logs err if it is non-nil and then continues execution.
// With no extra messages the error itself is logged at CRITICAL level;
// otherwise each message is logged at ERROR level followed by the error.
func CheckErr(err error, s ...string) {
	if err != nil {
		if len(s) == 0 {
			jww.CRITICAL.Println(err)
		} else {
			for _, message := range s {
				jww.ERROR.Println(message)
			}
			jww.ERROR.Println(err)
		}
	}
}

// StopOnErr logs err (or the given messages) at CRITICAL level and
// terminates the process with exit code -1 when err is non-nil.
// Empty messages are suppressed to avoid logging blank lines.
func StopOnErr(err error, s ...string) {
	if err != nil {
		if len(s) == 0 {
			newMessage := err.Error()
			// Printing an empty string results in a error with
			// no message, no bueno.
			if newMessage != "" {
				jww.CRITICAL.Println(newMessage)
			}
		} else {
			for _, message := range s {
				if message != "" {
					jww.CRITICAL.Println(message)
				}
			}
		}
		os.Exit(-1)
	}
}
| apache-2.0 |
JGiola/swift | include/swift/SILOptimizer/PassManager/Passes.h | 2755 | //===--- Passes.h - Swift Compiler SIL Pass Entrypoints ---------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file declares the main entrypoints to SIL passes.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_SILOPTIMIZER_PASSMANAGER_PASSES_H
#define SWIFT_SILOPTIMIZER_PASSMANAGER_PASSES_H
#include "swift/SIL/SILModule.h"
namespace swift {
class SILOptions;
class SILTransform;
class SILModuleTransform;
namespace irgen {
class IRGenModule;
}
/// Run all the SIL diagnostic passes on \p M.
///
/// \returns true if the diagnostic passes produced an error
bool runSILDiagnosticPasses(SILModule &M);
/// Run all the SIL performance optimization passes on \p M.
void runSILOptimizationPasses(SILModule &M);
/// Run all SIL passes for -Onone on module \p M.
void runSILPassesForOnone(SILModule &M);
/// Run the SIL lower hop-to-actor pass on \p M.
bool runSILLowerHopToActorPass(SILModule &M);
/// Run the SIL ownership eliminator pass on \p M.
bool runSILOwnershipEliminatorPass(SILModule &M);
void runSILOptimizationPassesWithFileSpecification(SILModule &Module,
StringRef FileName);
/// Detect and remove unreachable code. Diagnose provably unreachable
/// user code.
void performSILDiagnoseUnreachable(SILModule *M);
/// Remove dead functions from \p M.
void performSILDeadFunctionElimination(SILModule *M);
/// Convert SIL to a lowered form suitable for IRGen.
void runSILLoweringPasses(SILModule &M);
/// Perform SIL Inst Count on M if needed.
void performSILInstCountIfNeeded(SILModule *M);
/// Identifiers for all passes. Used to procedurally create passes from
/// lists of passes.
enum class PassKind {
  // X-macro expansion: Passes.def invokes PASS(ID, TAG, NAME) once per pass,
  // contributing one enumerator per pass, and PASS_RANGE(ID, START, END) to
  // define ID_First/ID_Last markers for each pass category.
#define PASS(ID, TAG, NAME) ID,
#define PASS_RANGE(ID, START, END) ID##_First = START, ID##_Last = END,
#include "Passes.def"
  invalidPassKind  // Sentinel: does not correspond to any real pass.
};
PassKind PassKindFromString(StringRef ID);
StringRef PassKindID(PassKind Kind);
StringRef PassKindTag(PassKind Kind);
#define PASS(ID, TAG, NAME) \
SILTransform *create##ID();
#define SWIFT_FUNCTION_PASS_WITH_LEGACY(ID, TAG, NAME) \
PASS(ID, TAG, NAME) \
SILTransform *createLegacy##ID();
#define IRGEN_PASS(ID, TAG, NAME)
#include "Passes.def"
} // end namespace swift
#endif
| apache-2.0 |
physhi/roslyn | src/Compilers/Core/Rebuild/Extensions.cs | 1272 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.IO;
using System.Linq;
using System.Reflection.Metadata;
using System.Reflection.PortableExecutable;
namespace Microsoft.CodeAnalysis.Rebuild
{
public static class Extensions
{
internal static void SkipNullTerminator(ref this BlobReader blobReader)
{
var b = blobReader.ReadByte();
if (b != '\0')
{
throw new InvalidDataException(string.Format(RebuildResources.Encountered_unexpected_byte_0_when_expecting_a_null_terminator, b));
}
}
public static MetadataReader? GetEmbeddedPdbMetadataReader(this PEReader peReader)
{
var entry = peReader.ReadDebugDirectory().SingleOrDefault(x => x.Type == DebugDirectoryEntryType.EmbeddedPortablePdb);
if (entry.Type == DebugDirectoryEntryType.Unknown)
{
return null;
}
var provider = peReader.ReadEmbeddedPortablePdbDebugDirectoryData(entry);
return provider.GetMetadataReader();
}
}
}
| apache-2.0 |
rowhit/h2o-2 | src/main/java/water/deploy/LaunchJar.java | 4106 | package water.deploy;
import java.io.*;
import java.util.*;
import java.util.Map.Entry;
import java.util.jar.*;
import javassist.*;
import water.*;
import water.api.DocGen;
import water.util.Utils;
/**
 * Request that loads user-supplied jars from the KV store, rewrites the
 * contained class names with a unique suffix (so the same job class can be
 * launched multiple times), registers the rewritten jar on every node, and
 * starts the requested Job.
 */
public class LaunchJar extends Request2 {
    static final int API_WEAVER = 1;
    static public DocGen.FieldDoc[] DOC_FIELDS;

    // Comma-separated list of KV keys holding the uploaded jar bytes.
    @API(help = "Jars keys", required = true, filter = Default.class)
    public String jars;

    // Fully-qualified name of the Job subclass to instantiate and fork.
    @API(help = "Class to instantiate and launch", required = true, filter = Default.class)
    public String job_class;

    @Override protected Response serve() {
        final Job job;
        try {
            // Move jars from KV store to tmp files
            ClassPool pool = new ClassPool(true);
            ArrayList<JarEntry> entries = new ArrayList<JarEntry>();
            String[] splits = jars.split(",");
            for( int i = 0; i < splits.length; i++ ) {
                Key key = Key.make(splits[i]);
                // NOTE(review): the jar-extraction path below is disabled —
                // any non-empty jars list throws H2O.unimpl() here, so the
                // remainder of this method only runs for an empty list.
                throw H2O.unimpl();
                //ValueArray va = UKV.get(key);
                //File file = File.createTempFile("h2o", ".jar");
                //Utils.writeFileAndClose(file, va.openStream());
                //DKV.remove(key);
                //pool.appendClassPath(file.getPath());
                //
                //JarFile jar = new JarFile(file);
                //Enumeration e = jar.entries();
                //while( e.hasMoreElements() ) {
                //  JarEntry entry = (JarEntry) e.nextElement();
                //  entries.add(entry);
                //}
                //jar.close();
            }
            // Append UID to class names so allow multiple invocations
            String uid = Key.rand();
            ClassMap renames = new ClassMap();
            for( JarEntry entry : entries ) {
                if( entry.getName().endsWith(".class") ) {
                    String n = Utils.className(entry.getName());
                    String u;
                    // Insert the UID before any inner-class suffix ($...).
                    int index = n.indexOf("$");
                    if( index < 0 )
                        index = n.length();
                    u = n.substring(0, index) + uid + n.substring(index);
                    renames.put(n, u);
                }
            }
            // Rewrite every renamed class's references via javassist.
            ArrayList<CtClass> updated = new ArrayList();
            for( Entry<String, String> entry : ((Map<String, String>) renames).entrySet() ) {
                CtClass c = pool.get(entry.getKey().replace('/', '.'));
                c.replaceClassName(renames);
                updated.add(c);
            }
            // Create jar file and register it on each node
            HashSet<String> packages = new HashSet();
            ByteArrayOutputStream mem = new ByteArrayOutputStream();
            JarOutputStream jar = new JarOutputStream(mem);
            DataOutputStream bc = new DataOutputStream(jar);
            for( CtClass c : updated ) {
                jar.putNextEntry(new JarEntry(c.getName().replace('.', '/') + ".class"));
                c.toBytecode(bc);
                bc.flush();
                String p = c.getPackageName();
                if( p == null )
                    throw new IllegalArgumentException("Package is null for class " + c);
                packages.add(p);
            }
            jar.close();
            // Make the rewritten packages weavable, then push the jar bytes
            // to every node in the cloud.
            weavePackages(packages.toArray(new String[0]));
            AddJar task = new AddJar();
            task._data = mem.toByteArray();
            task.invokeOnAllNodes();
            // Start job
            Class c = Class.forName(job_class + uid);
            job = (Job) c.newInstance();
            job.fork();
        } catch( Exception ex ) {
            // Wrap every failure (IO, javassist, reflection) uniformly.
            throw new RuntimeException(ex);
        }
        return Response.done(this);
    }

    /** Registers the given package names for weaving on all nodes. */
    public static void weavePackages(String... names) {
        WeavePackages task = new WeavePackages();
        task._names = names;
        task.invokeOnAllNodes();
    }

    /** Distributed task: weave each named package on the local node. */
    static class WeavePackages extends DRemoteTask {
        String[] _names;
        @Override public void lcompute() {
            for( String name : _names )
                Boot.weavePackage(name);
            tryComplete();
        }
        @Override public void reduce(DRemoteTask drt) {
        }
    }

    /** Distributed task: write the jar bytes to a temp file and add it to the classpath. */
    static class AddJar extends DRemoteTask {
        byte[] _data;
        @Override public void lcompute() {
            try {
                File file = File.createTempFile("h2o", ".jar");
                Utils.writeFileAndClose(file, new ByteArrayInputStream(_data));
                Boot._init.addExternalJars(file);
                tryComplete();
            } catch( Exception ex ) {
                throw new RuntimeException(ex);
            }
        }
        @Override public void reduce(DRemoteTask drt) {
        }
    }
}
| apache-2.0 |
tjordanchat/rundeck | test/api/api-expect-exec-success.sh | 881 | #!/bin/bash
# usage:
# api-expect-exec-success.sh ID [message]
# tests an execution status is succeeded
execid="$1"
shift
expectstatus=${1:-succeeded}
shift
# arg to include.sh
set -- -
DIR=$(cd `dirname $0` && pwd)
source $DIR/include.sh
# now submit req
runurl="${APIURL}/execution/${execid}"
params=""
# get listing
docurl ${runurl}?${params} > $DIR/curl.out
if [ 0 != $? ] ; then
errorMsg "ERROR: failed query request ${runurl}?${params}"
exit 2
fi
$SHELL $SRC_DIR/api-test-success.sh $DIR/curl.out || (echo "${runurl}?${params}"; exit 2)
#Check projects list
itemcount=$($XMLSTARLET sel -T -t -v "/result/executions/@count" $DIR/curl.out)
assert "1" "$itemcount" "execution count should be 1"
status=$($XMLSTARLET sel -T -t -v "//execution[@id=$execid]/@status" $DIR/curl.out)
assert "$expectstatus" "$status" "execution status should be succeeded"
exit 0 | apache-2.0 |
madmax983/h2o-3 | h2o-docs/src/booklets/v2_2015/source/GLM_Vignette_code_examples/glm_model_output_20.py | 19 | binomial_fit.coef() | apache-2.0 |
MatthieuMEZIL/roslyn | src/Features/VisualBasic/Portable/CodeFixes/SimplifyTypeNames/SimplifyTypeNamesCodeFixProvider.SimplifyTypeNamesFixAllProvider.vb | 1377 | ' Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Imports System.Threading
Imports Microsoft.CodeAnalysis.CodeFixes
Namespace Microsoft.CodeAnalysis.VisualBasic.CodeFixes.SimplifyTypeNames
Partial Friend Class SimplifyTypeNamesCodeFixProvider
    Inherits CodeFixProvider

    ' Batch fix-all provider: for each diagnostic it locates the syntax node to
    ' simplify and derives a code-action id so equivalent fixes can be grouped.
    Private Class SimplifyTypeNamesFixAllProvider
        Inherits BatchSimplificationFixAllProvider

        ' Shared singleton used by the enclosing provider.
        Friend Shared Shadows ReadOnly Instance As SimplifyTypeNamesFixAllProvider = New SimplifyTypeNamesFixAllProvider

        ' Returns the node to simplify for the diagnostic's span, or Nothing if
        ' no simplifiable node is found. On success, codeActionId is built from
        ' the diagnostic id and the node's text; otherwise it stays Nothing.
        Protected Overrides Function GetNodeToSimplify(root As SyntaxNode, model As SemanticModel, diagnostic As Diagnostic, document As Document, ByRef codeActionId As String, cancellationToken As CancellationToken) As SyntaxNode
            codeActionId = Nothing
            Dim diagnosticId As String = Nothing
            Dim node = SimplifyTypeNamesCodeFixProvider.GetNodeToSimplify(root, model, diagnostic.Location.SourceSpan, document.Options, diagnosticId, cancellationToken)
            If node IsNot Nothing Then
                codeActionId = GetCodeActionId(diagnosticId, node.ToString)
            End If
            Return node
        End Function
    End Class
End Class
End Namespace
| apache-2.0 |
ssaroha/node-webrtc | third_party/webrtc/include/chromium/src/chrome/browser/extensions/api/permissions/permissions_api_helpers.h | 1220 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_EXTENSIONS_API_PERMISSIONS_PERMISSIONS_API_HELPERS_H_
#define CHROME_BROWSER_EXTENSIONS_API_PERMISSIONS_PERMISSIONS_API_HELPERS_H_
#include <memory>
#include <string>
#include "base/memory/ref_counted.h"
namespace base {
class DictionaryValue;
}
namespace extensions {
class PermissionSet;
namespace api {
namespace permissions {
struct Permissions;
}
}
namespace permissions_api_helpers {
// Converts the permission |set| to a permissions object.
std::unique_ptr<api::permissions::Permissions> PackPermissionSet(
const PermissionSet& set);
// Creates a permission set from |permissions|. Returns NULL if the permissions
// cannot be converted to a permission set, in which case |error| will be set.
std::unique_ptr<const PermissionSet> UnpackPermissionSet(
const api::permissions::Permissions& permissions,
bool allow_file_access,
std::string* error);
} // namespace permissions_api_helpers
} // namespace extensions
#endif // CHROME_BROWSER_EXTENSIONS_API_PERMISSIONS_PERMISSIONS_API_HELPERS_H_
| bsd-2-clause |
timeyyy/PyUpdater | pyupdater/vendor/PyInstaller/lib/macholib/macho_standalone.py | 718 | #!/usr/bin/env python
import os
import sys
from macholib.MachOStandalone import MachOStandalone
from macholib.util import strip_files
def standaloneApp(path):
    """Make the app bundle at *path* standalone and strip its binaries.

    Raises SystemExit if *path* does not look like a .app bundle, i.e. it
    is not a directory containing a 'Contents' entry.
    """
    # BUG FIX: the guard was mis-parenthesized as
    #     not os.path.isdir(path) and os.path.exists(...)
    # which (almost) never triggered and let non-bundle paths through.
    # The negation must cover the whole conjunction, as in upstream macholib.
    if not (os.path.isdir(path) and os.path.exists(
            os.path.join(path, 'Contents'))):
        raise SystemExit('%s: %s does not look like an app bundle'
                         % (sys.argv[0], path))
    files = MachOStandalone(path).run()
    strip_files(files)
def main():
    """Command-line entry point: standalone-ify each app bundle argument."""
    print("WARNING: 'macho_standalone' is deprecated, use 'python -mmacholib dump' instead")
    bundles = sys.argv[1:]
    if not bundles:
        raise SystemExit('usage: %s [appbundle ...]' % (sys.argv[0],))
    for bundle_path in bundles:
        standaloneApp(bundle_path)
if __name__ == '__main__':
    # Allow running this module directly as a script.
    main()
| bsd-2-clause |
solitaryr/sticky-notes | public/assets/pbr/js/stickynotes.js | 10960 | /**
* Sticky Notes
*
* An open source lightweight pastebin application
*
* @package StickyNotes
* @author Sayak Banerjee
* @copyright (c) 2014 Sayak Banerjee <mail@sayakbanerjee.com>. All rights reserved.
* @license http://www.opensource.org/licenses/bsd-license.php
* @link http://sayakbanerjee.com/sticky-notes
* @since Version 1.0
* @filesource
*/
/**
 * Stores the current URL (kept in sync by the AJAX navigation monitor)
 *
 * @var string
 */
var currentUrl = $(location).attr('href');
/**
 * Timer container: one array of interval ids per JS instance
 *
 * @var array
 */
var timers = new Array();
/**
 * Instance counter: bumped on every (re)initialization via initInstance()
 *
 * @var int
 */
var instance = 0;
/**
 * Main entry point of the script.
 *
 * Sets up a fresh instance, then the AJAX components, AJAX navigation
 * and the JS addons — in that order.
 *
 * @return void
 */
function initMain()
{
	[initInstance, initAjaxComponents, initAjaxNavigation, initAddons]
		.forEach(function(step)
		{
			step();
		});
}
/**
 * Initializes all JS addons: code wrapping, the code editor, tab
 * persistence, line references and the bootstrap components.
 *
 * @return void
 */
function initAddons()
{
	[initWrapToggle, initEditor, initTabPersistence, initLineReference, initBootstrap]
		.forEach(function(addon)
		{
			addon();
		});
}
/**
 * Initializes a new instance of the JS library.
 *
 * Clears every timer registered by the instance being discarded,
 * advances the instance counter and creates a fresh timer container.
 *
 * @return void
 */
function initInstance()
{
	// Clear all timers belonging to the current instance
	var active = timers[instance];
	if (active !== undefined)
	{
		// Fix: declare the loop variable locally; the original
		// `for (idx in ...)` leaked `idx` into the global scope.
		for (var idx in active)
		{
			clearInterval(active[idx]);
		}
	}
	// Create a new instance and timer container
	instance++;
	timers[instance] = new Array();
}
/**
 * Starts a new timed operation.
 *
 * 'once' schedules a one-shot setTimeout; 'repeat' registers a
 * setInterval whose id is tracked on the current instance so that
 * initInstance() can clear it later. Any other operation is ignored.
 *
 * @param operation 'once' or 'repeat'
 * @param callback  function to invoke
 * @param interval  delay/period in milliseconds
 * @return void
 */
function initTimer(operation, callback, interval)
{
	if (operation === 'once')
	{
		setTimeout(callback, interval);
	}
	else if (operation === 'repeat')
	{
		timers[instance].push(setInterval(callback, interval));
	}
}
/**
 * Scans for and processes AJAX components
 *
 * Each AJAX component can have 4 parameters:
 *  - realtime  : Indicates if the component involves realtime data
 *  - onload    : The AJAX request will be triggered automatically
 *  - component : The utility component to request
 *  - extra     : Any extra data that will be sent to the server
 *
 * Relies on the page-level global `ajaxUrl`; elements without a
 * data-component, or pages without ajaxUrl, are left untouched.
 *
 * @return void
 */
function initAjaxComponents()
{
	var count = 1;
	// Setup AJAX requests
	$('[data-toggle="ajax"]').each(function()
	{
		// Allocate a unique data-id so the success handler can re-find
		// this element even after the DOM was replaced.
		var id = 'stickynotes-' + count++;
		var onload = $(this).attr('data-onload') === 'true';
		var realtime = $(this).attr('data-realtime') === 'true';
		var component = $(this).attr('data-component');
		var extra = $(this).attr('data-extra');
		// Set the id of this element
		$(this).attr('data-id', id);
		// AJAX URL and component must be defined
		if (ajaxUrl !== undefined && component !== undefined)
		{
			var getUrl = ajaxUrl + '/' + component + (extra !== undefined ? '/' + extra : '');
			var callback = function(e)
			{
				// Add the loading icon
				$(this).html('<span class="glyphicon glyphicon-refresh"></span>');
				// Send the AJAX request; `key` is a cache-buster.
				$.ajax({
					url: getUrl,
					data: { key: Math.random(), ajax: 1 },
					context: $('[data-id="' + id + '"]'),
					success: function(response)
					{
						// Dump the HTML in the element
						$(this).html(response);
						// If response is link, set it as href as well,
						// and stop treating the element as an AJAX trigger.
						if (response.indexOf('http') === 0)
						{
							$(this).attr('href', response);
							$(this).removeAttr('data-toggle');
							$(this).off('click');
						}
						// Load addons again
						initAddons();
					}
				});
				if (e !== undefined)
				{
					e.preventDefault();
				}
			};
			// Execute the AJAX callback: automatically on load (and on a
			// 5s repeat for realtime components), otherwise on click.
			if (onload)
			{
				if (realtime)
				{
					initTimer('repeat', callback, 5000);
				}
				initTimer('once', callback, 0);
			}
			else
			{
				$(this).off('click').on('click', callback);
			}
		}
	});
}
/**
 * Enables AJAX navigation across the site
 *
 * Intercepts clicks on most links and submissions of forms marked with
 * data-navigate="ajax", loads the target via XHR and swaps the page
 * body in place. A polling monitor reloads the body when the address
 * bar URL changes (e.g. history back/forward).
 *
 * @return void
 */
function initAjaxNavigation()
{
	if (ajaxNav !== undefined && ajaxNav && $.support.cors)
	{
		// AJAX callback shared by links (GET) and forms (POST)
		var callback = function(e)
		{
			var navMethod = $(this).prop('tagName') == 'A' ? 'GET' : 'POST';
			// Fix: navUrl and payload were implicit globals, and the
			// unused `seek` variable has been dropped.
			var navUrl;
			var payload;
			// Set up data based on method
			switch (navMethod)
			{
				case 'GET':
					navUrl = $(this).attr('href');
					payload = 'ajax=1';
					break;
				case 'POST':
					navUrl = $(this).attr('action');
					payload = $(this).serialize() + '&ajax=1';
					break;
			}
			// Send an AJAX request for all but anchor links
			if (navUrl !== undefined && !$('.loader').is(':visible'))
			{
				$('.loader').show();
				$.ajax({
					url: navUrl,
					method: navMethod,
					context: $('body'),
					data: payload,
					success: function(response, status, info)
					{
						var isPageSection = response.indexOf('<!DOCTYPE html>') == -1;
						var isHtmlContent = info.getResponseHeader('Content-Type').indexOf('text/html') != -1;
						// Change the page URL
						currentUrl = info.getResponseHeader('StickyNotes-Url');
						window.history.pushState({ html: response }, null, currentUrl);
						// Handle the response
						if (isPageSection && isHtmlContent)
						{
							// Partial page: inject it straight into <body>
							$(this).html(response);
						}
						else if (isHtmlContent)
						{
							// Full document: extract and inject its <body>
							// (fix: `dom` is now a local, not an implicit global)
							var dom = $(document.createElement('html'));
							dom[0].innerHTML = response;
							$(this).html(dom.find('body').html());
						}
						else
						{
							// Not HTML: fall back to a normal navigation
							window.location = navUrl;
						}
						// Seek to top of the page
						$.scrollTo(0, 200);
						// Load JS triggers again
						initMain();
					},
					error: function()
					{
						window.location = navUrl;
					}
				});
				e.preventDefault();
			}
		};
		// Execute callback on all links, excluding some
		$('body').find('a' +
			':not([href*="/admin"])' +
			':not([href*="/attachment"])' +
			':not([href*="#"])' +
			':not([href*="mailto:"])' +
			':not([onclick])'
		).off('click').on('click', callback);
		// Execute callback on all designated forms
		$('body').find('form[data-navigate="ajax"]').off('submit').on('submit', callback);
		// URL change monitor
		initTimer('repeat', function()
		{
			var href = $(location).attr('href');
			// Trim the trailing slash from currentUrl
			if (currentUrl.substr(-1) == '/')
			{
				currentUrl = currentUrl.substr(0, currentUrl.length - 1);
			}
			// Trim the trailing slash from href
			if (href.substr(-1) == '/')
			{
				href = href.substr(0, href.length - 1);
			}
			// Reload page if URL changed (ignore in-page anchors)
			if (currentUrl != href && href.indexOf('#') == -1)
			{
				currentUrl = href;
				// Load the selected page
				$('.loader').show();
				$.get(href, function(response)
				{
					// Fix: `dom` declared locally here too.
					var dom = $(document.createElement('html'));
					dom[0].innerHTML = response;
					$('body').html(dom.find('body').html());
				});
			}
		}, 300);
	}
}
/**
 * Activates the code wrapping toggle function.
 *
 * Each click on a [data-toggle="wrap"] element flips the white-space
 * mode of the code listing between 'nowrap' and 'inherit'.
 *
 * @return void
 */
function initWrapToggle()
{
	$('[data-toggle="wrap"]').off('click').on('click', function(e)
	{
		var current = $('.pre div').css('white-space');
		$('.pre div').css('white-space', current != 'nowrap' ? 'nowrap' : 'inherit');
		e.preventDefault();
	});
}
/**
 * Activates the paste editor
 *
 * Wires two behaviours on the paste form: Tab inserts a literal tab
 * character into the code box (instead of moving focus), and typing a
 * password auto-ticks the "private" checkbox.
 *
 * @return void
 */
function initEditor()
{
	// Insert tab in the code box
	$('[name="data"]').off('keydown').on('keydown', function (e)
	{
		// keyCode 9 == Tab
		if (e.keyCode == 9)
		{
			var myValue = "\t";
			// Remember selection and scroll position so both can be
			// restored after the value is rewritten.
			var startPos = this.selectionStart;
			var endPos = this.selectionEnd;
			var scrollTop = this.scrollTop;
			this.value = this.value.substring(0, startPos) + myValue + this.value.substring(endPos,this.value.length);
			this.focus();
			// Place the caret right after the inserted tab.
			this.selectionStart = startPos + myValue.length;
			this.selectionEnd = startPos + myValue.length;
			this.scrollTop = scrollTop;
			e.preventDefault();
		}
	});
	// Tick the private checkbox if password is entered
	$('[name="password"]').off('keyup').on('keyup', function()
	{
		// NOTE(review): jQuery >= 1.9 requires .prop('checked', ...) to
		// reliably toggle checkbox state; .attr() may not update an
		// already-interacted checkbox — verify against the bundled jQuery.
		$('[name="private"]').attr('checked', $(this).val().length > 0);
	});
}
/**
 * Activates some bootstrap components.
 *
 * Currently only enables tooltips on elements opting in through
 * data-toggle="tooltip".
 *
 * @return void
 */
function initBootstrap()
{
	var tooltipTargets = $('[data-toggle="tooltip"]');
	tooltipTargets.tooltip();
}
/**
 * Saves the active bootstrap tab across page loads using a cookie.
 *
 * On load, re-shows the tab index stored in the 'stickynotes_tabstate'
 * cookie; whenever a tab is shown the cookie is updated; on pages
 * without tabs the cookie is reset.
 *
 * @return void
 */
function initTabPersistence()
{
	// Restore the previous tab state.
	// Fix: dropped the `id` local that was assigned but never used.
	$('.nav-tabs').each(function()
	{
		var index = $.cookie('stickynotes_tabstate');
		if (index !== undefined)
		{
			$('.nav-tabs > li:eq(' + index + ') a').tab('show');
		}
	});
	// Save the current tab state (second unused `id` local removed,
	// and the missing statement semicolon added).
	$('.nav-tabs > li > a').on('shown.bs.tab', function (e)
	{
		var index = $(this).parents('li').index();
		$.cookie('stickynotes_tabstate', index);
	});
	// Clear tab state when navigated to a different page.
	// NOTE(review): with some jquery-cookie versions, writing null does
	// not remove the cookie ($.removeCookie would) — confirm the bundled
	// plugin version treats null as "reset".
	if ($('.nav-tabs').length == 0)
	{
		$.cookie('stickynotes_tabstate', null);
	}
}
/**
 * Highlights lines upon clicking them on the #show page
 *
 * Assigns ids line-1, line-2, ... to each rendered code line, scrolls
 * to and highlights the line named in the URL hash (if any), and lets
 * a click on a line update the hash and the highlight.
 *
 * @return void
 */
function initLineReference()
{
	if ($('section#show').length != 0)
	{
		var line = 1;
		// First, we allocate unique IDs to all lines
		$('.pre li').each(function()
		{
			$(this).attr('id', 'line-' + line++);
		});
		// Next, navigate to an ID if the user requested it
		var anchor = window.location.hash;
		if (anchor.length > 0)
		{
			// NOTE(review): if the hash names a non-existent line,
			// $(anchor).offset() is undefined and this throws — confirm
			// whether that case can occur in practice.
			var top = $(anchor).offset().top;
			// Scroll to the anchor
			$.scrollTo(top, 200);
			// Highlight the anchor
			$(anchor).addClass('highlight');
		}
		// Click to change anchor (only when no text is being selected,
		// so copy/paste on the listing still works)
		$('.pre li').off('mouseup').on('mouseup', function()
		{
			if (window.getSelection() == '')
			{
				var id = $(this).attr('id');
				var top = $(this).offset().top;
				// Scroll to the anchor, then update the hash once the
				// animation completes
				$.scrollTo(top, 200, function() {
					window.location.hash = '#' + id;
				});
				// Highlight the anchor (and clear any previous highlight)
				$('.pre li').removeClass('highlight');
				$(this).addClass('highlight');
			}
		});
	}
}
/**
 * Draws a Google area chart in a container
 *
 * Expects the page to define the globals `chartData` (a
 * google.visualization DataTable) and `chartContainer` (a DOM node);
 * does nothing when either is missing. Re-draws itself on window
 * resize so the chart stays fitted.
 *
 * @return void
 */
function initAreaChart()
{
	if (chartData !== undefined && chartContainer !== undefined)
	{
		// Create an instance of line chart
		var chart = new google.visualization.AreaChart(chartContainer);
		// Define chart options
		var options = {
			colors: [ '#428bca', '#d9534f' ],
			areaOpacity: 0.1,
			lineWidth: 4,
			pointSize: 8,
			hAxis: {
				textStyle: {
					color: '#666'
				},
				gridlines: {
					color: 'transparent'
				},
				baselineColor: '#eeeeee',
				format:'MMM d'
			},
			vAxis: {
				textStyle: {
					color: '#666'
				},
				gridlines: {
					color: '#eee'
				}
			},
			chartArea: {
				left: 50,
				top: 10,
				width: '100%',
				height: 210
			},
			legend: {
				position: 'bottom'
			}
		};
		// Draw the line chart
		chart.draw(chartData, options);
	}
	// Redraw chart on window resize
	$(window).off('resize').on('resize', initAreaChart);
}
/**
 * Invoke the entry point once the DOM is ready (jQuery ready shorthand)
 */
$(initMain);
| bsd-2-clause |
Latertater/jbox2d | jbox2d-serialization/src/main/java/org/jbox2d/serialization/SerializationResult.java | 2130 | /*******************************************************************************
* Copyright (c) 2013, Daniel Murphy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
package org.jbox2d.serialization;
import java.io.IOException;
import java.io.OutputStream;
/**
 * Container for holding serialization results. Use
 * {@link #getValue()} to get the implementation-specific
 * result.
 * @author Daniel Murphy
 *
 */
public interface SerializationResult {

	/**
	 * The implementation-specific serialization
	 * result (e.g. a byte buffer or document object,
	 * depending on the serializer in use).
	 * @return serialization result
	 */
	public Object getValue();

	/**
	 * Writes the result to the given output stream.
	 * NOTE(review): whether the stream is flushed/closed is
	 * implementation-defined — callers should manage the stream.
	 * @param argOutputStream stream receiving the serialized form
	 * @throws IOException if writing to the stream fails
	 */
	public void writeTo(OutputStream argOutputStream) throws IOException;
}
| bsd-2-clause |
klane/homebrew-cask | Casks/media-center.rb | 773 | cask 'media-center' do
  version '23.00.20'
  sha256 '70042295e59a0114900ca475cb2ab46d8c8793c58dbb429542ce4129614e5f25'

  # The version appears twice in the URL: the major number selects the
  # channel directory, the dot-free form names the installer image.
  url "http://files.jriver.com/mediacenter/channels/v#{version.major}/stable/MediaCenter#{version.no_dots}.dmg"
  name 'JRiver Media Center'
  homepage 'https://www.jriver.com/'

  app "Media Center #{version.major}.app"

  # zap: beyond a plain uninstall, delete caches/saved state and trash
  # user-level data and preferences.
  zap delete: [
        "~/Library/Caches/com.jriver.MediaCenter#{version.major}",
        "~/Library/Saved Application State/com.jriver.MediaCenter#{version.major}.savedState",
      ],
      trash: [
        '~/Library/Application Support/J River/',
        '~/Documents/JRiver/',
        "~/Library/Preferences/com.jriver.MediaCenter#{version.major}.plist",
      ]
end
kaufmo/koala-framework | tests/Kwc/Trl/DateHelper/DateTime/Component.php | 75 | <?php
/**
 * Intentionally empty component class: all behaviour is inherited from
 * Kwc_Abstract. NOTE(review): judging by its location, this appears to
 * be a test fixture referenced by name — confirm before removing.
 */
class Kwc_Trl_DateHelper_DateTime_Component extends Kwc_Abstract
{
}
| bsd-2-clause |
nwjs/chromium.src | third_party/blink/renderer/platform/peerconnection/rtc_video_encoder_factory_test.cc | 4887 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdint.h>
#include "base/test/task_environment.h"
#include "media/base/svc_scalability_mode.h"
#include "media/base/video_codecs.h"
#include "media/video/mock_gpu_video_accelerator_factories.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/platform/peerconnection/rtc_video_encoder.h"
#include "third_party/blink/renderer/platform/peerconnection/rtc_video_encoder_factory.h"
#include "third_party/webrtc/api/video_codecs/sdp_video_format.h"
#include "third_party/webrtc/api/video_codecs/video_encoder_factory.h"
using ::testing::Return;
namespace blink {
namespace {
constexpr webrtc::VideoEncoderFactory::CodecSupport kSupportedPowerEfficient = {
true, true};
constexpr webrtc::VideoEncoderFactory::CodecSupport kUnsupported = {false,
false};
constexpr gfx::Size kMaxResolution = {1920, 1080};
constexpr uint32_t kMaxFramerateNumerator = 30;
constexpr uint32_t kMaxFramerateDenominator = 1;
const std::vector<media::SVCScalabilityMode> kScalabilityModes = {
media::SVCScalabilityMode::kL1T2, media::SVCScalabilityMode::kL1T3};
// Returns true iff both CodecSupport flags (is_supported and
// is_power_efficient) agree between |a| and |b|.
bool Equals(webrtc::VideoEncoderFactory::CodecSupport a,
            webrtc::VideoEncoderFactory::CodecSupport b) {
  if (a.is_supported != b.is_supported)
    return false;
  return a.is_power_efficient == b.is_power_efficient;
}
// GPU-factories stub that reports a fixed set of hardware encoder
// profiles: VP8 and VP9 profile 0, up to kMaxResolution at 30 fps, with
// L1T2/L1T3 temporal scalability.
class MockGpuVideoEncodeAcceleratorFactories
    : public media::MockGpuVideoAcceleratorFactories {
 public:
  MockGpuVideoEncodeAcceleratorFactories()
      : MockGpuVideoAcceleratorFactories(nullptr) {}

  absl::optional<media::VideoEncodeAccelerator::SupportedProfiles>
  GetVideoEncodeAcceleratorSupportedProfiles() override {
    media::VideoEncodeAccelerator::SupportedProfiles profiles = {
        {media::VP8PROFILE_ANY, kMaxResolution, kMaxFramerateNumerator,
         kMaxFramerateDenominator, kScalabilityModes},
        {media::VP9PROFILE_PROFILE0, kMaxResolution, kMaxFramerateNumerator,
         kMaxFramerateDenominator, kScalabilityModes}};
    return profiles;
  }
};
} // anonymous namespace
typedef webrtc::SdpVideoFormat Sdp;
typedef webrtc::SdpVideoFormat::Parameters Params;
// Test fixture owning the mocked GPU factories and the encoder factory
// under test (the factory queries the mock for supported profiles).
class RTCVideoEncoderFactoryTest : public ::testing::Test {
 public:
  RTCVideoEncoderFactoryTest() : encoder_factory_(&mock_gpu_factories_) {}

 protected:
  base::test::TaskEnvironment task_environment_;
  MockGpuVideoEncodeAcceleratorFactories mock_gpu_factories_;
  RTCVideoEncoderFactory encoder_factory_;
};
// Without a scalability mode, only the codecs the mock advertises
// (VP8, VP9 profile 0) should be reported as supported & power
// efficient; everything else is unsupported.
TEST_F(RTCVideoEncoderFactoryTest, QueryCodecSupportNoSvc) {
  EXPECT_CALL(mock_gpu_factories_, IsEncoderSupportKnown())
      .WillRepeatedly(Return(true));
  // VP8 and VP9 profile 0 are supported.
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(
                         Sdp("VP8"), /*scalability_mode=*/absl::nullopt),
                     kSupportedPowerEfficient));
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(
                         Sdp("VP9"), /*scalability_mode=*/absl::nullopt),
                     kSupportedPowerEfficient));
  // H264, VP9 profile 2 and AV1 are unsupported.
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(
                         Sdp("H264", Params{{"level-asymmetry-allowed", "1"},
                                            {"packetization-mode", "1"},
                                            {"profile-level-id", "42001f"}}),
                         /*scalability_mode=*/absl::nullopt),
                     kUnsupported));
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(
                         Sdp("VP9", Params{{"profile-id", "2"}}),
                         /*scalability_mode=*/absl::nullopt),
                     kUnsupported));
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(
                         Sdp("AV1"), /*scalability_mode=*/absl::nullopt),
                     kUnsupported));
}
// With a scalability mode, support additionally requires the mode to be
// one of the modes the mock advertises (L1T2/L1T3) for that codec.
TEST_F(RTCVideoEncoderFactoryTest, QueryCodecSupportSvc) {
  EXPECT_CALL(mock_gpu_factories_, IsEncoderSupportKnown())
      .WillRepeatedly(Return(true));
  // Test supported modes.
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(Sdp("VP8"), "L1T2"),
                     kSupportedPowerEfficient));
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(Sdp("VP9"), "L1T3"),
                     kSupportedPowerEfficient));
  // Test unsupported modes (unsupported codec, or spatial layers the
  // mock does not advertise).
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(Sdp("AV1"), "L2T1"),
                     kUnsupported));
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(Sdp("H264"), "L1T2"),
                     kUnsupported));
  EXPECT_TRUE(Equals(encoder_factory_.QueryCodecSupport(Sdp("VP8"), "L3T3"),
                     kUnsupported));
}
} // namespace blink
| bsd-3-clause |
scheib/chromium | third_party/blink/web_tests/wpt_internal/prerender/restriction-element-request-fullscreen.html | 890 | <!DOCTYPE html>
<title>Prerendering cannot invoke Element.requestFullscreen</title>
<meta name="timeout" content="long">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="resources/utils.js"></script>
<body>
<script>
promise_test(async t => {
  // Channel over which the prerendered page reports its result back to
  // this (initiating) page.
  const bc = new BroadcastChannel('prerender-channel');
  const gotMessage = new Promise(resolve => {
    bc.addEventListener('message', e => {
      resolve(e.data);
    }, {
      once: true
    });
  });
  // Start prerendering a page that attempts to invoke
  // element.requestFullscreen.
  // This API is activation-gated so it's expected to fail.
  startPrerendering(`resources/request-fullscreen.html`);
  const result = await gotMessage;
  assert_equals(result, 'request failed');
}, 'prerendering page cannot invoke element.requestFullscreen');
</script>
</body>
| bsd-3-clause |
weiawe/django | tests/cache/tests.py | 85300 | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections, transaction
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, TestCase, TransactionTestCase, override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
    """Module-level function fixture for the cache data-type tests.

    Defined at module scope so it is picklable; always returns 42.
    """
    answer = 42
    return answer
class C:
    # Class fixture for the cache data-type tests; defined at module
    # scope so instances are picklable by cache backends.
    def m(n):
        # NOTE(review): `n` plays the role of `self` here; the unusual
        # name is preserved from the original fixture.
        return 24
class Unpickable(object):
    """Fixture whose instances refuse to be pickled.

    Used to exercise cache-backend error handling for values that
    cannot be serialized.
    """

    def __getstate__(self):
        # Abort any pickling attempt.
        error = pickle.PickleError()
        raise error
@override_settings(CACHES={
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
})
class DummyCacheTests(TestCase):
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has its own test case.  Every test asserts that writes are
    # silently ignored while the API surface still works.

    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        cache.set("key", "value")
        self.assertIsNone(cache.get("key"))

    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        cache.add("addkey1", "value")
        result = cache.add("addkey1", "newvalue")
        # add() still reports success even though nothing is stored.
        self.assertTrue(result)
        self.assertIsNone(cache.get("addkey1"))

    def test_non_existent(self):
        "Non-existent keys aren't found in the dummy cache backend"
        self.assertIsNone(cache.get("does_not_exist"))
        self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        cache.set('a', 'a')
        cache.set('b', 'b')
        cache.set('c', 'c')
        cache.set('d', 'd')
        self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(cache.get_many(['a', 'b', 'e']), {})

    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        self.assertIsNone(cache.get("key1"))
        cache.delete("key1")
        self.assertIsNone(cache.get("key1"))
        self.assertIsNone(cache.get("key2"))

    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        cache.set("hello1", "goodbye1")
        self.assertFalse(cache.has_key("hello1"))
        self.assertFalse(cache.has_key("goodbye1"))

    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        cache.set("hello2", "goodbye2")
        self.assertNotIn("hello2", cache)
        self.assertNotIn("goodbye2", cache)

    def test_incr(self):
        "Dummy cache values can't be incremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.incr, 'answer')
        self.assertRaises(ValueError, cache.incr, 'does_not_exist')

    def test_decr(self):
        "Dummy cache values can't be decremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.decr, 'answer')
        self.assertRaises(ValueError, cache.decr, 'does_not_exist')

    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        cache.set("stuff", stuff)
        self.assertIsNone(cache.get("stuff"))

    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        cache.set('expire1', 'very quickly', 1)
        cache.set('expire2', 'very quickly', 1)
        cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertIsNone(cache.get("expire1"))
        cache.add("expire2", "newvalue")
        self.assertIsNone(cache.get("expire2"))
        self.assertFalse(cache.has_key("expire3"))

    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        for (key, value) in stuff.items():
            cache.set(key, value)
            self.assertIsNone(cache.get(key))

    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        cache.set_many({'a': 1, 'b': 2})
        cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')

    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        cache.delete_many(['a', 'b'])

    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        cache.clear()

    def test_incr_version(self):
        "Dummy cache versions can't be incremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.incr_version, 'answer')
        self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')

    def test_decr_version(self):
        "Dummy cache versions can't be decremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.decr_version, 'answer')
        self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
    """A customized cache key function (used via the KEY_FUNCTION setting)."""
    return 'CUSTOM-%s-%s-%s' % (key_prefix, version, key)
# Per-alias cache configuration shared by the test settings below;
# caches_setting_for_tests() layers these on top of a backend-specific
# base config.
_caches_setting_base = {
    'default': {},
    'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
    'v2': {'VERSION': 2},
    'custom_key': {'KEY_FUNCTION': custom_key_func},
    'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
    'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
    'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
    """Build a CACHES setting dict for the tests.

    `base` pulls in backend config (e.g. the memcached config from the
    original settings), `params` are test-specific overrides and
    `_caches_setting_base` supplies the per-alias test config.
    Effective precedence: params -> _caches_setting_base -> base.
    """
    shared = base or {}
    setting = {}
    for alias, alias_params in _caches_setting_base.items():
        cache_params = shared.copy()
        cache_params.update(alias_params)
        cache_params.update(params)
        setting[alias] = cache_params
    return setting
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
    """set_many() stores several key/value pairs in a single call."""
    payload = {"key1": "spam", "key2": "eggs"}
    cache.set_many(payload)
    for key, expected in payload.items():
        self.assertEqual(cache.get(key), expected)
def test_set_many_expiration(self):
    """set_many() honours its second ``timeout`` parameter."""
    cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
    # Outlive the 1-second timeout: both keys must be gone.
    time.sleep(2)
    for key in ("key1", "key2"):
        self.assertIsNone(cache.get(key))
def test_delete_many(self):
    """delete_many() removes exactly the listed keys, nothing else."""
    for key, value in (("key1", "spam"), ("key2", "eggs"), ("key3", "ham")):
        cache.set(key, value)
    cache.delete_many(["key1", "key2"])
    self.assertIsNone(cache.get("key1"))
    self.assertIsNone(cache.get("key2"))
    # key3 was not listed, so it survives.
    self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
    """clear() empties the whole cache."""
    for key, value in (("key1", "spam"), ("key2", "eggs")):
        cache.set(key, value)
    cache.clear()
    for key in ("key1", "key2"):
        self.assertIsNone(cache.get(key))
def test_long_timeout(self):
    '''
    Using a timeout greater than 30 days makes memcached think
    it is an absolute expiration timestamp instead of a relative
    offset. Test that we honour this convention. Refs #12399.

    Exercises set, add and set_many with a 30-days-plus-one-second
    timeout; each value must still be readable immediately afterwards.
    '''
    cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1)  # 30 days + 1 second
    self.assertEqual(cache.get('key1'), 'eggs')
    cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
    self.assertEqual(cache.get('key2'), 'ham')
    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
    self.assertEqual(cache.get('key3'), 'sausage')
    self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
    '''
    Passing in None into timeout results in a value that is cached forever
    '''
    cache.set('key1', 'eggs', None)
    self.assertEqual(cache.get('key1'), 'eggs')
    cache.add('key2', 'ham', None)
    self.assertEqual(cache.get('key2'), 'ham')
    # add() on an existing key must fail even with timeout=None, and must
    # not overwrite the existing value.
    added = cache.add('key1', 'new eggs', None)
    self.assertEqual(added, False)
    self.assertEqual(cache.get('key1'), 'eggs')
    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
    self.assertEqual(cache.get('key3'), 'sausage')
    self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
    '''
    Passing in zero into timeout results in a value that is not cached
    '''
    # Unlike None (cache forever), 0 means "expire immediately" for
    # set, add and set_many alike.
    cache.set('key1', 'eggs', 0)
    self.assertIsNone(cache.get('key1'))
    cache.add('key2', 'ham', 0)
    self.assertIsNone(cache.get('key2'))
    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
    self.assertIsNone(cache.get('key3'))
    self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
    """A float timeout is accepted without raising and the value sticks."""
    # Make sure a timeout given as a float doesn't crash anything.
    cache.set("key1", "spam", 100.2)
    self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
    """Overflow ``cull_cache`` with ``initial_count - 1`` keys, then
    verify that exactly ``final_count`` of them survive the cull."""
    # Create initial cache key entries. This will overflow the cache,
    # causing a cull.
    for i in range(1, initial_count):
        cull_cache.set('cull%d' % i, 'value', 1000)
    # Count how many keys are left in the cache.
    survivors = sum(
        1 for i in range(1, initial_count)
        if cull_cache.has_key('cull%d' % i)
    )
    self.assertEqual(survivors, final_count)
def test_cull(self):
    """Default cull (CULL_FREQUENCY) keeps 29 of 49 keys on overflow."""
    self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
    """CULL_FREQUENCY=0 empties the cache on overflow, leaving only the
    19 keys written after the purge."""
    self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
    """
    All the builtin backends (except memcached, see below) should warn on
    keys that would be refused by memcached. This encourages portable
    caching code without making it too difficult to use production backends
    with more liberal key rules. Refs #6447.
    """
    # mimic custom ``make_key`` method being defined since the default will
    # never show the below warnings
    def func(key, *args):
        return key

    # Swap in the identity key function; restored in the finally block so
    # a failing assertion cannot leak the override into other tests.
    old_func = cache.key_func
    cache.key_func = func

    try:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # memcached does not allow whitespace or control characters in keys
            cache.set('key with spaces', 'value')
            self.assertEqual(len(w), 2)
            self.assertIsInstance(w[0].message, CacheKeyWarning)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # memcached limits key length to 250
            cache.set('a' * 251, 'value')
            self.assertEqual(len(w), 1)
            self.assertIsInstance(w[0].message, CacheKeyWarning)
    finally:
        cache.key_func = old_func
def test_cache_versioning_get_set(self):
    """get/set are version-aware: an explicit ``version`` overrides each
    cache's default, and ``cache`` (VERSION=1) and ``caches['v2']``
    (VERSION=2) see the same underlying store."""
    # set, using default version = 1
    cache.set('answer1', 42)
    self.assertEqual(cache.get('answer1'), 42)
    self.assertEqual(cache.get('answer1', version=1), 42)
    self.assertIsNone(cache.get('answer1', version=2))

    self.assertIsNone(caches['v2'].get('answer1'))
    self.assertEqual(caches['v2'].get('answer1', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer1', version=2))

    # set, default version = 1, but manually override version = 2
    cache.set('answer2', 42, version=2)
    self.assertIsNone(cache.get('answer2'))
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    self.assertEqual(caches['v2'].get('answer2'), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertEqual(caches['v2'].get('answer2', version=2), 42)

    # v2 set, using default version = 2
    caches['v2'].set('answer3', 42)
    self.assertIsNone(cache.get('answer3'))
    self.assertIsNone(cache.get('answer3', version=1))
    self.assertEqual(cache.get('answer3', version=2), 42)

    self.assertEqual(caches['v2'].get('answer3'), 42)
    self.assertIsNone(caches['v2'].get('answer3', version=1))
    self.assertEqual(caches['v2'].get('answer3', version=2), 42)

    # v2 set, default version = 2, but manually override version = 1
    caches['v2'].set('answer4', 42, version=1)
    self.assertEqual(cache.get('answer4'), 42)
    self.assertEqual(cache.get('answer4', version=1), 42)
    self.assertIsNone(cache.get('answer4', version=2))

    self.assertIsNone(caches['v2'].get('answer4'))
    self.assertEqual(caches['v2'].get('answer4', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
    """add() is version-aware: it only refuses to overwrite a key that
    already exists under the *same* version."""
    # add, default version = 1, but manually override version = 2
    cache.add('answer1', 42, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    # Second add under version=2 is a no-op: 42 is kept, 37 ignored.
    cache.add('answer1', 37, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    # version=1 is still free, so this add succeeds.
    cache.add('answer1', 37, version=1)
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)

    # v2 add, using default version = 2
    caches['v2'].add('answer2', 42)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    caches['v2'].add('answer2', 37)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    caches['v2'].add('answer2', 37, version=1)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)

    # v2 add, default version = 2, but manually override version = 1
    caches['v2'].add('answer3', 42, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))

    caches['v2'].add('answer3', 37, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))

    caches['v2'].add('answer3', 37)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
    """has_key() is version-aware on both the v1 and v2 caches."""
    cache.set('answer1', 42)
    # (backend, has_key kwargs, expected presence)
    expectations = (
        (cache, {}, True),
        (cache, {'version': 1}, True),
        (cache, {'version': 2}, False),
        (caches['v2'], {}, False),
        (caches['v2'], {'version': 1}, True),
        (caches['v2'], {'version': 2}, False),
    )
    for backend, kwargs, present in expectations:
        self.assertEqual(backend.has_key('answer1', **kwargs), present)
def test_cache_versioning_delete(self):
    """delete() removes only the entry under the resolved version,
    leaving the same key under other versions intact."""
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    # Default version of `cache` is 1 -> only v1 entry is removed.
    cache.delete('answer1')
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    # Explicit version overrides the default.
    cache.delete('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertIsNone(cache.get('answer2', version=2))

    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    # caches['v2'] defaults to version 2.
    caches['v2'].delete('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertIsNone(cache.get('answer3', version=2))

    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].delete('answer4', version=1)
    self.assertIsNone(cache.get('answer4', version=1))
    self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
    """incr()/decr() only touch the entry under the resolved version."""
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    # Default version of `cache` is 1 -> only the v1 value changes.
    cache.incr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 38)
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.decr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    # Explicit version overrides the default.
    cache.incr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 43)
    cache.decr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)

    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    # caches['v2'] defaults to version 2.
    caches['v2'].incr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 43)
    caches['v2'].decr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 42)

    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].incr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 38)
    self.assertEqual(cache.get('answer4', version=2), 42)
    caches['v2'].decr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 37)
    self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
    """set_many()/get_many() are version-aware: an explicit ``version``
    overrides each cache's default (cache -> 1, caches['v2'] -> 2), and
    both aliases share the same underlying store."""
    def assert_visible_only_at(keys, stored, version):
        # The pair must be retrievable via both cache aliases, but only
        # when the effective version (explicit, else the alias default)
        # matches `version`; every other combination yields {}.
        for backend in (cache, caches['v2']):
            for explicit in (None, 1, 2):
                effective = explicit if explicit is not None else backend.version
                expected = stored if effective == version else {}
                kwargs = {} if explicit is None else {'version': explicit}
                self.assertDictEqual(backend.get_many(keys, **kwargs), expected)

    # set, using default version = 1
    cache.set_many({'ford1': 37, 'arthur1': 42})
    assert_visible_only_at(['ford1', 'arthur1'], {'ford1': 37, 'arthur1': 42}, 1)
    # set, default version = 1, but manually override version = 2
    cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
    assert_visible_only_at(['ford2', 'arthur2'], {'ford2': 37, 'arthur2': 42}, 2)
    # v2 set, using default version = 2
    caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
    assert_visible_only_at(['ford3', 'arthur3'], {'ford3': 37, 'arthur3': 42}, 2)
    # v2 set, default version = 2, but manually override version = 1
    caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
    assert_visible_only_at(['ford4', 'arthur4'], {'ford4': 37, 'arthur4': 42}, 1)
def test_incr_version(self):
    """incr_version() moves a key's value from version N to N+1 and
    returns the new version; a missing key raises ValueError."""
    cache.set('answer', 42, version=2)
    self.assertIsNone(cache.get('answer'))
    self.assertIsNone(cache.get('answer', version=1))
    self.assertEqual(cache.get('answer', version=2), 42)
    self.assertIsNone(cache.get('answer', version=3))

    self.assertEqual(cache.incr_version('answer', version=2), 3)
    # The value now lives only under version 3.
    self.assertIsNone(cache.get('answer'))
    self.assertIsNone(cache.get('answer', version=1))
    self.assertIsNone(cache.get('answer', version=2))
    self.assertEqual(cache.get('answer', version=3), 42)

    caches['v2'].set('answer2', 42)
    self.assertEqual(caches['v2'].get('answer2'), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertEqual(caches['v2'].get('answer2', version=2), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=3))

    # Without an explicit version, the cache's default (2) is used.
    self.assertEqual(caches['v2'].incr_version('answer2'), 3)
    self.assertIsNone(caches['v2'].get('answer2'))
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertIsNone(caches['v2'].get('answer2', version=2))
    self.assertEqual(caches['v2'].get('answer2', version=3), 42)

    self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
    """decr_version() moves a key's value from version N to N-1 and
    returns the new version; a missing key raises ValueError."""
    cache.set('answer', 42, version=2)
    self.assertIsNone(cache.get('answer'))
    self.assertIsNone(cache.get('answer', version=1))
    self.assertEqual(cache.get('answer', version=2), 42)

    self.assertEqual(cache.decr_version('answer', version=2), 1)
    # The value now lives only under version 1 (the cache default).
    self.assertEqual(cache.get('answer'), 42)
    self.assertEqual(cache.get('answer', version=1), 42)
    self.assertIsNone(cache.get('answer', version=2))

    caches['v2'].set('answer2', 42)
    self.assertEqual(caches['v2'].get('answer2'), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertEqual(caches['v2'].get('answer2', version=2), 42)

    # Without an explicit version, caches['v2'] starts from version 2.
    self.assertEqual(caches['v2'].decr_version('answer2'), 1)
    self.assertIsNone(caches['v2'].get('answer2'))
    self.assertEqual(caches['v2'].get('answer2', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=2))

    self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
    """Caches with different key functions cannot see each other's keys,
    while caches sharing a key function can."""
    # Two caches with different key functions aren't visible to each other
    cache.set('answer1', 42)
    self.assertEqual(cache.get('answer1'), 42)
    for alias in ('custom_key', 'custom_key2'):
        self.assertIsNone(caches[alias].get('answer1'))

    caches['custom_key'].set('answer2', 42)
    self.assertIsNone(cache.get('answer2'))
    # custom_key and custom_key2 share a key function, so both see it.
    for alias in ('custom_key', 'custom_key2'):
        self.assertEqual(caches[alias].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
    """A response carrying cookies can be stored and re-served by the
    cache middleware — twice in a row, since re-caching an already
    cached response must also work."""
    update_middleware = UpdateCacheMiddleware()
    update_middleware.cache = cache
    fetch_middleware = FetchFromCacheMiddleware()
    fetch_middleware.cache = cache

    request = self.factory.get('/cache/test')
    request._cache_update_cache = True
    # Fix: use the configured fetch_middleware instead of a throwaway
    # FetchFromCacheMiddleware() instance, so the same cache object that
    # was assigned above is consulted for every lookup in this test.
    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNone(get_cache_data)

    response = HttpResponse()
    content = 'Testing cookie serialization.'
    response.content = content
    response.set_cookie('foo', 'bar')

    # First round-trip: store the response, then fetch it back.
    update_middleware.process_response(request, response)

    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, content.encode('utf-8'))
    self.assertEqual(get_cache_data.cookies, response.cookies)

    # Second round-trip: cache the fetched response again.
    update_middleware.process_response(request, get_cache_data)
    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, content.encode('utf-8'))
    self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
    """add() propagates PickleError for unpicklable values."""
    "See https://code.djangoproject.com/ticket/21200"
    with self.assertRaises(pickle.PickleError):
        cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
    """set() propagates PickleError for unpicklable values."""
    "See https://code.djangoproject.com/ticket/21200"
    with self.assertRaises(pickle.PickleError):
        cache.set('unpickable', Unpickable())
def test_get_or_set(self):
    """get_or_set() stores and returns the default when the key is absent."""
    self.assertIsNone(cache.get('projector'))
    self.assertEqual(cache.get_or_set('projector', 42), 42)
    self.assertEqual(cache.get('projector'), 42)
def test_get_or_set_callable(self):
    """get_or_set() calls a callable default and caches its result."""
    def my_callable():
        return 'value'

    self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
def test_get_or_set_version(self):
    """get_or_set() is version-aware and requires an explicit default."""
    cache.get_or_set('brian', 1979, version=2)
    # Omitting the default is an error regardless of version.
    with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
        cache.get_or_set('brian')
    with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
        cache.get_or_set('brian', version=1)
    self.assertIsNone(cache.get('brian', version=1))
    self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
    # Version 2 already holds 1979, so the default 1979 is a no-op there.
    self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
    self.assertIsNone(cache.get('brian', version=3))
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.db.DatabaseCache',
    # Spaces are used in the table name to ensure quoting/escaping is working
    LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
    """Runs the common cache test suite against the database backend,
    plus DB-specific tests (createcachetable, transaction behavior)."""

    available_apps = ['cache']

    def setUp(self):
        # The super calls needs to happen first for the settings override.
        super(DBCacheTests, self).setUp()
        self.create_table()

    def tearDown(self):
        # The super call needs to happen first because it uses the database.
        super(DBCacheTests, self).tearDown()
        self.drop_table()

    def create_table(self):
        # Creates the cache table configured in CACHES above.
        management.call_command('createcachetable', verbosity=0, interactive=False)

    def drop_table(self):
        # Drop with proper quoting: the table name contains spaces.
        with connection.cursor() as cursor:
            table_name = connection.ops.quote_name('test cache table')
            cursor.execute('DROP TABLE %s' % table_name)

    def test_zero_cull(self):
        # DB backend culls slightly differently: 18 survivors, not 19.
        self._perform_cull_test(caches['zero_cull'], 50, 18)

    def test_second_call_doesnt_crash(self):
        """Re-running createcachetable reports existing tables, once per alias."""
        out = six.StringIO()
        management.call_command('createcachetable', stdout=out)
        self.assertEqual(out.getvalue(),
            "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))

    @override_settings(CACHES=caches_setting_for_tests(
        BACKEND='django.core.cache.backends.db.DatabaseCache',
        # Use another table name to avoid the 'table already exists' message.
        LOCATION='createcachetable_dry_run_mode'
    ))
    def test_createcachetable_dry_run_mode(self):
        """--dry-run prints the CREATE TABLE SQL without executing it."""
        out = six.StringIO()
        management.call_command('createcachetable', dry_run=True, stdout=out)
        output = out.getvalue()
        self.assertTrue(output.startswith("CREATE TABLE"))

    def test_createcachetable_with_table_argument(self):
        """
        Delete and recreate cache table with legacy behavior (explicitly
        specifying the table name).
        """
        self.drop_table()
        out = six.StringIO()
        management.call_command(
            'createcachetable',
            'test cache table',
            verbosity=2,
            stdout=out,
        )
        self.assertEqual(out.getvalue(),
            "Cache table 'test cache table' created.\n")

    def test_clear_commits_transaction(self):
        # Ensure the database transaction is committed (#19896)
        cache.set("key1", "spam")
        cache.clear()
        # If clear() committed, a rollback cannot resurrect the key.
        transaction.rollback()
        self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
    # Re-runs the whole DB cache suite with timezone-aware datetimes.
    pass
class DBCacheRouter(object):
    """A router that puts the cache table on the 'other' database."""

    def db_for_read(self, model, **hints):
        # Route django_cache reads to 'other'; defer on everything else.
        return 'other' if model._meta.app_label == 'django_cache' else None

    def db_for_write(self, model, **hints):
        # Same routing rule for writes.
        return 'other' if model._meta.app_label == 'django_cache' else None

    def allow_migrate(self, db, app_label, **hints):
        # The cache table may only be created on 'other'; no opinion
        # about any other app.
        return (db == 'other') if app_label == 'django_cache' else None
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
            'LOCATION': 'my_cache_table',
        },
    },
)
class CreateCacheTableForDBCacheTests(TestCase):
    """createcachetable must respect DATABASE_ROUTERS when deciding
    where to create the cache table."""

    multi_db = True

    @override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
    def test_createcachetable_observes_database_router(self):
        # cache table should not be created on 'default'
        with self.assertNumQueries(0, using='default'):
            management.call_command('createcachetable',
                                    database='default',
                                    verbosity=0, interactive=False)
        # cache table should be created on 'other'
        # Queries:
        #   1: check table doesn't already exist
        #   2: create savepoint (if transactional DDL is supported)
        #   3: create the table
        #   4: create the index
        #   5: release savepoint (if transactional DDL is supported)
        num = 5 if connections['other'].features.can_rollback_ddl else 3
        with self.assertNumQueries(num, using='other'):
            management.call_command('createcachetable',
                                    database='other',
                                    verbosity=0, interactive=False)
class PicklingSideEffect(object):
    """Test helper: when pickled, records whether the cache it was given
    was holding its write lock at that moment (see test_locking_on_pickle)."""

    def __init__(self, cache):
        self.cache = cache
        self.locked = False

    def __getstate__(self):
        # Snapshot the lock state at pickle time; state itself is empty.
        self.locked = self.locked or bool(self.cache._lock.active_writers)
        return {}
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
    """Runs the common cache suite against the local-memory backend,
    plus locmem-specific tests (isolation, lock behavior)."""

    def setUp(self):
        super(LocMemCacheTests, self).setUp()

        # LocMem requires a hack to make the other caches
        # share a data store with the 'normal' cache.
        caches['prefix']._cache = cache._cache
        caches['prefix']._expire_info = cache._expire_info

        caches['v2']._cache = cache._cache
        caches['v2']._expire_info = cache._expire_info

        caches['custom_key']._cache = cache._cache
        caches['custom_key']._expire_info = cache._expire_info

        caches['custom_key2']._cache = cache._cache
        caches['custom_key2']._expire_info = cache._expire_info

    @override_settings(CACHES={
        'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other'
        },
    })
    def test_multiple_caches(self):
        "Check that multiple locmem caches are isolated"
        cache.set('value', 42)
        self.assertEqual(caches['default'].get('value'), 42)
        self.assertIsNone(caches['other'].get('value'))

    def test_locking_on_pickle(self):
        """#20613/#18541 -- Ensures pickling is done outside of the lock."""
        bad_obj = PicklingSideEffect(cache)
        cache.set('set', bad_obj)
        self.assertFalse(bad_obj.locked, "Cache was locked during pickling")

        cache.add('add', bad_obj)
        self.assertFalse(bad_obj.locked, "Cache was locked during pickling")

    def test_incr_decr_timeout(self):
        """incr/decr does not modify expiry time (matches memcached behavior)"""
        key = 'value'
        _key = cache.make_key(key)
        cache.set(key, 1, timeout=cache.default_timeout * 10)
        expire = cache._expire_info[_key]
        cache.incr(key)
        self.assertEqual(expire, cache._expire_info[_key])
        cache.decr(key)
        self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
    # Pick the first configured memcached backend (if any) as the base
    # settings for the memcached test cases below.
    if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
        memcached_params = _cache_params

# Variants of the memcached settings used by individual tests:
# one that never expires, one with a timeout beyond memcached's
# 30-day relative-offset threshold.
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None

memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000  # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
    """Runs the common cache suite against a live memcached server,
    overriding the pieces memcached handles differently (key
    validation, culling)."""

    def test_invalid_keys(self):
        """
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons), we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.

        In order to be memcached-API-library agnostic, we only assert
        that a generic exception of some kind is raised.
        """
        # memcached does not allow whitespace or control characters in keys
        self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
        # memcached limits key length to 250
        self.assertRaises(Exception, cache.set, 'a' * 251, 'value')

    # Explicitly display a skipped test if no configured cache uses MemcachedCache
    @unittest.skipUnless(
        memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
        "cache with python-memcached library not available")
    def test_memcached_uses_highest_pickle_version(self):
        # Regression test for #19810
        for cache_key, cache_config in settings.CACHES.items():
            if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
                self.assertEqual(caches[cache_key]._cache.pickleProtocol,
                                 pickle.HIGHEST_PROTOCOL)

    @override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
    def test_default_never_expiring_timeout(self):
        # Regression test for #22845
        cache.set('infinite_foo', 'bar')
        self.assertEqual(cache.get('infinite_foo'), 'bar')

    @override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
    def test_default_far_future_timeout(self):
        # Regression test for #22845
        cache.set('future_foo', 'bar')
        self.assertEqual(cache.get('future_foo'), 'bar')

    def test_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass

    def test_zero_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass

    def test_memcached_deletes_key_on_failed_set(self):
        # By default memcached allows objects up to 1MB. For the cache_db session
        # backend to always use the current session, memcached needs to delete
        # the old key if it fails to set.
        # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
        # tell from a quick check of its source code. This is falling back to
        # the default value exposed by python-memcached on my system.
        max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)

        cache.set('small_value', 'a')
        self.assertEqual(cache.get('small_value'), 'a')

        large_value = 'a' * (max_value_length + 1)
        cache.set('small_value', large_value)
        # small_value should be deleted, or set if configured to accept larger values
        value = cache.get('small_value')
        self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
    """
    Specific test cases for the file-based cache.
    """
    def setUp(self):
        super(FileBasedCacheTests, self).setUp()
        self.dirname = tempfile.mkdtemp()
        # Caches location cannot be modified through override_settings / modify_settings,
        # hence settings are manipulated directly here and the setting_changed signal
        # is triggered manually.
        for cache_params in settings.CACHES.values():
            cache_params.update({'LOCATION': self.dirname})
        setting_changed.send(self.__class__, setting='CACHES', enter=False)

    def tearDown(self):
        super(FileBasedCacheTests, self).tearDown()
        # Call parent first, as cache.clear() may recreate cache base directory
        shutil.rmtree(self.dirname)

    def test_ignores_non_cache_files(self):
        """clear() must leave foreign files in the cache dir untouched."""
        fname = os.path.join(self.dirname, 'not-a-cache-file')
        with open(fname, 'w'):
            os.utime(fname, None)
        cache.clear()
        self.assertTrue(os.path.exists(fname),
                        'Expected cache.clear to ignore non cache files')
        os.remove(fname)

    def test_clear_does_not_remove_cache_dir(self):
        cache.clear()
        self.assertTrue(os.path.exists(self.dirname),
                        'Expected cache.clear to keep the cache dir')

    def test_creates_cache_dir_if_nonexistent(self):
        os.rmdir(self.dirname)
        cache.set('foo', 'bar')
        # Bug fix: the original called os.path.exists() and discarded the
        # result, so this test could never fail. Assert on it instead.
        self.assertTrue(os.path.exists(self.dirname),
                        'Expected cache.set to recreate the cache dir')
@override_settings(CACHES={
    'default': {
        'BACKEND': 'cache.liberal_backend.CacheClass',
    },
})
class CustomCacheKeyValidationTests(TestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        # this key is both longer than 250 characters, and has spaces
        key = 'some key with spaces' * 15
        val = 'a value'
        # The liberal backend accepts a key the builtin validators would
        # warn about, and stores/retrieves it normally.
        cache.set(key, val)
        self.assertEqual(cache.get(key), val)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'cache.closeable_cache.CacheClass',
        }
    }
)
class CacheClosingTests(TestCase):
    """The cache backend's close() hook runs when a request finishes."""

    def test_close(self):
        self.assertFalse(cache.closed)
        # Firing request_finished must close the closeable backend.
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)
# Baseline locmem settings used by DefaultNonExpiringCacheKeyTests below.
DEFAULT_MEMORY_CACHES_SETTINGS = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
# Same settings, but with TIMEOUT=None so keys never expire. Deep copy so
# mutating the variant cannot leak into the baseline dict.
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
    """Tests that verify that settings having Cache arguments with a TIMEOUT
    set to `None` will create Caches that will set non-expiring keys.

    This fixes ticket #22085.
    """
    def setUp(self):
        # The 5 minute (300 seconds) default expiration time for keys is
        # defined in the implementation of the initializer method of the
        # BaseCache type.
        self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout

    def tearDown(self):
        del self.DEFAULT_TIMEOUT

    def test_default_expiration_time_for_keys_is_5_minutes(self):
        """The default expiration time of a cache key is 5 minutes.

        This value is defined inside the __init__() method of the
        :class:`django.core.cache.backends.base.BaseCache` type.
        """
        self.assertEqual(300, self.DEFAULT_TIMEOUT)

    def test_caches_with_unset_timeout_has_correct_default_timeout(self):
        """Caches that have the TIMEOUT parameter undefined in the default
        settings will use the default 5 minute timeout.
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
        """Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings with have `None` as the default timeout.

        This means "no timeout".
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertIsNone(cache.default_timeout)
        self.assertIsNone(cache.get_backend_timeout())

    @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
    def test_caches_with_unset_timeout_set_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter unset will set cache
        keys having the default 5 minute timeout.
        """
        key = "my-key"
        value = "my-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        self.assertIsNotNone(cache._expire_info[cache_key])

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter set to `None` will set
        a non expiring key by default.

        Bug fix: this method was named ``text_caches_...`` (typo for
        ``test_``), so unittest discovery silently never ran it.
        """
        key = "another-key"
        value = "another-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
    # Fixed host/path so computed cache keys are deterministic across runs.
    self.host = 'www.example.com'
    self.path = '/cache/test/'
    self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
    # Flush the default cache so learned keys don't leak between tests.
    cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
    """Build a request flagged for the cache middleware to update."""
    request = self._get_request(self.host, self.path,
                                method, query_string=query_string)
    # NOTE(review): for any falsy `update_cache` (including explicit
    # False) this sets True — presumably intentional "default to
    # updating"; confirm before relying on passing False here.
    request._cache_update_cache = True if not update_cache else update_cache
    return request
def _set_cache(self, request, msg):
    """Run a response with body `msg` through UpdateCacheMiddleware,
    returning the (possibly cached) response."""
    response = HttpResponse()
    response.content = msg
    return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the Etag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
| bsd-3-clause |
qiuzhong/crosswalk-test-suite | webapi/tct-bluetooth-tizen-tests/bluetooth/BluetoothAdapter_createBonding_successCallback_TypeMismatch.html | 2321 | <!DOCTYPE html>
<!--
Copyright (c) 2013 Samsung Electronics Co., Ltd.
Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
Lukasz Bardeli <l.bardeli@samsung.com>
-->
<html>
<head>
<title>BluetoothAdapter_createBonding_successCallback_TypeMismatch</title>
<script src="support/unitcommon.js"></script>
<script src="support/bluetooth_common.js"></script>
</head>
<body>
<div id="log"></div>
<script>
//==== TEST: BluetoothAdapter_createBonding_successCallback_TypeMismatch
//==== LABEL Check whether createBonding() method called with invalid successCallback argument throws an exception
//==== PRIORITY P2
//==== ONLOAD_DELAY 180
//==== SPEC Tizen Web API:Communication:Bluetooth:BluetoothAdapter:createBonding M
//==== SPEC_URL https://developer.tizen.org/help/topic/org.tizen.web.device.apireference/tizen/bluetooth.html
//==== TEST_CRITERIA MC
setup({timeout: 180000});
var t = async_test(document.title, {timeout: 180000}),
adapter, exceptionName, i, errorCallback, powerOnSuccess, successCallback,
conversionTable = getTypeConversionExceptions("functionObject", false);
t.step(function () {
errorCallback = t.step_func(function (e) {
assert_unreached("errorCallback exception:" + e.message);
});
powerOnSuccess = t.step_func(function () {
for(i = 0; i < conversionTable.length; i++) {
successCallback = conversionTable[i][0];
exceptionName = conversionTable[i][1];
assert_throws({name: exceptionName},
function () {
adapter.createBonding(REMOTE_DEVICE_ADDRESS, successCallback, errorCallback);
}, "Given incorrect successCallback.");
}
t.done();
});
adapter = tizen.bluetooth.getDefaultAdapter();
setPowered(t, adapter, powerOnSuccess);
});
</script>
</body>
</html>
| bsd-3-clause |
drmateo/pcl | doc/tutorials/content/sources/rops_feature/rops_feature.cpp | 2148 | #include <pcl/features/rops_estimation.h>
#include <pcl/io/pcd_io.h>
int main (int argc, char** argv)
{
if (argc != 4)
return (-1);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ> ());
if (pcl::io::loadPCDFile (argv[1], *cloud) == -1)
return (-1);
pcl::PointIndicesPtr indices (new pcl::PointIndices);
std::ifstream indices_file;
indices_file.open (argv[2], std::ifstream::in);
for (std::string line; std::getline (indices_file, line);)
{
std::istringstream in (line);
unsigned int index = 0;
in >> index;
indices->indices.push_back (index - 1);
}
indices_file.close ();
std::vector <pcl::Vertices> triangles;
std::ifstream triangles_file;
triangles_file.open (argv[3], std::ifstream::in);
for (std::string line; std::getline (triangles_file, line);)
{
pcl::Vertices triangle;
std::istringstream in (line);
unsigned int vertex = 0;
in >> vertex;
triangle.vertices.push_back (vertex - 1);
in >> vertex;
triangle.vertices.push_back (vertex - 1);
in >> vertex;
triangle.vertices.push_back (vertex - 1);
triangles.push_back (triangle);
}
float support_radius = 0.0285f;
unsigned int number_of_partition_bins = 5;
unsigned int number_of_rotations = 3;
pcl::search::KdTree<pcl::PointXYZ>::Ptr search_method (new pcl::search::KdTree<pcl::PointXYZ>);
search_method->setInputCloud (cloud);
pcl::ROPSEstimation <pcl::PointXYZ, pcl::Histogram <135> > feature_estimator;
feature_estimator.setSearchMethod (search_method);
feature_estimator.setSearchSurface (cloud);
feature_estimator.setInputCloud (cloud);
feature_estimator.setIndices (indices);
feature_estimator.setTriangles (triangles);
feature_estimator.setRadiusSearch (support_radius);
feature_estimator.setNumberOfPartitionBins (number_of_partition_bins);
feature_estimator.setNumberOfRotations (number_of_rotations);
feature_estimator.setSupportRadius (support_radius);
pcl::PointCloud<pcl::Histogram <135> >::Ptr histograms (new pcl::PointCloud <pcl::Histogram <135> > ());
feature_estimator.compute (*histograms);
return (0);
}
| bsd-3-clause |
NifTK/MITK | Plugins/org.mitk.gui.qt.cmdlinemodules/src/internal/QmitkUiLoader.cpp | 2060 | /*===================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) University College London (UCL).
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#include "QmitkUiLoader.h"
#include "QmitkDataStorageComboBoxWithSelectNone.h"
#include "mitkNodePredicateDataType.h"
#include "mitkNodePredicateOr.h"
#include "mitkImage.h"
//-----------------------------------------------------------------------------
QmitkUiLoader::QmitkUiLoader(const mitk::DataStorage* dataStorage, QObject *parent)
: ctkCmdLineModuleQtUiLoader(parent)
, m_DataStorage(dataStorage)
{
}
//-----------------------------------------------------------------------------
QmitkUiLoader::~QmitkUiLoader()
{
}
//-----------------------------------------------------------------------------
QStringList QmitkUiLoader::availableWidgets () const
{
QStringList availableWidgets = ctkCmdLineModuleQtUiLoader::availableWidgets();
availableWidgets << "QmitkDataStorageComboBoxWithSelectNone";
return availableWidgets;
}
//-----------------------------------------------------------------------------
/**
 * \brief Creates the named widget, intercepting requests for
 * QmitkDataStorageComboBoxWithSelectNone and delegating every other class
 * name to the base ctk loader.
 */
QWidget* QmitkUiLoader::createWidget(const QString& className, QWidget* parent, const QString& name)
{
  if (className != "QmitkDataStorageComboBoxWithSelectNone")
  {
    return ctkCmdLineModuleQtUiLoader::createWidget(className, parent, name);
  }
  QmitkDataStorageComboBoxWithSelectNone* comboBox =
      new QmitkDataStorageComboBoxWithSelectNone(parent);
  comboBox->setObjectName(name);
  comboBox->SetAutoSelectNewItems(false);
  // Only offer mitk::Image nodes; start on the "select none" entry.
  comboBox->SetPredicate(mitk::TNodePredicateDataType< mitk::Image >::New());
  comboBox->SetDataStorage(const_cast<mitk::DataStorage*>(m_DataStorage));
  comboBox->setCurrentIndex(0);
  return comboBox;
}
| bsd-3-clause |
scheib/chromium | third_party/blink/renderer/modules/webgl/oes_draw_buffers_indexed.h | 1603 | // Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_MODULES_WEBGL_OES_DRAW_BUFFERS_INDEXED_H_
#define THIRD_PARTY_BLINK_RENDERER_MODULES_WEBGL_OES_DRAW_BUFFERS_INDEXED_H_
#include "third_party/blink/renderer/modules/webgl/webgl_extension.h"
#include "third_party/khronos/GLES2/gl2.h"
namespace blink {
// Binding for the OES_draw_buffers_indexed WebGL extension, which exposes
// per-draw-buffer control over blend state, blend functions/equations and
// color write masks.
class OESDrawBuffersIndexed final : public WebGLExtension {
  DEFINE_WRAPPERTYPEINFO();
 public:
  // True if the underlying GL context supports this extension.
  static bool Supported(WebGLRenderingContextBase*);
  static const char* ExtensionName();
  explicit OESDrawBuffersIndexed(WebGLRenderingContextBase*);
  WebGLExtensionName GetName() const override;
  // Enable/disable a capability for the draw buffer at |index|.
  void enableiOES(GLenum target, GLuint index);
  void disableiOES(GLenum target, GLuint index);
  // Per-draw-buffer blend equation, optionally split RGB/alpha.
  void blendEquationiOES(GLuint buf, GLenum mode);
  void blendEquationSeparateiOES(GLuint buf, GLenum modeRGB, GLenum modeAlpha);
  // Per-draw-buffer blend factors, optionally split RGB/alpha.
  void blendFunciOES(GLuint buf, GLenum src, GLenum dst);
  void blendFuncSeparateiOES(GLuint buf,
                             GLenum srcRGB,
                             GLenum dstRGB,
                             GLenum srcAlpha,
                             GLenum dstAlpha);
  // Per-draw-buffer color channel write mask.
  void colorMaskiOES(GLuint buf,
                     GLboolean r,
                     GLboolean g,
                     GLboolean b,
                     GLboolean a);
  // Queries whether a capability is enabled for the draw buffer at |index|.
  GLboolean isEnablediOES(GLenum target, GLuint index);
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_MODULES_WEBGL_OES_DRAW_BUFFERS_INDEXED_H_
| bsd-3-clause |
modulexcite/blink | LayoutTests/fast/events/script-tests/page-scaled-mouse-click.js | 1651 | description("This tests that page scaling does not affect mouse event pageX and pageY coordinates.");
var html = document.documentElement;
var div = document.createElement("div");
div.style.width = "100px";
div.style.height = "100px";
div.style.backgroundColor = "blue";
var eventLog = "";
// Records the current event as "type(pageX, pageY)". Under DumpRenderTree the
// text is accumulated into eventLog; in a browser it is printed for manual
// inspection instead.
function appendEventLog() {
    var coords = event.pageX + ", " + event.pageY;
    var msg = event.type + "(" + coords + ")";
    if (!window.eventSender) {
        debug(msg);
        return;
    }
    eventLog += msg;
}
// Resets the accumulated event log between test cases.
function clearEventLog() {
    eventLog = "";
}
// Log every click on the target, and insert it at the very top of the page so
// it sits under the fixed screen position used by the driver below.
div.addEventListener("click", appendEventLog, false);
document.body.insertBefore(div, document.body.firstChild);
// Synthesizes a mouse press/release with the given button via eventSender.
// When run in a browser (no window.eventSender), prints manual instructions
// instead of simulating the click.
function sendEvents(button) {
    if (!window.eventSender) {
        // Fix: terminate the statement explicitly instead of relying on
        // automatic semicolon insertion, matching the rest of the file.
        debug("This test requires DumpRenderTree. Click on the blue rect with the left mouse button to log the mouse coordinates.");
        return;
    }
    eventSender.mouseDown(button);
    eventSender.mouseUp(button);
}
// Clicks with `button`, prints `description`, asserts the logged coordinates
// equal `expectedString`, then clears the log for the next case.
function testEvents(button, description, expectedString) {
    sendEvents(button);
    debug(description);
    shouldBeEqualToString("eventLog", expectedString);
    debug("");
    clearEventLog();
}
// Main driver: only runs automatically under DumpRenderTree.
if (window.eventSender) {
    // Keep the cursor at one fixed screen position for every case.
    eventSender.mouseMoveTo(10, 10);
    // We are clicking in the same position on screen. As we scale or transform the page,
    // we expect the pageX and pageY event coordinates to change because different
    // parts of the document are under the mouse.
    testEvents(0, "Unscaled", "click(10, 10)");
    // At scale factor 0.5 the same screen point maps to page coordinates
    // twice as far from the origin, hence (20, 20).
    window.eventSender.setPageScaleFactorLimits(0.5, 0.5);
    window.eventSender.setPageScaleFactor(0.5, 0, 0);
    testEvents(0, "setPageScale(0.5)", "click(20, 20)");
}
| bsd-3-clause |
scheib/chromium | third_party/blink/web_tests/compositing/reflections/reflection-opacity.html | 727 | <!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<style type="text/css" media="screen">
div {
-webkit-box-sizing: border-box;
}
.reflected {
position: relative;
width: 150px;
height: 100px;
background-color: green;
opacity: 0.5;
-webkit-box-reflect: below -50px;
}
.composited {
will-change: transform;
}
</style>
</head>
<p>Opacity should be applied after reflection, so you should see a green rectangle below. The overlap between the original and reflection should not be visible.</p>
<div class="reflected composited">
</div>
</html>
| bsd-3-clause |
ondra-novak/blink | LayoutTests/ietestcenter/Javascript/15.2.3.3-4-164.html | 344 | <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<html>
<head>
<script src="../../resources/js-test.js"></script>
</head>
<body>
<p id="description"></p>
<div id="console"></div>
<script src="resources/ie-test-pre.js"></script>
<script src="TestCases/15.2.3.3-4-164.js"></script>
<script src="resources/ie-test-post.js"></script>
</body>
</html>
| bsd-3-clause |
AlexJeng/react | src/modern/class/__tests__/ReactES6Class-test.js | 11780 | /**
* Copyright 2013-2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @emails react-core
*/
'use strict';
var React;
describe('ReactES6Class', function() {
var container;
var Inner;
var attachedListener = null;
var renderedName = null;
  beforeEach(function() {
    // Re-require React and reset the shared fixtures before every case.
    React = require('React');
    container = document.createElement('div');
    attachedListener = null;
    renderedName = null;
    // Inner records the onClick prop and the rendered name into module-level
    // spies so tests can observe which props reached it; it renders a div
    // whose class is the name prop.
    Inner = class extends React.Component {
      getName() {
        return this.props.name;
      }
      render() {
        attachedListener = this.props.onClick;
        renderedName = this.props.name;
        return <div className={this.props.name} />;
      }
    };
  });
function test(element, expectedTag, expectedClassName) {
var instance = React.render(element, container);
expect(container.firstChild).not.toBeNull();
expect(container.firstChild.tagName).toBe(expectedTag);
expect(container.firstChild.className).toBe(expectedClassName);
return instance;
}
it('preserves the name of the class for use in error messages', function() {
class Foo extends React.Component { }
expect(Foo.name).toBe('Foo');
});
  // A component class that defines no render() method cannot be mounted.
  it('throws if no render function is defined', function() {
    class Foo extends React.Component { }
    expect(() => React.render(<Foo />, container)).toThrow();
  });
it('renders a simple stateless component with prop', function() {
class Foo {
render() {
return <Inner name={this.props.bar} />;
}
}
test(<Foo bar="foo" />, 'DIV', 'foo');
test(<Foo bar="bar" />, 'DIV', 'bar');
});
it('renders based on state using initial values in this.props', function() {
class Foo extends React.Component {
constructor(props) {
super(props);
this.state = {bar: this.props.initialValue};
}
render() {
return <span className={this.state.bar} />;
}
}
test(<Foo initialValue="foo" />, 'SPAN', 'foo');
});
it('renders based on state using props in the constructor', function() {
class Foo extends React.Component {
constructor(props) {
this.state = {bar: props.initialValue};
}
changeState() {
this.setState({bar: 'bar'});
}
render() {
if (this.state.bar === 'foo') {
return <div className="foo" />;
}
return <span className={this.state.bar} />;
}
}
var instance = test(<Foo initialValue="foo" />, 'DIV', 'foo');
instance.changeState();
test(<Foo />, 'SPAN', 'bar');
});
it('renders based on context in the constructor', function() {
class Foo extends React.Component {
constructor(props, context) {
super(props, context);
this.state = {tag: context.tag, className: this.context.className};
}
render() {
var Tag = this.state.tag;
return <Tag className={this.state.className} />;
}
}
Foo.contextTypes = {
tag: React.PropTypes.string,
className: React.PropTypes.string
};
class Outer extends React.Component {
getChildContext() {
return {tag: 'span', className: 'foo'};
}
render() {
return <Foo />;
}
}
Outer.childContextTypes = {
tag: React.PropTypes.string,
className: React.PropTypes.string
};
test(<Outer />, 'SPAN', 'foo');
});
it('renders only once when setting state in componentWillMount', function() {
var renderCount = 0;
class Foo extends React.Component {
constructor(props) {
this.state = {bar: props.initialValue};
}
componentWillMount() {
this.setState({bar: 'bar'});
}
render() {
renderCount++;
return <span className={this.state.bar} />;
}
}
test(<Foo initialValue="foo" />, 'SPAN', 'bar');
expect(renderCount).toBe(1);
});
it('should throw with non-object in the initial state property', function() {
[['an array'], 'a string', 1234].forEach(function(state) {
class Foo {
constructor() {
this.state = state;
}
render() {
return <span />;
}
}
expect(() => test(<Foo />, 'span', '')).toThrow(
'Invariant Violation: Foo.state: ' +
'must be set to an object or null'
);
});
});
it('should render with null in the initial state property', function() {
class Foo extends React.Component {
constructor() {
this.state = null;
}
render() {
return <span />;
}
}
test(<Foo />, 'SPAN', '');
});
it('setState through an event handler', function() {
class Foo extends React.Component {
constructor(props) {
this.state = {bar: props.initialValue};
}
handleClick() {
this.setState({bar: 'bar'});
}
render() {
return (
<Inner
name={this.state.bar}
onClick={this.handleClick.bind(this)}
/>
);
}
}
test(<Foo initialValue="foo" />, 'DIV', 'foo');
attachedListener();
expect(renderedName).toBe('bar');
});
it('should not implicitly bind event handlers', function() {
class Foo extends React.Component {
constructor(props) {
this.state = {bar: props.initialValue};
}
handleClick() {
this.setState({bar: 'bar'});
}
render() {
return (
<Inner
name={this.state.bar}
onClick={this.handleClick}
/>
);
}
}
test(<Foo initialValue="foo" />, 'DIV', 'foo');
expect(attachedListener).toThrow();
});
it('renders using forceUpdate even when there is no state', function() {
class Foo extends React.Component {
constructor(props) {
this.mutativeValue = props.initialValue;
}
handleClick() {
this.mutativeValue = 'bar';
this.forceUpdate();
}
render() {
return (
<Inner
name={this.mutativeValue}
onClick={this.handleClick.bind(this)}
/>
);
}
}
test(<Foo initialValue="foo" />, 'DIV', 'foo');
attachedListener();
expect(renderedName).toBe('bar');
});
it('will call all the normal life cycle methods', function() {
var lifeCycles = [];
class Foo {
constructor() {
this.state = {};
}
componentWillMount() {
lifeCycles.push('will-mount');
}
componentDidMount() {
lifeCycles.push('did-mount');
}
componentWillReceiveProps(nextProps) {
lifeCycles.push('receive-props', nextProps);
}
shouldComponentUpdate(nextProps, nextState) {
lifeCycles.push('should-update', nextProps, nextState);
return true;
}
componentWillUpdate(nextProps, nextState) {
lifeCycles.push('will-update', nextProps, nextState);
}
componentDidUpdate(prevProps, prevState) {
lifeCycles.push('did-update', prevProps, prevState);
}
componentWillUnmount() {
lifeCycles.push('will-unmount');
}
render() {
return <span className={this.props.value} />;
}
}
test(<Foo value="foo" />, 'SPAN', 'foo');
expect(lifeCycles).toEqual([
'will-mount',
'did-mount'
]);
lifeCycles = []; // reset
test(<Foo value="bar" />, 'SPAN', 'bar');
expect(lifeCycles).toEqual([
'receive-props', {value: 'bar'},
'should-update', {value: 'bar'}, {},
'will-update', {value: 'bar'}, {},
'did-update', {value: 'foo'}, {}
]);
lifeCycles = []; // reset
React.unmountComponentAtNode(container);
expect(lifeCycles).toEqual([
'will-unmount'
]);
});
it('warns when classic properties are defined on the instance, ' +
'but does not invoke them.', function() {
spyOn(console, 'error');
var getInitialStateWasCalled = false;
class Foo extends React.Component {
constructor() {
this.contextTypes = {};
this.propTypes = {};
}
getInitialState() {
getInitialStateWasCalled = true;
return {};
}
render() {
return <span className="foo" />;
}
}
test(<Foo />, 'SPAN', 'foo');
expect(getInitialStateWasCalled).toBe(false);
expect(console.error.calls.length).toBe(3);
expect(console.error.calls[0].args[0]).toContain(
'getInitialState was defined on Foo, a plain JavaScript class.'
);
expect(console.error.calls[1].args[0]).toContain(
'propTypes was defined as an instance property on Foo.'
);
expect(console.error.calls[2].args[0]).toContain(
'contextTypes was defined as an instance property on Foo.'
);
});
it('should warn when mispelling shouldComponentUpdate', function() {
spyOn(console, 'error');
class NamedComponent {
componentShouldUpdate() {
return false;
}
render() {
return <span className="foo" />;
}
}
test(<NamedComponent />, 'SPAN', 'foo');
expect(console.error.calls.length).toBe(1);
expect(console.error.calls[0].args[0]).toBe(
'Warning: ' +
'NamedComponent has a method called componentShouldUpdate(). Did you ' +
'mean shouldComponentUpdate()? The name is phrased as a question ' +
'because the function is expected to return a value.'
);
});
it('should throw AND warn when trying to access classic APIs', function() {
spyOn(console, 'error');
var instance = test(<Inner name="foo" />, 'DIV', 'foo');
expect(() => instance.getDOMNode()).toThrow();
expect(() => instance.replaceState({})).toThrow();
expect(() => instance.isMounted()).toThrow();
expect(() => instance.setProps({name: 'bar'})).toThrow();
expect(() => instance.replaceProps({name: 'bar'})).toThrow();
expect(console.error.calls.length).toBe(5);
expect(console.error.calls[0].args[0]).toContain(
'getDOMNode(...) is deprecated in plain JavaScript React classes'
);
expect(console.error.calls[1].args[0]).toContain(
'replaceState(...) is deprecated in plain JavaScript React classes'
);
expect(console.error.calls[2].args[0]).toContain(
'isMounted(...) is deprecated in plain JavaScript React classes'
);
expect(console.error.calls[3].args[0]).toContain(
'setProps(...) is deprecated in plain JavaScript React classes'
);
expect(console.error.calls[4].args[0]).toContain(
'replaceProps(...) is deprecated in plain JavaScript React classes'
);
});
it('supports this.context passed via getChildContext', function() {
class Bar {
render() {
return <div className={this.context.bar} />;
}
}
Bar.contextTypes = {bar: React.PropTypes.string};
class Foo {
getChildContext() {
return {bar: 'bar-through-context'};
}
render() {
return <Bar />;
}
}
Foo.childContextTypes = {bar: React.PropTypes.string};
test(<Foo />, 'DIV', 'bar-through-context');
});
it('supports classic refs', function() {
class Foo {
render() {
return <Inner name="foo" ref="inner" />;
}
}
var instance = test(<Foo />, 'DIV', 'foo');
expect(instance.refs.inner.getName()).toBe('foo');
});
it('supports drilling through to the DOM using findDOMNode', function() {
var instance = test(<Inner name="foo" />, 'DIV', 'foo');
var node = React.findDOMNode(instance);
expect(node).toBe(container.firstChild);
});
});
| bsd-3-clause |
mohamed--abdel-maksoud/chromium.src | ash/shell.cc | 43179 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/shell.h"
#include <algorithm>
#include <string>
#include "ash/accelerators/accelerator_controller.h"
#include "ash/accelerators/accelerator_delegate.h"
#include "ash/accelerators/focus_manager_factory.h"
#include "ash/accelerators/nested_accelerator_delegate.h"
#include "ash/ash_switches.h"
#include "ash/autoclick/autoclick_controller.h"
#include "ash/desktop_background/desktop_background_controller.h"
#include "ash/desktop_background/desktop_background_view.h"
#include "ash/desktop_background/user_wallpaper_delegate.h"
#include "ash/display/cursor_window_controller.h"
#include "ash/display/display_controller.h"
#include "ash/display/display_manager.h"
#include "ash/display/event_transformation_handler.h"
#include "ash/display/mouse_cursor_event_filter.h"
#include "ash/display/screen_position_controller.h"
#include "ash/drag_drop/drag_drop_controller.h"
#include "ash/first_run/first_run_helper_impl.h"
#include "ash/focus_cycler.h"
#include "ash/frame/custom_frame_view_ash.h"
#include "ash/gpu_support.h"
#include "ash/high_contrast/high_contrast_controller.h"
#include "ash/host/ash_window_tree_host_init_params.h"
#include "ash/keyboard_uma_event_filter.h"
#include "ash/magnifier/magnification_controller.h"
#include "ash/magnifier/partial_magnification_controller.h"
#include "ash/media_delegate.h"
#include "ash/new_window_delegate.h"
#include "ash/root_window_controller.h"
#include "ash/session/session_state_delegate.h"
#include "ash/shelf/app_list_shelf_item_delegate.h"
#include "ash/shelf/shelf_delegate.h"
#include "ash/shelf/shelf_item_delegate.h"
#include "ash/shelf/shelf_item_delegate_manager.h"
#include "ash/shelf/shelf_layout_manager.h"
#include "ash/shelf/shelf_model.h"
#include "ash/shelf/shelf_widget.h"
#include "ash/shelf/shelf_window_watcher.h"
#include "ash/shell_delegate.h"
#include "ash/shell_factory.h"
#include "ash/shell_init_params.h"
#include "ash/shell_window_ids.h"
#include "ash/system/locale/locale_notification_controller.h"
#include "ash/system/status_area_widget.h"
#include "ash/system/tray/system_tray_delegate.h"
#include "ash/system/tray/system_tray_notifier.h"
#include "ash/wm/app_list_controller.h"
#include "ash/wm/ash_focus_rules.h"
#include "ash/wm/ash_native_cursor_manager.h"
#include "ash/wm/coordinate_conversion.h"
#include "ash/wm/event_client_impl.h"
#include "ash/wm/lock_state_controller.h"
#include "ash/wm/maximize_mode/maximize_mode_controller.h"
#include "ash/wm/maximize_mode/maximize_mode_window_manager.h"
#include "ash/wm/mru_window_tracker.h"
#include "ash/wm/overlay_event_filter.h"
#include "ash/wm/overview/window_selector_controller.h"
#include "ash/wm/power_button_controller.h"
#include "ash/wm/resize_shadow_controller.h"
#include "ash/wm/root_window_layout_manager.h"
#include "ash/wm/screen_dimmer.h"
#include "ash/wm/system_gesture_event_filter.h"
#include "ash/wm/system_modal_container_event_filter.h"
#include "ash/wm/system_modal_container_layout_manager.h"
#include "ash/wm/toplevel_window_event_handler.h"
#include "ash/wm/video_detector.h"
#include "ash/wm/window_animations.h"
#include "ash/wm/window_cycle_controller.h"
#include "ash/wm/window_positioner.h"
#include "ash/wm/window_properties.h"
#include "ash/wm/window_util.h"
#include "ash/wm/workspace_controller.h"
#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "ui/aura/client/aura_constants.h"
#include "ui/aura/env.h"
#include "ui/aura/layout_manager.h"
#include "ui/aura/window.h"
#include "ui/aura/window_event_dispatcher.h"
#include "ui/base/ui_base_switches.h"
#include "ui/base/user_activity/user_activity_detector.h"
#include "ui/compositor/layer.h"
#include "ui/compositor/layer_animator.h"
#include "ui/events/event_target_iterator.h"
#include "ui/gfx/display.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/image/image_skia.h"
#include "ui/gfx/screen.h"
#include "ui/keyboard/keyboard.h"
#include "ui/keyboard/keyboard_controller.h"
#include "ui/keyboard/keyboard_switches.h"
#include "ui/keyboard/keyboard_util.h"
#include "ui/message_center/message_center.h"
#include "ui/views/corewm/tooltip_aura.h"
#include "ui/views/corewm/tooltip_controller.h"
#include "ui/views/focus/focus_manager_factory.h"
#include "ui/views/widget/native_widget_aura.h"
#include "ui/views/widget/widget.h"
#include "ui/wm/core/accelerator_filter.h"
#include "ui/wm/core/compound_event_filter.h"
#include "ui/wm/core/focus_controller.h"
#include "ui/wm/core/input_method_event_filter.h"
#include "ui/wm/core/nested_accelerator_controller.h"
#include "ui/wm/core/shadow_controller.h"
#include "ui/wm/core/visibility_controller.h"
#include "ui/wm/core/window_modality_controller.h"
#if defined(OS_CHROMEOS)
#if defined(USE_X11)
#include "ui/gfx/x/x11_types.h"
#endif // defined(USE_X11)
#include "ash/accelerators/magnifier_key_scroller.h"
#include "ash/accelerators/spoken_feedback_toggler.h"
#include "ash/ash_constants.h"
#include "ash/content/display/screen_orientation_delegate_chromeos.h"
#include "ash/display/display_change_observer_chromeos.h"
#include "ash/display/display_configurator_animation.h"
#include "ash/display/display_error_observer_chromeos.h"
#include "ash/display/projecting_observer_chromeos.h"
#include "ash/display/resolution_notification_controller.h"
#include "ash/sticky_keys/sticky_keys_controller.h"
#include "ash/system/chromeos/bluetooth/bluetooth_notification_controller.h"
#include "ash/system/chromeos/brightness/brightness_controller_chromeos.h"
#include "ash/system/chromeos/power/power_event_observer.h"
#include "ash/system/chromeos/power/power_status.h"
#include "ash/system/chromeos/power/video_activity_notifier.h"
#include "ash/system/chromeos/session/last_window_closed_logout_reminder.h"
#include "ash/system/chromeos/session/logout_confirmation_controller.h"
#include "ash/touch/touch_transformer_controller.h"
#include "ash/virtual_keyboard_controller.h"
#include "base/bind_helpers.h"
#include "base/sys_info.h"
#include "chromeos/accelerometer/accelerometer_reader.h"
#include "chromeos/dbus/dbus_thread_manager.h"
#include "ui/chromeos/user_activity_power_manager_notifier.h"
#include "ui/display/chromeos/display_configurator.h"
#endif // defined(OS_CHROMEOS)
namespace ash {
namespace {
using aura::Window;
using views::Widget;
// A Corewm VisibilityController subclass that calls the Ash animation routine
// so we can pick up our extended animations. See ash/wm/window_animations.h.
class AshVisibilityController : public ::wm::VisibilityController {
 public:
  AshVisibilityController() {}
  ~AshVisibilityController() override {}
 private:
  // Overridden from ::wm::VisibilityController:
  // Routes child-window visibility changes through Ash's animation helper so
  // the extended show/hide animations run (see ash/wm/window_animations.h).
  bool CallAnimateOnChildWindowVisibilityChanged(aura::Window* window,
                                                 bool visible) override {
    return AnimateOnChildWindowVisibilityChanged(window, visible);
  }
  DISALLOW_COPY_AND_ASSIGN(AshVisibilityController);
};
// Translates the shell init params into the subset needed to construct an
// AshWindowTreeHost. Only the remote HWND carries over, and only on Windows.
AshWindowTreeHostInitParams ShellInitParamsToAshWindowTreeHostInitParams(
    const ShellInitParams& shell_init_params) {
  AshWindowTreeHostInitParams ash_init_params;
#if defined(OS_WIN)
  ash_init_params.remote_hwnd = shell_init_params.remote_hwnd;
#endif
  return ash_init_params;
}
} // namespace
// static
Shell* Shell::instance_ = NULL;
// static
bool Shell::initially_hide_cursor_ = false;
////////////////////////////////////////////////////////////////////////////////
// Shell, public:
// static
// Creates the singleton Shell. CHECK-fails if an instance already exists;
// Init() runs before the instance is returned to callers.
Shell* Shell::CreateInstance(const ShellInitParams& init_params) {
  CHECK(!instance_);
  instance_ = new Shell(init_params.delegate);
  instance_->Init(init_params);
  return instance_;
}
// static
Shell* Shell::GetInstance() {
CHECK(instance_);
return instance_;
}
// static
bool Shell::HasInstance() {
return !!instance_;
}
// static
void Shell::DeleteInstance() {
delete instance_;
instance_ = NULL;
}
// static
RootWindowController* Shell::GetPrimaryRootWindowController() {
CHECK(HasInstance());
return GetRootWindowController(GetPrimaryRootWindow());
}
// static
Shell::RootWindowControllerList Shell::GetAllRootWindowControllers() {
CHECK(HasInstance());
return Shell::GetInstance()->display_controller()->
GetAllRootWindowControllers();
}
// static
aura::Window* Shell::GetPrimaryRootWindow() {
CHECK(HasInstance());
return GetInstance()->display_controller()->GetPrimaryRootWindow();
}
// static
aura::Window* Shell::GetTargetRootWindow() {
CHECK(HasInstance());
Shell* shell = GetInstance();
if (shell->scoped_target_root_window_)
return shell->scoped_target_root_window_;
return shell->target_root_window_;
}
// static
gfx::Screen* Shell::GetScreen() {
return gfx::Screen::GetScreenByType(gfx::SCREEN_TYPE_ALTERNATE);
}
// static
aura::Window::Windows Shell::GetAllRootWindows() {
CHECK(HasInstance());
return Shell::GetInstance()->display_controller()->
GetAllRootWindows();
}
// static
aura::Window* Shell::GetContainer(aura::Window* root_window,
int container_id) {
return root_window->GetChildById(container_id);
}
// static
const aura::Window* Shell::GetContainer(const aura::Window* root_window,
int container_id) {
return root_window->GetChildById(container_id);
}
// static
std::vector<aura::Window*> Shell::GetContainersFromAllRootWindows(
int container_id,
aura::Window* priority_root) {
std::vector<aura::Window*> containers;
aura::Window::Windows root_windows = GetAllRootWindows();
for (aura::Window::Windows::const_iterator it = root_windows.begin();
it != root_windows.end(); ++it) {
aura::Window* container = (*it)->GetChildById(container_id);
if (container) {
if (priority_root && priority_root->Contains(container))
containers.insert(containers.begin(), container);
else
containers.push_back(container);
}
}
return containers;
}
// Shows a context menu at |location_in_screen| on whichever root window
// contains that point. Menus are suppressed before login and while the
// screen is locked.
void Shell::ShowContextMenu(const gfx::Point& location_in_screen,
                            ui::MenuSourceType source_type) {
  // No context menus if there is no session with an active user.
  if (!session_state_delegate_->NumberOfLoggedInUsers())
    return;
  // No context menus when screen is locked.
  if (session_state_delegate_->IsScreenLocked())
    return;
  // Pick the root window whose bounds match the (point-sized) screen rect.
  aura::Window* root =
      wm::GetRootWindowMatching(gfx::Rect(location_in_screen, gfx::Size()));
  GetRootWindowController(root)
      ->ShowContextMenu(location_in_screen, source_type);
}
void Shell::ShowAppList(aura::Window* window) {
// If the context window is not given, show it on the target root window.
if (!window)
window = GetTargetRootWindow();
if (!app_list_controller_)
app_list_controller_.reset(new AppListController);
app_list_controller_->Show(window);
}
void Shell::DismissAppList() {
if (!app_list_controller_)
return;
app_list_controller_->Dismiss();
}
// Hides the app list when it is currently visible; otherwise shows it on
// |window| (or the target root window when |window| is NULL).
void Shell::ToggleAppList(aura::Window* window) {
  if (app_list_controller_ && app_list_controller_->IsVisible())
    DismissAppList();
  else
    ShowAppList(window);
}
bool Shell::GetAppListTargetVisibility() const {
return app_list_controller_.get() &&
app_list_controller_->GetTargetVisibility();
}
aura::Window* Shell::GetAppListWindow() {
return app_list_controller_.get() ? app_list_controller_->GetWindow() : NULL;
}
app_list::AppListView* Shell::GetAppListView() {
return app_list_controller_.get() ? app_list_controller_->GetView() : NULL;
}
bool Shell::IsSystemModalWindowOpen() const {
if (simulate_modal_window_open_for_testing_)
return true;
const std::vector<aura::Window*> containers = GetContainersFromAllRootWindows(
kShellWindowId_SystemModalContainer, NULL);
for (std::vector<aura::Window*>::const_iterator cit = containers.begin();
cit != containers.end(); ++cit) {
for (aura::Window::Windows::const_iterator wit = (*cit)->children().begin();
wit != (*cit)->children().end(); ++wit) {
if ((*wit)->GetProperty(aura::client::kModalKey) ==
ui::MODAL_TYPE_SYSTEM && (*wit)->TargetVisibility()) {
return true;
}
}
}
return false;
}
views::NonClientFrameView* Shell::CreateDefaultNonClientFrameView(
views::Widget* widget) {
// Use translucent-style window frames for dialogs.
return new CustomFrameViewAsh(widget);
}
// Moves focus to the next (FORWARD) or previous (otherwise) focusable
// top-level widget via the focus cycler.
void Shell::RotateFocus(Direction direction) {
  if (direction == FORWARD)
    focus_cycler_->RotateFocus(FocusCycler::FORWARD);
  else
    focus_cycler_->RotateFocus(FocusCycler::BACKWARD);
}
void Shell::SetDisplayWorkAreaInsets(Window* contains,
const gfx::Insets& insets) {
if (!display_controller_->UpdateWorkAreaOfDisplayNearestWindow(
contains, insets)) {
return;
}
FOR_EACH_OBSERVER(ShellObserver, observers_,
OnDisplayWorkAreaInsetsChanged());
}
void Shell::OnLoginStateChanged(user::LoginStatus status) {
FOR_EACH_OBSERVER(ShellObserver, observers_, OnLoginStateChanged(status));
}
void Shell::OnLoginUserProfilePrepared() {
CreateShelf();
CreateKeyboard();
}
// Forwards the new login status to every root window controller.
void Shell::UpdateAfterLoginStatusChange(user::LoginStatus status) {
  for (RootWindowController* controller : GetAllRootWindowControllers())
    controller->UpdateAfterLoginStatusChange(status);
}
void Shell::OnAppTerminating() {
FOR_EACH_OBSERVER(ShellObserver, observers_, OnAppTerminating());
}
// Notifies observers of a lock/unlock transition. In debug builds, verifies
// on unlock that the lock-screen system modal containers are empty on every
// display.
void Shell::OnLockStateChanged(bool locked) {
  FOR_EACH_OBSERVER(ShellObserver, observers_, OnLockStateChanged(locked));
#ifndef NDEBUG
  // Make sure that there is no system modal in Lock layer when unlocked.
  if (!locked) {
    std::vector<aura::Window*> containers = GetContainersFromAllRootWindows(
        kShellWindowId_LockSystemModalContainer, GetPrimaryRootWindow());
    for (std::vector<aura::Window*>::const_iterator iter = containers.begin();
         iter != containers.end(); ++iter) {
      DCHECK_EQ(0u, (*iter)->children().size());
    }
  }
#endif
}
void Shell::OnCastingSessionStartedOrStopped(bool started) {
#if defined(OS_CHROMEOS)
if (projecting_observer_)
projecting_observer_->OnCastingSessionStartedOrStopped(started);
#endif
}
void Shell::OnOverviewModeStarting() {
FOR_EACH_OBSERVER(ShellObserver, observers_, OnOverviewModeStarting());
}
void Shell::OnOverviewModeEnding() {
FOR_EACH_OBSERVER(ShellObserver, observers_, OnOverviewModeEnding());
}
void Shell::OnMaximizeModeStarted() {
FOR_EACH_OBSERVER(ShellObserver, observers_, OnMaximizeModeStarted());
}
void Shell::OnMaximizeModeEnded() {
FOR_EACH_OBSERVER(ShellObserver, observers_, OnMaximizeModeEnded());
}
void Shell::OnRootWindowAdded(aura::Window* root_window) {
FOR_EACH_OBSERVER(ShellObserver, observers_, OnRootWindowAdded(root_window));
}
void Shell::CreateShelf() {
RootWindowControllerList controllers = GetAllRootWindowControllers();
for (RootWindowControllerList::iterator iter = controllers.begin();
iter != controllers.end(); ++iter)
(*iter)->shelf()->CreateShelf();
}
void Shell::OnShelfCreatedForRootWindow(aura::Window* root_window) {
FOR_EACH_OBSERVER(ShellObserver,
observers_,
OnShelfCreatedForRootWindow(root_window));
}
void Shell::CreateKeyboard() {
// TODO(bshe): Primary root window controller may not be the controller to
// attach virtual keyboard. See http://crbug.com/303429
InitKeyboard();
GetPrimaryRootWindowController()->
ActivateKeyboard(keyboard::KeyboardController::GetInstance());
}
// Detaches the virtual keyboard from every root window controller (if a
// KeyboardController exists) and then destroys the singleton controller.
void Shell::DeactivateKeyboard() {
  if (keyboard::KeyboardController::GetInstance()) {
    RootWindowControllerList controllers = GetAllRootWindowControllers();
    for (RootWindowControllerList::iterator iter = controllers.begin();
         iter != controllers.end(); ++iter) {
      (*iter)->DeactivateKeyboard(keyboard::KeyboardController::GetInstance());
    }
  }
  // Unconditionally drop the singleton, even if it was already NULL.
  keyboard::KeyboardController::ResetInstance(NULL);
}
void Shell::ShowShelf() {
RootWindowControllerList controllers = GetAllRootWindowControllers();
for (RootWindowControllerList::iterator iter = controllers.begin();
iter != controllers.end(); ++iter)
(*iter)->ShowShelf();
}
void Shell::AddShellObserver(ShellObserver* observer) {
observers_.AddObserver(observer);
}
void Shell::RemoveShellObserver(ShellObserver* observer) {
observers_.RemoveObserver(observer);
}
#if defined(OS_CHROMEOS)
bool Shell::ShouldSaveDisplaySettings() {
return !((maximize_mode_controller_->IsMaximizeModeWindowManagerEnabled() &&
maximize_mode_controller_->
ignore_display_configuration_updates()) ||
resolution_notification_controller_->DoesNotificationTimeout());
}
#endif
// Recomputes shelf visibility on every display; root windows whose shelf()
// is still null are skipped.
void Shell::UpdateShelfVisibility() {
  RootWindowControllerList controllers = GetAllRootWindowControllers();
  for (RootWindowControllerList::iterator iter = controllers.begin();
       iter != controllers.end(); ++iter)
    // Note: this entire if statement is the (unbraced) body of the for loop.
    if ((*iter)->shelf())
      (*iter)->UpdateShelfVisibility();
}
void Shell::SetShelfAutoHideBehavior(ShelfAutoHideBehavior behavior,
aura::Window* root_window) {
ash::ShelfLayoutManager::ForShelf(root_window)->SetAutoHideBehavior(behavior);
}
ShelfAutoHideBehavior Shell::GetShelfAutoHideBehavior(
aura::Window* root_window) const {
return ash::ShelfLayoutManager::ForShelf(root_window)->auto_hide_behavior();
}
void Shell::SetShelfAlignment(ShelfAlignment alignment,
aura::Window* root_window) {
if (ash::ShelfLayoutManager::ForShelf(root_window)->SetAlignment(alignment)) {
FOR_EACH_OBSERVER(
ShellObserver, observers_, OnShelfAlignmentChanged(root_window));
}
}
ShelfAlignment Shell::GetShelfAlignment(const aura::Window* root_window) {
return GetRootWindowController(root_window)
->GetShelfLayoutManager()
->GetAlignment();
}
void Shell::SetDimming(bool should_dim) {
RootWindowControllerList controllers = GetAllRootWindowControllers();
for (RootWindowControllerList::iterator iter = controllers.begin();
iter != controllers.end(); ++iter)
(*iter)->screen_dimmer()->SetDimming(should_dim);
}
// Broadcasts a fullscreen-state change on |root_window| to all
// ShellObservers.
void Shell::NotifyFullscreenStateChange(bool is_fullscreen,
                                        aura::Window* root_window) {
  FOR_EACH_OBSERVER(ShellObserver, observers_, OnFullscreenStateChanged(
      is_fullscreen, root_window));
}
// Ensures the system-modal event filter is installed, then asks every root
// window's modal container for |window| to create its modal background.
void Shell::CreateModalBackground(aura::Window* window) {
  // Lazily install the filter that confines events while a modal is up.
  if (!modality_filter_) {
    modality_filter_.reset(new SystemModalContainerEventFilter(this));
    AddPreTargetHandler(modality_filter_.get());
  }
  RootWindowControllerList root_controllers = GetAllRootWindowControllers();
  for (auto controller : root_controllers)
    controller->GetSystemModalLayoutManager(window)->CreateModalBackground();
}
// Called when the system-modal window |removed| goes away.  Tries each root
// window in turn to activate the next modal window; if no root can, the
// modality filter is uninstalled and all modal backgrounds are destroyed.
void Shell::OnModalWindowRemoved(aura::Window* removed) {
  RootWindowControllerList controllers = GetAllRootWindowControllers();
  bool activated = false;
  // Stop iterating as soon as one root activates a successor modal window.
  for (RootWindowControllerList::iterator iter = controllers.begin();
       iter != controllers.end() && !activated; ++iter) {
    activated = (*iter)->GetSystemModalLayoutManager(removed)->
        ActivateNextModalWindow();
  }
  if (!activated) {
    // No modal windows remain anywhere: tear down modal event filtering.
    RemovePreTargetHandler(modality_filter_.get());
    modality_filter_.reset();
    for (RootWindowControllerList::iterator iter = controllers.begin();
         iter != controllers.end(); ++iter)
      (*iter)->GetSystemModalLayoutManager(removed)->DestroyModalBackground();
  }
}
// Returns the web-notification tray hosted in the primary display's
// status area.
WebNotificationTray* Shell::GetWebNotificationTray() {
  return GetPrimaryRootWindowController()->shelf()->
      status_area_widget()->web_notification_tray();
}
bool Shell::HasPrimaryStatusArea() {
ShelfWidget* shelf = GetPrimaryRootWindowController()->shelf();
return shelf && shelf->status_area_widget();
}
// Returns the system tray of the primary display.
SystemTray* Shell::GetPrimarySystemTray() {
  return GetPrimaryRootWindowController()->GetSystemTray();
}
// Lazily creates the shelf delegate plus the shelf model, item delegate
// manager and window watcher it depends on.  Creation order within this
// block matters (see inline comments); subsequent calls return the cached
// delegate.
ShelfDelegate* Shell::GetShelfDelegate() {
  if (!shelf_delegate_) {
    shelf_model_.reset(new ShelfModel);
    // Creates ShelfItemDelegateManager before ShelfDelegate.
    shelf_item_delegate_manager_.reset(
        new ShelfItemDelegateManager(shelf_model_.get()));
    shelf_delegate_.reset(delegate_->CreateShelfDelegate(shelf_model_.get()));
    scoped_ptr<ShelfItemDelegate> controller(new AppListShelfItemDelegate);
    // Finding the shelf model's location of the app list and setting its
    // ShelfItemDelegate.
    int app_list_index = shelf_model_->GetItemIndexForType(TYPE_APP_LIST);
    // The app-list item is expected to exist already (DCHECKed below).
    DCHECK_GE(app_list_index, 0);
    ShelfID app_list_id = shelf_model_->items()[app_list_index].id;
    DCHECK(app_list_id);
    shelf_item_delegate_manager_->SetShelfItemDelegate(app_list_id,
                                                       controller.Pass());
    shelf_window_watcher_.reset(new ShelfWindowWatcher(
        shelf_model_.get(), shelf_item_delegate_manager_.get()));
  }
  return shelf_delegate_.get();
}
// Toggles the touch-HUD projection overlay.  No-op (and no notification)
// when the state is unchanged.
void Shell::SetTouchHudProjectionEnabled(bool enabled) {
  if (is_touch_hud_projection_enabled_ == enabled)
    return;
  is_touch_hud_projection_enabled_ = enabled;
  FOR_EACH_OBSERVER(ShellObserver, observers_,
                    OnTouchHudProjectionToggled(enabled));
}
#if defined(OS_CHROMEOS)
// Creates a helper for the first-run tutorial.  Returns a raw new'd object;
// presumably the caller takes ownership — TODO(review): confirm the
// ownership contract at the call sites.
ash::FirstRunHelper* Shell::CreateFirstRunHelper() {
  return new ash::FirstRunHelperImpl;
}
// Switches between the composited (software) cursor and the native hardware
// cursor; the two are mutually exclusive, hence the inverted flag below.
void Shell::SetCursorCompositingEnabled(bool enabled) {
  display_controller_->cursor_window_controller()->SetCursorCompositingEnabled(
      enabled);
  native_cursor_manager_->SetNativeCursorEnabled(!enabled);
}
#endif  // defined(OS_CHROMEOS)
// Runs the initial workspace animation on the primary display's workspace
// controller.
void Shell::DoInitialWorkspaceAnimation() {
  // DoInitialAnimation() returns void, so the "return" that previously
  // prefixed this call was redundant and has been removed.
  GetPrimaryRootWindowController()->workspace_controller()->
      DoInitialAnimation();
}
////////////////////////////////////////////////////////////////////////////////
// Shell, private:
// Constructs the Shell.  Only lightweight members are created here; the
// heavyweight setup happens later in Init().  Takes ownership of |delegate|
// (it is stored in a smart member, per the DCHECK on delegate_.get()).
// Member initialization order must match the declaration order of the
// fields.
Shell::Shell(ShellDelegate* delegate)
    : target_root_window_(NULL),
      scoped_target_root_window_(NULL),
      delegate_(delegate),
      window_positioner_(new WindowPositioner),
      activation_client_(NULL),
#if defined(OS_CHROMEOS)
      accelerometer_reader_(new chromeos::AccelerometerReader()),
      display_configurator_(new ui::DisplayConfigurator()),
#endif  // defined(OS_CHROMEOS)
      native_cursor_manager_(new AshNativeCursorManager),
      cursor_manager_(
          scoped_ptr<::wm::NativeCursorManager>(native_cursor_manager_)),
      simulate_modal_window_open_for_testing_(false),
      is_touch_hud_projection_enabled_(false) {
  DCHECK(delegate_.get());
  gpu_support_.reset(delegate_->CreateGPUSupport());
  display_manager_.reset(new DisplayManager);
  display_controller_.reset(new DisplayController);
  user_metrics_recorder_.reset(new UserMetricsRecorder);
#if defined(OS_CHROMEOS)
  PowerStatus::Initialize();
#endif
}
// Tears down the Shell.  The destruction order below is significant
// throughout — it roughly reverses Init() — so the statement sequence must
// not be reordered casually; the inline comments record the individual
// ordering constraints.
Shell::~Shell() {
  TRACE_EVENT0("shutdown", "ash::Shell::Destructor");
  delegate_->PreShutdown();
  views::FocusManagerFactory::Install(NULL);
  // Remove the focus from any window. This will prevent overhead and side
  // effects (e.g. crashes) from changing focus during shutdown.
  // See bug crbug.com/134502.
  aura::client::GetFocusClient(GetPrimaryRootWindow())->FocusWindow(NULL);
  // Please keep in same order as in Init() because it's easy to miss one.
  if (window_modality_controller_)
    window_modality_controller_.reset();
#if defined(OS_CHROMEOS)
  RemovePreTargetHandler(magnifier_key_scroll_handler_.get());
  magnifier_key_scroll_handler_.reset();
  RemovePreTargetHandler(speech_feedback_handler_.get());
  speech_feedback_handler_.reset();
#endif
  RemovePreTargetHandler(user_activity_detector_.get());
  RemovePreTargetHandler(overlay_filter_.get());
  RemovePreTargetHandler(input_method_filter_.get());
  RemovePreTargetHandler(accelerator_filter_.get());
  RemovePreTargetHandler(event_transformation_handler_.get());
  RemovePreTargetHandler(toplevel_window_event_handler_.get());
  RemovePostTargetHandler(toplevel_window_event_handler_.get());
  RemovePreTargetHandler(system_gesture_filter_.get());
  RemovePreTargetHandler(keyboard_metrics_filter_.get());
  RemovePreTargetHandler(mouse_cursor_filter_.get());
  // TooltipController is deleted with the Shell so removing its references.
  RemovePreTargetHandler(tooltip_controller_.get());
  // Destroy the virtual keyboard controller before the maximize mode
  // controller since the latter's destructor triggers events that the former
  // is listening to but no longer cares about.
#if defined(OS_CHROMEOS)
  virtual_keyboard_controller_.reset();
#endif
  // Destroy maximize mode controller early on since it has some observers
  // which need to be removed.
  maximize_mode_controller_->Shutdown();
  maximize_mode_controller_.reset();
  // AppList needs to be released before shelf layout manager, which is
  // destroyed with shelf container in the loop below. However, app list
  // container is now on top of shelf container and released after it.
  // TODO(xiyuan): Move it back when app list container is no longer needed.
  app_list_controller_.reset();
#if defined(OS_CHROMEOS)
  // Destroy the LastWindowClosedLogoutReminder before the
  // LogoutConfirmationController.
  last_window_closed_logout_reminder_.reset();
  // Destroy the LogoutConfirmationController before the SystemTrayDelegate.
  logout_confirmation_controller_.reset();
#endif
  // Destroy SystemTrayDelegate before destroying the status area(s).
  system_tray_delegate_->Shutdown();
  system_tray_delegate_.reset();
  locale_notification_controller_.reset();
  // Drag-and-drop must be canceled prior to close all windows.
  drag_drop_controller_.reset();
  // Controllers who have WindowObserver added must be deleted
  // before |display_controller_| is deleted.
#if defined(OS_CHROMEOS)
  // VideoActivityNotifier must be deleted before |video_detector_| is
  // deleted because it's observing video activity through
  // VideoDetectorObserver interface.
  video_activity_notifier_.reset();
#endif  // defined(OS_CHROMEOS)
  video_detector_.reset();
  high_contrast_controller_.reset();
  shadow_controller_.reset();
  resize_shadow_controller_.reset();
  window_cycle_controller_.reset();
  window_selector_controller_.reset();
  mru_window_tracker_.reset();
  // |shelf_window_watcher_| has a weak pointer to |shelf_model_|
  // and has window observers.
  shelf_window_watcher_.reset();
  // Destroy all child windows including widgets.
  display_controller_->CloseChildWindows();
  display_controller_->CloseMirroringDisplay();
  // Chrome implementation of shelf delegate depends on FocusClient,
  // so must be deleted before |focus_client_|.
  shelf_delegate_.reset();
  focus_client_.reset();
  // Destroy SystemTrayNotifier after destroying SystemTray as TrayItems
  // needs to remove observers from it.
  system_tray_notifier_.reset();
  // These need a valid Shell instance to clean up properly, so explicitly
  // delete them before invalidating the instance.
  // Alphabetical. TODO(oshima): sort.
  magnification_controller_.reset();
  partial_magnification_controller_.reset();
  tooltip_controller_.reset();
  event_client_.reset();
  nested_accelerator_controller_.reset();
  toplevel_window_event_handler_.reset();
  visibility_controller_.reset();
  // |shelf_item_delegate_manager_| observes |shelf_model_|. It must be
  // destroyed before |shelf_model_| is destroyed.
  shelf_item_delegate_manager_.reset();
  shelf_model_.reset();
  power_button_controller_.reset();
  lock_state_controller_.reset();
#if defined(OS_CHROMEOS)
  resolution_notification_controller_.reset();
#endif
  desktop_background_controller_.reset();
  mouse_cursor_filter_.reset();
#if defined(OS_CHROMEOS)
  touch_transformer_controller_.reset();
#endif  // defined(OS_CHROMEOS)
  // This also deletes all RootWindows. Note that we invoke Shutdown() on
  // DisplayController before resetting |display_controller_|, since
  // destruction of its owned RootWindowControllers relies on the value.
  display_manager_->CreateScreenForShutdown();
  display_controller_->Shutdown();
  display_controller_.reset();
  screen_position_controller_.reset();
  accessibility_delegate_.reset();
  new_window_delegate_.reset();
  media_delegate_.reset();
  keyboard::KeyboardController::ResetInstance(NULL);
#if defined(OS_CHROMEOS)
  // Drop display-configurator observers before destroying them (each is
  // only removed if it was actually created in Init()).
  if (display_change_observer_)
    display_configurator_->RemoveObserver(display_change_observer_.get());
  if (display_configurator_animation_)
    display_configurator_->RemoveObserver(
        display_configurator_animation_.get());
  if (display_error_observer_)
    display_configurator_->RemoveObserver(display_error_observer_.get());
  if (projecting_observer_)
    display_configurator_->RemoveObserver(projecting_observer_.get());
  display_change_observer_.reset();
  PowerStatus::Shutdown();
  // Ensure that DBusThreadManager outlives this Shell.
  DCHECK(chromeos::DBusThreadManager::IsInitialized());
#endif
  DCHECK(instance_ == this);
  instance_ = NULL;
}
// Performs the heavyweight one-time initialization of the Shell: display
// configuration, installation of the (order-sensitive) event-filter chain,
// creation of the controllers, and creation of the primary root window.
// Statement order is significant throughout; see the inline comments and
// the mirrored teardown order in ~Shell().
void Shell::Init(const ShellInitParams& init_params) {
  delegate_->PreInit();
  bool display_initialized = display_manager_->InitFromCommandLine();
#if defined(OS_CHROMEOS)
  display_configurator_->Init(!gpu_support_->IsPanelFittingDisabled());
  display_configurator_animation_.reset(new DisplayConfiguratorAnimation());
  display_configurator_->AddObserver(display_configurator_animation_.get());
  // The DBusThreadManager must outlive this Shell. See the DCHECK in ~Shell.
  chromeos::DBusThreadManager* dbus_thread_manager =
      chromeos::DBusThreadManager::Get();
  projecting_observer_.reset(
      new ProjectingObserver(dbus_thread_manager->GetPowerManagerClient()));
  display_configurator_->AddObserver(projecting_observer_.get());
  if (!display_initialized && base::SysInfo::IsRunningOnChromeOS()) {
    display_change_observer_.reset(new DisplayChangeObserver);
    // Register |display_change_observer_| first so that the rest of
    // observer gets invoked after the root windows are configured.
    display_configurator_->AddObserver(display_change_observer_.get());
    display_error_observer_.reset(new DisplayErrorObserver());
    display_configurator_->AddObserver(display_error_observer_.get());
    display_configurator_->set_state_controller(display_change_observer_.get());
    display_configurator_->set_mirroring_controller(display_manager_.get());
    display_configurator_->ForceInitialConfigure(
        delegate_->IsFirstRunAfterBoot() ? kChromeOsBootColor : 0);
    display_initialized = true;
  }
#endif  // defined(OS_CHROMEOS)
  if (!display_initialized)
    display_manager_->InitDefaultDisplay();
  display_manager_->RefreshFontParams();
  // Install the custom factory first so that views::FocusManagers for Tray,
  // Shelf, and WallPaper could be created by the factory.
  views::FocusManagerFactory::Install(new AshFocusManagerFactory);
  aura::Env::CreateInstance(true);
  aura::Env::GetInstance()->set_context_factory(init_params.context_factory);
  // The WindowModalityController needs to be at the front of the input event
  // pretarget handler list to ensure that it processes input events when modal
  // windows are active.
  window_modality_controller_.reset(
      new ::wm::WindowModalityController(this));
  env_filter_.reset(new ::wm::CompoundEventFilter);
  AddPreTargetHandler(env_filter_.get());
  // |focus_controller| serves double duty: it is stored both as the focus
  // client and (non-owning) as the activation client.
  ::wm::FocusController* focus_controller =
      new ::wm::FocusController(new wm::AshFocusRules);
  focus_client_.reset(focus_controller);
  activation_client_ = focus_controller;
  activation_client_->AddObserver(this);
  focus_cycler_.reset(new FocusCycler());
  screen_position_controller_.reset(new ScreenPositionController);
  display_controller_->Start();
  display_controller_->CreatePrimaryHost(
      ShellInitParamsToAshWindowTreeHostInitParams(init_params));
  aura::Window* root_window = display_controller_->GetPrimaryRootWindow();
  target_root_window_ = root_window;
#if defined(OS_CHROMEOS)
  resolution_notification_controller_.reset(
      new ResolutionNotificationController);
#endif
  cursor_manager_.SetDisplay(GetScreen()->GetPrimaryDisplay());
  nested_accelerator_controller_.reset(
      new ::wm::NestedAcceleratorController(new NestedAcceleratorDelegate));
  accelerator_controller_.reset(new AcceleratorController);
  maximize_mode_controller_.reset(new MaximizeModeController());
#if defined(OS_CHROMEOS)
  magnifier_key_scroll_handler_ = MagnifierKeyScroller::CreateHandler();
  AddPreTargetHandler(magnifier_key_scroll_handler_.get());
  speech_feedback_handler_ = SpokenFeedbackToggler::CreateHandler();
  AddPreTargetHandler(speech_feedback_handler_.get());
#endif
  // The order in which event filters are added is significant.
  // ui::UserActivityDetector passes events to observers, so let them get
  // rewritten first.
  user_activity_detector_.reset(new ui::UserActivityDetector);
  AddPreTargetHandler(user_activity_detector_.get());
  overlay_filter_.reset(new OverlayEventFilter);
  AddPreTargetHandler(overlay_filter_.get());
  AddShellObserver(overlay_filter_.get());
  input_method_filter_.reset(new ::wm::InputMethodEventFilter(
      root_window->GetHost()->GetAcceleratedWidget()));
  AddPreTargetHandler(input_method_filter_.get());
  accelerator_filter_.reset(new ::wm::AcceleratorFilter(
      scoped_ptr< ::wm::AcceleratorDelegate>(new AcceleratorDelegate).Pass(),
      accelerator_controller_->accelerator_history()));
  AddPreTargetHandler(accelerator_filter_.get());
  event_transformation_handler_.reset(new EventTransformationHandler);
  AddPreTargetHandler(event_transformation_handler_.get());
  toplevel_window_event_handler_.reset(new ToplevelWindowEventHandler);
  system_gesture_filter_.reset(new SystemGestureEventFilter);
  AddPreTargetHandler(system_gesture_filter_.get());
  keyboard_metrics_filter_.reset(new KeyboardUMAEventFilter);
  AddPreTargetHandler(keyboard_metrics_filter_.get());
  // The keyboard system must be initialized before the RootWindowController is
  // created.
#if defined(OS_CHROMEOS)
  keyboard::InitializeKeyboard();
#endif
#if defined(OS_CHROMEOS)
  sticky_keys_controller_.reset(new StickyKeysController);
#endif
  lock_state_controller_.reset(new LockStateController);
  power_button_controller_.reset(new PowerButtonController(
      lock_state_controller_.get()));
#if defined(OS_CHROMEOS)
  // Pass the initial display state to PowerButtonController.
  power_button_controller_->OnDisplayModeChanged(
      display_configurator_->cached_displays());
#endif
  AddShellObserver(lock_state_controller_.get());
  drag_drop_controller_.reset(new DragDropController);
  mouse_cursor_filter_.reset(new MouseCursorEventFilter());
  PrependPreTargetHandler(mouse_cursor_filter_.get());
  // Create Controllers that may need root window.
  // TODO(oshima): Move as many controllers before creating
  // RootWindowController as possible.
  visibility_controller_.reset(new AshVisibilityController);
  magnification_controller_.reset(
      MagnificationController::CreateInstance());
  mru_window_tracker_.reset(new MruWindowTracker(activation_client_));
  partial_magnification_controller_.reset(
      new PartialMagnificationController());
  autoclick_controller_.reset(AutoclickController::CreateInstance());
  high_contrast_controller_.reset(new HighContrastController);
  video_detector_.reset(new VideoDetector);
  window_selector_controller_.reset(new WindowSelectorController());
  window_cycle_controller_.reset(new WindowCycleController());
  tooltip_controller_.reset(
      new views::corewm::TooltipController(
          scoped_ptr<views::corewm::Tooltip>(
              new views::corewm::TooltipAura(gfx::SCREEN_TYPE_ALTERNATE))));
  AddPreTargetHandler(tooltip_controller_.get());
  event_client_.reset(new EventClientImpl);
  // This controller needs to be set before SetupManagedWindowMode.
  desktop_background_controller_.reset(new DesktopBackgroundController());
  user_wallpaper_delegate_.reset(delegate_->CreateUserWallpaperDelegate());
  session_state_delegate_.reset(delegate_->CreateSessionStateDelegate());
  accessibility_delegate_.reset(delegate_->CreateAccessibilityDelegate());
  new_window_delegate_.reset(delegate_->CreateNewWindowDelegate());
  media_delegate_.reset(delegate_->CreateMediaDelegate());
  resize_shadow_controller_.reset(new ResizeShadowController());
  shadow_controller_.reset(
      new ::wm::ShadowController(activation_client_));
  // Create system_tray_notifier_ before the delegate.
  system_tray_notifier_.reset(new ash::SystemTrayNotifier());
  // Initialize system_tray_delegate_ before initializing StatusAreaWidget.
  system_tray_delegate_.reset(delegate()->CreateSystemTrayDelegate());
  DCHECK(system_tray_delegate_.get());
  locale_notification_controller_.reset(new LocaleNotificationController);
  // Initialize system_tray_delegate_ after StatusAreaWidget is created.
  system_tray_delegate_->Initialize();
#if defined(OS_CHROMEOS)
  // Create the LogoutConfirmationController after the SystemTrayDelegate.
  logout_confirmation_controller_.reset(new LogoutConfirmationController(
      base::Bind(&SystemTrayDelegate::SignOut,
                 base::Unretained(system_tray_delegate_.get()))));
  // Create TouchTransformerController before DisplayController::InitDisplays()
  // since TouchTransformerController listens on
  // DisplayController::Observer::OnDisplaysInitialized().
  touch_transformer_controller_.reset(new TouchTransformerController());
#endif  // defined(OS_CHROMEOS)
  display_controller_->InitDisplays();
#if defined(OS_CHROMEOS)
  // Needs to be created after InitDisplays() since it may cause the virtual
  // keyboard to be deployed.
  virtual_keyboard_controller_.reset(new VirtualKeyboardController);
#endif  // defined(OS_CHROMEOS)
  // The wallpaper needs to be created after RootWindowController has been
  // created (which calls OnWindowResized), otherwise the widget will not
  // paint when restoring after a browser crash. It also needs to be created
  // after InitSecondaryDisplays() to initialize the wallpapers in the
  // correct size.
  user_wallpaper_delegate_->InitializeWallpaper();
  if (initially_hide_cursor_)
    cursor_manager_.HideCursor();
  cursor_manager_.SetCursor(ui::kCursorPointer);
#if defined(OS_CHROMEOS)
  // Set accelerator controller delegates.
  accelerator_controller_->SetBrightnessControlDelegate(
      scoped_ptr<ash::BrightnessControlDelegate>(
          new ash::system::BrightnessControllerChromeos).Pass());
  power_event_observer_.reset(new PowerEventObserver());
  user_activity_notifier_.reset(
      new ui::UserActivityPowerManagerNotifier(user_activity_detector_.get()));
  video_activity_notifier_.reset(
      new VideoActivityNotifier(video_detector_.get()));
  bluetooth_notification_controller_.reset(new BluetoothNotificationController);
  last_window_closed_logout_reminder_.reset(new LastWindowClosedLogoutReminder);
  screen_orientation_delegate_.reset(new ScreenOrientationDelegate());
#endif
  // The compositor thread and main message loop have to be running in
  // order to create mirror window. Run it after the main message loop
  // is started.
  display_manager_->CreateMirrorWindowAsyncIfAny();
}
// (Re)creates the global virtual-keyboard controller when the keyboard is
// enabled.  Any existing controller is first deactivated on every root
// window before the global instance is replaced.
void Shell::InitKeyboard() {
  if (keyboard::IsKeyboardEnabled()) {
    if (keyboard::KeyboardController::GetInstance()) {
      RootWindowControllerList controllers = GetAllRootWindowControllers();
      for (RootWindowControllerList::iterator iter = controllers.begin();
           iter != controllers.end(); ++iter) {
        (*iter)->DeactivateKeyboard(
            keyboard::KeyboardController::GetInstance());
      }
    }
    keyboard::KeyboardControllerProxy* proxy =
        delegate_->CreateKeyboardControllerProxy();
    // Replace the global instance with a new controller wrapping the
    // delegate-provided proxy.
    keyboard::KeyboardController::ResetInstance(
        new keyboard::KeyboardController(proxy));
  }
}
// Wires a (new) root window up with all of the Shell-owned aura clients and
// event handlers.  Requires that Init() has already created the activation,
// visibility and drag-drop machinery (DCHECKed below).
void Shell::InitRootWindow(aura::Window* root_window) {
  DCHECK(activation_client_);
  DCHECK(visibility_controller_.get());
  DCHECK(drag_drop_controller_.get());
  aura::client::SetFocusClient(root_window, focus_client_.get());
  input_method_filter_->SetInputMethodPropertyInRootWindow(root_window);
  aura::client::SetActivationClient(root_window, activation_client_);
  // |activation_client_| is known to be the FocusController created in
  // Init(), so the downcast is safe; it also acts as a pre-target handler.
  ::wm::FocusController* focus_controller =
      static_cast< ::wm::FocusController*>(activation_client_);
  root_window->AddPreTargetHandler(focus_controller);
  aura::client::SetVisibilityClient(root_window, visibility_controller_.get());
  aura::client::SetDragDropClient(root_window, drag_drop_controller_.get());
  aura::client::SetScreenPositionClient(root_window,
                                        screen_position_controller_.get());
  aura::client::SetCursorClient(root_window, &cursor_manager_);
  aura::client::SetTooltipClient(root_window, tooltip_controller_.get());
  aura::client::SetEventClient(root_window, event_client_.get());
  aura::client::SetWindowMoveClient(root_window,
                                    toplevel_window_event_handler_.get());
  // The toplevel handler participates in both the pre- and post-target
  // phases of event dispatch.
  root_window->AddPreTargetHandler(toplevel_window_event_handler_.get());
  root_window->AddPostTargetHandler(toplevel_window_event_handler_.get());
  if (nested_accelerator_controller_) {
    aura::client::SetDispatcherClient(root_window,
                                      nested_accelerator_controller_.get());
  }
}
// Returns true if |window| may receive events while a system-modal dialog
// is active: either its root's modal container allows it, or it belongs to
// the virtual keyboard.
bool Shell::CanWindowReceiveEvents(aura::Window* window) {
  RootWindowControllerList root_controllers = GetAllRootWindowControllers();
  for (auto controller : root_controllers) {
    SystemModalContainerLayoutManager* modal_layout =
        controller->GetSystemModalLayoutManager(window);
    if (modal_layout && modal_layout->CanWindowReceiveEvents(window))
      return true;
    // Allow events to fall through to the virtual keyboard even if displaying
    // a system modal dialog.
    if (controller->IsVirtualKeyboardWindow(window))
      return true;
  }
  return false;
}
////////////////////////////////////////////////////////////////////////////////
// Shell, ui::EventTarget overrides:
// ui::EventTarget override: the Shell accepts every event.
bool Shell::CanAcceptEvent(const ui::Event& event) {
  return true;
}
// ui::EventTarget override: the Shell's parent in the event-target
// hierarchy is the aura environment singleton.
ui::EventTarget* Shell::GetParentTarget() {
  return aura::Env::GetInstance();
}
// ui::EventTarget override: the Shell exposes no child targets (returns an
// empty/null iterator).
scoped_ptr<ui::EventTargetIterator> Shell::GetChildIterator() const {
  return scoped_ptr<ui::EventTargetIterator>();
}
// ui::EventTarget override: the Shell is never asked to target events
// itself; reaching this is a programming error.
ui::EventTargeter* Shell::GetEventTargeter() {
  NOTREACHED();
  return NULL;
}
// ui::EventTarget override: intentionally a no-op.
void Shell::OnEvent(ui::Event* event) {
}
////////////////////////////////////////////////////////////////////////////////
// Shell, aura::client::ActivationChangeObserver implementation:
// aura::client::ActivationChangeObserver: tracks which root window hosts
// the active window so subsequent operations target the right display.
// |target_root_window_| is left untouched when activation is lost with no
// successor (|gained_active| == NULL).
void Shell::OnWindowActivated(aura::Window* gained_active,
                              aura::Window* lost_active) {
  if (gained_active)
    target_root_window_ = gained_active->GetRootWindow();
}
} // namespace ash
| bsd-3-clause |
shaotuanchen/sunflower_exp | tools/source/gcc-4.2.4/libjava/classpath/include/gnu_java_awt_peer_gtk_GtkLabelPeer.h | 1010 | /* DO NOT EDIT THIS FILE - it is machine generated */
#ifndef __gnu_java_awt_peer_gtk_GtkLabelPeer__
#define __gnu_java_awt_peer_gtk_GtkLabelPeer__
#include <jni.h>
#ifdef __cplusplus
extern "C"
{
#endif
JNIEXPORT void JNICALL Java_gnu_java_awt_peer_gtk_GtkLabelPeer_create (JNIEnv *env, jobject, jstring, jfloat);
JNIEXPORT void JNICALL Java_gnu_java_awt_peer_gtk_GtkLabelPeer_gtkWidgetModifyFont (JNIEnv *env, jobject, jstring, jint, jint);
JNIEXPORT void JNICALL Java_gnu_java_awt_peer_gtk_GtkLabelPeer_nativeSetAlignment (JNIEnv *env, jobject, jfloat);
JNIEXPORT void JNICALL Java_gnu_java_awt_peer_gtk_GtkLabelPeer_setNativeText (JNIEnv *env, jobject, jstring);
JNIEXPORT void JNICALL Java_gnu_java_awt_peer_gtk_GtkLabelPeer_setNativeBounds (JNIEnv *env, jobject, jint, jint, jint, jint);
JNIEXPORT void JNICALL Java_gnu_java_awt_peer_gtk_GtkLabelPeer_gtkWidgetGetPreferredDimensions (JNIEnv *env, jobject, jintArray);
#ifdef __cplusplus
}
#endif
#endif /* __gnu_java_awt_peer_gtk_GtkLabelPeer__ */
| bsd-3-clause |
nwjs/blink | LayoutTests/svg/custom/pattern-3-step-cycle-dynamic-4.html | 1655 | <!DOCTYPE html>
<script src="../../resources/run-after-display.js"></script>
<script>
if (window.testRunner) {
testRunner.dumpAsText();
testRunner.waitUntilDone();
window.onload = function() {
testRunner.displayAsyncThen(function() {
mutateTree();
testRunner.displayAsyncThen(function() {
testRunner.notifyDone();
});
});
};
} else {
window.onload = function() { setTimeout(mutateTree, 100); };
}
const svgNs = 'http://www.w3.org/2000/svg';
// Creates a 1x1 <pattern> element with id |patternId| containing a single
// 100x100 <rect> whose fill references pattern |refId| via url(#refId).
// The returned node is not attached to the document by this function.
function buildPattern(patternId, refId) {
  var pattern = document.createElementNS(svgNs, 'pattern');
  var rect = pattern.appendChild(document.createElementNS(svgNs, 'rect'));
  pattern.setAttribute('id', patternId);
  pattern.setAttribute('width', 1);
  pattern.setAttribute('height', 1);
  rect.setAttribute('width', 100);
  rect.setAttribute('height', 100);
  rect.setAttribute('fill', 'url(#' + refId + ')');
  return pattern;
}
// Mutates the SVG so that patterns p1 -> p2 -> p3 -> p1 form a three-step
// reference cycle; the test passes if processing this does not crash
// (stack overflow).
function mutateTree() {
  // Get reference to rect in pattern#p2 before inserting the pattern.
  var p2rect = document.getElementsByTagName('rect')[1];
  // Add a pattern#p3 and a reference to it from pattern#p2 to form a cycle.
  var defs = document.querySelector('defs');
  defs.appendChild(buildPattern('p3', 'p1'));
  p2rect.setAttribute('fill', 'url(#p3)');
}
</script>
<p>PASS if no crash (stack overflow).</p>
<svg width="100" height="100">
<rect width="100" height="100" fill="url(#p1)"/>
<defs>
<pattern id="p2" width="1" height="1">
<rect width="100" height="100"/>
</pattern>
<pattern id="p1" width="1" height="1">
<rect fill="url(#p2)" width="100" height="100"/>
</pattern>
</defs>
</svg>
| bsd-3-clause |
shaotuanchen/sunflower_exp | tools/source/gcc-4.2.4/libjava/classpath/tools/gnu/classpath/tools/appletviewer/Main.java | 9974 | /* Main.java -- a standalone viewer for Java applets
Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of GNU Classpath.
GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from
or based on this library. If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so. If you do not wish to do so, delete this
exception statement from your version. */
package gnu.classpath.tools.appletviewer;
import gnu.classpath.tools.getopt.ClasspathToolParser;
import gnu.classpath.tools.getopt.Option;
import gnu.classpath.tools.getopt.OptionException;
import gnu.classpath.tools.getopt.OptionGroup;
import gnu.classpath.tools.getopt.Parser;
import java.applet.Applet;
import java.awt.Dimension;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.ResourceBundle;
class Main
{
/**
* The localized strings are kept in a separate file.
*/
public static final ResourceBundle messages = ResourceBundle.getBundle
("gnu.classpath.tools.appletviewer.MessagesBundle");
private static HashMap classLoaderCache = new HashMap();
  /**
   * Returns a class loader for the given codebase and archive list.  A
   * fresh loader is deliberately created on every call (see the comment
   * below); the loader is also recorded in classLoaderCache keyed by
   * codebase.
   *
   * @param codebase the applet codebase URL
   * @param archives the archive (jar) list for the applet
   * @return a newly created AppletClassLoader
   */
  private static ClassLoader getClassLoader(URL codebase, ArrayList archives)
  {
    // Should load class loader each time. It is possible that there
    // are more than one applet to be loaded with different archives.
    AppletClassLoader loader = new AppletClassLoader(codebase, archives);
    classLoaderCache.put(codebase, loader);
    return loader;
  }
private static String code = null;
private static String codebase = null;
private static String archive = null;
private static List parameters = new ArrayList();
private static Dimension dimensions = new Dimension(-1, -1);
private static String pipeInName = null;
private static String pipeOutName = null;
private static boolean pluginMode = false;
private static Parser parser = null;
  /**
   * Instantiates the applet described by the given tag: builds a class
   * loader from the tag's codebase and archives and reflectively
   * constructs the class named by the tag's "code" attribute.  On any
   * failure the exception is printed and an ErrorApplet placeholder is
   * returned, so this method never returns null.
   *
   * @param tag the parsed applet tag
   * @return the instantiated applet, or an ErrorApplet on failure
   */
  static Applet createApplet(AppletTag tag)
  {
    Applet applet = null;
    try
      {
        ClassLoader loader = getClassLoader(tag.prependCodeBase(""),
                                            tag.getArchives());
        String code = tag.getCode();
        // Accept a "Foo.class" / "pkg/Foo.class" spelling by stripping the
        // extension and converting '/' separators to '.'.
        if (code.endsWith(".class"))
          code = code.substring(0, code.length() - 6).replace('/', '.');
        Class c = loader.loadClass(code);
        applet = (Applet) c.newInstance();
      }
    catch (Exception e)
      {
        e.printStackTrace();
      }
    if (applet == null)
      applet = new ErrorApplet("Error loading applet");
    return applet;
  }
protected static boolean verbose;
  /**
   * The main method starting the applet viewer.
   *
   * Builds the option parser (applet-tag, plugin, debugging and
   * compatibility option groups), parses the command line, then either
   * enters browser-plugin mode (--plugin) or starts a standalone viewer
   * from a --code option or a list of URLs.
   *
   * @param args the arguments given on the command line.
   *
   * @exception IOException if an error occurs.
   */
  public static void main(String[] args) throws IOException
  {
    parser = new ClasspathToolParser("appletviewer", true);
    parser.setHeader("usage: appletviewer [OPTION] -code CODE | URL...");
    // Options mirroring the attributes of an <applet> tag; each one just
    // records its argument in the corresponding static field.
    OptionGroup attributeGroup = new OptionGroup("Applet tag options");
    attributeGroup.add(new Option("code", Main.messages.getString
                                  ("gcjwebplugin.code_description"),
                                  "CODE")
      {
        public void parsed(String argument) throws OptionException
        {
          code = argument;
        }
      });
    attributeGroup.add(new Option("codebase", Main.messages.getString
                                  ("gcjwebplugin.codebase_description"),
                                  "CODEBASE")
      {
        public void parsed(String argument) throws OptionException
        {
          codebase = argument;
        }
      });
    attributeGroup.add(new Option("archive", Main.messages.getString
                                  ("gcjwebplugin.archive_description"),
                                  "ARCHIVE")
      {
        public void parsed(String argument) throws OptionException
        {
          archive = argument;
        }
      });
    attributeGroup.add(new Option("width", Main.messages.getString
                                  ("gcjwebplugin.width_description"),
                                  "WIDTH")
      {
        public void parsed(String argument) throws OptionException
        {
          dimensions.width = Integer.parseInt(argument);
        }
      });
    attributeGroup.add(new Option("height", Main.messages.getString
                                  ("gcjwebplugin.height_description"),
                                  "HEIGHT")
      {
        public void parsed(String argument) throws OptionException
        {
          dimensions.height = Integer.parseInt(argument);
        }
      });
    attributeGroup.add(new Option("param", Main.messages.getString
                                  ("gcjwebplugin.param_description"),
                                  "NAME,VALUE")
      {
        public void parsed(String argument) throws OptionException
        {
          parameters.add(argument);
        }
      });
    // --plugin INPUT,OUTPUT switches to browser-plugin mode, talking to the
    // browser over the two named pipes.
    OptionGroup pluginGroup = new OptionGroup("Plugin option");
    pluginGroup.add(new Option("plugin", Main.messages.getString
                               ("gcjwebplugin.plugin_description"),
                               "INPUT,OUTPUT")
      {
        public void parsed(String argument) throws OptionException
        {
          pluginMode = true;
          int comma = argument.indexOf(',');
          pipeInName = argument.substring(0, comma);
          pipeOutName = argument.substring(comma + 1);
        }
      });
    OptionGroup debuggingGroup = new OptionGroup("Debugging option");
    debuggingGroup.add(new Option("verbose", Main.messages.getString
                                  ("gcjwebplugin.verbose_description"),
                                  (String) null)
      {
        public void parsed(String argument) throws OptionException
        {
          verbose = true;
        }
      });
    // Accepted for compatibility with Sun's appletviewer but not acted upon.
    OptionGroup compatibilityGroup = new OptionGroup("Compatibility options");
    compatibilityGroup.add(new Option("debug", Main.messages.getString
                                      ("gcjwebplugin.debug_description"),
                                      (String) null)
      {
        public void parsed(String argument) throws OptionException
        {
          // Currently ignored.
        }
      });
    compatibilityGroup.add(new Option("encoding", Main.messages.getString
                                      ("gcjwebplugin.encoding_description"),
                                      "CHARSET")
      {
        public void parsed(String argument) throws OptionException
        {
          // FIXME: We should probably be using
          // java.nio.charset.CharsetDecoder to handle the encoding. What
          // is the status of Classpath's implementation?
        }
      });
    parser.add(attributeGroup);
    parser.add(pluginGroup);
    parser.add(debuggingGroup);
    parser.add(compatibilityGroup);
    String[] urls = parser.parse(args);
    // Print arguments.
    printArguments(args);
    args = urls;
    // Default applet size when --width/--height were not supplied:
    // 200 pixels high with a 1.6:1 aspect ratio.
    if (dimensions.height < 0)
      dimensions.height = 200;
    if (dimensions.width < 0)
      dimensions.width = (int) (1.6 * dimensions.height);
    //System.setSecurityManager(new AppletSecurityManager(pluginMode));
    if (pluginMode)
      {
        // Plugin mode: stream applet commands from/to the browser over the
        // pipes named on the command line.
        InputStream in;
        OutputStream out;
        in = new FileInputStream(pipeInName);
        out = new FileOutputStream(pipeOutName);
        PluginAppletViewer.start(in, out);
      }
    else
      {
        if (code == null)
          {
            // The --code option wasn't given and there are no URL
            // arguments so we have nothing to work with.
            if (args.length == 0)
              {
                System.err.println(Main.messages.getString("gcjwebplugin.no_input_files"));
                System.exit(1);
              }
            // Create a standalone appletviewer from a list of URLs.
            new StandaloneAppletViewer(args);
          }
        else
          {
            // Create a standalone appletviewer from the --code
            // option.
            new StandaloneAppletViewer(code, codebase, archive, parameters, dimensions);
          }
      }
  }
  /**
   * Prints the raw (unparsed) command-line arguments to stdout, one per
   * line, but only when the --verbose flag was given.
   *
   * @param args the raw command-line arguments.
   */
  static void printArguments(String[] args)
  {
    if (verbose)
      {
        System.out.println("raw arguments:");
        for (int i = 0; i < args.length; i++)
          System.out.println(" " + args[i]);
      }
  }
}
| bsd-3-clause |
js0701/chromium-crosswalk | sync/tools/testserver/run_sync_testserver.cc | 3925 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdint.h>
#include <stdio.h>
#include "base/at_exit.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/process/launch.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/test_timeouts.h"
#include "net/test/python_utils.h"
#include "sync/test/local_sync_test_server.h"
// Prints the command-line usage summary to stdout.
static void PrintUsage() {
  fputs("run_sync_testserver [--port=<port>] [--xmpp-port=<xmpp_port>]\n",
        stdout);
}
// Launches the chromiumsync_test.py or xmppserver_test.py scripts, which test
// the sync HTTP and XMPP server functionality respectively.
// Returns true if the script was launched; the script's own result is not
// collected here.
static bool RunSyncTest(
    const base::FilePath::StringType& sync_test_script_name) {
  // The test server object is only used to locate the python sources and set
  // up PYTHONPATH; it is never started.
  scoped_ptr<syncer::LocalSyncTestServer> test_server(
      new syncer::LocalSyncTestServer());
  if (!test_server->SetPythonPath()) {
    LOG(ERROR) << "Error trying to set python path. Exiting.";
    return false;
  }
  base::FilePath sync_test_script_path;
  if (!test_server->GetTestScriptPath(sync_test_script_name,
                                      &sync_test_script_path)) {
    LOG(ERROR) << "Error trying to get path for test script "
               << sync_test_script_name;
    return false;
  }
  base::CommandLine python_command(base::CommandLine::NO_PROGRAM);
  if (!GetPythonCommand(&python_command)) {
    LOG(ERROR) << "Could not get python runtime command.";
    return false;
  }
  python_command.AppendArgPath(sync_test_script_path);
  // Fire and forget: the launched process handle is not retained.
  if (!base::LaunchProcess(python_command, base::LaunchOptions()).IsValid()) {
    LOG(ERROR) << "Failed to launch test script " << sync_test_script_name;
    return false;
  }
  return true;
}
// Gets a port value from the switch with name |switch_name| and writes it to
// |port|. Returns true if a valid port was provided and false otherwise.
// |port| is left untouched on failure, so callers should pre-initialize it
// (both callers in main() initialize it to 0, meaning "pick any port").
static bool GetPortFromSwitch(const std::string& switch_name, uint16_t* port) {
  DCHECK(port != NULL) << "|port| is NULL";
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  // Previously this function returned true even when the switch was absent,
  // contradicting its documented contract; report failure instead.
  if (!command_line->HasSwitch(switch_name))
    return false;
  const std::string port_str = command_line->GetSwitchValueASCII(switch_name);
  int port_int = 0;
  if (!base::StringToInt(port_str, &port_int))
    return false;
  // Reject values that would silently wrap when narrowed to uint16_t.
  if (port_int < 0 || port_int > 65535)
    return false;
  *port = static_cast<uint16_t>(port_int);
  return true;
}
// Entry point: either runs one of the python self-test suites (--sync-test /
// --xmpp-test) or starts the local python sync test server and serves until
// interrupted.
int main(int argc, const char* argv[]) {
  base::AtExitManager at_exit_manager;
  base::MessageLoopForIO message_loop;
  // Process command line
  base::CommandLine::Init(argc, argv);
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  // Log to stderr and to sync_testserver.log in the current directory.
  logging::LoggingSettings settings;
  settings.logging_dest = logging::LOG_TO_ALL;
  settings.log_file = FILE_PATH_LITERAL("sync_testserver.log");
  if (!logging::InitLogging(settings)) {
    printf("Error: could not initialize logging. Exiting.\n");
    return -1;
  }
  TestTimeouts::Initialize();
  if (command_line->HasSwitch("help")) {
    PrintUsage();
    return 0;
  }
  // Self-test modes run a python test suite instead of serving.
  if (command_line->HasSwitch("sync-test")) {
    return RunSyncTest(FILE_PATH_LITERAL("chromiumsync_test.py")) ? 0 : -1;
  }
  if (command_line->HasSwitch("xmpp-test")) {
    return RunSyncTest(FILE_PATH_LITERAL("xmppserver_test.py")) ? 0 : -1;
  }
  // Port 0 means "let the server pick a free port".
  uint16_t port = 0;
  GetPortFromSwitch("port", &port);
  uint16_t xmpp_port = 0;
  GetPortFromSwitch("xmpp-port", &xmpp_port);
  scoped_ptr<syncer::LocalSyncTestServer> test_server(
      new syncer::LocalSyncTestServer(port, xmpp_port));
  if (!test_server->Start()) {
    printf("Error: failed to start python sync test server. Exiting.\n");
    return -1;
  }
  printf("Python sync test server running at %s (type ctrl+c to exit)\n",
         test_server->host_port_pair().ToString().c_str());
  // Serve until the process is interrupted.
  message_loop.Run();
  return 0;
}
| bsd-3-clause |
js0701/chromium-crosswalk | content/browser/plugin_loader_posix_unittest.cc | 13212 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/plugin_loader_posix.h"
#include <stddef.h>
#include <stdint.h>
#include "base/at_exit.h"
#include "base/bind.h"
#include "base/files/file_path.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/utf_string_conversions.h"
#include "content/browser/browser_thread_impl.h"
#include "content/common/plugin_list.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using base::ASCIIToUTF16;
namespace content {
// Mocks out the utility-process launch so plugin loading can be driven
// manually from the tests, and exposes PluginLoaderPosix internals.
class MockPluginLoaderPosix : public PluginLoaderPosix {
 public:
  MOCK_METHOD0(LoadPluginsInternal, void(void));
  // Number of queued GetPlugins() callbacks that have not yet been run.
  size_t number_of_pending_callbacks() {
    return callbacks_.size();
  }
  // Mutable access to the canonical list of plugin paths to load.
  std::vector<base::FilePath>* canonical_list() {
    return &canonical_list_;
  }
  // Index into the canonical list of the next plugin to be loaded.
  size_t next_load_index() {
    return next_load_index_;
  }
  // Plugins that have been successfully loaded so far.
  const std::vector<WebPluginInfo>& loaded_plugins() {
    return loaded_plugins_;
  }
  // Plugins registered as internal; these may legitimately fail to load.
  std::vector<WebPluginInfo>* internal_plugins() {
    return &internal_plugins_;
  }
  // Invokes the real (non-mocked) LoadPluginsInternal() implementation.
  void RealLoadPluginsInternal() {
    PluginLoaderPosix::LoadPluginsInternal();
  }
  bool LaunchUtilityProcess() override {
    // This method always does nothing and returns false. The actual
    // implementation of this method launches another process, which is not
    // very unit_test friendly.
    return false;
  }
  // Test hooks forwarding to the loader's IPC message handlers.
  void TestOnPluginLoaded(uint32_t index, const WebPluginInfo& plugin) {
    OnPluginLoaded(index, plugin);
  }
  void TestOnPluginLoadFailed(uint32_t index, const base::FilePath& path) {
    OnPluginLoadFailed(index, path);
  }
 protected:
  virtual ~MockPluginLoaderPosix() {}
};
// GetPlugins() callback used by the tests: bumps |run_count| on every
// invocation and ignores the plugin list argument.
void VerifyCallback(int* run_count, const std::vector<WebPluginInfo>&) {
  *run_count += 1;
}
// Fixture providing a mock loader, three canned plugins, and the FILE/IO
// browser threads PluginLoaderPosix expects, all backed by one message loop.
class PluginLoaderPosixTest : public testing::Test {
 public:
  PluginLoaderPosixTest()
      : plugin1_(ASCIIToUTF16("plugin1"), base::FilePath("/tmp/one.plugin"),
                 ASCIIToUTF16("1.0"), base::string16()),
        plugin2_(ASCIIToUTF16("plugin2"), base::FilePath("/tmp/two.plugin"),
                 ASCIIToUTF16("2.0"), base::string16()),
        plugin3_(ASCIIToUTF16("plugin3"), base::FilePath("/tmp/three.plugin"),
                 ASCIIToUTF16("3.0"), base::string16()),
        file_thread_(BrowserThread::FILE, &message_loop_),
        io_thread_(BrowserThread::IO, &message_loop_),
        plugin_loader_(new MockPluginLoaderPosix) {
  }
  void SetUp() override { PluginServiceImpl::GetInstance()->Init(); }
  base::MessageLoop* message_loop() { return &message_loop_; }
  MockPluginLoaderPosix* plugin_loader() { return plugin_loader_.get(); }
  // Seeds the loader's canonical list with the three test plugin paths.
  void AddThreePlugins() {
    plugin_loader_->canonical_list()->clear();
    plugin_loader_->canonical_list()->push_back(plugin1_.path);
    plugin_loader_->canonical_list()->push_back(plugin2_.path);
    plugin_loader_->canonical_list()->push_back(plugin3_.path);
  }
  // Data used for testing.
  WebPluginInfo plugin1_;
  WebPluginInfo plugin2_;
  WebPluginInfo plugin3_;
 private:
  // Destroys PluginService and PluginList.
  base::ShadowingAtExitManager at_exit_manager_;
  base::MessageLoopForIO message_loop_;
  BrowserThreadImpl file_thread_;
  BrowserThreadImpl io_thread_;
  scoped_refptr<MockPluginLoaderPosix> plugin_loader_;
};
// Two GetPlugins() requests issued back-to-back should share a single load
// pass, and both callbacks should fire once the lone plugin has loaded.
TEST_F(PluginLoaderPosixTest, QueueRequests) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->canonical_list()->clear();
  plugin_loader()->canonical_list()->push_back(plugin1_.path);
  plugin_loader()->TestOnPluginLoaded(0, plugin1_);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(2, did_callback);
}
// A request queued after the plugin list was invalidated must trigger a new
// load pass and only be fulfilled by that second pass.
TEST_F(PluginLoaderPosixTest, QueueRequestsAndInvalidate) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  ::testing::Mock::VerifyAndClearExpectations(plugin_loader());
  // Invalidate the plugin list, then queue up another request.
  PluginList::Singleton()->RefreshPlugins();
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  plugin_loader()->canonical_list()->clear();
  plugin_loader()->canonical_list()->push_back(plugin1_.path);
  plugin_loader()->TestOnPluginLoaded(0, plugin1_);
  message_loop()->RunUntilIdle();
  // Only the first request should have been fulfilled.
  EXPECT_EQ(1, did_callback);
  plugin_loader()->canonical_list()->clear();
  plugin_loader()->canonical_list()->push_back(plugin1_.path);
  plugin_loader()->TestOnPluginLoaded(0, plugin1_);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(2, did_callback);
}
// Loads three plugins one by one; the callback must only fire after the
// last plugin of the canonical list has been reported loaded.
TEST_F(PluginLoaderPosixTest, ThreeSuccessfulLoads) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  message_loop()->RunUntilIdle();
  AddThreePlugins();
  EXPECT_EQ(0u, plugin_loader()->next_load_index());
  const std::vector<WebPluginInfo>& plugins(plugin_loader()->loaded_plugins());
  plugin_loader()->TestOnPluginLoaded(0, plugin1_);
  EXPECT_EQ(1u, plugin_loader()->next_load_index());
  EXPECT_EQ(1u, plugins.size());
  EXPECT_EQ(plugin1_.name, plugins[0].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->TestOnPluginLoaded(1, plugin2_);
  EXPECT_EQ(2u, plugin_loader()->next_load_index());
  EXPECT_EQ(2u, plugins.size());
  EXPECT_EQ(plugin2_.name, plugins[1].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->TestOnPluginLoaded(2, plugin3_);
  EXPECT_EQ(3u, plugins.size());
  EXPECT_EQ(plugin3_.name, plugins[2].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(1, did_callback);
}
// Same as ThreeSuccessfulLoads, but a process crash arriving after all
// plugins finished loading must be tolerated (it triggers one extra
// LoadPluginsInternal(), hence Times(2)).
TEST_F(PluginLoaderPosixTest, ThreeSuccessfulLoadsThenCrash) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(2);
  message_loop()->RunUntilIdle();
  AddThreePlugins();
  EXPECT_EQ(0u, plugin_loader()->next_load_index());
  const std::vector<WebPluginInfo>& plugins(plugin_loader()->loaded_plugins());
  plugin_loader()->TestOnPluginLoaded(0, plugin1_);
  EXPECT_EQ(1u, plugin_loader()->next_load_index());
  EXPECT_EQ(1u, plugins.size());
  EXPECT_EQ(plugin1_.name, plugins[0].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->TestOnPluginLoaded(1, plugin2_);
  EXPECT_EQ(2u, plugin_loader()->next_load_index());
  EXPECT_EQ(2u, plugins.size());
  EXPECT_EQ(plugin2_.name, plugins[1].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->TestOnPluginLoaded(2, plugin3_);
  EXPECT_EQ(3u, plugins.size());
  EXPECT_EQ(plugin3_.name, plugins[2].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(1, did_callback);
  plugin_loader()->OnProcessCrashed(42);
}
// Plugins that fail to load are skipped: of three plugins, only the one that
// loaded successfully ends up in the result, and the callback still fires.
TEST_F(PluginLoaderPosixTest, TwoFailures) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  message_loop()->RunUntilIdle();
  AddThreePlugins();
  EXPECT_EQ(0u, plugin_loader()->next_load_index());
  const std::vector<WebPluginInfo>& plugins(plugin_loader()->loaded_plugins());
  plugin_loader()->TestOnPluginLoadFailed(0, plugin1_.path);
  EXPECT_EQ(1u, plugin_loader()->next_load_index());
  EXPECT_EQ(0u, plugins.size());
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->TestOnPluginLoaded(1, plugin2_);
  EXPECT_EQ(2u, plugin_loader()->next_load_index());
  EXPECT_EQ(1u, plugins.size());
  EXPECT_EQ(plugin2_.name, plugins[0].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->TestOnPluginLoadFailed(2, plugin3_.path);
  EXPECT_EQ(1u, plugins.size());
  message_loop()->RunUntilIdle();
  EXPECT_EQ(1, did_callback);
}
// A crash mid-load restarts loading with the remaining plugins: the plugin
// being loaded at crash time (plugin2) is dropped from the canonical list.
TEST_F(PluginLoaderPosixTest, CrashedProcess) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  message_loop()->RunUntilIdle();
  AddThreePlugins();
  EXPECT_EQ(0u, plugin_loader()->next_load_index());
  const std::vector<WebPluginInfo>& plugins(plugin_loader()->loaded_plugins());
  plugin_loader()->TestOnPluginLoaded(0, plugin1_);
  EXPECT_EQ(1u, plugin_loader()->next_load_index());
  EXPECT_EQ(1u, plugins.size());
  EXPECT_EQ(plugin1_.name, plugins[0].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  plugin_loader()->OnProcessCrashed(42);
  EXPECT_EQ(1u, plugin_loader()->canonical_list()->size());
  EXPECT_EQ(0u, plugin_loader()->next_load_index());
  EXPECT_EQ(plugin3_.path.value(),
            plugin_loader()->canonical_list()->at(0).value());
}
// An internal plugin that fails to load in the utility process is recovered
// from the internal-plugins registry and still included in the results.
TEST_F(PluginLoaderPosixTest, InternalPlugin) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  message_loop()->RunUntilIdle();
  plugin2_.path = base::FilePath("/internal/plugin.plugin");
  AddThreePlugins();
  plugin_loader()->internal_plugins()->clear();
  plugin_loader()->internal_plugins()->push_back(plugin2_);
  EXPECT_EQ(0u, plugin_loader()->next_load_index());
  const std::vector<WebPluginInfo>& plugins(plugin_loader()->loaded_plugins());
  plugin_loader()->TestOnPluginLoaded(0, plugin1_);
  EXPECT_EQ(1u, plugin_loader()->next_load_index());
  EXPECT_EQ(1u, plugins.size());
  EXPECT_EQ(plugin1_.name, plugins[0].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  // Internal plugins can fail to load if they're built-in with manual
  // entrypoint functions.
  plugin_loader()->TestOnPluginLoadFailed(1, plugin2_.path);
  EXPECT_EQ(2u, plugin_loader()->next_load_index());
  EXPECT_EQ(2u, plugins.size());
  EXPECT_EQ(plugin2_.name, plugins[1].name);
  EXPECT_EQ(0u, plugin_loader()->internal_plugins()->size());
  message_loop()->RunUntilIdle();
  EXPECT_EQ(0, did_callback);
  plugin_loader()->TestOnPluginLoaded(2, plugin3_);
  EXPECT_EQ(3u, plugins.size());
  EXPECT_EQ(plugin3_.name, plugins[2].name);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(1, did_callback);
}
// If the utility process crashes on every plugin, loading must still
// terminate: the callback fires once with an empty plugin list.
TEST_F(PluginLoaderPosixTest, AllCrashed) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  plugin_loader()->GetPlugins(callback);
  // Spin the loop so that the canonical list of plugins can be set.
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(1);
  message_loop()->RunUntilIdle();
  AddThreePlugins();
  EXPECT_EQ(0u, plugin_loader()->next_load_index());
  // Mock the first two calls like normal.
  testing::Expectation first =
      EXPECT_CALL(*plugin_loader(), LoadPluginsInternal()).Times(2);
  // On the last call, go through the default impl.
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal())
      .After(first)
      .WillOnce(
          testing::Invoke(plugin_loader(),
                          &MockPluginLoaderPosix::RealLoadPluginsInternal));
  plugin_loader()->OnProcessCrashed(42);
  plugin_loader()->OnProcessCrashed(42);
  plugin_loader()->OnProcessCrashed(42);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(1, did_callback);
  EXPECT_EQ(0u, plugin_loader()->loaded_plugins().size());
}
// If launching the utility process fails outright (the mock's
// LaunchUtilityProcess() returns false), the callback must still fire once
// with an empty plugin list rather than hanging forever.
TEST_F(PluginLoaderPosixTest, PluginLaunchFailed) {
  int did_callback = 0;
  PluginService::GetPluginsCallback callback =
      base::Bind(&VerifyCallback, base::Unretained(&did_callback));
  EXPECT_CALL(*plugin_loader(), LoadPluginsInternal())
      .WillOnce(testing::Invoke(
          plugin_loader(), &MockPluginLoaderPosix::RealLoadPluginsInternal));
  plugin_loader()->GetPlugins(callback);
  message_loop()->RunUntilIdle();
  EXPECT_EQ(1, did_callback);
  EXPECT_EQ(0u, plugin_loader()->loaded_plugins().size());
  // TODO(erikchen): This is a genuine leak that should be fixed.
  // https://code.google.com/p/chromium/issues/detail?id=431906
  testing::Mock::AllowLeak(plugin_loader());
}
} // namespace content
| bsd-3-clause |
markYoungH/chromium.src | chrome/browser/resources/ntp4/guest_tab.html | 973 | <!doctype html>
<html i18n-values="dir:textdirection">
<head>
<meta charset="utf-8">
<title i18n-content="title"></title>
<link rel="stylesheet" href="chrome://resources/css/text_defaults.css">
<link rel="stylesheet" href="incognito_and_guest_tab.css">
<script>
// Until themes can clear the cache, force-reload the theme stylesheet.
document.write('<link id="guestthemecss" rel="stylesheet" ' +
'href="chrome://theme/css/incognito_new_tab_theme.css?' +
Date.now() + '">');
</script>
</head>
<body>
<div class="content">
<h1 i18n-content="guestTabHeading"></h1>
<p>
<span i18n-content="guestTabDescription"></span>
<a i18n-content="learnMore" i18n-values=".href:learnMoreLink"></a>
</p>
</div>
</body>
<script src="chrome://resources/js/cr.js"></script>
<script>
// Re-points the theme <link> at a cache-busted URL so a theme change takes
// effect immediately.
function themeChanged() {
  var stylesheet = document.getElementById('guestthemecss');
  stylesheet.href =
      'chrome://theme/css/incognito_new_tab_theme.css?' + Date.now();
}
</script>
</html>
| bsd-3-clause |
ondra-novak/blink | LayoutTests/inspector/elements/event-listeners-about-blank.html | 1114 | <html>
<head>
<script src="../../http/tests/inspector/inspector-test.js"></script>
<script src="../../http/tests/inspector/elements-test.js"></script>
<script>
// Registers a capture-phase "click" listener on the iframe's body and
// appends a <div id="div-in-iframe"> carrying a "hover" listener, so the
// inspector test has listeners inside an about:blank frame to inspect.
function setupEventListeners()
{
    var listener = function() {};
    var frameDoc = document.getElementById("myframe").contentDocument;
    frameDoc.body.addEventListener("click", listener, true);
    var child = frameDoc.createElement("div");
    child.id = "div-in-iframe";
    child.addEventListener("hover", listener, true);
    frameDoc.body.appendChild(child);
}
// Inspector-side test driver: sets up listeners in the page, selects the
// div inside the iframe, then dumps its event listeners.
function test()
{
    // Show listeners from all contexts, not just the main frame's.
    WebInspector.settings.eventListenersFilter.set("all");
    InspectorTest.evaluateInPage("setupEventListeners()", step1);
    function step1()
    {
        InspectorTest.selectNodeWithId("div-in-iframe", step2);
    }
    function step2()
    {
        InspectorTest.expandAndDumpSelectedElementEventListeners(InspectorTest.completeTest);
    }
}
</script>
</head>
<body onload="runTest()">
<p>
Tests event listeners output in the Elements sidebar panel when the listeners are added on an element in about:blank page.
</p>
<iframe id="myframe"></iframe>
</body>
</html>
| bsd-3-clause |
mkrzewic/AliRoot | TPC/scripts/OCDBscan/makeCalibTree.sh | 675 | #!/bin/bash
# Aruments
# 1 - run list
# 2 - start run
# 3 - end run
runList=$1
startRun=$2
endRun=$3
echo runList=$runList
echo startRun=$startRun
echo endRun=$endRun
#
workdir=${GUI_OUTDIR}/tmp/tmp${startRun}-${endRun}
backupdir=`pwd`/
mkdirhier $workdir
cp $runList $workdir
cd $workdir
source guiEnv.sh
source $ALICE_ROOT/TPC/scripts/halloWorld.sh
#
aliroot -q -b $SCRIPTDIR/ConfigOCDB.C\($2\) $SCRIPTDIR/CalibEnv.C+\(\"$runList\",$startRun,$endRun\)
echo End of job:
echo pwd=`pwd`
echo ls=
ls -alrt
echo cp dcsTime.root $GUI_OUTDIR/time/calibTreeTime_$startRun_$endRun.root
cp dcsTime.root $GUI_OUTDIR/time/calibTreeTime_$startRun_$endRun.root
cd $backupdir
| bsd-3-clause |
timopulkkinen/BubbleFish | media/audio/scoped_loop_observer.cc | 1340 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/audio/scoped_loop_observer.h"
#include "base/bind.h"
#include "base/synchronization/waitable_event.h"
namespace media {
// Registers |this| as a destruction observer of |loop|'s message loop,
// blocking until the registration has happened on the loop's thread.
ScopedLoopObserver::ScopedLoopObserver(
    const scoped_refptr<base::MessageLoopProxy>& loop)
    : loop_(loop) {
  ObserveLoopDestruction(true, NULL);
}
// Symmetric to the constructor: unregisters the destruction observer.
ScopedLoopObserver::~ScopedLoopObserver() {
  ObserveLoopDestruction(false, NULL);
}
// Adds (|enable| == true) or removes (false) |this| as a destruction
// observer of |loop_|. If called from another thread, posts itself to the
// loop's thread and blocks on a local event until the work has run there.
void ScopedLoopObserver::ObserveLoopDestruction(bool enable,
                                                base::WaitableEvent* done) {
  // Note: |done| may be NULL.
  if (loop_->BelongsToCurrentThread()) {
    MessageLoop* loop = MessageLoop::current();
    if (enable) {
      loop->AddDestructionObserver(this);
    } else {
      loop->RemoveDestructionObserver(this);
    }
  } else {
    base::WaitableEvent event(false, false);
    if (loop_->PostTask(FROM_HERE,
                        base::Bind(&ScopedLoopObserver::ObserveLoopDestruction,
                                   base::Unretained(this), enable, &event))) {
      // Block until the posted task has completed on the loop's thread.
      event.Wait();
    } else {
      // The message loop's thread has already terminated, so no need to wait.
    }
  }
  if (done)
    done->Signal();
}
} // namespace media.
| bsd-3-clause |
jcu-eresearch/Edgar | webapplication/lib/Cake/Controller/Component/Acl/PhpAcl.php | 13365 | <?php
/**
* PHP configuration based AclInterface implementation
*
* PHP 5
*
* CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
* Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
*
* Licensed under The MIT License
* Redistributions of files must retain the above copyright notice.
*
* @copyright Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
* @link http://cakephp.org CakePHP(tm) Project
* @package Cake.Controller.Component.Acl
* @since CakePHP(tm) v 2.1
* @license MIT License (http://www.opensource.org/licenses/mit-license.php)
*/
/**
 * PhpAcl implements an access control system using a plain PHP configuration file.
 * An example file can be found in app/Config/acl.php
 *
 * @package Cake.Controller.Component.Acl
 */
class PhpAcl extends Object implements AclInterface {
	const DENY = false;
	const ALLOW = true;
/**
 * Options:
 * - policy: determines behavior of the check method. Deny policy needs explicit allow rules, allow policy needs explicit deny rules
 * - config: absolute path to config file that contains the acl rules (@see app/Config/acl.php)
 *
 * @var array
 */
	public $options = array();
/**
 * Aro Object
 *
 * @var PhpAro
 */
	public $Aro = null;
/**
 * Aco Object
 *
 * @var PhpAco
 */
	public $Aco = null;
/**
 * Constructor
 *
 * Sets a few default settings up.
 */
	public function __construct() {
		$this->options = array(
			'policy' => self::DENY,
			'config' => APP . 'Config' . DS . 'acl.php',
		);
	}
/**
 * Initialize method
 *
 * Reads the ACL config file and builds the ARO/ACO structures from it.
 *
 * @param AclComponent $Component Component instance
 * @return void
 */
	public function initialize(Component $Component) {
		if (!empty($Component->settings['adapter'])) {
			$this->options = array_merge($this->options, $Component->settings['adapter']);
		}
		App::uses('PhpReader', 'Configure');
		$Reader = new PhpReader(dirname($this->options['config']) . DS);
		$config = $Reader->read(basename($this->options['config']));
		$this->build($config);
		$Component->Aco = $this->Aco;
		$Component->Aro = $this->Aro;
	}
/**
 * build and setup internal ACL representation
 *
 * @param array $config configuration array, see docs
 * @return void
 * @throws AclException When required keys are missing.
 */
	public function build(array $config) {
		if (empty($config['roles'])) {
			throw new AclException(__d('cake_dev','"roles" section not found in configuration.'));
		}
		if (empty($config['rules']['allow']) && empty($config['rules']['deny'])) {
			throw new AclException(__d('cake_dev','Neither "allow" nor "deny" rules were provided in configuration.'));
		}
		$rules['allow'] = !empty($config['rules']['allow']) ? $config['rules']['allow'] : array();
		$rules['deny'] = !empty($config['rules']['deny']) ? $config['rules']['deny'] : array();
		$roles = !empty($config['roles']) ? $config['roles'] : array();
		$map = !empty($config['map']) ? $config['map'] : array();
		$alias = !empty($config['alias']) ? $config['alias'] : array();
		$this->Aro = new PhpAro($roles, $map, $alias);
		$this->Aco = new PhpAco($rules);
	}
/**
 * Grant ARO access to ACO.
 *
 * Note: this only updates the in-memory ACO tree for the current request;
 * the change is not persisted back to the configuration file.
 *
 * @param string $aro ARO The requesting object identifier.
 * @param string $aco ACO The controlled object identifier.
 * @param string $action Action (defaults to *)
 * @return boolean Success
 */
	public function allow($aro, $aco, $action = "*") {
		return $this->Aco->access($this->Aro->resolve($aro), $aco, $action, 'allow');
	}
/**
 * deny ARO access to ACO
 *
 * Like allow(), only affects the in-memory tree for the current request.
 *
 * @param string $aro ARO The requesting object identifier.
 * @param string $aco ACO The controlled object identifier.
 * @param string $action Action (defaults to *)
 * @return boolean Success
 */
	public function deny($aro, $aco, $action = "*") {
		return $this->Aco->access($this->Aro->resolve($aro), $aco, $action, 'deny');
	}
/**
 * No op method
 *
 * @param string $aro ARO The requesting object identifier.
 * @param string $aco ACO The controlled object identifier.
 * @param string $action Action (defaults to *)
 * @return boolean Success
 */
	public function inherit($aro, $aco, $action = "*") {
		return false;
	}
/**
 * Main ACL check function. Checks to see if the ARO (access request object) has access to the
 * ACO (access control object).
 *
 * @param string $aro ARO
 * @param string $aco ACO
 * @param string $action Action
 * @return boolean true if access is granted, false otherwise
 */
	public function check($aro, $aco, $action = "*") {
		// Start from the configured default policy.
		$allow = $this->options['policy'];
		$prioritizedAros = $this->Aro->roles($aro);
		if ($action && $action != "*") {
			$aco .= '/' . $action;
		}
		$path = $this->Aco->path($aco);
		if (empty($path)) {
			return $allow;
		}
		// Fold rules in root-to-leaf order; at each node allows are applied
		// before denies, so a matching deny wins over an allow on that node.
		foreach ($path as $depth => $node) {
			foreach ($prioritizedAros as $aros) {
				if (!empty($node['allow'])) {
					$allow = $allow || count(array_intersect($node['allow'], $aros)) > 0;
				}
				if (!empty($node['deny'])) {
					$allow = $allow && count(array_intersect($node['deny'], $aros)) == 0;
				}
			}
		}
		return $allow;
	}
}
/**
 * Access Control Object
 *
 * Stores ACO rules as a tree: each node may carry 'allow' and 'deny' ARO
 * lists plus a 'children' array keyed by path segment. Path segments may use
 * the '*' wildcard (see self::$modifiers).
 */
class PhpAco {

/**
 * holds internal ACO representation
 *
 * @var array
 */
	protected $_tree = array();

/**
 * map modifiers for ACO paths to their respective PCRE pattern
 *
 * @var array
 */
	public static $modifiers = array(
		'*' => '.*',
	);

/**
 * Constructor
 *
 * @param array $rules Array with optional 'allow' and 'deny' keys, each
 *   mapping ACO paths to ARO lists (arrays or comma separated strings).
 */
	public function __construct(array $rules = array()) {
		foreach (array('allow', 'deny') as $type) {
			if (empty($rules[$type])) {
				$rules[$type] = array();
			}
		}

		$this->build($rules['allow'], $rules['deny']);
	}

/**
 * return path to the requested ACO with allow and deny rules attached on each level
 *
 * @param string $aco ACO string (e.g. 'controllers/posts/index')
 * @return array List indexed by depth; each entry may carry 'allow'/'deny' ARO lists.
 */
	public function path($aco) {
		$aco = $this->resolve($aco);
		$path = array();
		$level = 0;
		$root = $this->_tree;
		$stack = array(array($root, 0));

		while (!empty($stack)) {
			list($root, $level) = array_pop($stack);

			if (empty($path[$level])) {
				$path[$level] = array();
			}

			foreach ($root as $node => $elements) {
				// Expand wildcards ('*') in the stored node into a PCRE pattern.
				$pattern = '/^' . str_replace(array_keys(self::$modifiers), array_values(self::$modifiers), $node) . '$/';

				if ($node == $aco[$level] || preg_match($pattern, $aco[$level])) {
					// merge allow/denies with $path of current level
					foreach (array('allow', 'deny') as $policy) {
						if (!empty($elements[$policy])) {
							if (empty($path[$level][$policy])) {
								$path[$level][$policy] = array();
							}
							$path[$level][$policy] = array_merge($path[$level][$policy], $elements[$policy]);
						}
					}

					// traverse
					if (!empty($elements['children']) && isset($aco[$level + 1])) {
						array_push($stack, array($elements['children'], $level + 1));
					}
				}
			}
		}

		return $path;
	}

/**
 * allow/deny ARO access to ACO
 *
 * @param mixed $aro ARO identifier(s) to attach to the node.
 * @param string $aco ACO path the rule applies to.
 * @param string $action Unused by this backend.
 * @param string $type Rule type, 'allow' or 'deny'.
 * @return void
 */
	public function access($aro, $aco, $action, $type = 'deny') {
		$aco = $this->resolve($aco);
		$depth = count($aco);
		$root = $this->_tree;
		$tree = &$root;

		foreach ($aco as $i => $node) {
			if (!isset($tree[$node])) {
				$tree[$node] = array(
					'children' => array(),
				);
			}

			if ($i < $depth - 1) {
				$tree = &$tree[$node]['children'];
			} else {
				// Leaf segment: prepend the new ARO(s) to the rule list.
				if (empty($tree[$node][$type])) {
					$tree[$node][$type] = array();
				}

				$tree[$node][$type] = array_merge(is_array($aro) ? $aro : array($aro), $tree[$node][$type]);
			}
		}

		$this->_tree = &$root;
	}

/**
 * resolve given ACO string to a path
 *
 * @param string $aco ACO string
 * @return array path
 */
	public function resolve($aco) {
		if (is_array($aco)) {
			return array_map('strtolower', $aco);
		}

		// strip multiple occurrences of '/'
		$aco = preg_replace('#/+#', '/', $aco);
		// make case insensitive
		$aco = ltrim(strtolower($aco), '/');
		return array_filter(array_map('trim', explode('/', $aco)));
	}

/**
 * build a tree representation from the given allow/deny informations for ACO paths
 *
 * @param array $allow ACO allow rules
 * @param array $deny ACO deny rules
 * @return void
 */
	public function build(array $allow, array $deny = array()) {
		// Note: the previous version declared unused $stack/$tree/$root
		// locals here; they were dead code and have been removed.
		$this->_tree = array();

		foreach ($allow as $dotPath => $aros) {
			if (is_string($aros)) {
				$aros = array_map('trim', explode(',', $aros));
			}
			$this->access($aros, $dotPath, null, 'allow');
		}

		foreach ($deny as $dotPath => $aros) {
			if (is_string($aros)) {
				$aros = array_map('trim', explode(',', $aros));
			}
			$this->access($aros, $dotPath, null, 'deny');
		}
	}

}
/**
* Access Request Object
*
*/
class PhpAro {
/**
* role to resolve to when a provided ARO is not listed in
* the internal tree
*
* @var string
*/
const DEFAULT_ROLE = 'Role/default';
/**
* map external identifiers. E.g. if
*
* array('User' => array('username' => 'jeff', 'role' => 'editor'))
*
* is passed as an ARO to one of the methods of AclComponent, PhpAcl
* will check if it can be resolved to an User or a Role defined in the
* configuration file.
*
* @var array
* @see app/Config/acl.php
*/
public $map = array(
'User' => 'User/username',
'Role' => 'User/role',
);
/**
* aliases to map
*
* @var array
*/
public $aliases = array();
/**
* internal ARO representation
*
* @var array
*/
protected $_tree = array();
/**
 * Constructor
 *
 * @param array $aro ARO role definitions (role => inherited roles).
 * @param array $map External identifier mappings; replaces the default $map when non-empty.
 * @param array $aliases ARO aliases (e.g. 'Role/1' => 'Role/admin').
 */
	public function __construct(array $aro = array(), array $map = array(), array $aliases = array()) {
		if (!empty($map)) {
			$this->map = $map;
		}
		$this->aliases = $aliases;
		$this->build($aro);
	}
/**
 * From the perspective of the given ARO, walk down the tree and
 * collect all inherited AROs levelwise such that AROs from different
 * branches with equal distance to the requested ARO will be collected at the same
 * index. The resulting array will contain a prioritized list of (list of) roles ordered from
 * the most distant AROs to the requested one itself.
 *
 * @param mixed $aro An ARO identifier
 * @return array prioritized AROs
 */
	public function roles($aro) {
		$aros = array();
		$aro = $this->resolve($aro);
		// Walk the inheritance graph upwards, keyed by distance ($depth)
		// from the requested ARO.
		$stack = array(array($aro, 0));

		while (!empty($stack)) {
			list($element, $depth) = array_pop($stack);
			$aros[$depth][] = $element;

			foreach ($this->_tree as $node => $children) {
				if (in_array($element, $children)) {
					array_push($stack, array($node, $depth + 1));
				}
			}
		}
		// Most distant (least specific) roles come first.
		return array_reverse($aros);
	}
/**
* resolve an ARO identifier to an internal ARO string using
* the internal mapping information.
*
* @param mixed $aro ARO identifier (User.jeff, array('User' => ...), etc)
* @return string internal aro string (e.g. User/jeff, Role/default)
*/
public function resolve($aro) {
foreach ($this->map as $aroGroup => $map) {
list ($model, $field) = explode('/', $map, 2);
$mapped = '';
if (is_array($aro)) {
if (isset($aro['model']) && isset($aro['foreign_key']) && $aro['model'] == $aroGroup) {
$mapped = $aroGroup . '/' . $aro['foreign_key'];
} elseif (isset($aro[$model][$field])) {
$mapped = $aroGroup . '/' . $aro[$model][$field];
} elseif (isset($aro[$field])) {
$mapped = $aroGroup . '/' . $aro[$field];
}
} elseif (is_string($aro)) {
$aro = ltrim($aro, '/');
if (strpos($aro, '/') === false) {
$mapped = $aroGroup . '/' . $aro;
} else {
list($aroModel, $aroValue) = explode('/', $aro, 2);
$aroModel = Inflector::camelize($aroModel);
if ($aroModel == $model || $aroModel == $aroGroup) {
$mapped = $aroGroup . '/' . $aroValue;
}
}
}
if (isset($this->_tree[$mapped])) {
return $mapped;
}
// is there a matching alias defined (e.g. Role/1 => Role/admin)?
if (!empty($this->aliases[$mapped])) {
return $this->aliases[$mapped];
}
}
return self::DEFAULT_ROLE;
}
/**
* adds a new ARO to the tree
*
* @param array $aro one or more ARO records
* @return void
*/
public function addRole(array $aro) {
foreach ($aro as $role => $inheritedRoles) {
if (!isset($this->_tree[$role])) {
$this->_tree[$role] = array();
}
if (!empty($inheritedRoles)) {
if (is_string($inheritedRoles)) {
$inheritedRoles = array_map('trim', explode(',', $inheritedRoles));
}
foreach ($inheritedRoles as $dependency) {
// detect cycles
$roles = $this->roles($dependency);
if (in_array($role, Set::flatten($roles))) {
$path = '';
foreach ($roles as $roleDependencies) {
$path .= implode('|', (array)$roleDependencies) . ' -> ';
}
trigger_error(__d('cake_dev', 'cycle detected when inheriting %s from %s. Path: %s', $role, $dependency, $path . $role));
continue;
}
if (!isset($this->_tree[$dependency])) {
$this->_tree[$dependency] = array();
}
$this->_tree[$dependency][] = $role;
}
}
}
}
/**
* adds one or more aliases to the internal map. Overwrites existing entries.
*
* @param array $alias alias from => to (e.g. Role/13 -> Role/editor)
* @return void
*/
public function addAlias(array $alias) {
$this->aliases = array_merge($this->aliases, $alias);
}
/**
* build an ARO tree structure for internal processing
*
* @param array $aros array of AROs as key and their inherited AROs as values
* @return void
*/
public function build(array $aros) {
$this->_tree = array();
$this->addRole($aros);
}
}
| bsd-3-clause |
js0701/chromium-crosswalk | content/public/test/test_download_request_handler.h | 12805 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_PUBLIC_TEST_TEST_DOWNLOAD_REQUEST_HANDLER_H_
#define CONTENT_PUBLIC_TEST_TEST_DOWNLOAD_REQUEST_HANDLER_H_
#include <stdint.h>

#include <queue>
#include <string>
#include <vector>

#include "base/callback_forward.h"
#include "base/files/file.h"
#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/non_thread_safe.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request_job.h"
#include "url/gurl.h"
namespace content {
// A request handler that can be used to mock the behavior of a URLRequestJob
// for a download.
//
// Testing of download interruption scenarios typically involve simulating
// errors that occur:
// 1. On the client, prior to the request being sent out,
// 2. On the network, between the client and the server,
// 3. On the server,
// 4. Back on the client, while writing the response to disk,
// 5. On the client, after the response has been written to disk.
//
// This test class is meant to help test failures in #2 and #3 above. The test
// implementation depends on content::BrowserThread and assumes that the
// thread identified by BrowserThread::IO is the network task runner thread.
//
// TestDownloadRequestHandler can be used on any thread as long as it is used
// and destroyed on the same thread it was constructed on.
//
// To use the test request handler:
//
// // Define the request handler. Note that initialization of the
// // TestDownloadRequestHandler object immediately registers it as well and is
// // a blocking operation.
// TestDownloadRequestHandler request_handler;
//
// // Set up parameters for the partial request handler.
// TestDownloadRequestHandler::Parameters parameters;
//
// // Inject an error at offset 100.
// parameters.injected_errors.push(TestDownloadRequestHandler::InjectedError(
// 100, net::ERR_CONNECTION_RESET));
//
// // Start serving.
// request_handler.StartServing(parameters);
//
// At this point, you can initiate a URLRequest for request_handler.url(). The
// request will fail when offset 100 is reached with the error specified above.
class TestDownloadRequestHandler : public base::NonThreadSafe {
public:
// OnStartHandler can be used to intercept the Start() event of a new
// URLRequest. Set it as the |on_start_handler| member of Parameters below.
//
// The callback is invoked on the thread on which TestDownloadRequestHandler
// was created. Once the callback has a response ready, it can invoke the
// OnStartResponseCallback object. The latter can be invoked on any thread and
// will post back to the IO thread to continue with processing the Start()
// event.
//
// The parameters to the OnStartResponseCallback are:
//
// * a |const std::string&| containing the headers to be sent in response to
// the request. The headers should be formatted according to the
// requirements of net::HttpUtil::AssembleRawHeaders(). The headers are only
// used if the |net::Error| parameters is net::OK.
//
// * a |net::Error| indicating the result of the operation. If this parameters
// is not net::OK, then that error value is set as the result of the Start()
// operation. The headers are ignored in this case.
//
// If the error is net::OK, and the headers are empty, then the request is
// handled based on the remaining parameters in |Parameters|.
using OnStartResponseCallback =
base::Callback<void(const std::string&, net::Error)>;
using OnStartHandler = base::Callback<void(const net::HttpRequestHeaders&,
const OnStartResponseCallback&)>;
// An injected error.
struct InjectedError {
InjectedError(int64_t offset, net::Error error);
int64_t offset;
net::Error error;
};
// Parameters used by StartServing().
struct Parameters {
// Constructs a Parameters structure using the default constructor, but with
// the addition of a net::ERR_CONNECTION_RESET which will be triggered at
// byte offset (filesize / 2).
static Parameters WithSingleInterruption();
// The default constructor initializes the parameters for serving a 100 KB
// resource with no interruptions. The response contains an ETag and a
// Last-Modified header and the server supports byte range requests.
Parameters();
// Parameters is expected to be copyable and moveable.
Parameters(Parameters&&);
Parameters(const Parameters&);
Parameters& operator=(Parameters&&);
Parameters& operator=(const Parameters&);
~Parameters();
// Clears the errors in injected_errors.
void ClearInjectedErrors();
// Contents of the ETag header field of the response. No Etag header is
// sent if this field is empty.
std::string etag;
// Contents of the Last-Modified header field of the response. No
// Last-Modified header is sent if this field is empty.
std::string last_modified;
// The Content-Type of the response. No Content-Type header is sent if this
// field is empty.
std::string content_type;
// The total size of the entity. If the entire entity is requested, then
// this would be the same as the value returned in the Content-Length
// header.
int64_t size;
// Seed for the pseudo-random sequence that defines the response body
// contents. The seed is with GetPatternBytes() to generate the body of the
// response.
int pattern_generator_seed;
// If true, the response contains a 'Accept-Ranges: bytes' header.
bool support_byte_ranges;
// If on_start_handler is valid, it will be invoked when a new request is
// received. See details about the OnStartHandler above.
OnStartHandler on_start_handler;
// Errors to be injected. Each injected error is defined by an offset and an
// error. Request handler will successfully fulfil requests to read up to
// |offset|. An attempt to read the byte at |offset| will result in the
// error defined by the InjectErrors object.
//
// If a read spans the range containing |offset|, then the portion of the
// request preceding |offset| will succeed. The next read would start at
// |offset| and hence would result in an error.
//
// E.g.: injected_errors.push(InjectedError(100, ERR_CONNECTION_RESET));
//
// A network read for 1024 bytes at offset 0 would result in successfully
// reading 100 bytes (bytes with offset 0-99). The next read would,
// therefore, start at offset 100 and would result in
// ERR_CONNECTION_RESET.
//
// Injected errors are processed in the order in which they appear in
// |injected_errors|. When handling a network request for the range [S,E]
// (inclusive), all events in |injected_errors| where |offset| is less than
// S will be ignored. The first event remaining will trigger an error once
// the sequence of reads proceeds to a point where its |offset| is included
// in [S,E].
//
// This implies that |injected_errors| must be specified in increasing order
// of |offset|. I.e. |injected_errors| must be sorted by |offset|.
//
// Errors at relative offset 0 are ignored for a partial request. I.e. If
// the request is for the byte range 100-200, then an error at offset 100
// will not trigger. This is done so that non-overlapping continuation
// attempts don't require resetting parameters to succeed.
//
// E.g.: If the caller injects an error at offset 100, then a request for
// the entire entity will fail after reading 100 bytes (offsets 0 through
// 99). A subsequent request for byte range "100-" (offsets 100 through EOF)
// will succeed since the error at offset 100 is ignored.
//
// Notes:
//
// * Distinctions about which read requests signal the error is often only
// important at the //net layer. From //content, it would appear that 100
// bytes were read and then request failed with ERR_CONNECTION_RESET.
std::queue<InjectedError> injected_errors;
};
// Details about completed requests returned by GetCompletedRequestInfo().
struct CompletedRequest {
// Count of bytes read by the client of the URLRequestJob. This counts the
// number of bytes of the entity that was transferred *after* content
// decoding is complete.
int64_t transferred_byte_count = -1;
net::HttpRequestHeaders request_headers;
};
using CompletedRequests = std::vector<CompletedRequest>;
// Registers a request handler at the default URL. Call url() to determine the
// URL.
//
// Notes:
// * This constructor is only meant to be used for convenience when the caller
// is not interested in the URL used for interception. The URL used is
// generated at run time and should not be assumed to be the same across
// different runs of the same test.
//
// * Initialization of the handler synchronously runs a task on the
// BrowserThread::IO thread using a nested message loop. Only construct an
// instance of this object after browser threads have been initialized.
TestDownloadRequestHandler();
// Similar to the default constructor, but registers the handler at |url|.
//
// Notes:
// * The behavior is undefined if more than one TestDownloadRequestHandler is
// registered for the same URL.
TestDownloadRequestHandler(const GURL& url);
// Destroys and posts a task to the IO thread to dismantle the registered URL
// request interceptor. Does not wait for the task to return.
~TestDownloadRequestHandler();
// Returns the URL that this instance is intercepting URLRequests for.
const GURL& url() const { return url_; }
// Start responding to URLRequests for url() with responses based on
// |parameters|.
//
// This method invocation posts a task to the IO thread to update the
// URLRequestInterceptor with the new parameters and returns immediately. URL
// interception won't be updated until the posted task executes. The method
// returns without waiting for the posted task to complete.
//
// Calling this method does not affect URLRequests that have already started.
// The new parameters will only be used to respond to new URLRequests that are
// starting.
//
// StartServing() can be called multiple times to change the operating
// parameters of the current URL interceptor.
void StartServing(const Parameters& parameters);
// Start responding to URLRequests for url() with a static response
// containing the headers in |headers|.
//
// The format of |headers| should comply with the requirements for
// net::HttpUtil::AssembleRawHeaders().
void StartServingStaticResponse(const base::StringPiece& headers);
// Get the list of requests that have already completed.
//
// This method posts a task to the IO thread to collect the list of completed
// requests and waits for the task to complete.
//
// Requests that are currently in progress will not be reflected in
// |requests|.
void GetCompletedRequestInfo(CompletedRequests* requests);
// Generate a pseudo random pattern.
//
// |seed| is the seed for the pseudo random sequence. |offset| is the byte
// offset into the sequence. |length| is a count of bytes to generate.
// |data| receives the generated bytes and should be able to store |length|
// bytes.
//
// The pattern has the following properties:
//
// * For a given |seed|, the entire sequence of bytes is fixed. Any
// subsequence can be generated by specifying the |offset| and |length|.
//
// * The sequence is aperiodic (at least for the first 1M bytes).
//
// * |seed| is chaotic. Different seeds produce "very different" data. This
// means that there's no trivial mapping between sequences generated using
// two distinct seeds.
//
// These properties make the generated bytes useful for testing partial
// requests where the response may need to be built using a sequence of
// partial requests.
//
// Note: Don't use this function to generate a cryptographically secure
// pseudo-random sequence.
static void GetPatternBytes(int seed, int64_t offset, int length, char* data);
private:
class Interceptor;
class PartialResponseJob;
GURL url_;
base::WeakPtr<Interceptor> interceptor_;
DISALLOW_COPY_AND_ASSIGN(TestDownloadRequestHandler);
};
} // namespace content
#endif // CONTENT_PUBLIC_TEST_TEST_DOWNLOAD_REQUEST_HANDLER_H_
| bsd-3-clause |
scheib/chromium | net/base/file_stream.h | 7156 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file defines FileStream, a basic interface for reading and writing files
// synchronously or asynchronously with support for seeking to an offset.
// Note that even when used asynchronously, only one operation is supported at
// a time.
#ifndef NET_BASE_FILE_STREAM_H_
#define NET_BASE_FILE_STREAM_H_
#include <stdint.h>
#include <memory>
#include "base/files/file.h"
#include "net/base/completion_once_callback.h"
#include "net/base/net_export.h"
namespace base {
class FilePath;
class TaskRunner;
}
namespace net {
class IOBuffer;
// Async wrapper around a base::File. All asynchronous work runs on the
// supplied |task_runner|. Per the file-level comment above, only one
// operation may be in flight at a time.
class NET_EXPORT FileStream {
 public:
  // Uses |task_runner| for asynchronous operations.
  explicit FileStream(const scoped_refptr<base::TaskRunner>& task_runner);

  // Construct a FileStream with an already opened file. |file| must be opened
  // for async reading on Windows, and sync reading everywhere else.
  //
  // Uses |task_runner| for asynchronous operations.
  FileStream(base::File file,
             const scoped_refptr<base::TaskRunner>& task_runner);

  FileStream(const FileStream&) = delete;
  FileStream& operator=(const FileStream&) = delete;

  // The underlying file is closed automatically.
  virtual ~FileStream();

  // Call this method to open the FileStream asynchronously. The remaining
  // methods cannot be used unless the file is opened successfully. Returns
  // ERR_IO_PENDING if the operation is started. If the operation cannot be
  // started then an error code is returned.
  //
  // Once the operation is done, |callback| will be run on the thread where
  // Open() was called, with the result code. open_flags is a bitfield of
  // base::File::Flags.
  //
  // If the file stream is not closed manually, the underlying file will be
  // automatically closed when FileStream is destructed in an asynchronous
  // manner (i.e. the file stream is closed in the background but you don't
  // know when).
  virtual int Open(const base::FilePath& path,
                   int open_flags,
                   CompletionOnceCallback callback);

  // Returns ERR_IO_PENDING and closes the file asynchronously, calling
  // |callback| when done.
  // It is invalid to request any asynchronous operations while there is an
  // in-flight asynchronous operation.
  virtual int Close(CompletionOnceCallback callback);

  // Returns true if Open succeeded and Close has not been called.
  virtual bool IsOpen() const;

  // Adjust the position from the start of the file where data is read
  // asynchronously. Upon success, ERR_IO_PENDING is returned and |callback|
  // will be run on the thread where Seek() was called with the stream
  // position relative to the start of the file. Otherwise, an error code is
  // returned. It is invalid to request any asynchronous operations while
  // there is an in-flight asynchronous operation.
  virtual int Seek(int64_t offset, Int64CompletionOnceCallback callback);

  // Call this method to read data from the current stream position
  // asynchronously. Up to buf_len bytes will be copied into buf. (In
  // other words, partial reads are allowed.) Returns the number of bytes
  // copied, 0 if at end-of-file, or an error code if the operation could
  // not be performed.
  //
  // The file must be opened with FLAG_ASYNC, and a non-null
  // callback must be passed to this method. If the read could not
  // complete synchronously, then ERR_IO_PENDING is returned, and the
  // callback will be run on the thread where Read() was called, when the
  // read has completed.
  //
  // It is valid to destroy or close the file stream while there is an
  // asynchronous read in progress. That will cancel the read and allow
  // the buffer to be freed.
  //
  // It is invalid to request any asynchronous operations while there is an
  // in-flight asynchronous operation.
  //
  // This method must not be called if the stream was opened WRITE_ONLY.
  virtual int Read(IOBuffer* buf, int buf_len, CompletionOnceCallback callback);

  // Call this method to write data at the current stream position
  // asynchronously. Up to buf_len bytes will be written from buf. (In
  // other words, partial writes are allowed.) Returns the number of
  // bytes written, or an error code if the operation could not be
  // performed.
  //
  // The file must be opened with FLAG_ASYNC, and a non-null
  // callback must be passed to this method. If the write could not
  // complete synchronously, then ERR_IO_PENDING is returned, and the
  // callback will be run on the thread where Write() was called when
  // the write has completed.
  //
  // It is valid to destroy or close the file stream while there is an
  // asynchronous write in progress. That will cancel the write and allow
  // the buffer to be freed.
  //
  // It is invalid to request any asynchronous operations while there is an
  // in-flight asynchronous operation.
  //
  // This method must not be called if the stream was opened READ_ONLY.
  //
  // Zero byte writes are not allowed.
  virtual int Write(IOBuffer* buf,
                    int buf_len,
                    CompletionOnceCallback callback);

  // Gets status information about File. May fail synchronously, but never
  // succeeds synchronously.
  //
  // It is invalid to request any asynchronous operations while there is an
  // in-flight asynchronous operation.
  //
  // |file_info| must remain valid until |callback| is invoked.
  virtual int GetFileInfo(base::File::Info* file_info,
                          CompletionOnceCallback callback);

  // Forces out a filesystem sync on this file to make sure that the file was
  // written out to disk and is not currently sitting in the buffer. This does
  // not have to be called, it just forces one to happen at the time of
  // calling.
  //
  // The file must be opened with FLAG_ASYNC, and a non-null
  // callback must be passed to this method. If the write could not
  // complete synchronously, then ERR_IO_PENDING is returned, and the
  // callback will be run on the thread where Flush() was called when
  // the write has completed.
  //
  // It is valid to destroy or close the file stream while there is an
  // asynchronous flush in progress. That will cancel the flush and allow
  // the buffer to be freed.
  //
  // It is invalid to request any asynchronous operations while there is an
  // in-flight asynchronous operation.
  //
  // This method should not be called if the stream was opened READ_ONLY.
  virtual int Flush(CompletionOnceCallback callback);

 private:
  class Context;

  // Context performing I/O operations. It was extracted into a separate class
  // to perform asynchronous operations because FileStream can be destroyed
  // before completion of an async operation. Also if a FileStream is
  // destroyed without explicitly calling Close, the file should be closed
  // asynchronously without delaying FileStream's destructor.
  std::unique_ptr<Context> context_;
};
} // namespace net
#endif // NET_BASE_FILE_STREAM_H_
| bsd-3-clause |
scheib/chromium | third_party/blink/renderer/core/layout/svg/layout_svg_tspan.h | 1569 | /*
* Copyright (C) 2006 Oliver Hunt <ojh16@student.canterbury.ac.nz>
* Copyright (C) 2006 Apple Computer Inc.
* Copyright (C) 2009 Google Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_SVG_LAYOUT_SVG_TSPAN_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_SVG_LAYOUT_SVG_TSPAN_H_
#include "third_party/blink/renderer/core/layout/svg/layout_svg_inline.h"
namespace blink {
// Layout object for SVG <tspan> elements (inferred from the class name --
// the associated DOM element type is determined by callers constructing it).
// Laid out as an inline box within an SVG text subtree via LayoutSVGInline.
class LayoutSVGTSpan final : public LayoutSVGInline {
 public:
  explicit LayoutSVGTSpan(Element*);

  bool IsOfType(LayoutObjectType type) const override;
  bool IsChildAllowed(LayoutObject*, const ComputedStyle&) const override;
  const char* GetName() const override {
    NOT_DESTROYED();
    return "LayoutSVGTSpan";
  }
};

}  // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_SVG_LAYOUT_SVG_TSPAN_H_
| bsd-3-clause |
nwjs/chromium.src | skia/ext/fontmgr_default.cc | 873 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "skia/ext/fontmgr_default.h"
#include "third_party/skia/include/core/SkFontMgr.h"
namespace {

// Debug-only flag recording that SkFontMgr::Factory() has run; used below to
// assert that the override is installed before the first Factory() call.
SkDEBUGCODE(bool g_factory_called;)

// This is a purposefully leaky pointer that has ownership of the FontMgr.
SkFontMgr* g_fontmgr_override = nullptr;

}  // namespace
namespace skia {

// Installs |fontmgr| as the manager returned by SkFontMgr::Factory().
// Must be called before the first Factory() call (asserted in debug builds).
// Releases any previously installed override's reference before taking
// ownership of the new one (ownership is deliberately leaked at shutdown).
void OverrideDefaultSkFontMgr(sk_sp<SkFontMgr> fontmgr) {
  SkASSERT(!g_factory_called);

  SkSafeUnref(g_fontmgr_override);
  g_fontmgr_override = fontmgr.release();
}

}  // namespace skia
// Skia's font-manager factory hook: hands out the override installed via
// skia::OverrideDefaultSkFontMgr() when one exists, otherwise falls back to
// the platform default from skia::CreateDefaultSkFontMgr().
SK_API sk_sp<SkFontMgr> SkFontMgr::Factory() {
  SkDEBUGCODE(g_factory_called = true;);
  if (g_fontmgr_override) {
    return sk_ref_sp(g_fontmgr_override);
  }
  return skia::CreateDefaultSkFontMgr();
}
joshvera/CefSharp | CefSharp/IResourceHandler.cs | 844 | // Copyright © 2010-2015 The CefSharp Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
using System.IO;
namespace CefSharp
{
    //TODO: Eval naming for this interface, not happy with this name
    /// <summary>
    /// Implemented by classes that provide custom resource responses
    /// (e.g. for scheme handling or request interception).
    /// </summary>
    public interface IResourceHandler
    {
        /// <summary>
        /// Processes request asynchronously.
        /// </summary>
        /// <param name="request">The request object.</param>
        /// <param name="callback">The callback used to Continue or Cancel the request (async).</param>
        /// <returns>true if the request is handled, false otherwise.</returns>
        bool ProcessRequestAsync(IRequest request, ICallback callback);

        /// <summary>
        /// Gets the stream containing the response data.
        /// NOTE(review): parameter semantics below are inferred from the
        /// signature and CEF conventions -- confirm against the implementation.
        /// </summary>
        /// <param name="response">The response object, presumably populated with headers/status by the implementer.</param>
        /// <param name="responseLength">Receives the length of the response data (assumed; -1 may indicate unknown length).</param>
        /// <param name="redirectUrl">Receives a redirect URL, presumably causing a redirect when non-empty.</param>
        /// <returns>A stream of the response data, or possibly null when there is no body (confirm with callers).</returns>
        Stream GetResponse(IResponse response, out long responseLength, out string redirectUrl);
    }
}
| bsd-3-clause |
phobson/bokeh | bokeh/models/images.py | 617 | from __future__ import absolute_import
from ..model import Model
from ..core.properties import (Any, Dict, String)
class ImageSource(Model):
    """ A base class for all image source types. """

    # Positional argument names for this model's properties.
    # NOTE(review): the consumer of ``_args`` is outside this module --
    # presumably used when constructing instances from positional args;
    # confirm before relying on it.
    _args = ('url', 'extra_url_vars')

    # Template URL for the image/tile service; ``{X}``/``{Y}``/``{Z}``-style
    # placeholders are shown in the example below.
    url = String(default="", help="""
    tile service url (example: http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png)
    """)

    # Static substitutions applied to the URL template (values that do not
    # vary per tile).
    extra_url_vars = Dict(String, Any, help="""
    A dictionary that maps url variable template keys to values.
    These variables are useful for parts of tile urls which do not change from tile to tile (e.g. server host name, or layer name).
    """)
| bsd-3-clause |
ric2b/Vivaldi-browser | chromium/chromecast/browser/android/apk/src/org/chromium/chromecast/shell/CastWebContentsView.java | 4548 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chromecast.shell;
import android.content.Context;
import android.content.Intent;
import android.graphics.Color;
import android.net.Uri;
import android.os.Bundle;
import android.os.IBinder;
import android.view.LayoutInflater;
import android.view.MotionEvent;
import android.view.View;
import android.view.accessibility.AccessibilityNodeProvider;
import android.widget.FrameLayout;
import androidx.annotation.Nullable;
import org.chromium.base.Log;
import org.chromium.chromecast.base.CastSwitches;
/**
 * View for displaying a WebContents in CastShell.
 *
 * <p>Intended to be used with {@link android.app.Presentation}.
 *
 * <p>
 * Typically, this class is controlled by CastContentWindowAndroid through
 * CastWebContentsSurfaceHelper. If the CastContentWindowAndroid is destroyed,
 * CastWebContentsView should be removed from the activity holding it.
 * Similarly, if the view is removed from an activity or the activity holding
 * it is destroyed, CastContentWindowAndroid should be notified by intent.
 */
public class CastWebContentsView extends FrameLayout {
    private static final String TAG = "CastWebContentV";

    // Created lazily on the first onStart() and destroyed in onStop().
    private CastWebContentsSurfaceHelper mSurfaceHelper;

    public CastWebContentsView(Context context) {
        super(context);
        initView();
    }

    /** Inflates the web-contents layout and an accessibility overlay. */
    private void initView() {
        FrameLayout.LayoutParams matchParent = new FrameLayout.LayoutParams(
                FrameLayout.LayoutParams.MATCH_PARENT, FrameLayout.LayoutParams.MATCH_PARENT);
        addView(LayoutInflater.from(getContext())
                        .inflate(R.layout.cast_web_contents_activity, null),
                matchParent);

        // Adds a transparent view on top to allow a highlight rectangule to be drawn when
        // accessibility is turned on.
        addView(new View(getContext()), matchParent);
    }

    /**
     * Starts displaying web contents. Creates the surface helper on first
     * call (subsequent calls are ignored) and forwards the start parameters
     * decoded from {@code startArgumentsBundle} to it.
     */
    public void onStart(Bundle startArgumentsBundle) {
        Log.d(TAG, "onStart");
        if (mSurfaceHelper != null) {
            return;
        }
        mSurfaceHelper = new CastWebContentsSurfaceHelper(
                CastWebContentsScopes.onLayoutView(getContext(),
                        findViewById(R.id.web_contents_container),
                        CastSwitches.getSwitchValueColor(
                                CastSwitches.CAST_APP_BACKGROUND_COLOR, Color.BLACK),
                        this ::getHostWindowToken),
                (Uri uri) -> sendIntentSync(CastWebContentsIntentUtils.onWebContentStopped(uri)));
        CastWebContentsSurfaceHelper.StartParams params =
                CastWebContentsSurfaceHelper.StartParams.fromBundle(startArgumentsBundle);
        if (params == null) return;
        mSurfaceHelper.onNewStartParams(params);
    }

    /** Lifecycle hook; currently only logs. */
    public void onResume() {
        Log.d(TAG, "onResume");
    }

    /** Lifecycle hook; currently only logs. */
    public void onPause() {
        Log.d(TAG, "onPause");
    }

    /** Tears down the surface helper created in {@link #onStart}. */
    public void onStop() {
        Log.d(TAG, "onStop");
        if (mSurfaceHelper != null) {
            mSurfaceHelper.onDestroy();
        }
    }

    /** Returns the window token used to host the web contents surface. */
    @Nullable
    protected IBinder getHostWindowToken() {
        return getWindowToken();
    }

    // Delivers |in| synchronously through the local broadcast manager.
    private void sendIntentSync(Intent in) {
        CastWebContentsIntentUtils.getLocalBroadcastManager().sendBroadcastSync(in);
    }

    // Forwards the accessibility delegate to the content view (if present).
    @Override
    public void setAccessibilityDelegate(AccessibilityDelegate delegate) {
        View contentView = getContentView();
        if (contentView != null) {
            contentView.setAccessibilityDelegate(delegate);
        } else {
            Log.w(TAG, "Content view is null!");
        }
    }

    // Forwards hover events to the content view; returns false if absent.
    @Override
    public boolean onHoverEvent(MotionEvent event) {
        View contentView = getContentView();
        if (contentView != null) {
            return contentView.onHoverEvent(event);
        } else {
            Log.w(TAG, "Content view is null!");
            return false;
        }
    }

    /** Returns the content view's accessibility node provider, or null. */
    public AccessibilityNodeProvider getWebContentsAccessibilityNodeProvider() {
        View contentView = getContentView();
        if (contentView != null) {
            return contentView.getAccessibilityNodeProvider();
        } else {
            Log.w(TAG, "Content view is null! Returns a null AccessibilityNodeProvider.");
            return null;
        }
    }

    // Looks up the content view inflated in initView() by its tag.
    private View getContentView() {
        return findViewWithTag(CastWebContentsScopes.VIEW_TAG_CONTENT_VIEW);
    }
}
| bsd-3-clause |
augustf/wtgsite | vendor/plugins/recaptcha/test/verify_recaptcha_test.rb | 3213 | require 'test/unit'
require 'rails/version' # For getting the rails version constants
require 'active_support/vendor' # For loading I18n
require 'mocha'
require 'net/http'
require File.dirname(__FILE__) + '/../lib/recaptcha'
# Unit tests for Recaptcha::Verify#verify_recaptcha. The HTTP round-trip to
# the reCAPTCHA verify server is stubbed out with mocha expectations.
class RecaptchaVerifyTest < Test::Unit::TestCase
  # Common fixture: a fake controller plus the exact POST payload and URI the
  # plugin is expected to send to the verify server.
  def setup
    ENV['RECAPTCHA_PRIVATE_KEY'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    @controller = TestController.new
    @controller.request = stub(:remote_ip => "1.1.1.1")
    @controller.params = {:recaptcha_challenge_field => "challenge", :recaptcha_response_field => "response"}

    @expected_post_data = {}
    @expected_post_data["privatekey"] = ENV['RECAPTCHA_PRIVATE_KEY']
    @expected_post_data["remoteip"] = @controller.request.remote_ip
    @expected_post_data["challenge"] = "challenge"
    @expected_post_data["response"] = "response"

    @expected_uri = URI.parse("http://#{Recaptcha::RECAPTCHA_VERIFY_SERVER}/verify")
  end

  # Without a private key there is nothing to verify with.
  def test_should_raise_exception_without_private_key
    assert_raise Recaptcha::RecaptchaError do
      ENV['RECAPTCHA_PRIVATE_KEY'] = nil
      @controller.verify_recaptcha
    end
  end

  # Server-reported failures surface via session[:recaptcha_error].
  def test_should_return_false_when_key_is_invalid
    expect_http_post(response_with_body("false\ninvalid-site-private-key"))

    assert !@controller.verify_recaptcha
    assert_equal "invalid-site-private-key", @controller.session[:recaptcha_error]
  end

  # A successful verification also clears any stale error from the session.
  def test_returns_true_on_success
    @controller.session[:recaptcha_error] = "previous error that should be cleared"
    expect_http_post(response_with_body("true\n"))

    assert @controller.verify_recaptcha
    assert_nil @controller.session[:recaptcha_error]
  end

  # When a :model is given, failures are added to the model's errors.
  def test_errors_should_be_added_to_model
    expect_http_post(response_with_body("false\nbad-news"))
    errors = mock
    errors.expects(:add).with(:base, "Captcha response is incorrect, please try again.")
    model = mock(:valid? => false, :errors => errors)

    assert !@controller.verify_recaptcha(:model => model)
    assert_equal "bad-news", @controller.session[:recaptcha_error]
  end

  # An explicitly passed :private_key overrides the environment variable.
  def test_returns_true_on_success_with_optional_key
    @controller.session[:recaptcha_error] = "previous error that should be cleared"
    # reset private key
    @expected_post_data["privatekey"] = 'ADIFFERENTPRIVATEKEYXXXXXXXXXXXXXX'
    expect_http_post(response_with_body("true\n"))

    assert @controller.verify_recaptcha(:private_key => 'ADIFFERENTPRIVATEKEYXXXXXXXXXXXXXX')
    assert_nil @controller.session[:recaptcha_error]
  end

  # Network timeouts are reported as "recaptcha-not-reachable", not raised.
  def test_timeout
    expect_http_post(Timeout::Error, :exception => true)
    assert !@controller.verify_recaptcha()
    assert_equal "recaptcha-not-reachable", @controller.session[:recaptcha_error]
  end

  private

  # Minimal stand-in for an ActionController host of Recaptcha::Verify.
  class TestController
    include Recaptcha::Verify
    attr_accessor :request, :params, :session

    def initialize
      @session = {}
    end
  end

  # Expects exactly one Net::HTTP.post_form with the payload from setup,
  # returning (or raising, when :exception is set) the given response.
  def expect_http_post(response, options = {})
    unless options[:exception]
      Net::HTTP.expects(:post_form).with(@expected_uri, @expected_post_data).returns(response)
    else
      Net::HTTP.expects(:post_form).raises response
    end
  end

  # Builds a stub HTTP response with the given body.
  def response_with_body(body)
    stub(:body => body)
  end
end
| mit |
boutell/SillyCMS | src/vendor/symfony/src/Symfony/Bundle/DoctrineBundle/Tests/DependencyInjection/Fixtures/Bundles/XmlBundle/Entity/Test.php | 108 | <?php
namespace DoctrineBundle\Tests\DependencyInjection\Fixtures\Bundles\XmlBundle\Entity;
/**
 * Deliberately empty entity class fixture; presumably referenced by the
 * XmlBundle's Doctrine mapping in the DependencyInjection tests -- confirm
 * against the bundle's test configuration.
 */
class Test
{
}
davehorton/drachtio-server | deps/boost_1_77_0/libs/unordered/test/unordered/allocator_traits.cpp | 11069 |
// Copyright 2011 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/core/lightweight_test.hpp>
#include <boost/limits.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/unordered/detail/implementation.hpp>
// Boilerplate
// ALLOCATOR_METHODS(name): injects the minimal C++03-style allocator
// interface (rebind, address, allocate/deallocate, construct/destroy,
// max_size, equality) into a test allocator whose pointer types are
// plain raw pointers (T* / T const*).
#define ALLOCATOR_METHODS(name)                                              \
  template <typename U> struct rebind                                        \
  {                                                                          \
    typedef name<U> other;                                                   \
  };                                                                         \
                                                                             \
  name() {}                                                                  \
  template <typename Y> name(name<Y> const&) {}                              \
  T* address(T& r) { return &r; }                                            \
  T const* address(T const& r) { return &r; }                                \
  T* allocate(std::size_t n)                                                 \
  {                                                                          \
    return static_cast<T*>(::operator new(n * sizeof(T)));                   \
  }                                                                          \
  T* allocate(std::size_t n, void const*)                                    \
  {                                                                          \
    return static_cast<T*>(::operator new(n * sizeof(T)));                   \
  }                                                                          \
  void deallocate(T* p, std::size_t) { ::operator delete((void*)p); }        \
  void construct(T* p, T const& t) { new (p) T(t); }                         \
  void destroy(T* p) { p->~T(); }                                            \
  std::size_t max_size() const                                               \
  {                                                                          \
    return (std::numeric_limits<std::size_t>::max)();                        \
  }                                                                          \
  bool operator==(name<T> const&) const { return true; }                     \
  bool operator!=(name<T> const&) const { return false; }                    \
  /**/
// ALLOCATOR_METHODS_TYPEDEFS(name): same boilerplate, but written in
// terms of the allocator's own pointer / const_pointer / size_type
// typedefs so it also works for fancy-pointer allocators (allocator3).
#define ALLOCATOR_METHODS_TYPEDEFS(name)                                     \
  template <typename U> struct rebind                                        \
  {                                                                          \
    typedef name<U> other;                                                   \
  };                                                                         \
                                                                             \
  name() {}                                                                  \
  template <typename Y> name(name<Y> const&) {}                              \
  pointer address(T& r) { return &r; }                                       \
  const_pointer address(T const& r) { return &r; }                           \
  pointer allocate(std::size_t n)                                            \
  {                                                                          \
    return pointer(::operator new(n * sizeof(T)));                           \
  }                                                                          \
  pointer allocate(std::size_t n, void const*)                               \
  {                                                                          \
    return pointer(::operator new(n * sizeof(T)));                           \
  }                                                                          \
  void deallocate(pointer p, std::size_t) { ::operator delete((void*)p); }   \
  void construct(T* p, T const& t) { new (p) T(t); }                         \
  void destroy(T* p) { p->~T(); }                                            \
  size_type max_size() const                                                 \
  {                                                                          \
    return (std::numeric_limits<size_type>::max)();                          \
  }                                                                          \
  bool operator==(name<T> const&) { return true; }                           \
  bool operator!=(name<T> const&) { return false; }                          \
  /**/
// Tag types used for the propagate_on_container_* / is_always_equal
// typedefs of the test allocators below; allocator_traits only needs
// a nested boolean `value`.
struct yes_type
{
  enum
  {
    value = true
  };
};
struct no_type
{
  enum
  {
    value = false
  };
};
// For tracking calls...
// 'selected' counts how many times an allocator's own
// select_on_container_copy_construction hook has been invoked.
static int selected;
void reset() { selected = 0; }
// Runs traits::select_on_container_copy_construction on a
// default-constructed Allocator and reports how many times the
// allocator's own hook fired (0 means the trait defaulted to a copy).
template <typename Allocator> int call_select()
{
  typedef boost::unordered::detail::allocator_traits<Allocator> traits;
  Allocator a;
  reset();
  BOOST_TEST(traits::select_on_container_copy_construction(a) == a);
  return selected;
}
// Empty allocator test
// Allocator providing nothing beyond the required minimum, so
// allocator_traits must supply a default for every optional member.
template <typename T> struct empty_allocator
{
  typedef T value_type;
  ALLOCATOR_METHODS(empty_allocator)
};
// Expected defaults: standard size/difference types, raw pointers, all
// propagate_on_* flags false, is_always_equal true, and
// select_on_container_copy_construction returning a plain copy
// (call_select() == 0, i.e. no allocator hook exists to run).
void test_empty_allocator()
{
  typedef empty_allocator<int> allocator;
  typedef boost::unordered::detail::allocator_traits<allocator> traits;
#if BOOST_UNORDERED_USE_ALLOCATOR_TRAITS == 1
  BOOST_STATIC_ASSERT((boost::is_same<traits::size_type,
    std::make_unsigned<std::ptrdiff_t>::type>::value));
#else
  BOOST_STATIC_ASSERT((boost::is_same<traits::size_type, std::size_t>::value));
#endif
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::difference_type, std::ptrdiff_t>::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::pointer, int*>::value));
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::const_pointer, int const*>::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::value_type, int>::value));
  BOOST_TEST(!traits::propagate_on_container_copy_assignment::value);
  BOOST_TEST(!traits::propagate_on_container_move_assignment::value);
  BOOST_TEST(!traits::propagate_on_container_swap::value);
  BOOST_TEST(traits::is_always_equal::value);
  BOOST_TEST(call_select<allocator>() == 0);
}
// allocator 1
// Allocator that opts in to everything: all propagation flags yes,
// is_always_equal yes, plus a select_on_container_copy_construction
// hook that counts its invocations via 'selected'.
template <typename T> struct allocator1
{
  typedef T value_type;
  ALLOCATOR_METHODS(allocator1)
  typedef yes_type propagate_on_container_copy_assignment;
  typedef yes_type propagate_on_container_move_assignment;
  typedef yes_type propagate_on_container_swap;
  typedef yes_type is_always_equal;
  allocator1<T> select_on_container_copy_construction() const
  {
    ++selected;
    return allocator1<T>();
  }
};
// allocator_traits must honour each explicit member verbatim and run
// the allocator's select_* hook exactly once.
void test_allocator1()
{
  typedef allocator1<int> allocator;
  typedef boost::unordered::detail::allocator_traits<allocator> traits;
#if BOOST_UNORDERED_USE_ALLOCATOR_TRAITS == 1
  BOOST_STATIC_ASSERT((boost::is_same<traits::size_type,
    std::make_unsigned<std::ptrdiff_t>::type>::value));
#else
  BOOST_STATIC_ASSERT((boost::is_same<traits::size_type, std::size_t>::value));
#endif
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::difference_type, std::ptrdiff_t>::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::pointer, int*>::value));
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::const_pointer, int const*>::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::value_type, int>::value));
  BOOST_TEST(traits::propagate_on_container_copy_assignment::value);
  BOOST_TEST(traits::propagate_on_container_move_assignment::value);
  BOOST_TEST(traits::propagate_on_container_swap::value);
  BOOST_TEST(traits::is_always_equal::value);
  BOOST_TEST(call_select<allocator>() == 1);
}
// allocator 2
// Here the select_on_container_copy_construction hook lives in a base
// class, to check that allocator_traits still finds inherited members.
template <typename Alloc> struct allocator2_base
{
  Alloc select_on_container_copy_construction() const
  {
    ++selected;
    return Alloc();
  }
};
// Allocator that explicitly opts out of all propagation (no_type),
// declares its own pointer/size_type typedefs, and inherits the hook.
template <typename T> struct allocator2 : allocator2_base<allocator2<T> >
{
  typedef T value_type;
  typedef T* pointer;
  typedef T const* const_pointer;
  typedef std::size_t size_type;
  ALLOCATOR_METHODS(allocator2)
  typedef no_type propagate_on_container_copy_assignment;
  typedef no_type propagate_on_container_move_assignment;
  typedef no_type propagate_on_container_swap;
  typedef no_type is_always_equal;
};
// Explicit no_type flags must read as false, and the inherited
// select_* hook must still run exactly once.
void test_allocator2()
{
  typedef allocator2<int> allocator;
  typedef boost::unordered::detail::allocator_traits<allocator> traits;
  BOOST_STATIC_ASSERT((boost::is_same<traits::size_type, std::size_t>::value));
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::difference_type, std::ptrdiff_t>::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::pointer, int*>::value));
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::const_pointer, int const*>::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::value_type, int>::value));
  BOOST_TEST(!traits::propagate_on_container_copy_assignment::value);
  BOOST_TEST(!traits::propagate_on_container_move_assignment::value);
  BOOST_TEST(!traits::propagate_on_container_swap::value);
  BOOST_TEST(!traits::is_always_equal::value);
  BOOST_TEST(call_select<allocator>() == 1);
}
// allocator 3
// Minimal fancy-pointer type used by allocator3: wraps a raw pointer,
// is constructible from void*, and is dereferenceable.
template <typename T> struct ptr
{
  T* value_;
  ptr(void* v) : value_((T*)v) {}
  T& operator*() const { return *value_; }
};
// The void / const void specialisations cannot be dereferenced.
template <> struct ptr<void>
{
  void* value_;
  ptr(void* v) : value_(v) {}
};
template <> struct ptr<const void>
{
  void const* value_;
  ptr(void const* v) : value_(v) {}
};
// Allocator with fancy pointers, a narrow size_type, a data member
// (so the type is non-empty and is_always_equal must default to
// false), and only *some* of the propagation typedefs supplied.
template <typename T> struct allocator3
{
  typedef T value_type;
  typedef ptr<T> pointer;
  typedef ptr<T const> const_pointer;
  typedef unsigned short size_type;
  int x; // Just to make it non-empty, so that is_always_equal is false.
  ALLOCATOR_METHODS_TYPEDEFS(allocator3)
  typedef yes_type propagate_on_container_copy_assignment;
  typedef no_type propagate_on_container_move_assignment;
  allocator3<T> select_on_container_copy_construction() const
  {
    ++selected;
    return allocator3<T>();
  }
};
// Custom typedefs must be picked up verbatim; the *missing*
// propagate_on_container_swap must default to false; is_always_equal
// must default to false because allocator3 is not an empty type.
void test_allocator3()
{
  typedef allocator3<int> allocator;
  typedef boost::unordered::detail::allocator_traits<allocator> traits;
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::size_type, unsigned short>::value));
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::difference_type, std::ptrdiff_t>::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::pointer, ptr<int> >::value));
  BOOST_STATIC_ASSERT(
    (boost::is_same<traits::const_pointer, ptr<int const> >::value));
  BOOST_STATIC_ASSERT((boost::is_same<traits::value_type, int>::value));
  BOOST_TEST(traits::propagate_on_container_copy_assignment::value);
  BOOST_TEST(!traits::propagate_on_container_move_assignment::value);
  BOOST_TEST(!traits::propagate_on_container_swap::value);
  BOOST_TEST(!traits::is_always_equal::value);
  BOOST_TEST(call_select<allocator>() == 1);
}
// Entry point: runs every allocator-traits scenario and returns the
// number of failed BOOST_TEST checks (0 on full success).
int main()
{
  test_empty_allocator();
  test_allocator1();
  test_allocator2();
  test_allocator3();
  return boost::report_errors();
}
| mit |
alexispeter/CM | library/CM/Paging/StreamSubscribe/User.php | 347 | <?php
class CM_Paging_StreamSubscribe_User extends CM_Paging_StreamSubscribe_Abstract {
    /**
     * Paging over the stream subscriptions that belong to a single user
     * (rows in `cm_stream_subscribe` matched by `userId`).
     *
     * @param CM_Model_User $user Owner whose subscriptions are listed
     */
    public function __construct(CM_Model_User $user) {
        // NOTE(review): the id is interpolated directly into the WHERE
        // clause; this assumes getId() always yields an integer -- confirm
        // against CM_Model_User.
        $source = new CM_PagingSource_Sql('`id`', 'cm_stream_subscribe', '`userId` = ' . $user->getId());
        parent::__construct($source);
    }
}
| mit |
NicholasFFox/fastlane | snapshot/lib/snapshot/options.rb | 13252 | require 'fastlane_core'
require 'credentials_manager'
module Snapshot
  # Declares every configuration option understood by _snapshot_.  Values
  # can come from a Snapfile, environment variables or CLI flags.
  class Options
    # Builds (once, memoized in @options) and returns the Array of
    # FastlaneCore::ConfigItem definitions for all supported options.
    def self.available_options
      # Keep screenshots inside ./fastlane when such a directory exists.
      output_directory = (File.directory?("fastlane") ? "fastlane/screenshots" : "screenshots")

      @options ||= [
        FastlaneCore::ConfigItem.new(key: :workspace,
                                     short_option: "-w",
                                     env_name: "SNAPSHOT_WORKSPACE",
                                     optional: true,
                                     description: "Path the workspace file",
                                     verify_block: proc do |value|
                                       v = File.expand_path(value.to_s)
                                       UI.user_error!("Workspace file not found at path '#{v}'") unless File.exist?(v)
                                       UI.user_error!("Workspace file invalid") unless File.directory?(v)
                                       UI.user_error!("Workspace file is not a workspace, must end with .xcworkspace") unless v.include?(".xcworkspace")
                                     end),
        FastlaneCore::ConfigItem.new(key: :project,
                                     short_option: "-p",
                                     optional: true,
                                     env_name: "SNAPSHOT_PROJECT",
                                     description: "Path the project file",
                                     verify_block: proc do |value|
                                       v = File.expand_path(value.to_s)
                                       UI.user_error!("Project file not found at path '#{v}'") unless File.exist?(v)
                                       UI.user_error!("Project file invalid") unless File.directory?(v)
                                       UI.user_error!("Project file is not a project file, must end with .xcodeproj") unless v.include?(".xcodeproj")
                                     end),
        FastlaneCore::ConfigItem.new(key: :xcargs,
                                     short_option: "-X",
                                     env_name: "SNAPSHOT_XCARGS",
                                     description: "Pass additional arguments to xcodebuild for the test phase. Be sure to quote the setting names and values e.g. OTHER_LDFLAGS=\"-ObjC -lstdc++\"",
                                     optional: true,
                                     type: :shell_string),
        FastlaneCore::ConfigItem.new(key: :devices,
                                     description: "A list of devices you want to take the screenshots from",
                                     short_option: "-d",
                                     type: Array,
                                     optional: true,
                                     verify_block: proc do |value|
                                       available = FastlaneCore::DeviceManager.simulators
                                       value.each do |current|
                                         device = current.strip
                                         unless available.any? { |d| d.name.strip == device } || device == "Mac"
                                           UI.user_error!("Device '#{device}' not in list of available simulators '#{available.join(', ')}'")
                                         end
                                       end
                                     end),
        FastlaneCore::ConfigItem.new(key: :languages,
                                     description: "A list of languages which should be used",
                                     short_option: "-g",
                                     type: Array,
                                     default_value: ['en-US']),
        FastlaneCore::ConfigItem.new(key: :launch_arguments,
                                     env_name: 'SNAPSHOT_LAUNCH_ARGUMENTS',
                                     description: "A list of launch arguments which should be used",
                                     short_option: "-m",
                                     type: Array,
                                     default_value: ['']),
        FastlaneCore::ConfigItem.new(key: :output_directory,
                                     short_option: "-o",
                                     env_name: "SNAPSHOT_OUTPUT_DIRECTORY",
                                     description: "The directory where to store the screenshots",
                                     default_value: output_directory),
        FastlaneCore::ConfigItem.new(key: :output_simulator_logs,
                                     env_name: "SNAPSHOT_OUTPUT_SIMULATOR_LOGS",
                                     description: "If the logs generated by the app (e.g. using NSLog, perror, etc.) in the Simulator should be written to the output_directory",
                                     type: TrueClass,
                                     default_value: false,
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :ios_version,
                                     description: "By default, the latest version should be used automatically. If you want to change it, do it here",
                                     short_option: "-i",
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :skip_open_summary,
                                     env_name: 'SNAPSHOT_SKIP_OPEN_SUMMARY',
                                     description: "Don't open the HTML summary after running _snapshot_",
                                     default_value: false,
                                     is_string: false),
        FastlaneCore::ConfigItem.new(key: :skip_helper_version_check,
                                     env_name: 'SNAPSHOT_SKIP_SKIP_HELPER_VERSION_CHECK',
                                     description: "Do not check for most recent SnapshotHelper code",
                                     default_value: false,
                                     is_string: false),
        FastlaneCore::ConfigItem.new(key: :clear_previous_screenshots,
                                     env_name: 'SNAPSHOT_CLEAR_PREVIOUS_SCREENSHOTS',
                                     description: "Enabling this option will automatically clear previously generated screenshots before running snapshot",
                                     default_value: false,
                                     is_string: false),
        FastlaneCore::ConfigItem.new(key: :reinstall_app,
                                     env_name: 'SNAPSHOT_REINSTALL_APP',
                                     description: "Enabling this option will automatically uninstall the application before running it",
                                     default_value: false,
                                     is_string: false),
        FastlaneCore::ConfigItem.new(key: :erase_simulator,
                                     env_name: 'SNAPSHOT_ERASE_SIMULATOR',
                                     description: "Enabling this option will automatically erase the simulator before running the application",
                                     default_value: false,
                                     is_string: false),
        FastlaneCore::ConfigItem.new(key: :localize_simulator,
                                     env_name: 'SNAPSHOT_LOCALIZE_SIMULATOR',
                                     description: "Enabling this option will configure the Simulator's system language",
                                     default_value: false,
                                     is_string: false),
        FastlaneCore::ConfigItem.new(key: :app_identifier,
                                     env_name: 'SNAPSHOT_APP_IDENTIFIER',
                                     short_option: "-a",
                                     optional: true,
                                     description: "The bundle identifier of the app to uninstall (only needed when enabling reinstall_app)",
                                     # Bug fix: the variable name was misspelled ("SNAPSHOT_APP_IDENTITIFER"),
                                     # so the documented SNAPSHOT_APP_IDENTIFIER was ignored here.  Read the
                                     # correct spelling first; keep the misspelled one as a fallback for
                                     # backward compatibility with existing setups.
                                     default_value: ENV["SNAPSHOT_APP_IDENTIFIER"] || ENV["SNAPSHOT_APP_IDENTITIFER"] || CredentialsManager::AppfileConfig.try_fetch_value(:app_identifier)),
        FastlaneCore::ConfigItem.new(key: :add_photos,
                                     env_name: 'SNAPSHOT_PHOTOS',
                                     short_option: "-j",
                                     description: "A list of photos that should be added to the simulator before running the application",
                                     type: Array,
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :add_videos,
                                     env_name: 'SNAPSHOT_VIDEOS',
                                     short_option: "-u",
                                     description: "A list of videos that should be added to the simulator before running the application",
                                     type: Array,
                                     optional: true),

        # Everything around building
        FastlaneCore::ConfigItem.new(key: :buildlog_path,
                                     short_option: "-l",
                                     env_name: "SNAPSHOT_BUILDLOG_PATH",
                                     description: "The directory where to store the build log",
                                     default_value: "#{FastlaneCore::Helper.buildlog_path}/snapshot"),
        FastlaneCore::ConfigItem.new(key: :clean,
                                     short_option: "-c",
                                     env_name: "SNAPSHOT_CLEAN",
                                     description: "Should the project be cleaned before building it?",
                                     is_string: false,
                                     default_value: false),
        FastlaneCore::ConfigItem.new(key: :configuration,
                                     short_option: "-q",
                                     env_name: "SNAPSHOT_CONFIGURATION",
                                     description: "The configuration to use when building the app. Defaults to 'Release'",
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :xcpretty_args,
                                     short_option: "-x",
                                     env_name: "SNAPSHOT_XCPRETTY_ARGS",
                                     description: "Additional xcpretty arguments",
                                     is_string: true,
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :sdk,
                                     short_option: "-k",
                                     env_name: "SNAPSHOT_SDK",
                                     description: "The SDK that should be used for building the application",
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :scheme,
                                     short_option: "-s",
                                     env_name: 'SNAPSHOT_SCHEME',
                                     description: "The scheme you want to use, this must be the scheme for the UI Tests",
                                     optional: true), # optional true because we offer a picker to the user
        FastlaneCore::ConfigItem.new(key: :number_of_retries,
                                     short_option: "-n",
                                     env_name: 'SNAPSHOT_NUMBER_OF_RETRIES',
                                     description: "The number of times a test can fail before snapshot should stop retrying",
                                     type: Integer,
                                     default_value: 1),
        FastlaneCore::ConfigItem.new(key: :stop_after_first_error,
                                     env_name: 'SNAPSHOT_BREAK_ON_FIRST_ERROR',
                                     description: "Should snapshot stop immediately after the tests completely failed on one device?",
                                     default_value: false,
                                     is_string: false),
        FastlaneCore::ConfigItem.new(key: :derived_data_path,
                                     short_option: "-f",
                                     env_name: "SNAPSHOT_DERIVED_DATA_PATH",
                                     description: "The directory where build products and other derived data will go",
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :test_target_name,
                                     env_name: "SNAPSHOT_TEST_TARGET_NAME",
                                     description: "The name of the target you want to test (if you desire to override the Target Application from Xcode)",
                                     optional: true),
        FastlaneCore::ConfigItem.new(key: :namespace_log_files,
                                     env_name: "SNAPSHOT_NAMESPACE_LOG_FILES",
                                     description: "Separate the log files per device and per language",
                                     optional: true,
                                     is_string: false)
      ]
    end
  end
end
| mit |
hotchandanisagar/odata.net | test/FunctionalTests/Tests/DataOData/Tests/OData.TDD.Tests/Common/JsonLight/JsonLightUtils.cs | 1894 | //---------------------------------------------------------------------
// <copyright file="JsonLightUtils.cs" company="Microsoft">
// Copyright (C) Microsoft Corporation. All rights reserved. See License.txt in the project root for license information.
// </copyright>
//---------------------------------------------------------------------
namespace Microsoft.Test.OData.TDD.Tests.Common.JsonLight
{
using System.Collections.Generic;
using Microsoft.OData.Core;
using Microsoft.OData.Core.JsonLight;
    /// <summary>
    /// Constants and helper methods shared by the JSON Light test code.
    /// </summary>
    public static class JsonLightUtils
    {
        /// <summary>The default streaming Json Light media type.</summary>
        /// <remarks>
        /// application/json with the parameters metadata=minimal,
        /// streaming=true and IEEE754Compatible=false.
        /// </remarks>
        internal static readonly ODataMediaType JsonLightStreamingMediaType = new ODataMediaType(
            MimeConstants.MimeApplicationType,
            MimeConstants.MimeJsonSubType,
            new[]{
                new KeyValuePair<string, string>(MimeConstants.MimeMetadataParameterName, MimeConstants.MimeMetadataParameterValueMinimal),
                new KeyValuePair<string, string>(MimeConstants.MimeStreamingParameterName, MimeConstants.MimeParameterValueTrue),
                new KeyValuePair<string, string>(MimeConstants.MimeIeee754CompatibleParameterName, MimeConstants.MimeParameterValueFalse)
            });
        /// <summary>
        /// Gets the name of the property annotation property.
        /// </summary>
        /// <param name="propertyName">The name of the property to annotate.</param>
        /// <param name="annotationName">The name of the annotation.</param>
        /// <returns>The property name for the annotation property.</returns>
        public static string GetPropertyAnnotationName(string propertyName, string annotationName)
        {
            // Produces "<property><separator><annotation>", e.g. "Name@odata.type".
            return propertyName + JsonLightConstants.ODataPropertyAnnotationSeparatorChar + annotationName;
        }
    }
} | mit |
thlorenz/procps | deps/procps/contrib/minimal.c | 19296 | /*
* Copyright 1998,2004 by Albert Cahalan
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* This is a minimal /bin/ps, designed to be smaller than the old ps
* while still supporting some of the more important features of the
* new ps. (for total size, note that this ps does not need libproc)
* It is suitable for Linux-on-a-floppy systems only.
*
* Maintainers: do not compile or install for normal systems.
* Anyone needing this will want to tweak their compiler anyway.
*/
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
#define DEV_ENCODE(M,m) ( \
( (M&0xfff) << 8) | ( (m&0xfff00) << 12) | (m&0xff) \
)
///////////////////////////////////////////////////////
#ifdef __sun__
#include <sys/mkdev.h>
#define _STRUCTURED_PROC 1
#include <sys/procfs.h>
#define NO_TTY_VALUE DEV_ENCODE(-1,-1)
#define HZ 1 // only bother with seconds
#endif
///////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/user.h>
#define NO_TTY_VALUE DEV_ENCODE(-1,-1)
#define HZ 1 // only bother with seconds
#endif
///////////////////////////////////////////////////////
#ifdef __linux__
#include <asm/param.h> /* HZ */
#include <asm/page.h> /* PAGE_SIZE */
#define NO_TTY_VALUE DEV_ENCODE(0,0)
#ifndef HZ
#warning HZ not defined, assuming it is 100
#define HZ 100
#endif
#endif
///////////////////////////////////////////////////////////
#ifndef PAGE_SIZE
#warning PAGE_SIZE not defined, using sysconf() to determine correct value
#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
#endif
static char P_tty_text[16];
static char P_cmd[16];
static char P_state;
static int P_euid;
static int P_pid;
static int P_ppid, P_pgrp, P_session, P_tty_num, P_tpgid;
static unsigned long P_flags, P_min_flt, P_cmin_flt, P_maj_flt, P_cmaj_flt, P_utime, P_stime;
static long P_cutime, P_cstime, P_priority, P_nice, P_timeout, P_alarm;
static unsigned long P_start_time, P_vsize;
static long P_rss;
static unsigned long P_rss_rlim, P_start_code, P_end_code, P_start_stack, P_kstk_esp, P_kstk_eip;
static unsigned P_signal, P_blocked, P_sigignore, P_sigcatch;
static unsigned long P_wchan, P_nswap, P_cnswap;
#if 0
static int screen_cols = 80;
static int w_count;
#endif
static int want_one_pid;
static const char *want_one_command;
static int select_notty;
static int select_all;
static int ps_format;
static int old_h_option;
/* we only pretend to support this */
static int show_args; /* implicit with -f and all BSD options */
static int bsd_c_option; /* this option overrides the above */
static int ps_argc; /* global argc */
static char **ps_argv; /* global argv */
static int thisarg; /* index into ps_argv */
static char *flagptr; /* current location in ps_argv[thisarg] */
/* Print the supported-option summary to stderr and terminate with exit
 * status 1.  Called whenever command line parsing fails.  The options
 * inside the block comment are recognised elsewhere but intentionally
 * undocumented/unsupported here. */
static void usage(void){
  fprintf(stderr,
    "-C select by command name (minimal ps only accepts one)\n"
    "-p select by process ID (minimal ps only accepts one)\n"
    "-e all processes (same as ax)\n"
    "a all processes w/ tty, including other users\n"
    "x processes w/o controlling ttys\n"
    "-f full format\n"
    "-j,j job control format\n"
    "v virtual memory format\n"
    "-l,l long format\n"
    "u user-oriented format\n"
    "-o user-defined format (limited support, only \"ps -o pid=\")\n"
    "h no header\n"
/*
    "-A all processes (same as ax)\n"
    "c true command name\n"
    "-w,w wide output\n"
*/
  );
  exit(1);
}
/*
* Return the next argument, or call the usage function.
* This handles both: -oFOO -o FOO
*/
/* Fetch the value of the current option: either the rest of the same
 * argv word ("-oFOO") or, when that is empty, the following argv
 * entry ("-o FOO").  Calls usage() if no value is available. */
static const char *get_opt_arg(void){
  /* A value glued to the flag itself wins. */
  const char *arg = flagptr + 1;
  if(*arg != '\0') return arg;
  /* Otherwise consume the next argv entry. */
  if(++thisarg >= ps_argc) usage(); /* nothing left */
  arg = ps_argv[thisarg];
  if(!arg || !*arg) usage();
  return arg;
}
/* return the PID, or 0 if nothing good */
/* Record a PID selection from `str` into want_one_pid, or bail out
 * through usage().  Rejected: NULL, trailing junk after the number,
 * values below 1, and a second PID (only one may be selected). */
static void parse_pid(const char *str){
  char *end;
  int value;
  if(!str) usage();
  value = strtol(str, &end, 0);
  if(*end || value < 1 || want_one_pid) usage();
  want_one_pid = value;
}
/***************** parse SysV options, including Unix98 *****************/
/* Consume one "-x..." argument (SysV/Unix98 style), updating the global
 * selection and format state.  Options that take a value (-C, -p, -o)
 * must come last within their argument, hence the early returns. */
static void parse_sysv_option(void){
  do{
    switch(*flagptr){
    /**** selection ****/
    case 'C': /* end */
      if(want_one_command) usage();
      want_one_command = get_opt_arg();
      return; /* can't have any more options */
    case 'p': /* end */
      parse_pid(get_opt_arg());
      return; /* can't have any more options */
    case 'A':
    case 'e':
      select_all++;
      select_notty++;
      /* FALL THROUGH into the no-op 'w' case */
    case 'w': /* here for now, since the real one is not used */
      break;
    /**** output format ****/
    case 'f':
      show_args = 1;
      /* FALL THROUGH */
    case 'j':
    case 'l':
      if(ps_format) usage();
      ps_format = *flagptr;
      break;
    case 'o': /* end */
      /* We only support a limited form: "ps -o pid=" (yes, just "pid=") */
      if(strcmp(get_opt_arg(),"pid=")) usage();
      if(ps_format) usage();
      ps_format = 'o';
      old_h_option++;
      return; /* can't have any more options */
    /**** other stuff ****/
#if 0
    case 'w':
      w_count++;
      break;
#endif
    default:
      usage();
    } /* switch */
  }while(*++flagptr);
}
/************************* parse BSD options **********************/
/* Consume one BSD-style argument (bare letters, no leading dash).
 * Note the deliberate fall-through from 'c' into 'w': the break for
 * 'c' is compiled out, and 'w' is currently a no-op. */
static void parse_bsd_option(void){
  do{
    switch(*flagptr){
    /**** selection ****/
    case 'a':
      select_all++;
      break;
    case 'x':
      select_notty++;
      break;
    case 'p': /* end */
      parse_pid(get_opt_arg());
      return; /* can't have any more options */
    /**** output format ****/
    case 'j':
    case 'l':
    case 'u':
    case 'v':
      if(ps_format) usage();
      ps_format = 0x80 | *flagptr; /* use 0x80 to tell BSD from SysV */
      break;
    /**** other stuff ****/
    case 'c':
      bsd_c_option++;
#if 0
      break;
#endif
      /* FALL THROUGH (the break above is compiled out) */
    case 'w':
#if 0
      w_count++;
#endif
      break;
    case 'h':
      old_h_option++;
      break;
    default:
      usage();
    } /* switch */
  }while(*++flagptr);
}
#if 0
#include <termios.h>
/* not used yet: would widen the output to the real terminal width
 * (TIOCGWINSZ), honour a numeric $COLUMNS override, and expand further
 * for each 'w' option once wide output is supported. */
static void choose_dimensions(void){
  struct winsize ws;
  char *columns;
  /* screen_cols is 80 by default */
  if(ioctl(1, TIOCGWINSZ, &ws) != -1 && ws.ws_col>30) screen_cols = ws.ws_col;
  columns = getenv("COLUMNS");
  if(columns && *columns){
    long t;
    char *endptr;
    t = strtol(columns, &endptr, 0);
    if(!*endptr && (t>30) && (t<(long)999999999)) screen_cols = (int)t;
  }
  if(w_count && (screen_cols<132)) screen_cols=132;
  if(w_count>1) screen_cols=999999999;
}
#endif
/* Walk argv once, dispatching each argument to the SysV ('-'-prefixed),
 * BSD (bare letters) or PID (leading digit) parser, then verify that no
 * selection or one-shot option was given more than once. */
static void arg_parse(int argc, char *argv[]){
  int sel = 0; /* to verify option sanity */
  ps_argc = argc;
  ps_argv = argv;
  thisarg = 0;
  /**** iterate over the args ****/
  while(++thisarg < ps_argc){
    flagptr = ps_argv[thisarg];
    switch(*flagptr){
    case '0' ... '9': /* GCC case-range extension */
      show_args = 1;
      parse_pid(flagptr);
      break;
    case '-':
      flagptr++;
      parse_sysv_option();
      break;
    default:
      show_args = 1;
      parse_bsd_option();
      break;
    }
  }
  /**** sanity check and clean-up ****/
  if(want_one_pid) sel++;
  if(want_one_command) sel++;
  if(select_notty || select_all) sel++;
  if(sel>1 || select_notty>1 || select_all>1 || bsd_c_option>1 || old_h_option>1) usage();
  if(bsd_c_option) show_args = 0;
}
#ifdef __sun__
/* return 1 if it works, or 0 for failure */
static int stat2proc(int pid) {
struct psinfo p; // /proc/*/psinfo, struct psinfo, psinfo_t
char buf[32];
int num;
int fd;
int tty_maj, tty_min;
snprintf(buf, sizeof buf, "/proc/%d/psinfo", pid);
if ( (fd = open(buf, O_RDONLY, 0) ) == -1 ) return 0;
num = read(fd, &p, sizeof p);
close(fd);
if(num != sizeof p) return 0;
num = PRFNSZ;
if (num >= sizeof P_cmd) num = sizeof P_cmd - 1;
memcpy(P_cmd, p.pr_fname, num); // p.pr_fname or p.pr_lwp.pr_name
P_cmd[num] = '\0';
P_pid = p.pr_pid;
P_ppid = p.pr_ppid;
P_pgrp = p.pr_pgid;
P_session = p.pr_sid;
P_euid = p.pr_euid;
P_rss = p.pr_rssize;
P_vsize = p.pr_size;
P_start_time = p.pr_start.tv_sec;
P_wchan = p.pr_lwp.pr_wchan;
P_state = p.pr_lwp.pr_sname;
P_nice = p.pr_lwp.pr_nice;
P_priority = p.pr_lwp.pr_pri; // or pr_oldpri
// P_ruid = p.pr_uid;
// P_rgid = p.pr_gid;
// P_egid = p.pr_egid;
#if 0
// don't support these
P_tpgid; P_flags,
P_min_flt, P_cmin_flt, P_maj_flt, P_cmaj_flt, P_utime, P_stime;
P_cutime, P_cstime, P_timeout, P_alarm;
P_rss_rlim, P_start_code, P_end_code, P_start_stack, P_kstk_esp, P_kstk_eip;
P_signal, P_blocked, P_sigignore, P_sigcatch;
P_nswap, P_cnswap;
#endif
// we like it Linux-encoded :-)
tty_maj = major(p.pr_ttydev);
tty_min = minor(p.pr_ttydev);
P_tty_num = DEV_ENCODE(tty_maj,tty_min);
snprintf(P_tty_text, sizeof P_tty_text, "%3d,%-3d", tty_maj, tty_min);
#if 1
if (tty_maj == 24) snprintf(P_tty_text, sizeof P_tty_text, "pts/%-3u", tty_min);
if (P_tty_num == NO_TTY_VALUE) memcpy(P_tty_text, " ? ", 8);
if (P_tty_num == DEV_ENCODE(0,0)) memcpy(P_tty_text, "console", 8);
#endif
if(P_pid != pid) return 0;
return 1;
}
#endif
#ifdef __FreeBSD__
/* return 1 if it works, or 0 for failure */
static int stat2proc(int pid) {
char buf[400];
int num;
int fd;
char* tmp;
int tty_maj, tty_min;
snprintf(buf, 32, "/proc/%d/status", pid);
if ( (fd = open(buf, O_RDONLY, 0) ) == -1 ) return 0;
num = read(fd, buf, sizeof buf - 1);
close(fd);
if(num<43) return 0;
buf[num] = '\0';
P_state = '-';
// FreeBSD /proc/*/status is seriously fucked. Unlike the Linux
// files, we can't use strrchr to find the end of a command name.
// Spaces in command names do not get escaped. To avoid spoofing,
// one may skip 20 characters and then look _forward_ only to
// find a pattern of entries that are {with,with,without} a comma.
// The entry without a comma is wchan. Then count backwards!
//
// Don't bother for now. FreeBSD isn't worth the trouble.
tmp = strchr(buf,' ');
num = tmp - buf;
if (num >= sizeof P_cmd) num = sizeof P_cmd - 1;
memcpy(P_cmd,buf,num);
P_cmd[num] = '\0';
num = sscanf(tmp+1,
"%d %d %d %d "
"%d,%d "
"%*s "
"%ld,%*d "
"%ld,%*d "
"%ld,%*d "
"%*s "
"%d %d ",
&P_pid, &P_ppid, &P_pgrp, &P_session,
&tty_maj, &tty_min,
/* SKIP funny flags thing */
&P_start_time, /* SKIP microseconds */
&P_utime, /* SKIP microseconds */
&P_stime, /* SKIP microseconds */
/* SKIP &P_wchan, for now -- it is a string */
&P_euid, &P_euid // don't know which is which
);
/* fprintf(stderr, "stat2proc converted %d fields.\n",num); */
snprintf(P_tty_text, sizeof P_tty_text, "%3d,%-3d", tty_maj, tty_min);
P_tty_num = DEV_ENCODE(tty_maj,tty_min);
// tty decode is 224 to 256 bytes on i386
#if 1
tmp = NULL;
if (tty_maj == 5) tmp = " ttyp%c ";
if (tty_maj == 12) tmp = " ttyv%c ";
if (tty_maj == 28) tmp = " ttyd%c ";
if (P_tty_num == NO_TTY_VALUE) tmp = " ? ";
if (P_tty_num == DEV_ENCODE(0,0)) tmp = "console";
if (P_tty_num == DEV_ENCODE(12,255)) tmp = "consolectl";
if (tmp) {
snprintf(
P_tty_text,
sizeof P_tty_text,
tmp,
"0123456789abcdefghijklmnopqrstuvwxyz"[tty_min&31]
);
}
#endif
if(num < 9) return 0;
if(P_pid != pid) return 0;
return 1;
}
#endif
#ifdef __linux__
/* Linux: parse /proc/<pid>/stat into the P_* globals.  The EUID is not
 * part of the stat file, so it is taken from fstat() on the proc file
 * itself.  Returns 1 on success, 0 on failure. */
static int stat2proc(int pid) {
  char buf[800]; /* about 40 fields, 64-bit decimal is about 20 chars */
  int num;
  int fd;
  char* tmp;
  struct stat sb; /* stat() used to get EUID */
  snprintf(buf, 32, "/proc/%d/stat", pid);
  if ( (fd = open(buf, O_RDONLY, 0) ) == -1 ) return 0;
  num = read(fd, buf, sizeof buf - 1);
  fstat(fd, &sb);
  P_euid = sb.st_uid;
  close(fd);
  if(num<80) return 0;
  buf[num] = '\0';
  /* Searching for the *last* ')' is safe even if the command name
   * itself contains parentheses or spaces. */
  tmp = strrchr(buf, ')'); /* split into "PID (cmd" and "<rest>" */
  *tmp = '\0'; /* replace trailing ')' with NUL */
  /* parse these two strings separately, skipping the leading "(". */
  memset(P_cmd, 0, sizeof P_cmd); /* clear */
  sscanf(buf, "%d (%15c", &P_pid, P_cmd); /* comm[16] in kernel */
  num = sscanf(tmp + 2, /* skip space after ')' too */
    "%c "
    "%d %d %d %d %d "
    "%lu %lu %lu %lu %lu %lu %lu "
    "%ld %ld %ld %ld %ld %ld "
    "%lu %lu "
    "%ld "
    "%lu %lu %lu %lu %lu %lu "
    "%u %u %u %u " /* no use for RT signals */
    "%lu %lu %lu",
    &P_state,
    &P_ppid, &P_pgrp, &P_session, &P_tty_num, &P_tpgid,
    &P_flags, &P_min_flt, &P_cmin_flt, &P_maj_flt, &P_cmaj_flt, &P_utime, &P_stime,
    &P_cutime, &P_cstime, &P_priority, &P_nice, &P_timeout, &P_alarm,
    &P_start_time, &P_vsize,
    &P_rss,
    &P_rss_rlim, &P_start_code, &P_end_code, &P_start_stack, &P_kstk_esp, &P_kstk_eip,
    &P_signal, &P_blocked, &P_sigignore, &P_sigcatch,
    &P_wchan, &P_nswap, &P_cnswap
  );
  /* fprintf(stderr, "stat2proc converted %d fields.\n",num); */
  P_vsize /= 1024; /* bytes -> KiB (vsize is in bytes, see proc(5)) */
  P_rss *= (PAGE_SIZE/1024); /* pages -> KiB */
  memcpy(P_tty_text, "   ?   ", 8);
  if (P_tty_num != NO_TTY_VALUE) {
    /* undo the kernel's dev_t encoding (see DEV_ENCODE above) */
    int tty_maj = (P_tty_num>>8)&0xfff;
    int tty_min = (P_tty_num&0xff) | ((P_tty_num>>12)&0xfff00);
    snprintf(P_tty_text, sizeof P_tty_text, "%3d,%-3d", tty_maj, tty_min);
  }
  if(num < 30) return 0;
  if(P_pid != pid) return 0;
  return 1;
}
#endif
/* Render a tick count as "[days-]HH:MM:SS" in a static buffer.
 * The caller must consume the string before the next call. */
static const char *do_time(unsigned long t){
static char buf[32];
unsigned long secs = t / HZ; /* ticks -> seconds */
int ss = (int)(secs % 60);
int mm = (int)(secs / 60 % 60);
int hh = (int)(secs / 3600 % 24);
unsigned long days = secs / 86400;
int off = 0;
/* prepend the day count only when non-zero, matching classic ps */
if(days) off = snprintf(buf, sizeof buf, "%d-", (int)days);
snprintf(buf + off, sizeof(buf) - off, "%02d:%02d:%02d", hh, mm, ss);
return buf;
}
/* Return the printable USER column for the current process (global P_euid):
 * the login name padded/truncated to 8 chars, or the numeric uid when the
 * passwd lookup fails. Caches the last lookup so runs of processes owned
 * by the same user hit the passwd database only once. */
static const char *do_user(void){
static char buf[32];
static struct passwd *p;
static int lastuid = -1;
if(P_euid != lastuid){
lastuid = P_euid; /* bug fix: was never assigned, so the cache never took effect */
p = getpwuid(P_euid);
if(p) snprintf(buf, sizeof buf, "%-8.8s", p->pw_name);
else snprintf(buf, sizeof buf, "%5d ", P_euid);
}
return buf;
}
/* %CPU column stub: CPU percentage accounting is not implemented, so a
 * dash placeholder is emitted. longform selects the wide (BSD-style)
 * column; otherwise the trailing padding is trimmed. */
static const char *do_cpu(int longform){
static char buf[8];
snprintf(buf, sizeof buf, "%s", " - ");
if(!longform)
buf[2] = '\0'; /* narrow column: cut the trailing space */
return buf;
}
/* %MEM column stub: memory percentage is not computed, so a dash
 * placeholder is emitted; non-longform output drops the padding. */
static const char *do_mem(int longform){
static char buf[8];
memcpy(buf, " - ", sizeof " - "); /* literal copy includes the NUL */
if(longform == 0) buf[2] = '\0';
return buf;
}
/* STIME column stub: process start time is not computed here, so the
 * column is always a dash placeholder. */
static const char *do_stime(void){
static char buf[32];
memcpy(buf, " - ", sizeof " - "); /* literal copy includes the NUL */
return buf;
}
/* Print one output line for the process currently held in the global
 * P_* fields, in the layout selected by ps_format. Formats with the
 * 0x80 bit set are the BSD-style variants; each printf format must
 * match its corresponding header string in main(). */
static void print_proc(void){
switch(ps_format){
case 0: /* default: PID TTY TIME CMD */
printf("%5d %s %s", P_pid, P_tty_text, do_time(P_utime+P_stime));
break;
case 'o': /* -o pid: pid only */
printf("%d\n", P_pid);
return; /* don't want the command */
case 'l': /* -l: long format */
printf(
"0 %c %5d %5d %5d %s %3d %3d - "
"%5ld %06x %s %s",
P_state, P_euid, P_pid, P_ppid, do_cpu(0),
(int)P_priority, (int)P_nice, P_vsize/(PAGE_SIZE/1024),
(unsigned)(P_wchan&0xffffff), P_tty_text, do_time(P_utime+P_stime)
);
break;
case 'f': /* -f: full format */
printf(
"%8s %5d %5d %s %s %s %s",
do_user(), P_pid, P_ppid, do_cpu(0), do_stime(), P_tty_text, do_time(P_utime+P_stime)
);
break;
case 'j': /* -j: jobs format */
printf(
"%5d %5d %5d %s %s",
P_pid, P_pgrp, P_session, P_tty_text, do_time(P_utime+P_stime)
);
break;
case 'u'|0x80: /* u: BSD user-oriented format */
printf(
"%8s %5d %s %s %5ld %4ld %s %c %s %s",
do_user(), P_pid, do_cpu(1), do_mem(1), P_vsize, P_rss, P_tty_text, P_state,
do_stime(), do_time(P_utime+P_stime)
);
break;
case 'v'|0x80: /* v: BSD virtual-memory format */
printf(
"%5d %s %c %s %6d - - %5d %s",
P_pid, P_tty_text, P_state, do_time(P_utime+P_stime), (int)P_maj_flt,
(int)P_rss, do_mem(1)
);
break;
case 'j'|0x80: /* j: BSD job-control format */
printf(
"%5d %5d %5d %5d %s %5d %c %5d %s",
P_ppid, P_pid, P_pgrp, P_session, P_tty_text, P_tpgid, P_state, P_euid, do_time(P_utime+P_stime)
);
break;
case 'l'|0x80: /* l: BSD long format */
printf(
"0 %5d %5d %5d %3d %3d "
"%5ld %4ld %06x %c %s %s",
P_euid, P_pid, P_ppid, (int)P_priority, (int)P_nice,
P_vsize, P_rss, (unsigned)(P_wchan&0xffffff), P_state, P_tty_text, do_time(P_utime+P_stime)
);
break;
default: /* unknown format: print nothing but the command below */
;
}
/* every format except 'o' ends with the command name */
if(show_args) printf(" [%s]\n", P_cmd);
else printf(" %s\n", P_cmd);
}
/* ps entry point: parse the command line, print the header matching the
 * selected format (unless -h suppressed it), then print either the one
 * requested pid or every qualifying entry in /proc.
 * Exit status: 0 if at least one process was printed, 1 otherwise. */
int main(int argc, char *argv[]){
arg_parse(argc, argv);
#if 0
choose_dimensions();
#endif
if(!old_h_option){
/* header strings must stay aligned with print_proc()'s formats */
const char *head;
switch(ps_format){
default: /* can't happen */
case 0: head = " PID TTY TIME CMD"; break;
case 'l': head = "F S UID PID PPID C PRI NI ADDR SZ WCHAN TTY TIME CMD"; break;
case 'f': head = "USER PID PPID C STIME TTY TIME CMD"; break;
case 'j': head = " PID PGID SID TTY TIME CMD"; break;
case 'u'|0x80: head = "USER PID %CPU %MEM VSZ RSS TTY S START TIME COMMAND"; break;
case 'v'|0x80: head = " PID TTY S TIME MAJFL TRS DRS RSS %MEM COMMAND"; break;
case 'j'|0x80: head = " PPID PID PGID SID TTY TPGID S UID TIME COMMAND"; break;
case 'l'|0x80: head = "F UID PID PPID PRI NI VSZ RSS WCHAN S TTY TIME COMMAND"; break;
}
printf("%s\n",head);
}
if(want_one_pid){
/* single-process request: fail if it cannot be read */
if(stat2proc(want_one_pid)) print_proc();
else exit(1);
}else{
struct dirent *ent; /* dirent handle */
DIR *dir;
int ouruid;
int found_a_proc;
found_a_proc = 0;
ouruid = getuid();
dir = opendir("/proc");
/* bug fix: opendir() result was unchecked; readdir(NULL) would crash
 * when /proc is missing or unreadable */
if(!dir) exit(1);
while(( ent = readdir(dir) )){
/* only all-numeric /proc entries are processes */
if(*ent->d_name<'0' || *ent->d_name>'9') continue;
if(!stat2proc(atoi(ent->d_name))) continue;
if(want_one_command){
/* match by command name only */
if(strcmp(want_one_command,P_cmd)) continue;
}else{
/* default selection: our own processes that have a tty */
if(!select_notty && P_tty_num==NO_TTY_VALUE) continue;
if(!select_all && P_euid!=ouruid) continue;
}
found_a_proc++;
print_proc();
}
closedir(dir);
exit(!found_a_proc);
}
return 0;
}
| mit |
colinrubbert/codetriage | test/integration/maintaining_repo_subscriptions_test.rb | 1192 | require "test_helper"
# Integration tests for the repo-subscription ("triage") flow:
# subscribing to a repo, requesting an issue email on demand, listing a
# repo's subscribers, and filtering the home page by favorite languages.
class MaintainingRepoSubscriptionsTest < ActionDispatch::IntegrationTest
fixtures :repos
# Sign in via the stubbed GitHub OAuth flow and subscribe to the
# bemurphy/issue_triage_sandbox repo from the home page.
def triage_the_sandbox
login_via_github
visit "/"
click_link "issue_triage_sandbox"
click_button "I Want to Triage: bemurphy/issue_triage_sandbox"
end
# Subscribing sends exactly one assignment email and records the
# IssueAssignment as delivered.
test "subscribing to a repo" do
assert_difference 'ActionMailer::Base.deliveries.size', +1 do
triage_the_sandbox
assert page.has_content?("issue_triage_sandbox")
end
assert_equal IssueAssignment.last.delivered, true
end
# The "Send new issue!" link delivers one additional email on demand.
test "send an issue! button" do
triage_the_sandbox
assert_difference 'ActionMailer::Base.deliveries.size', +1 do
click_link "issue_triage_sandbox"
click_link "Send new issue!"
assert page.has_content?("You will receive an email with your new issue shortly")
end
assert_equal IssueAssignment.last.delivered, true
end
# The subscribers page lists the handle of the user who subscribed
# (@mockstar comes from the OAuth login stub).
test "listing subscribers" do
triage_the_sandbox
click_link 'issue_triage_sandbox'
click_link 'Subscribers'
assert page.has_content?("@mockstar")
end
# The home page hides repos whose language is not among the user's
# favorite languages.
test "list only favorite languages" do
login_via_github
visit "/"
assert !page.has_content?("javascript")
end
end
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.