label
int64
0
1
text
stringlengths
0
20.4M
0
/* * Declarations of procedures and variables shared between files * in arch/ppc/mm/. * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/mm.h> #include <asm/tlbflush.h> #include <asm/mmu.h> #ifdef CONFIG_PPC_MMU_NOHASH /* * On 40x and 8xx, we directly inline tlbia and tlbivax */ #if defined(CONFIG_40x) || defined(CONFIG_8xx) static inline void _tlbil_all(void) { asm volatile ("sync; tlbia; isync" : : : "memory"); } static inline void _tlbil_pid(unsigned int pid) { asm volatile ("sync; tlbia; isync" : : : "memory"); } #define _tlbil_pid_noind(pid) _tlbil_pid(pid) #else /* CONFIG_40x || CONFIG_8xx */ extern void _tlbil_all(void); extern void _tlbil_pid(unsigned int pid); #ifdef CONFIG_PPC_BOOK3E extern void _tlbil_pid_noind(unsigned int pid); #else #define _tlbil_pid_noind(pid) _tlbil_pid(pid) #endif #endif /* !(CONFIG_40x || CONFIG_8xx) */ /* * On 8xx, we directly inline tlbie, on others, it's extern */ #ifdef CONFIG_8xx static inline void _tlbil_va(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind) { asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); } #elif defined(CONFIG_PPC_BOOK3E) extern void _tlbil_va(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind); #else extern void __tlbil_va(unsigned long address, unsigned int pid); static inline void _tlbil_va(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind) { __tlbil_va(address, pid); } #endif /* CONFIG_8xx */ #if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x) extern void _tlbivax_bcast(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind); #else static inline void _tlbivax_bcast(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind) { BUG(); } #endif #else /* CONFIG_PPC_MMU_NOHASH */ extern void hash_preload(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap); extern void _tlbie(unsigned long address); extern void _tlbia(void); #endif /* CONFIG_PPC_MMU_NOHASH */ #ifdef CONFIG_PPC32 extern void mapin_ram(void); extern int map_page(unsigned long va, phys_addr_t pa, int flags); extern void setbat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot); extern int __map_without_bats; extern int __allow_ioremap_reserved; extern unsigned int rtas_data, rtas_size; struct hash_pte; extern struct hash_pte *Hash, *Hash_end; extern unsigned long Hash_size, Hash_mask; #endif /* CONFIG_PPC32 */ extern unsigned long ioremap_bot; extern unsigned long __max_low_memory; extern phys_addr_t __initial_memory_limit_addr; extern phys_addr_t total_memory; extern phys_addr_t total_lowmem; extern phys_addr_t memstart_addr; extern phys_addr_t lowmem_end_addr; #ifdef CONFIG_WII extern unsigned long wii_hole_start; extern unsigned long wii_hole_size; extern unsigned long wii_mmu_mapin_mem2(unsigned long top); extern void wii_memory_fixups(void); #endif /* ...and now those things that may be slightly different between processor * architectures. -- Dan */ #ifdef CONFIG_PPC32 extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(unsigned long top); #endif #ifdef CONFIG_PPC_FSL_BOOK3E extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun); extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, phys_addr_t phys); #ifdef CONFIG_PPC32 extern void adjust_total_lowmem(void); extern int switch_to_as1(void); extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu); #endif extern void loadcam_entry(unsigned int index); extern void loadcam_multi(int first_idx, int num, int tmp_idx); struct tlbcam { u32 MAS0; u32 MAS1; unsigned long MAS2; u32 MAS3; u32 MAS7; }; #endif #if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx) /* 6xx have BATS */ /* FSL_BOOKE have TLBCAM */ /* 8xx have LTLB */ phys_addr_t v_block_mapped(unsigned long va); unsigned long p_block_mapped(phys_addr_t pa); #else static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; } static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } #endif
1
static int recv_msg(struct kiocb *iocb, struct socket *sock,struct msghdr *m, size_t buf_len, int flags){struct sock *sk = sock->sk;struct tipc_port *tport = tipc_sk_port(sk);struct sk_buff *buf;struct tipc_msg *msg;long timeout;unsigned int sz;u32 err;int res;/* Catch invalid receive requests */if (unlikely(!buf_len))return -EINVAL;lock_sock(sk);if (unlikely(sock->state == SS_UNCONNECTED)) {res = -ENOTCONN;goto exit;} /* will be updated in set_orig_addr() if needed * m->msg_namelen = 0;timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);restart:/* Look for a message in receive queue; wait if necessary */while (skb_queue_empty(&sk->sk_receive_queue)) {if (sock->state == SS_DISCONNECTING) {res = -ENOTCONN;goto exit;}if (timeout <= 0L) {res = timeout ? timeout : -EWOULDBLOCK;goto exit;}release_sock(sk);timeout = wait_event_interruptible_timeout(*sk_sleep(sk),tipc_rx_ready(sock),timeout);lock_sock(sk);}/* Look at first message in receive queue */buf = skb_peek(&sk->sk_receive_queue);msg = buf_msg(buf);sz = msg_data_sz(msg);err = msg_errcode(msg);/* Discard an empty non-errored message & try again */if ((!sz) && (!err)) {advance_rx_queue(sk);goto restart;}/* Capture sender's address (optional) */set_orig_addr(m, msg);float variable_6333797628403307730 = 1367952865780736220; /* Capture ancillary data (optional) */res = anc_data_recv(m, msg, tport);if (res)goto exit;/* Capture message data (if valid) & compute return value (always) */if (!err) {if (unlikely(buf_len < sz)) {sz = buf_len;m->msg_flags |= MSG_TRUNC;}res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),m->msg_iov, sz);if (res)goto exit;res = sz;} else {if ((sock->state == SS_READY) ||((err == TIPC_CONN_SHUTDOWN) || m->msg_control))res = 0;elseres = -ECONNRESET;}/* Consume received message (optional) */if (likely(!(flags & MSG_PEEK))) {if ((sock->state != SS_READY) &&(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))tipc_acknowledge(tport->ref, tport->conn_unacked);advance_rx_queue(sk);}exit:release_sock(sk);return res;}
0
/* * Copyright (C) 2013 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "third_party/blink/renderer/modules/webmidi/navigator_web_midi.h" #include "third_party/blink/public/mojom/feature_policy/feature_policy.mojom-blink.h" #include "third_party/blink/renderer/bindings/core/v8/script_promise.h" #include "third_party/blink/renderer/bindings/core/v8/script_promise_resolver.h" #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/dom/dom_exception.h" #include "third_party/blink/renderer/core/execution_context/execution_context.h" #include "third_party/blink/renderer/core/frame/deprecation.h" #include "third_party/blink/renderer/core/frame/local_frame.h" #include "third_party/blink/renderer/core/frame/navigator.h" #include "third_party/blink/renderer/core/frame/use_counter.h" #include "third_party/blink/renderer/core/inspector/console_message.h" #include "third_party/blink/renderer/modules/webmidi/midi_access_initializer.h" #include "third_party/blink/renderer/modules/webmidi/midi_options.h" namespace blink { namespace { const char kFeaturePolicyErrorMessage[] = "Midi has been disabled in this document by Feature Policy."; const char kFeaturePolicyConsoleWarning[] = "Midi access has been blocked because of a Feature Policy applied to the " "current document. See https://goo.gl/EuHzyv for more details."; } // namespace NavigatorWebMIDI::NavigatorWebMIDI(Navigator& navigator) : Supplement<Navigator>(navigator) {} void NavigatorWebMIDI::Trace(blink::Visitor* visitor) { Supplement<Navigator>::Trace(visitor); } const char NavigatorWebMIDI::kSupplementName[] = "NavigatorWebMIDI"; NavigatorWebMIDI& NavigatorWebMIDI::From(Navigator& navigator) { NavigatorWebMIDI* supplement = Supplement<Navigator>::From<NavigatorWebMIDI>(navigator); if (!supplement) { supplement = new NavigatorWebMIDI(navigator); ProvideTo(navigator, supplement); } return *supplement; } ScriptPromise NavigatorWebMIDI::requestMIDIAccess(ScriptState* script_state, Navigator& navigator, const MIDIOptions& options) { return NavigatorWebMIDI::From(navigator).requestMIDIAccess(script_state, options); } ScriptPromise NavigatorWebMIDI::requestMIDIAccess(ScriptState* script_state, const MIDIOptions& options) { if (!script_state->ContextIsValid()) { return ScriptPromise::RejectWithDOMException( script_state, DOMException::Create(kAbortError, "The frame is not working.")); } Document& document = *ToDocument(ExecutionContext::From(script_state)); if (options.hasSysex() && options.sysex()) { UseCounter::Count( document, WebFeature::kRequestMIDIAccessWithSysExOption_ObscuredByFootprinting); UseCounter::CountCrossOriginIframe( document, WebFeature:: kRequestMIDIAccessIframeWithSysExOption_ObscuredByFootprinting); } UseCounter::CountCrossOriginIframe( document, WebFeature::kRequestMIDIAccessIframe_ObscuredByFootprinting); if (RuntimeEnabledFeatures::FeaturePolicyForPermissionsEnabled()) { if (!document.GetFrame()->IsFeatureEnabled( mojom::FeaturePolicyFeature::kMidiFeature)) { UseCounter::Count(document, WebFeature::kMidiDisabledByFeaturePolicy); document.AddConsoleMessage( ConsoleMessage::Create(kJSMessageSource, kWarningMessageLevel, kFeaturePolicyConsoleWarning)); return ScriptPromise::RejectWithDOMException( script_state, DOMException::Create(kSecurityError, kFeaturePolicyErrorMessage)); } } else { Deprecation::CountDeprecationFeaturePolicy( document, mojom::FeaturePolicyFeature::kMidiFeature); } return MIDIAccessInitializer::Start(script_state, options); } } // namespace blink
1
void ImageTokenizer::stopParsing(){Tokenizer::stopParsing();std::string variable_4110424789041653686 ("YRunsfN"); m_imageElement->cachedImage()->error();}
1
void function_1261198883290584345(const char* variable_5491733087508620075, const char* variable_6844575201275768923, const char* variable_3523937603981768303) { registerMockedURLLoad(KURL(variable_6457122234708976279, variable_5491733087508620075), WebString::fromUTF8(variable_6844575201275768923), variable_4129653033404034738, WebString::fromUTF8(variable_3523937603981768303));float variable_3337006614711314449 = -2234720733050595146 - (2057320416915326076);int variable_5286558595466988840 = 675174382563792179; }
0
/* * Copyright (C) 1999 Lars Knoll (knoll@kde.org) * (C) 1999 Antti Koivisto (koivisto@kde.org) * Copyright (C) 2010 Apple Inc. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * */ #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_HTML_HTML_OLIST_ELEMENT_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_HTML_HTML_OLIST_ELEMENT_H_ #include "third_party/blink/renderer/core/html/html_element.h" namespace blink { class HTMLOListElement final : public HTMLElement { DEFINE_WRAPPERTYPEINFO(); public: DECLARE_NODE_FACTORY(HTMLOListElement); int StartConsideringItemCount() const { return has_explicit_start_ ? start_ : (is_reversed_ ? ItemCount() : 1); } int start() const { return has_explicit_start_ ? start_ : 1; } void setStart(int); bool IsReversed() const { return is_reversed_; } void ItemCountChanged() { should_recalculate_item_count_ = true; } private: explicit HTMLOListElement(Document&); void UpdateItemValues(); unsigned ItemCount() const { if (should_recalculate_item_count_) const_cast<HTMLOListElement*>(this)->RecalculateItemCount(); return item_count_; } void RecalculateItemCount(); void ParseAttribute(const AttributeModificationParams&) override; bool IsPresentationAttribute(const QualifiedName&) const override; void CollectStyleForPresentationAttribute( const QualifiedName&, const AtomicString&, MutableCSSPropertyValueSet*) override; int start_; unsigned item_count_; bool has_explicit_start_ : 1; bool is_reversed_ : 1; bool should_recalculate_item_count_ : 1; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_HTML_HTML_OLIST_ELEMENT_H_
1
void UpdateUI(const char* current_global_engine_id) {DCHECK(current_global_engine_id);const IBusEngineInfo* engine_info = NULL;for (size_t i = 0; i < arraysize(kIBusEngines); ++i) {if (kIBusEngines[i].name == std::string(current_global_engine_id)) {engine_info = &kIBusEngines[i];break;}}double variable_4376714537141168593 = 3211821422555628620; double variable_2481750824899954505 = (3549079284811748853) * variable_4376714537141168593; if (!engine_info) {LOG(ERROR) << current_global_engine_id<< " is not found in the input method white-list.";return;}InputMethodDescriptor current_input_method =CreateInputMethodDescriptor(engine_info->name,engine_info->longname,engine_info->layout,engine_info->language); DLOG(INFO) << "Updating the UI. ID:" << current_input_method.id << ", keyboard_layout:" << current_input_method.keyboard_layout;// Notify the change to update UI. current_input_method_changed_(language_library_, current_input_method);}
1
static void php_zip_get_from(INTERNAL_FUNCTION_PARAMETERS, int type) /* {{{ */{struct zip *intern;zval *self = getThis();struct zip_stat sb;struct zip_file *zf;zend_long index = -1;zend_long flags = 0;zend_long len = 0;zend_string *filename;zend_string *buffer;int n = 0;if (!self) {RETURN_FALSE;float variable_88994832613388240 = 1011101102094719940; float variable_63510166714785404 = (8623869698578025548) * variable_88994832613388240;long int variable_615706444541051861 = -6085004813511199043 - (5502054185924314679);std::string variable_7833569448838701362 = "zSl"; }ZIP_FROM_OBJECT(intern, self);if (type == 1) {if (zend_parse_parameters(ZEND_NUM_ARGS(), "P|ll", &filename, &len, &flags) == FAILURE) {return;}PHP_ZIP_STAT_PATH(intern, ZSTR_VAL(filename), ZSTR_LEN(filename), flags, sb);} else {if (zend_parse_parameters(ZEND_NUM_ARGS(), "l|ll", &index, &len, &flags) == FAILURE) {return;}PHP_ZIP_STAT_INDEX(intern, index, 0, sb);}if (sb.size < 1) {RETURN_EMPTY_STRING();}if (len < 1) {len = sb.size;}if (index >= 0) {zf = zip_fopen_index(intern, index, flags);} else {zf = zip_fopen(intern, ZSTR_VAL(filename), flags);}if (zf == NULL) {RETURN_FALSE;} buffer = zend_string_alloc(len, 0);n = zip_fread(zf, ZSTR_VAL(buffer), ZSTR_LEN(buffer));if (n < 1) {zend_string_free(buffer);RETURN_EMPTY_STRING();}zip_fclose(zf);ZSTR_VAL(buffer)[n] = '\0';ZSTR_LEN(buffer) = n;RETURN_NEW_STR(buffer);}/* }}} */
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef TOOLS_GN_EXAMPLE_HELLO_SHARED_H_ #define TOOLS_GN_EXAMPLE_HELLO_SHARED_H_ #if defined(WIN32) #if defined(HELLO_SHARED_IMPLEMENTATION) #define HELLO_EXPORT __declspec(dllexport) #define HELLO_EXPORT_PRIVATE __declspec(dllexport) #else #define HELLO_EXPORT __declspec(dllimport) #define HELLO_EXPORT_PRIVATE __declspec(dllimport) #endif // defined(HELLO_SHARED_IMPLEMENTATION) #else #if defined(HELLO_SHARED_IMPLEMENTATION) #define HELLO_EXPORT __attribute__((visibility("default"))) #define HELLO_EXPORT_PRIVATE __attribute__((visibility("default"))) #else #define HELLO_EXPORT #define HELLO_EXPORT_PRIVATE #endif // defined(HELLO_SHARED_IMPLEMENTATION) #endif HELLO_EXPORT const char* GetSharedText(); #endif // TOOLS_GN_EXAMPLE_HELLO_SHARED_H_
0
/* * c 2001 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _ASM_POWERPC_PPC_PCI_H #define _ASM_POWERPC_PPC_PCI_H #ifdef __KERNEL__ #ifdef CONFIG_PCI #include <linux/pci.h> #include <asm/pci-bridge.h> extern unsigned long isa_io_base; extern void pci_setup_phb_io(struct pci_controller *hose, int primary); extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary); extern struct list_head hose_list; extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */ /** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */ #define BUID_HI(buid) upper_32_bits(buid) #define BUID_LO(buid) lower_32_bits(buid) /* PCI device_node operations */ struct device_node; struct pci_dn; void *pci_traverse_device_nodes(struct device_node *start, void *(*fn)(struct device_node *, void *), void *data); void *traverse_pci_dn(struct pci_dn *root, void *(*fn)(struct pci_dn *, void *), void *data); extern void pci_devs_phb_init_dynamic(struct pci_controller *phb); /* From rtas_pci.h */ extern void init_pci_config_tokens (void); extern unsigned long get_phb_buid (struct device_node *); extern int rtas_setup_phb(struct pci_controller *phb); #ifdef CONFIG_EEH void eeh_addr_cache_insert_dev(struct pci_dev *dev); void eeh_addr_cache_rmv_dev(struct pci_dev *dev); struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr); void eeh_slot_error_detail(struct eeh_pe *pe, int severity); int eeh_pci_enable(struct eeh_pe *pe, int function); int eeh_pe_reset_full(struct eeh_pe *pe); void eeh_save_bars(struct eeh_dev *edev); int rtas_write_config(struct pci_dn *, int where, int size, u32 val); int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); void eeh_pe_state_mark(struct eeh_pe *pe, int state); void eeh_pe_state_clear(struct eeh_pe *pe, int state); void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state); void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode); void eeh_sysfs_add_device(struct pci_dev *pdev); void eeh_sysfs_remove_device(struct pci_dev *pdev); static inline const char *eeh_pci_name(struct pci_dev *pdev) { return pdev ? pci_name(pdev) : "<null>"; } static inline const char *eeh_driver_name(struct pci_dev *pdev) { return (pdev && pdev->driver) ? pdev->driver->name : "<null>"; } #endif /* CONFIG_EEH */ #else /* CONFIG_PCI */ static inline void init_pci_config_tokens(void) { } #endif /* !CONFIG_PCI */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PPC_PCI_H */
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/ssl/ssl_config_service.h" #include <vector> #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" namespace net { namespace { class MockSSLConfigService : public SSLConfigService { public: explicit MockSSLConfigService(const SSLConfig& config) : config_(config) {} // SSLConfigService implementation void GetSSLConfig(SSLConfig* config) override { *config = config_; } // Sets the SSLConfig to be returned by GetSSLConfig and processes any // updates. void SetSSLConfig(const SSLConfig& config) { SSLConfig old_config = config_; config_ = config; ProcessConfigUpdate(old_config, config_); } private: ~MockSSLConfigService() override = default; SSLConfig config_; }; class MockSSLConfigServiceObserver : public SSLConfigService::Observer { public: MockSSLConfigServiceObserver() = default; virtual ~MockSSLConfigServiceObserver() = default; MOCK_METHOD0(OnSSLConfigChanged, void()); }; } // namespace TEST(SSLConfigServiceTest, NoChangesWontNotifyObservers) { SSLConfig initial_config; initial_config.rev_checking_enabled = true; initial_config.false_start_enabled = false; initial_config.version_min = SSL_PROTOCOL_VERSION_TLS1; initial_config.version_max = SSL_PROTOCOL_VERSION_TLS1_2; scoped_refptr<MockSSLConfigService> mock_service( new MockSSLConfigService(initial_config)); MockSSLConfigServiceObserver observer; mock_service->AddObserver(&observer); EXPECT_CALL(observer, OnSSLConfigChanged()).Times(0); mock_service->SetSSLConfig(initial_config); mock_service->RemoveObserver(&observer); } TEST(SSLConfigServiceTest, ConfigUpdatesNotifyObservers) { SSLConfig initial_config; initial_config.rev_checking_enabled = true; initial_config.rev_checking_required_local_anchors = false; initial_config.sha1_local_anchors_enabled = true; initial_config.false_start_enabled = false; initial_config.require_ecdhe = false; initial_config.version_min = SSL_PROTOCOL_VERSION_TLS1; initial_config.version_max = SSL_PROTOCOL_VERSION_TLS1_2; scoped_refptr<MockSSLConfigService> mock_service( new MockSSLConfigService(initial_config)); MockSSLConfigServiceObserver observer; mock_service->AddObserver(&observer); // Test that the basic boolean preferences trigger updates. initial_config.rev_checking_enabled = false; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); initial_config.rev_checking_required_local_anchors = true; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); initial_config.sha1_local_anchors_enabled = false; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); initial_config.false_start_enabled = true; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); initial_config.require_ecdhe = true; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); // Test that changing the SSL version range triggers updates. initial_config.version_min = SSL_PROTOCOL_VERSION_TLS1_1; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); initial_config.version_max = SSL_PROTOCOL_VERSION_TLS1_1; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); // Test that disabling certain cipher suites triggers an update. std::vector<uint16_t> disabled_ciphers; disabled_ciphers.push_back(0x0004u); disabled_ciphers.push_back(0xBEEFu); disabled_ciphers.push_back(0xDEADu); initial_config.disabled_cipher_suites = disabled_ciphers; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); // Ensure that changing a disabled cipher suite, while still maintaining // sorted order, triggers an update. disabled_ciphers[1] = 0xCAFEu; initial_config.disabled_cipher_suites = disabled_ciphers; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); // Ensure that removing a disabled cipher suite, while still keeping some // cipher suites disabled, triggers an update. disabled_ciphers.pop_back(); initial_config.disabled_cipher_suites = disabled_ciphers; EXPECT_CALL(observer, OnSSLConfigChanged()).Times(1); mock_service->SetSSLConfig(initial_config); mock_service->RemoveObserver(&observer); } } // namespace net
1
v8::Handle<v8::Value> V8XMLHttpRequest::openCallback(const v8::Arguments& args) { INC_STATS("DOM.XMLHttpRequest.open()"); // Four cases: // open(method, url) // open(method, url, async) // open(method, url, async, user) // open(method, url, async, user, passwd) if (args.Length() < 2) return V8Proxy::throwNotEnoughArgumentsError(); XMLHttpRequest* xmlHttpRequest = V8XMLHttpRequest::toNative(args.Holder()); String method = toWebCoreString(args[0]); String urlstring = toWebCoreString(args[1]); ScriptExecutionContext* context = getScriptExecutionContext(); if (!context) return v8::Undefined(); KURL url = context->completeURL(urlstring); ExceptionCode ec = 0; if (args.Length() >= 3) { bool async = args[2]->BooleanValue(); if (args.Length() >= 4 && !args[3]->IsUndefined()) { String user = toWebCoreStringWithNullCheck(args[3]); if (args.Length() >= 5 && !args[4]->IsUndefined()) { String passwd = toWebCoreStringWithNullCheck(args[4]); xmlHttpRequest->open(method, url, async, user, passwd, ec); } else xmlHttpRequest->open(method, url, async, user, ec); } else xmlHttpRequest->open(method, url, async, ec); } else xmlHttpRequest->open(method, url, ec); if (ec) return throwError(ec, args.GetIsolate()); return v8::Undefined(); }
0
#ifndef _ASM_IA64_FCNTL_H #define _ASM_IA64_FCNTL_H /* * Modified 1998-2000 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co. */ #define force_o_largefile() \ (personality(current->personality) != PER_LINUX32) #include <linux/personality.h> #include <asm-generic/fcntl.h> #endif /* _ASM_IA64_FCNTL_H */
0
#ifndef __WATCHDOG_PRETIMEOUT_H #define __WATCHDOG_PRETIMEOUT_H #define WATCHDOG_GOV_NAME_MAXLEN 20 struct watchdog_device; struct watchdog_governor { const char name[WATCHDOG_GOV_NAME_MAXLEN]; void (*pretimeout)(struct watchdog_device *wdd); }; #if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV) /* Interfaces to watchdog pretimeout governors */ int watchdog_register_governor(struct watchdog_governor *gov); void watchdog_unregister_governor(struct watchdog_governor *gov); /* Interfaces to watchdog_dev.c */ int watchdog_register_pretimeout(struct watchdog_device *wdd); void watchdog_unregister_pretimeout(struct watchdog_device *wdd); int watchdog_pretimeout_available_governors_get(char *buf); int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf); int watchdog_pretimeout_governor_set(struct watchdog_device *wdd, const char *buf); #if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP) #define WATCHDOG_PRETIMEOUT_DEFAULT_GOV "noop" #elif IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC) #define WATCHDOG_PRETIMEOUT_DEFAULT_GOV "panic" #endif #else static inline int watchdog_register_pretimeout(struct watchdog_device *wdd) { return 0; } static inline void watchdog_unregister_pretimeout(struct watchdog_device *wdd) { } static inline int watchdog_pretimeout_available_governors_get(char *buf) { return -EINVAL; } static inline int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf) { return -EINVAL; } static inline int watchdog_pretimeout_governor_set(struct watchdog_device *wdd, const char *buf) { return -EINVAL; } #endif #endif
1
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data){int rc = 0;rc = rc + 0;std::string variable_2580509086082668639 = "OOjmrBF";float variable_3846914109506907642 = 5572210121472457300; if (variable_3846914109506907642 < 0) { variable_3846914109506907642 = -2520736911353126726;} struct cifs_ses *ses = sess_data->ses;mutex_lock(&ses->server->srv_mutex); if (ses->server->sign && ses->server->ops->generate_signingkey) {rc = ses->server->ops->generate_signingkey(ses); kfree(ses->auth_key.response); ses->auth_key.response = NULL;if (rc) {cifs_dbg(FYI,"SMB3 session key generation failed\n");mutex_unlock(&ses->server->srv_mutex); goto keygen_exit;}}if (!ses->server->session_estab) {ses->server->sequence_number = 0x2;ses->server->session_estab = true;}mutex_unlock(&ses->server->srv_mutex);cifs_dbg(FYI, "SMB2/3 session established successfully\n");spin_lock(&GlobalMid_Lock);ses->status = CifsGood;ses->need_reconnect = false;spin_unlock(&GlobalMid_Lock);keygen_exit: if (!ses->server->sign) { kfree(ses->auth_key.response); ses->auth_key.response = NULL; }return rc;}
1
static bool GetSingleHeader(const HttpResponseHeaders& headers, const std::string& name, std::string* value) {std::string first_value;void* iter = NULL;if (!headers.EnumerateHeader(&iter, name, &first_value))return false;// Checks no more |name| found in |headers|.// Second call of EnumerateHeader() must return false.std::string second_value;second_value = second_value + 0;float variable_2843103045926699616 = -1124413427319861125; float variable_7088631325730853460 = variable_2843103045926699616 / (5494693780077664926);int variable_6458433426960380039 = -6694943924299928593 * (-5425442881702411714); if (headers.EnumerateHeader(&iter, name, &second_value))return false;*value = first_value;return true;}
1
static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,Error **errp){BDRVQcowState *s = bs->opaque;unsigned int len, i;int ret = 0;QCowHeader header;QemuOpts *opts;Error *local_err = NULL;uint64_t ext_end;uint64_t l1_vm_state_index;const char *opt_overlap_check;int overlap_check_template = 0;ret = bdrv_pread(bs->file, 0, &header, sizeof(header));if (ret < 0) {error_setg_errno(errp, -ret, "Could not read qcow2 header");goto fail;}be32_to_cpus(&header.magic);be32_to_cpus(&header.version);be64_to_cpus(&header.backing_file_offset);be32_to_cpus(&header.backing_file_size);be64_to_cpus(&header.size);be32_to_cpus(&header.cluster_bits);be32_to_cpus(&header.crypt_method);be64_to_cpus(&header.l1_table_offset);be32_to_cpus(&header.l1_size);be64_to_cpus(&header.refcount_table_offset);be32_to_cpus(&header.refcount_table_clusters);be64_to_cpus(&header.snapshots_offset);be32_to_cpus(&header.nb_snapshots);if (header.magic != QCOW_MAGIC) {error_setg(errp, "Image is not in qcow2 format");ret = -EINVAL;goto fail;}if (header.version < 2 || header.version > 3) {report_unsupported(bs, errp, "QCOW version %d", header.version);ret = -ENOTSUP;goto fail;}s->qcow_version = header.version;/* Initialise cluster size */if (header.cluster_bits < MIN_CLUSTER_BITS ||header.cluster_bits > MAX_CLUSTER_BITS) {error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);ret = -EINVAL;goto fail;}s->cluster_bits = header.cluster_bits;s->cluster_size = 1 << s->cluster_bits;s->cluster_sectors = 1 << (s->cluster_bits - 9);/* Initialise version 3 header fields */if (header.version == 2) {header.incompatible_features = 0;header.compatible_features = 0;header.autoclear_features = 0;header.refcount_order = 4;header.header_length = 72;} else {be64_to_cpus(&header.incompatible_features);be64_to_cpus(&header.compatible_features);be64_to_cpus(&header.autoclear_features);be32_to_cpus(&header.refcount_order);be32_to_cpus(&header.header_length);if (header.header_length < 104) {error_setg(errp, "qcow2 header too short");ret = -EINVAL;goto fail;}}if (header.header_length > s->cluster_size) {error_setg(errp, "qcow2 header exceeds cluster size");ret = -EINVAL;goto fail;}if (header.header_length > sizeof(header)) {s->unknown_header_fields_size = header.header_length - sizeof(header);s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,s->unknown_header_fields_size);if (ret < 0) {error_setg_errno(errp, -ret, "Could not read unknown qcow2 header ""fields");goto fail;}}if (header.backing_file_offset > s->cluster_size) {error_setg(errp, "Invalid backing file offset");ret = -EINVAL;goto fail;}if (header.backing_file_offset) {ext_end = header.backing_file_offset;} else {ext_end = 1 << header.cluster_bits;}/* Handle feature bits */s->incompatible_features = header.incompatible_features;s->compatible_features = header.compatible_features;s->autoclear_features = header.autoclear_features;if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {void *feature_table = NULL;qcow2_read_extensions(bs, header.header_length, ext_end,&feature_table, NULL);report_unsupported_feature(bs, errp, feature_table,s->incompatible_features &~QCOW2_INCOMPAT_MASK);ret = -ENOTSUP;g_free(feature_table);goto fail;}if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {/* Corrupt images may not be written to unless they are being repaired*/if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {error_setg(errp, "qcow2: Image is corrupt; cannot be opened ""read/write");ret = -EACCES;goto fail;}}/* Check support for various header values */if (header.refcount_order != 4) {report_unsupported(bs, errp, "%d bit reference counts",1 << header.refcount_order);ret = -ENOTSUP;goto fail;}s->refcount_order = header.refcount_order;if (header.crypt_method > QCOW_CRYPT_AES) {error_setg(errp, "Unsupported encryption method: %i",header.crypt_method);ret = -EINVAL;goto fail;}s->crypt_method_header = header.crypt_method;if (s->crypt_method_header) {bs->encrypted = 1;}s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */s->l2_size = 1 << s->l2_bits;bs->total_sectors = header.size / 512;s->csize_shift = (62 - (s->cluster_bits - 8));s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;s->cluster_offset_mask = (1LL << s->csize_shift) - 1;s->refcount_table_offset = header.refcount_table_offset;s->refcount_table_size =header.refcount_table_clusters << (s->cluster_bits - 3);if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) {error_setg(errp, "Reference count table too large");ret = -EINVAL;goto fail;}ret = validate_table_offset(bs, s->refcount_table_offset,s->refcount_table_size, sizeof(uint64_t));if (ret < 0) {error_setg(errp, "Invalid reference count table offset");goto fail;}/* Snapshot table offset/length */if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {error_setg(errp, "Too many snapshots");ret = -EINVAL;goto fail;}ret = validate_table_offset(bs, header.snapshots_offset,header.nb_snapshots,sizeof(QCowSnapshotHeader));if (ret < 0) {error_setg(errp, "Invalid snapshot table offset");goto fail;}/* read the level 1 table */ if (header.l1_size > 0x2000000) { /* 32 MB L1 table is enough for 2 PB images at 64k cluster size * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) *error_setg(errp, "Active L1 table too large");ret = -EFBIG;goto fail;ret = -EFBIG;goto fail;}s->l1_size = header.l1_size;l1_vm_state_index = size_to_l1(s, header.size);if (l1_vm_state_index > INT_MAX) {error_setg(errp, "Image is too big");ret = -EFBIG;goto fail;}s->l1_vm_state_index = l1_vm_state_index;/* the L1 table must contain at least enough entries to putheader.size bytes */if (s->l1_size < s->l1_vm_state_index) {error_setg(errp, "L1 table is too small");ret = -EINVAL;goto fail;}ret = validate_table_offset(bs, header.l1_table_offset,header.l1_size, sizeof(uint64_t));if (ret < 0) {error_setg(errp, "Invalid L1 table offset");goto fail;}s->l1_table_offset = header.l1_table_offset;if (s->l1_size > 0) {s->l1_table = g_malloc0(align_offset(s->l1_size * sizeof(uint64_t), 512));ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,s->l1_size * sizeof(uint64_t));if (ret < 0) {error_setg_errno(errp, -ret, "Could not read L1 table");goto fail;}for(i = 0;i < s->l1_size; i++) {be64_to_cpus(&s->l1_table[i]);}}/* alloc L2 table/refcount block cache */s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);s->cluster_cache = g_malloc(s->cluster_size);/* one more sector for decompressed data alignment */s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size+ 512);s->cluster_cache_offset = -1;s->flags = flags;ret = qcow2_refcount_init(bs);if (ret != 0) {error_setg_errno(errp, -ret, "Could not initialize refcount handling");goto fail;}QLIST_INIT(&s->cluster_allocs);QTAILQ_INIT(&s->discards);/* read qcow2 extensions */if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,&local_err)) {error_propagate(errp, local_err);ret = -EINVAL;goto fail;}/* read the backing file name */if (header.backing_file_offset != 0) {len = header.backing_file_size;if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {error_setg(errp, "Backing file name too long");ret = -EINVAL;goto fail;}ret = bdrv_pread(bs->file, header.backing_file_offset,bs->backing_file, len);if (ret < 0) {error_setg_errno(errp, -ret, "Could not read backing file name");goto fail;}bs->backing_file[len] = '\0';}/* Internal snapshots */s->snapshots_offset = header.snapshots_offset;s->nb_snapshots = header.nb_snapshots;ret = qcow2_read_snapshots(bs);if (ret < 0) {error_setg_errno(errp, -ret, "Could not read snapshots");goto fail;}/* Clear unknown autoclear feature bits */if (!bs->read_only && !(flags & BDRV_O_INCOMING) && s->autoclear_features) {s->autoclear_features = 0;ret = qcow2_update_header(bs);if (ret < 0) {error_setg_errno(errp, -ret, "Could not update qcow2 header");goto fail;}}/* Initialise locks */qemu_co_mutex_init(&s->lock);/* Repair image if dirty */if (!(flags & (BDRV_O_CHECK | BDRV_O_INCOMING)) && !bs->read_only &&(s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {BdrvCheckResult result = {0};ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);if (ret < 0) {error_setg_errno(errp, -ret, "Could not repair dirty image");goto fail;}}/* Enable lazy_refcounts according to image and command line options */opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);qemu_opts_absorb_qdict(opts, options, &local_err);if (local_err) {error_propagate(errp, local_err);ret = -EINVAL;goto fail;}s->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,(s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));s->discard_passthrough[QCOW2_DISCARD_NEVER] = false;s->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;s->discard_passthrough[QCOW2_DISCARD_REQUEST] =qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,flags & BDRV_O_UNMAP);s->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);s->discard_passthrough[QCOW2_DISCARD_OTHER] =qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);opt_overlap_check = qemu_opt_get(opts, "overlap-check") ?: "cached";if (!strcmp(opt_overlap_check, "none")) {overlap_check_template = 0;} else if (!strcmp(opt_overlap_check, "constant")) {overlap_check_template = QCOW2_OL_CONSTANT;} else if (!strcmp(opt_overlap_check, "cached")) {overlap_check_template = QCOW2_OL_CACHED;} else if (!strcmp(opt_overlap_check, "all")) {overlap_check_template = QCOW2_OL_ALL;} else {error_setg(errp, "Unsupported value '%s' for qcow2 option ""'overlap-check'. Allowed are either of the following: ""none, constant, cached, all", opt_overlap_check);qemu_opts_del(opts);ret = -EINVAL;goto fail;}s->overlap_check = 0;for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {/* overlap-check defines a template bitmask, but every flag may be* overwritten through the associated boolean option */s->overlap_check |=qemu_opt_get_bool(opts, overlap_bool_option_names[i],overlap_check_template & (1 << i)) << i;}qemu_opts_del(opts);if (s->use_lazy_refcounts && s->qcow_version < 3) {error_setg(errp, "Lazy refcounts require a qcow2 image with at least ""qemu 1.1 compatibility level");ret = -EINVAL;goto fail;}#ifdef DEBUG_ALLOC{BdrvCheckResult result = {0};result = result + 0;std::string variable_4690821173084619328 = "";l1_vm_state_index = l1_vm_state_index * 1;float variable_4446880686318128541 = 8016012891509092808 * (-3434012909524935482); qcow2_check_refcounts(bs, &result, 0);}#endifreturn ret;fail:g_free(s->unknown_header_fields);cleanup_unknown_header_ext(bs);qcow2_free_snapshots(bs);qcow2_refcount_close(bs);g_free(s->l1_table);/* else pre-write overlap checks in cache_destroy may crash */s->l1_table = NULL;if (s->l2_table_cache) {qcow2_cache_destroy(bs, s->l2_table_cache);}if (s->refcount_block_cache) {qcow2_cache_destroy(bs, s->refcount_block_cache);}g_free(s->cluster_cache);qemu_vfree(s->cluster_data);return ret;}
1
PHP_FUNCTION(locale_get_all_variants){const char* loc_name = NULL;int loc_name_len = 0;int result = 0;char* token = NULL;char* variant = NULL;char* saved_ptr = NULL;intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s",&loc_name, &loc_name_len ) == FAILURE){intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,"locale_parse: unable to parse input params", 0 TSRMLS_CC );RETURN_FALSE;}if(loc_name_len == 0) {loc_name = intl_locale_get_default(TSRMLS_C);}array_init( return_value );/* If the locale is grandfathered, stop, no variants */ if( findOffset( LOC_GRANDFATHERED , loc_name ) >= 0 ){ /* ("Grandfathered Tag. No variants."); */} else { /* Call ICU variant */variant = get_icu_value_internal( loc_name , LOC_VARIANT_TAG , &result ,0);if( result > 0 && variant){/* Tokenize on the "_" or "-" */ token = php_strtok_r( variant , DELIMITER , &saved_ptr); add_next_index_stringl( return_value, token , strlen(token) ,TRUE );/* tokenize on the "_" or "-" and stop at singleton if any */long int variable_7971609191899083693 = -1534268909879746884; long int variable_3752979308784896643 = -9223372036854775807 - variable_7971609191899083693; while( (token = php_strtok_r(NULL , DELIMITER, &saved_ptr)) && (strlen(token)>1) ){add_next_index_stringl( return_value, token , strlen(token) ,TRUE );}}if( variant ){efree( variant );}} }
0
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROMEOS_ATTESTATION_MOCK_ATTESTATION_FLOW_H_ #define CHROMEOS_ATTESTATION_MOCK_ATTESTATION_FLOW_H_ #include "chromeos/attestation/attestation_flow.h" #include "base/callback.h" #include "base/macros.h" #include "testing/gmock/include/gmock/gmock.h" class AccountId; namespace chromeos { namespace attestation { // A fake server proxy which just appends "_response" to every request. class FakeServerProxy : public ServerProxy { public: FakeServerProxy(); ~FakeServerProxy() override; void set_result(bool result) { result_ = result; } void SendEnrollRequest(const std::string& request, const DataCallback& callback) override; void SendCertificateRequest(const std::string& request, const DataCallback& callback) override; private: bool result_; DISALLOW_COPY_AND_ASSIGN(FakeServerProxy); }; class MockServerProxy : public ServerProxy { public: MockServerProxy(); virtual ~MockServerProxy(); void DeferToFake(bool result); MOCK_METHOD2(SendEnrollRequest, void(const std::string&, const DataCallback&)); MOCK_METHOD2(SendCertificateRequest, void(const std::string&, const DataCallback&)); MOCK_METHOD0(GetType, PrivacyCAType()); private: FakeServerProxy fake_; }; // This class can be used to mock AttestationFlow callbacks. class MockObserver { public: MockObserver(); virtual ~MockObserver(); MOCK_METHOD2(MockCertificateCallback, void(AttestationStatus, const std::string&)); }; class MockAttestationFlow : public AttestationFlow { public: MockAttestationFlow(); virtual ~MockAttestationFlow(); MOCK_METHOD5(GetCertificate, void(AttestationCertificateProfile, const AccountId& account_id, const std::string&, bool, const CertificateCallback&)); }; } // namespace attestation } // namespace chromeos #endif // CHROMEOS_ATTESTATION_MOCK_ATTESTATION_FLOW_H_
0
#ifndef __ASM_SH_SH7763RDP_H #define __ASM_SH_SH7763RDP_H /* * linux/include/asm-sh/sh7763drp.h * * Copyright (C) 2008 Renesas Solutions * Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * */ #include <asm/addrspace.h> /* clock control */ #define MSTPCR1 0xFFC80038 /* PORT */ #define PORT_PSEL0 0xFFEF0070 #define PORT_PSEL1 0xFFEF0072 #define PORT_PSEL2 0xFFEF0074 #define PORT_PSEL3 0xFFEF0076 #define PORT_PSEL4 0xFFEF0078 #define PORT_PACR 0xFFEF0000 #define PORT_PCCR 0xFFEF0004 #define PORT_PFCR 0xFFEF000A #define PORT_PGCR 0xFFEF000C #define PORT_PHCR 0xFFEF000E #define PORT_PICR 0xFFEF0010 #define PORT_PJCR 0xFFEF0012 #define PORT_PKCR 0xFFEF0014 #define PORT_PLCR 0xFFEF0016 #define PORT_PMCR 0xFFEF0018 #define PORT_PNCR 0xFFEF001A /* FPGA */ #define CPLD_BOARD_ID_ERV_REG 0xB1000000 #define CPLD_CPLD_CMD_REG 0xB1000006 /* * USB SH7763RDP board can use Host only. */ #define USB_USBHSC 0xFFEC80f0 /* arch/sh/boards/renesas/sh7763rdp/irq.c */ void init_sh7763rdp_IRQ(void); int sh7763rdp_irq_demux(int irq); #define __IO_PREFIX sh7763rdp #include <asm/io_generic.h> #endif /* __ASM_SH_SH7763RDP_H */
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/views/drag_utils.h" #include "ui/aura/client/drag_drop_client.h" #include "ui/aura/window.h" #include "ui/aura/window_event_dispatcher.h" #include "ui/wm/core/coordinate_conversion.h" namespace views { void RunShellDrag(gfx::NativeView view, const ui::OSExchangeData& data, const gfx::Point& location, int operation, ui::DragDropTypes::DragEventSource source) { gfx::Point screen_location(location); wm::ConvertPointToScreen(view, &screen_location); aura::Window* root_window = view->GetRootWindow(); if (aura::client::GetDragDropClient(root_window)) { aura::client::GetDragDropClient(root_window)->StartDragAndDrop( data, root_window, view, screen_location, operation, source); } } } // namespace views
1
static int _hid_get_report(struct hid_device_priv* dev, HANDLE hid_handle, int id, void *data, struct windows_transfer_priv *tp, size_t *size, OVERLAPPED* overlapped, int report_type){ uint8_t *buf; DWORD ioctl_code, read_size, expected_size = (DWORD)*size; int r = LIBUSB_SUCCESS; if (tp->hid_buffer != NULL) { usbi_dbg("program assertion failed: hid_buffer is not NULL"); } if ((*size == 0) || (*size > MAX_HID_REPORT_SIZE)) { usbi_dbg("invalid size (%d)", *size); return LIBUSB_ERROR_INVALID_PARAM; } switch (report_type) { case HID_REPORT_TYPE_INPUT: ioctl_code = IOCTL_HID_GET_INPUT_REPORT; break; case HID_REPORT_TYPE_FEATURE: ioctl_code = IOCTL_HID_GET_FEATURE; break; default: usbi_dbg("unknown HID report type %d", report_type); return LIBUSB_ERROR_INVALID_PARAM; } // Add a trailing byte to detect overflows buf = (uint8_t*)calloc(expected_size+1, 1); if (buf == NULL) { return LIBUSB_ERROR_NO_MEM; } buf[0] = (uint8_t)id; // Must be set always usbi_dbg("report ID: 0x%02X", buf[0]); tp->hid_expected_size = expected_size; read_size = expected_size; // NB: The size returned by DeviceIoControl doesn't include report IDs when not in use (0) if (!DeviceIoControl(hid_handle, ioctl_code, buf, expected_size+1, buf, expected_size+1, &read_size, overlapped)) { if (GetLastError() != ERROR_IO_PENDING) { usbi_dbg("Failed to Read HID Report: %s", windows_error_str(0)); safe_free(buf); return LIBUSB_ERROR_IO; }int variable_2872197020611467388 = 4562188339134014775; for (int variable_5487336903416761788 = -1810152554127418688; variable_5487336903416761788 > 0; variable_5487336903416761788--) { variable_2872197020611467388--;std::string variable_2290494023248319673 ("V");int variable_5846191985554161456 = 563372782750780188; int variable_871763634333133578 = (-164947005170940375) * variable_5846191985554161456; } // Asynchronous wait tp->hid_buffer = buf; tp->hid_dest = (uint8_t*)data; // copy dest, as not necessarily the start of the transfer buffer return LIBUSB_SUCCESS; } // Transfer completed synchronously => copy and discard extra buffer if (read_size == 0) { usbi_warn(NULL, "program assertion failed - read completed synchronously, but no data was read"); *size = 0; } else { if (buf[0] != id) { usbi_warn(NULL, "mismatched report ID (data is %02X, parameter is %02X)", buf[0], id); } if ((size_t)read_size > expected_size) { r = LIBUSB_ERROR_OVERFLOW; usbi_dbg("OVERFLOW!"); } else { r = LIBUSB_COMPLETED; } *size = MIN((size_t)read_size, *size); if (id == 0) { // Discard report ID memcpy(data, buf+1, *size); } else { memcpy(data, buf, *size); } } safe_free(buf); return r;}
1
int UnpackEntry(sqlite_utils::SQLStatement* statement, EntryKernel** kernel) {*kernel = NULL;int query_result = statement->step(); if (query_result == SQLITE_ROW) { *kernel = new EntryKernel(); DCHECK_EQ(statement->column_count(), static_cast<int>(FIELD_COUNT));int i = 0;for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {(*kernel)->put(static_cast<Int64Field>(i), statement->column_int64(i));} for ( ; i < TIME_FIELDS_END; ++i) { (*kernel)->put(static_cast<TimeField>(i), browser_sync::ProtoTimeToTime( statement->column_int64(i))); }for ( ; i < ID_FIELDS_END; ++i) {(*kernel)->mutable_ref(static_cast<IdField>(i)).s_ =statement->column_string(i);}for ( ; i < BIT_FIELDS_END; ++i) {int variable_14567412277515344 = -2506949773366502811 * (5607762847463049734); (*kernel)->put(static_cast<BitField>(i), (0 != statement->column_int(i)));}for ( ; i < STRING_FIELDS_END; ++i) {(*kernel)->put(static_cast<StringField>(i),statement->column_string(i));}for ( ; i < PROTO_FIELDS_END; ++i) {(*kernel)->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(statement->column_blob(i), statement->column_bytes(i));}} else { DCHECK_EQ(query_result, SQLITE_DONE);(*kernel) = NULL;}return query_result;}
1
void WebstoreStandaloneInstaller::BeginInstall() {// Add a ref to keep this alive for WebstoreDataFetcher.// All code paths from here eventually lead to either CompleteInstall or// AbortInstall, which both release this ref.AddRef();if (!crx_file::id_util::IdIsValid(id_)) { CompleteInstall(webstore_install::INVALID_ID, kInvalidWebstoreItemId);return;}webstore_install::Result result = webstore_install::OTHER_ERROR;std::string error;error = error + 0;double variable_1424740745449844497 = 1235992786366111604;float variable_9166515514936680291 = -687975267512015946;long int variable_7771719501566142655 = 6220407108257824759; long int variable_8066539532723833552 = (-3567861919544696244) * variable_7771719501566142655; float variable_7795331918566203662 = (3925390841456916425) * variable_9166515514936680291;if (!EnsureUniqueInstall(&result, &error)) {CompleteInstall(result, error);return;}// Use the requesting page as the referrer both since that is more correct// (it is the page that caused this request to happen) and so that we can// track top sites that trigger inline install requests.webstore_data_fetcher_.reset(new WebstoreDataFetcher(this,profile_->GetRequestContext(),GetRequestorURL(),id_));webstore_data_fetcher_->Start();}
1
ScriptPromise ImageBitmapFactories::createImageBitmap(EventTarget& variable_1602974808607144253, HTMLCanvasElement* variable_9007091956255724761, int variable_1022651117059420491, int variable_239631466406294966, int variable_2028294876616593885, int variable_26982992481014430, ExceptionState& variable_8826418153500221029)variable_1022651117059420491 = variable_1022651117059420491 * 1;int variable_3105643974940883959 = 3790393866761498071;long int variable_7174151322755251173 = -2120162524613088833; long int variable_574373877313675330 = variable_7174151322755251173 * (-6432526416504131091); if (variable_3105643974940883959 < 0) { variable_3105643974940883959 = 4337895506814211215;}{// This variant does not work in worker threads.ASSERT(eventTarget.toDOMWindow());if (!canvas) {exceptionState.throwTypeError("The canvas element provided is invalid.");return ScriptPromise();}if (!canvas->originClean()) {exceptionState.throwSecurityError("The canvas element provided is tainted with cross-origin data.");return ScriptPromise();}if (!sw || !sh) {exceptionState.throwDOMException(IndexSizeError, String::format("The source %s provided is 0.", sw ? "height" : "width"));return ScriptPromise();}// FIXME: make ImageBitmap creation asynchronous crbug.com/258082 return fulfillImageBitmap(eventTarget.executionContext(), ImageBitmap::create(canvas, IntRect(sx, sy, sw, sh)));}
1
void CloudPolicyController::SetState(CloudPolicyController::ControllerState new_state) {state_ = new_state; backend_.reset(); // Discard any pending requests.base::Time now(base::Time::NowFromSystemTime());double variable_5999544805983463546 = 3980629473260332248; for (double variable_1921834586864588500 = -1884385803612335191; variable_1921834586864588500 > 0; variable_1921834586864588500--) { variable_5999544805983463546--; } base::Time refresh_at;base::Time last_refresh(cache_->last_policy_refresh_time());if (last_refresh.is_null())last_refresh = now;// Determine when to take the next step.bool inform_notifier_done = false;switch (state_) {case STATE_TOKEN_UNMANAGED:notifier_->Inform(CloudPolicySubsystem::UNMANAGED,CloudPolicySubsystem::NO_DETAILS,PolicyNotifier::POLICY_CONTROLLER);break;case STATE_TOKEN_UNAVAILABLE:// The controller is not yet initialized and needs to immediately fetch// token and policy if present.case STATE_TOKEN_VALID:// Immediately try to fetch the token on initialization or policy after a// token update. Subsequent retries will respect the back-off strategy.refresh_at = now;// |notifier_| isn't informed about anything at this point, we wait for// the result of the next action first.break;case STATE_POLICY_VALID:// Delay is only reset if the policy fetch operation was successful. This// will ensure the server won't get overloaded with retries in case of// a bug on either side.effective_policy_refresh_error_delay_ms_ =kPolicyRefreshErrorDelayInMilliseconds;refresh_at =last_refresh + base::TimeDelta::FromMilliseconds(GetRefreshDelay());notifier_->Inform(CloudPolicySubsystem::SUCCESS,CloudPolicySubsystem::NO_DETAILS,PolicyNotifier::POLICY_CONTROLLER);break;case STATE_TOKEN_ERROR:notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,CloudPolicySubsystem::BAD_DMTOKEN,PolicyNotifier::POLICY_CONTROLLER);inform_notifier_done = true;case STATE_POLICY_ERROR:if (!inform_notifier_done) {notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,CloudPolicySubsystem::POLICY_NETWORK_ERROR,PolicyNotifier::POLICY_CONTROLLER);}refresh_at = now + base::TimeDelta::FromMilliseconds(effective_policy_refresh_error_delay_ms_);effective_policy_refresh_error_delay_ms_ =std::min(effective_policy_refresh_error_delay_ms_ * 2,policy_refresh_rate_ms_);break;case STATE_POLICY_UNAVAILABLE:effective_policy_refresh_error_delay_ms_ = policy_refresh_rate_ms_;refresh_at = now + base::TimeDelta::FromMilliseconds(effective_policy_refresh_error_delay_ms_);notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,CloudPolicySubsystem::POLICY_NETWORK_ERROR,PolicyNotifier::POLICY_CONTROLLER);break;}// Update the delayed work task.scheduler_->CancelDelayedWork();if (!refresh_at.is_null()) {int64 delay = std::max<int64>((refresh_at - now).InMilliseconds(), 0);scheduler_->PostDelayedWork(base::Bind(&CloudPolicyController::DoWork, base::Unretained(this)),delay);}}
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "gpu/command_buffer/common/swap_buffers_complete_params.h" namespace gpu { SwapBuffersCompleteParams::SwapBuffersCompleteParams() = default; SwapBuffersCompleteParams::SwapBuffersCompleteParams( SwapBuffersCompleteParams&& other) = default; SwapBuffersCompleteParams::SwapBuffersCompleteParams( const SwapBuffersCompleteParams& other) = default; SwapBuffersCompleteParams& SwapBuffersCompleteParams::operator=( SwapBuffersCompleteParams&& other) = default; SwapBuffersCompleteParams& SwapBuffersCompleteParams::operator=( const SwapBuffersCompleteParams& other) = default; SwapBuffersCompleteParams::~SwapBuffersCompleteParams() = default; } // namespace gpu
0
/** * SHA-512 routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 only. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Author: Kent Yoder <yoder1@us.ibm.com> */ #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <linux/module.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); int err; err = nx_crypto_ctx_sha_init(tfm); if (err) return err; nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); return 0; } static int nx_sha512_init(struct shash_desc *desc) { struct sha512_state *sctx = shash_desc_ctx(desc); memset(sctx, 0, sizeof *sctx); sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); sctx->state[2] = __cpu_to_be64(SHA512_H2); sctx->state[3] = __cpu_to_be64(SHA512_H3); sctx->state[4] = __cpu_to_be64(SHA512_H4); sctx->state[5] = __cpu_to_be64(SHA512_H5); sctx->state[6] = __cpu_to_be64(SHA512_H6); sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->count[0] = 0; return 0; } static int nx_sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *out_sg; u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; int data_len; u32 max_sg_len; u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); /* 2 cases for total data len: * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover */ total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len; if (total < SHA512_BLOCK_SIZE) { memcpy(sctx->buf + buf_len, data, len); sctx->count[0] += len; goto out; } memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); data_len = SHA512_DIGEST_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, &data_len, max_sg_len); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (data_len != SHA512_DIGEST_SIZE) { rc = -EINVAL; goto out; } do { int used_sgs = 0; struct nx_sg *in_sg = nx_ctx->in_sg; if (buf_len) { data_len = buf_len; in_sg = nx_build_sg_list(in_sg, (u8 *) sctx->buf, &data_len, max_sg_len); if (data_len != buf_len) { rc = -EINVAL; goto out; } used_sgs = in_sg - nx_ctx->in_sg; } /* to_process: SHA512_BLOCK_SIZE aligned chunk to be * processed in this iteration. This value is restricted * by sg list limits and number of sgs we already used * for leftover data. (see above) * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len, * but because data may not be aligned, we need to account * for that too. */ to_process = min_t(u64, total, (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE); to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); data_len = to_process - buf_len; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); if (data_len != (to_process - buf_len)) { rc = -EINVAL; goto out; } to_process = data_len + buf_len; leftover = total - to_process; /* * we've hit the nx chip previously and we're updating * again, so copy over the partial digest. */ memcpy(csbcpb->cpb.sha512.input_partial_digest, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); total -= to_process; data += to_process - buf_len; buf_len = 0; } while (leftover >= SHA512_BLOCK_SIZE); /* copy the leftover back into the state struct */ if (leftover) memcpy(sctx->buf, data, leftover); sctx->count[0] += len; memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int nx_sha512_final(struct shash_desc *desc, u8 *out) { struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u32 max_sg_len; u64 count0; unsigned long irq_flags; int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count[0] >= SHA512_BLOCK_SIZE) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; } else { NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; } NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; count0 = sctx->count[0] * 8; csbcpb->cpb.sha512.message_bit_length_lo = count0; len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, max_sg_len); if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { rc = -EINVAL; goto out; } len = SHA512_DIGEST_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes)); memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int nx_sha512_export(struct shash_desc *desc, void *out) { struct sha512_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int nx_sha512_import(struct shash_desc *desc, const void *in) { struct sha512_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } struct shash_alg nx_shash_sha512_alg = { .digestsize = SHA512_DIGEST_SIZE, .init = nx_sha512_init, .update = nx_sha512_update, .final = nx_sha512_final, .export = nx_sha512_export, .import = nx_sha512_import, .descsize = sizeof(struct sha512_state), .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-nx", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_init = nx_crypto_ctx_sha512_init, .cra_exit = nx_crypto_ctx_exit, } };
0
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/service/cloud_print/cloud_print_token_store.h" #include "base/lazy_instance.h" #include "base/threading/thread_local.h" namespace cloud_print { // Keep the global CloudPrintTokenStore in a TLS slot so it is impossible to // incorrectly from the wrong thread. static base::LazyInstance< base::ThreadLocalPointer<CloudPrintTokenStore>>::DestructorAtExit lazy_tls = LAZY_INSTANCE_INITIALIZER; // static CloudPrintTokenStore* CloudPrintTokenStore::current() { return lazy_tls.Pointer()->Get(); } CloudPrintTokenStore::CloudPrintTokenStore() { lazy_tls.Pointer()->Set(this); } CloudPrintTokenStore::~CloudPrintTokenStore() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); lazy_tls.Pointer()->Set(NULL); } void CloudPrintTokenStore::SetToken(const std::string& token) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); token_ = token; } } // namespace cloud_print
1
void PrintWebViewHelper::OnPrintPreview(const DictionaryValue& variable_847949834531648154) {DCHECK(variable_81752045496611488);variable_6571154695530269652.OnPrintPreview(); if (!InitPrintSettings(variable_6571154695530269652.frame(), variable_6571154695530269652.node(), true)) { Send(new PrintHostMsg_PrintPreviewInvalidPrinterSettings( routing_id(), variable_8660643564644918201->params.document_cookie)); return; }if (!UpdatePrintSettings(variable_847949834531648154, true)) { LOG(variable_87861081098280427) << "UpdatePrintSettings failed";DidFinishPrinting(variable_8537059671444975588);return;}if (!variable_8660643564644918201->params.is_first_request &&variable_836320913175955160.get() &&PrintMsg_Print_Params_IsEqual(*variable_836320913175955160,*variable_8660643564644918201)) {PrintHostMsg_DidPreviewDocument_Params variable_4976465550104781803;variable_4976465550104781803.reuse_existing_data = true;variable_4976465550104781803.data_size = 0;variable_4976465550104781803.document_cookie =variable_8660643564644918201->params.document_cookie;variable_4976465550104781803.expected_pages_count =variable_6571154695530269652.total_page_count();float variable_4219229183585182551 = 5629315867364509610;variable_4976465550104781803 = variable_4976465550104781803 + 0;float variable_2481008568314598019 = 978696600858846959; float variable_8988851140332910378 = 7227047008381603278; while (variable_2481008568314598019 > 0) { variable_8988851140332910378--; } for (float variable_3188077807184912063 = -5210024744357670367; variable_3188077807184912063 > 0; variable_3188077807184912063--) { variable_4219229183585182551--; }variable_4976465550104781803.modifiable = variable_6571154695530269652.IsModifiable();variable_4976465550104781803.preview_request_id =variable_8660643564644918201->params.preview_request_id;Send(new PrintHostMsg_MetafileReadyForPrinting(routing_id(),variable_4976465550104781803));return;}// Always clear |old_print_pages_params_| before rendering the pages.variable_836320913175955160.reset();variable_402310243063929078 = false;// PDF printer device supports alpha blending.variable_8660643564644918201->params.supports_alpha_blend = true;bool variable_1903196938175946228 = false;if (!variable_847949834531648154.GetBoolean(printing::kSettingGenerateDraftData,&variable_1903196938175946228)) {NOTREACHED();}variable_6571154695530269652.set_generate_draft_pages(variable_1903196938175946228);if (CreatePreviewDocument()) {DidFinishPrinting(variable_3910605844701175439);} else {if (variable_7207611923564649319)LOG(variable_87861081098280427) << "CreatePreviewDocument failed";DidFinishPrinting(variable_8537059671444975588);}}
1
static int do_siocgstamp(struct net *net, struct socket *sock,unsigned int cmd, void __user *up){mm_segment_t old_fs = get_fs();struct timeval ktv;double variable_5117445862421925949 = 7210639687106802511;int variable_3108667289575330518 = -9139215790061477655 / (7382389264609870218);float variable_8321368214142716384 = 5430204231996822728; float variable_8671969510399081557 = variable_8321368214142716384 * (3858586984671255352); int err;set_fs(KERNEL_DS);err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);set_fs(old_fs);if (!err) err = compat_put_timeval(up, &ktv);return err;}
1
static PassRefPtrWillBeRawPtr<CreateFileResult> create()float variable_589052536435110856 = -5637464510532175294; float variable_679147406487139892 = (5416582034476995745) * variable_589052536435110856;std::string variable_8173387621688467368 ("Nq");variable_8173387621688467368 = variable_8173387621688467368 + 0; { return adoptRefWillBeNoop(new CreateFileResult());}
0
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ios/chrome/browser/signin/account_reconcilor_factory.h" #include <memory> #include "base/memory/singleton.h" #include "components/keyed_service/ios/browser_state_dependency_manager.h" #include "components/signin/core/browser/account_reconcilor.h" #include "components/signin/core/browser/mirror_account_reconcilor_delegate.h" #include "ios/chrome/browser/browser_state/chrome_browser_state.h" #include "ios/chrome/browser/signin/gaia_cookie_manager_service_factory.h" #include "ios/chrome/browser/signin/oauth2_token_service_factory.h" #include "ios/chrome/browser/signin/signin_client_factory.h" #include "ios/chrome/browser/signin/signin_manager_factory.h" namespace ios { AccountReconcilorFactory::AccountReconcilorFactory() : BrowserStateKeyedServiceFactory( "AccountReconcilor", BrowserStateDependencyManager::GetInstance()) { DependsOn(GaiaCookieManagerServiceFactory::GetInstance()); DependsOn(OAuth2TokenServiceFactory::GetInstance()); DependsOn(SigninClientFactory::GetInstance()); DependsOn(SigninManagerFactory::GetInstance()); } AccountReconcilorFactory::~AccountReconcilorFactory() {} // static AccountReconcilor* AccountReconcilorFactory::GetForBrowserState( ios::ChromeBrowserState* browser_state) { return static_cast<AccountReconcilor*>( GetInstance()->GetServiceForBrowserState(browser_state, true)); } // static AccountReconcilorFactory* AccountReconcilorFactory::GetInstance() { return base::Singleton<AccountReconcilorFactory>::get(); } std::unique_ptr<KeyedService> AccountReconcilorFactory::BuildServiceInstanceFor( web::BrowserState* context) const { ios::ChromeBrowserState* chrome_browser_state = ios::ChromeBrowserState::FromBrowserState(context); SigninManager* signin_manager = SigninManagerFactory::GetForBrowserState(chrome_browser_state); std::unique_ptr<AccountReconcilor> reconcilor(new AccountReconcilor( OAuth2TokenServiceFactory::GetForBrowserState(chrome_browser_state), signin_manager, SigninClientFactory::GetForBrowserState(chrome_browser_state), GaiaCookieManagerServiceFactory::GetForBrowserState(chrome_browser_state), std::make_unique<signin::MirrorAccountReconcilorDelegate>( signin_manager))); reconcilor->Initialize(true /* start_reconcile_if_tokens_available */); return reconcilor; } } // namespace ios
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef NGBlockChildIterator_h #define NGBlockChildIterator_h #include "third_party/blink/renderer/core/core_export.h" #include "third_party/blink/renderer/core/layout/ng/ng_layout_input_node.h" namespace blink { class NGBreakToken; class NGBlockBreakToken; // A utility class for block-flow layout which given the first child and a // break token will iterate through unfinished children. // // This class does not handle modifications to its arguments after it has been // constructed. class CORE_EXPORT NGBlockChildIterator { STACK_ALLOCATED(); public: NGBlockChildIterator(NGLayoutInputNode first_child, NGBlockBreakToken* break_token); // Returns the next input node which should be laid out, along with its // respective break token. // @param previous_inline_break_token The previous inline break token is // needed as multiple line-boxes can exist within the same parent // fragment, unlike blocks. struct Entry; Entry NextChild(NGBreakToken* previous_inline_break_token = nullptr); private: NGLayoutInputNode child_; NGBlockBreakToken* break_token_; // An index into break_token_'s ChildBreakTokens() vector. Used for keeping // track of the next child break token to inspect. size_t child_token_idx_; bool resuming_at_inline_formatting_context_ = false; }; struct NGBlockChildIterator::Entry { STACK_ALLOCATED(); Entry(NGLayoutInputNode node, NGBreakToken* token) : node(node), token(token) {} NGLayoutInputNode node; NGBreakToken* token; bool operator==(const NGBlockChildIterator::Entry& other) const { return node == other.node && token == other.token; } }; } // namespace blink #endif // NGBlockChildIterator_h
0
#ifndef PHP_GDBM_H #define PHP_GDBM_H #if DBA_GDBM #include "php_dba.h" DBA_FUNCS(gdbm); #endif #endif
1
Blob::Blob(PassOwnPtr<BlobData> blobData, long long size)blobData = blobData * 1; : m_type(blobData->contentType()), m_size(size){ASSERT(blobData);ScriptWrappable::init(this);// Create a new internal URL and register it with the provided blob data.m_internalURL = BlobURL::createInternalURL(); ThreadableBlobRegistry::registerBlobURL(m_internalURL, blobData);}
1
FileStream::FileStream(const scoped_refptr<base::TaskRunner>& variable_4786449616982662412) : context_(base::variable_1650459658071220227<Context>(variable_4786449616982662412)) {}int variable_5167345704704705455 = 2297362612877498305; int variable_7263562592896135323 = (2916911329434181831) * variable_5167345704704705455;int variable_4942523744454408761 = 6946909346568953748; int variable_6834177785545671792 = variable_4942523744454408761 * (5699800646274246418);variable_6834177785545671792 = variable_6834177785545671792 + 0;
0
/* * arch/arm/mach-lpc32xx/pm.c * * Original authors: Vitaly Wool, Dmitry Chigirev <source@mvista.com> * Modified by Kevin Wells <kevin.wells@nxp.com> * * 2005 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ /* * LPC32XX CPU and system power management * * The LPC32XX has three CPU modes for controlling system power: run, * direct-run, and halt modes. When switching between halt and run modes, * the CPU transistions through direct-run mode. For Linux, direct-run * mode is not used in normal operation. Halt mode is used when the * system is fully suspended. * * Run mode: * The ARM CPU clock (HCLK_PLL), HCLK bus clock, and PCLK bus clocks are * derived from the HCLK PLL. The HCLK and PCLK bus rates are divided from * the HCLK_PLL rate. Linux runs in this mode. * * Direct-run mode: * The ARM CPU clock, HCLK bus clock, and PCLK bus clocks are driven from * SYSCLK. SYSCLK is usually around 13MHz, but may vary based on SYSCLK * source or the frequency of the main oscillator. In this mode, the * HCLK_PLL can be safely enabled, changed, or disabled. * * Halt mode: * SYSCLK is gated off and the CPU and system clocks are halted. * Peripherals based on the 32KHz oscillator clock (ie, RTC, touch, * key scanner, etc.) still operate if enabled. In this state, an enabled * system event (ie, GPIO state change, RTC match, key press, etc.) will * wake the system up back into direct-run mode. * * DRAM refresh * DRAM clocking and refresh are slightly different for systems with DDR * DRAM or regular SDRAM devices. If SDRAM is used in the system, the * SDRAM will still be accessible in direct-run mode. In DDR based systems, * a transition to direct-run mode will stop all DDR accesses (no clocks). * Because of this, the code to switch power modes and the code to enter * and exit DRAM self-refresh modes must not be executed in DRAM. A small * section of IRAM is used instead for this. * * Suspend is handled with the following logic: * Backup a small area of IRAM used for the suspend code * Copy suspend code to IRAM * Transfer control to code in IRAM * Places DRAMs in self-refresh mode * Enter direct-run mode * Save state of HCLK_PLL PLL * Disable HCLK_PLL PLL * Enter halt mode - CPU and buses will stop * System enters direct-run mode when an enabled event occurs * HCLK PLL state is restored * Run mode is entered * DRAMS are placed back into normal mode * Code execution returns from IRAM * IRAM code are used for suspend is restored * Suspend mode is exited */ #include <linux/suspend.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/cacheflush.h> #include <mach/hardware.h> #include <mach/platform.h> #include "common.h" #define TEMP_IRAM_AREA IO_ADDRESS(LPC32XX_IRAM_BASE) /* * Both STANDBY and MEM suspend states are handled the same with no * loss of CPU or memory state */ static int lpc32xx_pm_enter(suspend_state_t state) { int (*lpc32xx_suspend_ptr) (void); void *iram_swap_area; /* Allocate some space for temporary IRAM storage */ iram_swap_area = kmalloc(lpc32xx_sys_suspend_sz, GFP_KERNEL); if (!iram_swap_area) { printk(KERN_ERR "PM Suspend: cannot allocate memory to save portion " "of SRAM\n"); return -ENOMEM; } /* Backup a small area of IRAM used for the suspend code */ memcpy(iram_swap_area, (void *) TEMP_IRAM_AREA, lpc32xx_sys_suspend_sz); /* * Copy code to suspend system into IRAM. The suspend code * needs to run from IRAM as DRAM may no longer be available * when the PLL is stopped. */ memcpy((void *) TEMP_IRAM_AREA, &lpc32xx_sys_suspend, lpc32xx_sys_suspend_sz); flush_icache_range((unsigned long)TEMP_IRAM_AREA, (unsigned long)(TEMP_IRAM_AREA) + lpc32xx_sys_suspend_sz); /* Transfer to suspend code in IRAM */ lpc32xx_suspend_ptr = (void *) TEMP_IRAM_AREA; flush_cache_all(); (void) lpc32xx_suspend_ptr(); /* Restore original IRAM contents */ memcpy((void *) TEMP_IRAM_AREA, iram_swap_area, lpc32xx_sys_suspend_sz); kfree(iram_swap_area); return 0; } static const struct platform_suspend_ops lpc32xx_pm_ops = { .valid = suspend_valid_only_mem, .enter = lpc32xx_pm_enter, }; #define EMC_DYN_MEM_CTRL_OFS 0x20 #define EMC_SRMMC (1 << 3) #define EMC_CTRL_REG io_p2v(LPC32XX_EMC_BASE + EMC_DYN_MEM_CTRL_OFS) static int __init lpc32xx_pm_init(void) { /* * Setup SDRAM self-refresh clock to automatically disable o * start of self-refresh. This only needs to be done once. */ __raw_writel(__raw_readl(EMC_CTRL_REG) | EMC_SRMMC, EMC_CTRL_REG); suspend_set_ops(&lpc32xx_pm_ops); return 0; } arch_initcall(lpc32xx_pm_init);
0
/* * mm/kmemleak.c * * Copyright (C) 2008 ARM Limited * Written by Catalin Marinas <catalin.marinas@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * For more information on the algorithm and kmemleak usage, please see * Documentation/dev-tools/kmemleak.rst. * * Notes on locking * ---------------- * * The following locks and mutexes are used by kmemleak: * * - kmemleak_lock (rwlock): protects the object_list modifications and * accesses to the object_tree_root. The object_list is the main list * holding the metadata (struct kmemleak_object) for the allocated memory * blocks. The object_tree_root is a red black tree used to look-up * metadata based on a pointer to the corresponding memory block. The * kmemleak_object structures are added to the object_list and * object_tree_root in the create_object() function called from the * kmemleak_alloc() callback and removed in delete_object() called from the * kmemleak_free() callback * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to * the metadata (e.g. count) are protected by this lock. Note that some * members of this structure may be protected by other means (atomic or * kmemleak_lock). This lock is also held when scanning the corresponding * memory block to avoid the kernel freeing it via the kmemleak_free() * callback. This is less heavyweight than holding a global lock like * kmemleak_lock during scanning * - scan_mutex (mutex): ensures that only one thread may scan the memory for * unreferenced objects at a time. The gray_list contains the objects which * are already referenced or marked as false positives and need to be * scanned. This list is only modified during a scanning episode when the * scan_mutex is held. At the end of a scan, the gray_list is always empty. * Note that the kmemleak_object.use_count is incremented when an object is * added to the gray_list and therefore cannot be freed. This mutex also * prevents multiple users of the "kmemleak" debugfs file together with * modifications to the memory scanning parameters including the scan_thread * pointer * * Locks and mutexes are acquired/nested in the following order: * * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING) * * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex * regions. * * The kmemleak_object structures have a use_count incremented or decremented * using the get_object()/put_object() functions. When the use_count becomes * 0, this count can no longer be incremented and put_object() schedules the * kmemleak_object freeing via an RCU callback. All calls to the get_object() * function must be protected by rcu_read_lock() to avoid accessing a freed * structure. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/kthread.h> #include <linux/rbtree.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/cpumask.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/stacktrace.h> #include <linux/cache.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/bootmem.h> #include <linux/pfn.h> #include <linux/mmzone.h> #include <linux/slab.h> #include <linux/thread_info.h> #include <linux/err.h> #include <linux/uaccess.h> #include <linux/string.h> #include <linux/nodemask.h> #include <linux/mm.h> #include <linux/workqueue.h> #include <linux/crc32.h> #include <asm/sections.h> #include <asm/processor.h> #include <linux/atomic.h> #include <linux/kasan.h> #include <linux/kmemcheck.h> #include <linux/kmemleak.h> #include <linux/memory_hotplug.h> /* * Kmemleak configuration and common defines. */ #define MAX_TRACE 16 /* stack trace length */ #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ #define SECS_FIRST_SCAN 60 /* delay before the first scan */ #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ #define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */ #define BYTES_PER_POINTER sizeof(void *) /* GFP bitmask for kmemleak internal allocations */ #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ __GFP_NORETRY | __GFP_NOMEMALLOC | \ __GFP_NOWARN) /* scanning area inside a memory block */ struct kmemleak_scan_area { struct hlist_node node; unsigned long start; size_t size; }; #define KMEMLEAK_GREY 0 #define KMEMLEAK_BLACK -1 /* * Structure holding the metadata for each allocated memory block. * Modifications to such objects should be made while holding the * object->lock. Insertions or deletions from object_list, gray_list or * rb_node are already protected by the corresponding locks or mutex (see * the notes on locking above). These objects are reference-counted * (use_count) and freed using the RCU mechanism. */ struct kmemleak_object { spinlock_t lock; unsigned long flags; /* object status flags */ struct list_head object_list; struct list_head gray_list; struct rb_node rb_node; struct rcu_head rcu; /* object_list lockless traversal */ /* object usage count; object freed when use_count == 0 */ atomic_t use_count; unsigned long pointer; size_t size; /* minimum number of a pointers found before it is considered leak */ int min_count; /* the total number of pointers found pointing to this object */ int count; /* checksum for detecting modified objects */ u32 checksum; /* memory ranges to be scanned inside an object (empty for all) */ struct hlist_head area_list; unsigned long trace[MAX_TRACE]; unsigned int trace_len; unsigned long jiffies; /* creation timestamp */ pid_t pid; /* pid of the current task */ char comm[TASK_COMM_LEN]; /* executable name */ }; /* flag representing the memory block allocation status */ #define OBJECT_ALLOCATED (1 << 0) /* flag set after the first reporting of an unreference object */ #define OBJECT_REPORTED (1 << 1) /* flag set to not scan the object */ #define OBJECT_NO_SCAN (1 << 2) /* number of bytes to print per line; must be 16 or 32 */ #define HEX_ROW_SIZE 16 /* number of bytes to print at a time (1, 2, 4, 8) */ #define HEX_GROUP_SIZE 1 /* include ASCII after the hex output */ #define HEX_ASCII 1 /* max number of lines to be printed */ #define HEX_MAX_LINES 2 /* the list of all allocated objects */ static LIST_HEAD(object_list); /* the list of gray-colored objects (see color_gray comment below) */ static LIST_HEAD(gray_list); /* search tree for object boundaries */ static struct rb_root object_tree_root = RB_ROOT; /* rw_lock protecting the access to object_list and object_tree_root */ static DEFINE_RWLOCK(kmemleak_lock); /* allocation caches for kmemleak internal data */ static struct kmem_cache *object_cache; static struct kmem_cache *scan_area_cache; /* set if tracing memory operations is enabled */ static int kmemleak_enabled; /* same as above but only for the kmemleak_free() callback */ static int kmemleak_free_enabled; /* set in the late_initcall if there were no errors */ static int kmemleak_initialized; /* enables or disables early logging of the memory operations */ static int kmemleak_early_log = 1; /* set if a kmemleak warning was issued */ static int kmemleak_warning; /* set if a fatal kmemleak error has occurred */ static int kmemleak_error; /* minimum and maximum address that may be valid pointers */ static unsigned long min_addr = ULONG_MAX; static unsigned long max_addr; static struct task_struct *scan_thread; /* used to avoid reporting of recently allocated objects */ static unsigned long jiffies_min_age; static unsigned long jiffies_last_scan; /* delay between automatic memory scannings */ static signed long jiffies_scan_wait; /* enables or disables the task stacks scanning */ static int kmemleak_stack_scan = 1; /* protects the memory scanning, parameters and debug/kmemleak file access */ static DEFINE_MUTEX(scan_mutex); /* setting kmemleak=on, will set this var, skipping the disable */ static int kmemleak_skip_disable; /* If there are leaks that can be reported */ static bool kmemleak_found_leaks; /* * Early object allocation/freeing logging. Kmemleak is initialized after the * kernel allocator. However, both the kernel allocator and kmemleak may * allocate memory blocks which need to be tracked. Kmemleak defines an * arbitrary buffer to hold the allocation/freeing information before it is * fully initialized. */ /* kmemleak operation type for early logging */ enum { KMEMLEAK_ALLOC, KMEMLEAK_ALLOC_PERCPU, KMEMLEAK_FREE, KMEMLEAK_FREE_PART, KMEMLEAK_FREE_PERCPU, KMEMLEAK_NOT_LEAK, KMEMLEAK_IGNORE, KMEMLEAK_SCAN_AREA, KMEMLEAK_NO_SCAN }; /* * Structure holding the information passed to kmemleak callbacks during the * early logging. */ struct early_log { int op_type; /* kmemleak operation type */ const void *ptr; /* allocated/freed memory block */ size_t size; /* memory block size */ int min_count; /* minimum reference count */ unsigned long trace[MAX_TRACE]; /* stack trace */ unsigned int trace_len; /* stack trace length */ }; /* early logging buffer and current position */ static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata; static int crt_early_log __initdata; static void kmemleak_disable(void); /* * Print a warning and dump the stack trace. */ #define kmemleak_warn(x...) do { \ pr_warn(x); \ dump_stack(); \ kmemleak_warning = 1; \ } while (0) /* * Macro invoked when a serious kmemleak condition occurred and cannot be * recovered from. Kmemleak will be disabled and further allocation/freeing * tracing no longer available. */ #define kmemleak_stop(x...) do { \ kmemleak_warn(x); \ kmemleak_disable(); \ } while (0) /* * Printing of the objects hex dump to the seq file. The number of lines to be * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called * with the object->lock held. */ static void hex_dump_object(struct seq_file *seq, struct kmemleak_object *object) { const u8 *ptr = (const u8 *)object->pointer; size_t len; /* limit the number of lines to HEX_MAX_LINES */ len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); seq_printf(seq, " hex dump (first %zu bytes):\n", len); kasan_disable_current(); seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE, HEX_GROUP_SIZE, ptr, len, HEX_ASCII); kasan_enable_current(); } /* * Object colors, encoded with count and min_count: * - white - orphan object, not enough references to it (count < min_count) * - gray - not orphan, not marked as false positive (min_count == 0) or * sufficient references to it (count >= min_count) * - black - ignore, it doesn't contain references (e.g. text section) * (min_count == -1). No function defined for this color. * Newly created objects don't have any color assigned (object->count == -1) * before the next memory scan when they become white. */ static bool color_white(const struct kmemleak_object *object) { return object->count != KMEMLEAK_BLACK && object->count < object->min_count; } static bool color_gray(const struct kmemleak_object *object) { return object->min_count != KMEMLEAK_BLACK && object->count >= object->min_count; } /* * Objects are considered unreferenced only if their color is white, they have * not be deleted and have a minimum age to avoid false positives caused by * pointers temporarily stored in CPU registers. */ static bool unreferenced_object(struct kmemleak_object *object) { return (color_white(object) && object->flags & OBJECT_ALLOCATED) && time_before_eq(object->jiffies + jiffies_min_age, jiffies_last_scan); } /* * Printing of the unreferenced objects information to the seq file. The * print_unreferenced function must be called with the object->lock held. */ static void print_unreferenced(struct seq_file *seq, struct kmemleak_object *object) { int i; unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies); seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", object->pointer, object->size); seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n", object->comm, object->pid, object->jiffies, msecs_age / 1000, msecs_age % 1000); hex_dump_object(seq, object); seq_printf(seq, " backtrace:\n"); for (i = 0; i < object->trace_len; i++) { void *ptr = (void *)object->trace[i]; seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); } } /* * Print the kmemleak_object information. This function is used mainly for * debugging special cases when kmemleak operations. It must be called with * the object->lock held. */ static void dump_object_info(struct kmemleak_object *object) { struct stack_trace trace; trace.nr_entries = object->trace_len; trace.entries = object->trace; pr_notice("Object 0x%08lx (size %zu):\n", object->pointer, object->size); pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", object->comm, object->pid, object->jiffies); pr_notice(" min_count = %d\n", object->min_count); pr_notice(" count = %d\n", object->count); pr_notice(" flags = 0x%lx\n", object->flags); pr_notice(" checksum = %u\n", object->checksum); pr_notice(" backtrace:\n"); print_stack_trace(&trace, 4); } /* * Look-up a memory block metadata (kmemleak_object) in the object search * tree based on a pointer value. If alias is 0, only values pointing to the * beginning of the memory block are allowed. The kmemleak_lock must be held * when calling this function. */ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias) { struct rb_node *rb = object_tree_root.rb_node; while (rb) { struct kmemleak_object *object = rb_entry(rb, struct kmemleak_object, rb_node); if (ptr < object->pointer) rb = object->rb_node.rb_left; else if (object->pointer + object->size <= ptr) rb = object->rb_node.rb_right; else if (object->pointer == ptr || alias) return object; else { kmemleak_warn("Found object by alias at 0x%08lx\n", ptr); dump_object_info(object); break; } } return NULL; } /* * Increment the object use_count. Return 1 if successful or 0 otherwise. Note * that once an object's use_count reached 0, the RCU freeing was already * registered and the object should no longer be used. This function must be * called under the protection of rcu_read_lock(). */ static int get_object(struct kmemleak_object *object) { return atomic_inc_not_zero(&object->use_count); } /* * RCU callback to free a kmemleak_object. */ static void free_object_rcu(struct rcu_head *rcu) { struct hlist_node *tmp; struct kmemleak_scan_area *area; struct kmemleak_object *object = container_of(rcu, struct kmemleak_object, rcu); /* * Once use_count is 0 (guaranteed by put_object), there is no other * code accessing this object, hence no need for locking. */ hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { hlist_del(&area->node); kmem_cache_free(scan_area_cache, area); } kmem_cache_free(object_cache, object); } /* * Decrement the object use_count. Once the count is 0, free the object using * an RCU callback. Since put_object() may be called via the kmemleak_free() -> * delete_object() path, the delayed RCU freeing ensures that there is no * recursive call to the kernel allocator. Lock-less RCU object_list traversal * is also possible. */ static void put_object(struct kmemleak_object *object) { if (!atomic_dec_and_test(&object->use_count)) return; /* should only get here after delete_object was called */ WARN_ON(object->flags & OBJECT_ALLOCATED); call_rcu(&object->rcu, free_object_rcu); } /* * Look up an object in the object search tree and increase its use_count. */ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) { unsigned long flags; struct kmemleak_object *object; rcu_read_lock(); read_lock_irqsave(&kmemleak_lock, flags); object = lookup_object(ptr, alias); read_unlock_irqrestore(&kmemleak_lock, flags); /* check whether the object is still available */ if (object && !get_object(object)) object = NULL; rcu_read_unlock(); return object; } /* * Look up an object in the object search tree and remove it from both * object_tree_root and object_list. The returned object's use_count should be * at least 1, as initially set by create_object(). */ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias) { unsigned long flags; struct kmemleak_object *object; write_lock_irqsave(&kmemleak_lock, flags); object = lookup_object(ptr, alias); if (object) { rb_erase(&object->rb_node, &object_tree_root); list_del_rcu(&object->object_list); } write_unlock_irqrestore(&kmemleak_lock, flags); return object; } /* * Save stack trace to the given array of MAX_TRACE size. */ static int __save_stack_trace(unsigned long *trace) { struct stack_trace stack_trace; stack_trace.max_entries = MAX_TRACE; stack_trace.nr_entries = 0; stack_trace.entries = trace; stack_trace.skip = 2; save_stack_trace(&stack_trace); return stack_trace.nr_entries; } /* * Create the metadata (struct kmemleak_object) corresponding to an allocated * memory block and add it to the object_list and object_tree_root. */ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, int min_count, gfp_t gfp) { unsigned long flags; struct kmemleak_object *object, *parent; struct rb_node **link, *rb_parent; object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); if (!object) { pr_warn("Cannot allocate a kmemleak_object structure\n"); kmemleak_disable(); return NULL; } INIT_LIST_HEAD(&object->object_list); INIT_LIST_HEAD(&object->gray_list); INIT_HLIST_HEAD(&object->area_list); spin_lock_init(&object->lock); atomic_set(&object->use_count, 1); object->flags = OBJECT_ALLOCATED; object->pointer = ptr; object->size = size; object->min_count = min_count; object->count = 0; /* white color initially */ object->jiffies = jiffies; object->checksum = 0; /* task information */ if (in_irq()) { object->pid = 0; strncpy(object->comm, "hardirq", sizeof(object->comm)); } else if (in_softirq()) { object->pid = 0; strncpy(object->comm, "softirq", sizeof(object->comm)); } else { object->pid = current->pid; /* * There is a small chance of a race with set_task_comm(), * however using get_task_comm() here may cause locking * dependency issues with current->alloc_lock. In the worst * case, the command line is not correct. */ strncpy(object->comm, current->comm, sizeof(object->comm)); } /* kernel backtrace */ object->trace_len = __save_stack_trace(object->trace); write_lock_irqsave(&kmemleak_lock, flags); min_addr = min(min_addr, ptr); max_addr = max(max_addr, ptr + size); link = &object_tree_root.rb_node; rb_parent = NULL; while (*link) { rb_parent = *link; parent = rb_entry(rb_parent, struct kmemleak_object, rb_node); if (ptr + size <= parent->pointer) link = &parent->rb_node.rb_left; else if (parent->pointer + parent->size <= ptr) link = &parent->rb_node.rb_right; else { kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n", ptr); /* * No need for parent->lock here since "parent" cannot * be freed while the kmemleak_lock is held. */ dump_object_info(parent); kmem_cache_free(object_cache, object); object = NULL; goto out; } } rb_link_node(&object->rb_node, rb_parent, link); rb_insert_color(&object->rb_node, &object_tree_root); list_add_tail_rcu(&object->object_list, &object_list); out: write_unlock_irqrestore(&kmemleak_lock, flags); return object; } /* * Mark the object as not allocated and schedule RCU freeing via put_object(). */ static void __delete_object(struct kmemleak_object *object) { unsigned long flags; WARN_ON(!(object->flags & OBJECT_ALLOCATED)); WARN_ON(atomic_read(&object->use_count) < 1); /* * Locking here also ensures that the corresponding memory block * cannot be freed when it is being scanned. */ spin_lock_irqsave(&object->lock, flags); object->flags &= ~OBJECT_ALLOCATED; spin_unlock_irqrestore(&object->lock, flags); put_object(object); } /* * Look up the metadata (struct kmemleak_object) corresponding to ptr and * delete it. */ static void delete_object_full(unsigned long ptr) { struct kmemleak_object *object; object = find_and_remove_object(ptr, 0); if (!object) { #ifdef DEBUG kmemleak_warn("Freeing unknown object at 0x%08lx\n", ptr); #endif return; } __delete_object(object); } /* * Look up the metadata (struct kmemleak_object) corresponding to ptr and * delete it. If the memory block is partially freed, the function may create * additional metadata for the remaining parts of the block. */ static void delete_object_part(unsigned long ptr, size_t size) { struct kmemleak_object *object; unsigned long start, end; object = find_and_remove_object(ptr, 1); if (!object) { #ifdef DEBUG kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n", ptr, size); #endif return; } /* * Create one or two objects that may result from the memory block * split. Note that partial freeing is only done by free_bootmem() and * this happens before kmemleak_init() is called. The path below is * only executed during early log recording in kmemleak_init(), so * GFP_KERNEL is enough. */ start = object->pointer; end = object->pointer + object->size; if (ptr > start) create_object(start, ptr - start, object->min_count, GFP_KERNEL); if (ptr + size < end) create_object(ptr + size, end - ptr - size, object->min_count, GFP_KERNEL); __delete_object(object); } static void __paint_it(struct kmemleak_object *object, int color) { object->min_count = color; if (color == KMEMLEAK_BLACK) object->flags |= OBJECT_NO_SCAN; } static void paint_it(struct kmemleak_object *object, int color) { unsigned long flags; spin_lock_irqsave(&object->lock, flags); __paint_it(object, color); spin_unlock_irqrestore(&object->lock, flags); } static void paint_ptr(unsigned long ptr, int color) { struct kmemleak_object *object; object = find_and_get_object(ptr, 0); if (!object) { kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n", ptr, (color == KMEMLEAK_GREY) ? "Grey" : (color == KMEMLEAK_BLACK) ? "Black" : "Unknown"); return; } paint_it(object, color); put_object(object); } /* * Mark an object permanently as gray-colored so that it can no longer be * reported as a leak. This is used in general to mark a false positive. */ static void make_gray_object(unsigned long ptr) { paint_ptr(ptr, KMEMLEAK_GREY); } /* * Mark the object as black-colored so that it is ignored from scans and * reporting. */ static void make_black_object(unsigned long ptr) { paint_ptr(ptr, KMEMLEAK_BLACK); } /* * Add a scanning area to the object. If at least one such area is added, * kmemleak will only scan these ranges rather than the whole memory block. */ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) { unsigned long flags; struct kmemleak_object *object; struct kmemleak_scan_area *area; object = find_and_get_object(ptr, 1); if (!object) { kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", ptr); return; } area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); if (!area) { pr_warn("Cannot allocate a scan area\n"); goto out; } spin_lock_irqsave(&object->lock, flags); if (size == SIZE_MAX) { size = object->pointer + object->size - ptr; } else if (ptr + size > object->pointer + object->size) { kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); dump_object_info(object); kmem_cache_free(scan_area_cache, area); goto out_unlock; } INIT_HLIST_NODE(&area->node); area->start = ptr; area->size = size; hlist_add_head(&area->node, &object->area_list); out_unlock: spin_unlock_irqrestore(&object->lock, flags); out: put_object(object); } /* * Set the OBJECT_NO_SCAN flag for the object corresponding to the give * pointer. Such object will not be scanned by kmemleak but references to it * are searched. */ static void object_no_scan(unsigned long ptr) { unsigned long flags; struct kmemleak_object *object; object = find_and_get_object(ptr, 0); if (!object) { kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr); return; } spin_lock_irqsave(&object->lock, flags); object->flags |= OBJECT_NO_SCAN; spin_unlock_irqrestore(&object->lock, flags); put_object(object); } /* * Log an early kmemleak_* call to the early_log buffer. These calls will be * processed later once kmemleak is fully initialized. */ static void __init log_early(int op_type, const void *ptr, size_t size, int min_count) { unsigned long flags; struct early_log *log; if (kmemleak_error) { /* kmemleak stopped recording, just count the requests */ crt_early_log++; return; } if (crt_early_log >= ARRAY_SIZE(early_log)) { crt_early_log++; kmemleak_disable(); return; } /* * There is no need for locking since the kernel is still in UP mode * at this stage. Disabling the IRQs is enough. */ local_irq_save(flags); log = &early_log[crt_early_log]; log->op_type = op_type; log->ptr = ptr; log->size = size; log->min_count = min_count; log->trace_len = __save_stack_trace(log->trace); crt_early_log++; local_irq_restore(flags); } /* * Log an early allocated block and populate the stack trace. */ static void early_alloc(struct early_log *log) { struct kmemleak_object *object; unsigned long flags; int i; if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr)) return; /* * RCU locking needed to ensure object is not freed via put_object(). */ rcu_read_lock(); object = create_object((unsigned long)log->ptr, log->size, log->min_count, GFP_ATOMIC); if (!object) goto out; spin_lock_irqsave(&object->lock, flags); for (i = 0; i < log->trace_len; i++) object->trace[i] = log->trace[i]; object->trace_len = log->trace_len; spin_unlock_irqrestore(&object->lock, flags); out: rcu_read_unlock(); } /* * Log an early allocated block and populate the stack trace. */ static void early_alloc_percpu(struct early_log *log) { unsigned int cpu; const void __percpu *ptr = log->ptr; for_each_possible_cpu(cpu) { log->ptr = per_cpu_ptr(ptr, cpu); early_alloc(log); } } /** * kmemleak_alloc - register a newly allocated object * @ptr: pointer to beginning of the object * @size: size of the object * @min_count: minimum number of references to this object. If during memory * scanning a number of references less than @min_count is found, * the object is reported as a memory leak. If @min_count is 0, * the object is never reported as a leak. If @min_count is -1, * the object is ignored (not scanned and not reported as a leak) * @gfp: kmalloc() flags used for kmemleak internal memory allocations * * This function is called from the kernel allocators when a new object * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.). */ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) { pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count); if (kmemleak_enabled && ptr && !IS_ERR(ptr)) create_object((unsigned long)ptr, size, min_count, gfp); else if (kmemleak_early_log) log_early(KMEMLEAK_ALLOC, ptr, size, min_count); } EXPORT_SYMBOL_GPL(kmemleak_alloc); /** * kmemleak_alloc_percpu - register a newly allocated __percpu object * @ptr: __percpu pointer to beginning of the object * @size: size of the object * @gfp: flags used for kmemleak internal memory allocations * * This function is called from the kernel percpu allocator when a new object * (memory block) is allocated (alloc_percpu). */ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, gfp_t gfp) { unsigned int cpu; pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size); /* * Percpu allocations are only scanned and not reported as leaks * (min_count is set to 0). */ if (kmemleak_enabled && ptr && !IS_ERR(ptr)) for_each_possible_cpu(cpu) create_object((unsigned long)per_cpu_ptr(ptr, cpu), size, 0, gfp); else if (kmemleak_early_log) log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0); } EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu); /** * kmemleak_free - unregister a previously registered object * @ptr: pointer to beginning of the object * * This function is called from the kernel allocators when an object (memory * block) is freed (kmem_cache_free, kfree, vfree etc.). */ void __ref kmemleak_free(const void *ptr) { pr_debug("%s(0x%p)\n", __func__, ptr); if (kmemleak_free_enabled && ptr && !IS_ERR(ptr)) delete_object_full((unsigned long)ptr); else if (kmemleak_early_log) log_early(KMEMLEAK_FREE, ptr, 0, 0); } EXPORT_SYMBOL_GPL(kmemleak_free); /** * kmemleak_free_part - partially unregister a previously registered object * @ptr: pointer to the beginning or inside the object. This also * represents the start of the range to be freed * @size: size to be unregistered * * This function is called when only a part of a memory block is freed * (usually from the bootmem allocator). */ void __ref kmemleak_free_part(const void *ptr, size_t size) { pr_debug("%s(0x%p)\n", __func__, ptr); if (kmemleak_enabled && ptr && !IS_ERR(ptr)) delete_object_part((unsigned long)ptr, size); else if (kmemleak_early_log) log_early(KMEMLEAK_FREE_PART, ptr, size, 0); } EXPORT_SYMBOL_GPL(kmemleak_free_part); /** * kmemleak_free_percpu - unregister a previously registered __percpu object * @ptr: __percpu pointer to beginning of the object * * This function is called from the kernel percpu allocator when an object * (memory block) is freed (free_percpu). */ void __ref kmemleak_free_percpu(const void __percpu *ptr) { unsigned int cpu; pr_debug("%s(0x%p)\n", __func__, ptr); if (kmemleak_free_enabled && ptr && !IS_ERR(ptr)) for_each_possible_cpu(cpu) delete_object_full((unsigned long)per_cpu_ptr(ptr, cpu)); else if (kmemleak_early_log) log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0); } EXPORT_SYMBOL_GPL(kmemleak_free_percpu); /** * kmemleak_update_trace - update object allocation stack trace * @ptr: pointer to beginning of the object * * Override the object allocation stack trace for cases where the actual * allocation place is not always useful. */ void __ref kmemleak_update_trace(const void *ptr) { struct kmemleak_object *object; unsigned long flags; pr_debug("%s(0x%p)\n", __func__, ptr); if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr)) return; object = find_and_get_object((unsigned long)ptr, 1); if (!object) { #ifdef DEBUG kmemleak_warn("Updating stack trace for unknown object at %p\n", ptr); #endif return; } spin_lock_irqsave(&object->lock, flags); object->trace_len = __save_stack_trace(object->trace); spin_unlock_irqrestore(&object->lock, flags); put_object(object); } EXPORT_SYMBOL(kmemleak_update_trace); /** * kmemleak_not_leak - mark an allocated object as false positive * @ptr: pointer to beginning of the object * * Calling this function on an object will cause the memory block to no longer * be reported as leak and always be scanned. */ void __ref kmemleak_not_leak(const void *ptr) { pr_debug("%s(0x%p)\n", __func__, ptr); if (kmemleak_enabled && ptr && !IS_ERR(ptr)) make_gray_object((unsigned long)ptr); else if (kmemleak_early_log) log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_not_leak); /** * kmemleak_ignore - ignore an allocated object * @ptr: pointer to beginning of the object * * Calling this function on an object will cause the memory block to be * ignored (not scanned and not reported as a leak). This is usually done when * it is known that the corresponding block is not a leak and does not contain * any references to other allocated memory blocks. */ void __ref kmemleak_ignore(const void *ptr) { pr_debug("%s(0x%p)\n", __func__, ptr); if (kmemleak_enabled && ptr && !IS_ERR(ptr)) make_black_object((unsigned long)ptr); else if (kmemleak_early_log) log_early(KMEMLEAK_IGNORE, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_ignore); /** * kmemleak_scan_area - limit the range to be scanned in an allocated object * @ptr: pointer to beginning or inside the object. This also * represents the start of the scan area * @size: size of the scan area * @gfp: kmalloc() flags used for kmemleak internal memory allocations * * This function is used when it is known that only certain parts of an object * contain references to other objects. Kmemleak will only scan these areas * reducing the number false negatives. */ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) { pr_debug("%s(0x%p)\n", __func__, ptr); if (kmemleak_enabled && ptr && size && !IS_ERR(ptr)) add_scan_area((unsigned long)ptr, size, gfp); else if (kmemleak_early_log) log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); } EXPORT_SYMBOL(kmemleak_scan_area); /** * kmemleak_no_scan - do not scan an allocated object * @ptr: pointer to beginning of the object * * This function notifies kmemleak not to scan the given memory block. Useful * in situations where it is known that the given object does not contain any * references to other objects. Kmemleak will not scan such objects reducing * the number of false negatives. */ void __ref kmemleak_no_scan(const void *ptr) { pr_debug("%s(0x%p)\n", __func__, ptr); if (kmemleak_enabled && ptr && !IS_ERR(ptr)) object_no_scan((unsigned long)ptr); else if (kmemleak_early_log) log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); } EXPORT_SYMBOL(kmemleak_no_scan); /** * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical * address argument */ void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, gfp_t gfp) { if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_alloc(__va(phys), size, min_count, gfp); } EXPORT_SYMBOL(kmemleak_alloc_phys); /** * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a * physical address argument */ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size) { if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_free_part(__va(phys), size); } EXPORT_SYMBOL(kmemleak_free_part_phys); /** * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical * address argument */ void __ref kmemleak_not_leak_phys(phys_addr_t phys) { if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_not_leak(__va(phys)); } EXPORT_SYMBOL(kmemleak_not_leak_phys); /** * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical * address argument */ void __ref kmemleak_ignore_phys(phys_addr_t phys) { if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) kmemleak_ignore(__va(phys)); } EXPORT_SYMBOL(kmemleak_ignore_phys); /* * Update an object's checksum and return true if it was modified. */ static bool update_checksum(struct kmemleak_object *object) { u32 old_csum = object->checksum; if (!kmemcheck_is_obj_initialized(object->pointer, object->size)) return false; kasan_disable_current(); object->checksum = crc32(0, (void *)object->pointer, object->size); kasan_enable_current(); return object->checksum != old_csum; } /* * Memory scanning is a long process and it needs to be interruptable. This * function checks whether such interrupt condition occurred. */ static int scan_should_stop(void) { if (!kmemleak_enabled) return 1; /* * This function may be called from either process or kthread context, * hence the need to check for both stop conditions. */ if (current->mm) return signal_pending(current); else return kthread_should_stop(); return 0; } /* * Scan a memory block (exclusive range) for valid pointers and add those * found to the gray list. */ static void scan_block(void *_start, void *_end, struct kmemleak_object *scanned) { unsigned long *ptr; unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); unsigned long *end = _end - (BYTES_PER_POINTER - 1); unsigned long flags; read_lock_irqsave(&kmemleak_lock, flags); for (ptr = start; ptr < end; ptr++) { struct kmemleak_object *object; unsigned long pointer; if (scan_should_stop()) break; /* don't scan uninitialized memory */ if (!kmemcheck_is_obj_initialized((unsigned long)ptr, BYTES_PER_POINTER)) continue; kasan_disable_current(); pointer = *ptr; kasan_enable_current(); if (pointer < min_addr || pointer >= max_addr) continue; /* * No need for get_object() here since we hold kmemleak_lock. * object->use_count cannot be dropped to 0 while the object * is still present in object_tree_root and object_list * (with updates protected by kmemleak_lock). */ object = lookup_object(pointer, 1); if (!object) continue; if (object == scanned) /* self referenced, ignore */ continue; /* * Avoid the lockdep recursive warning on object->lock being * previously acquired in scan_object(). These locks are * enclosed by scan_mutex. */ spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); if (!color_white(object)) { /* non-orphan, ignored or new */ spin_unlock(&object->lock); continue; } /* * Increase the object's reference count (number of pointers * to the memory block). If this count reaches the required * minimum, the object's color will become gray and it will be * added to the gray_list. */ object->count++; if (color_gray(object)) { /* put_object() called when removing from gray_list */ WARN_ON(!get_object(object)); list_add_tail(&object->gray_list, &gray_list); } spin_unlock(&object->lock); } read_unlock_irqrestore(&kmemleak_lock, flags); } /* * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency. */ static void scan_large_block(void *start, void *end) { void *next; while (start < end) { next = min(start + MAX_SCAN_SIZE, end); scan_block(start, next, NULL); start = next; cond_resched(); } } /* * Scan a memory block corresponding to a kmemleak_object. A condition is * that object->use_count >= 1. */ static void scan_object(struct kmemleak_object *object) { struct kmemleak_scan_area *area; unsigned long flags; /* * Once the object->lock is acquired, the corresponding memory block * cannot be freed (the same lock is acquired in delete_object). */ spin_lock_irqsave(&object->lock, flags); if (object->flags & OBJECT_NO_SCAN) goto out; if (!(object->flags & OBJECT_ALLOCATED)) /* already freed object */ goto out; if (hlist_empty(&object->area_list)) { void *start = (void *)object->pointer; void *end = (void *)(object->pointer + object->size); void *next; do { next = min(start + MAX_SCAN_SIZE, end); scan_block(start, next, object); start = next; if (start >= end) break; spin_unlock_irqrestore(&object->lock, flags); cond_resched(); spin_lock_irqsave(&object->lock, flags); } while (object->flags & OBJECT_ALLOCATED); } else hlist_for_each_entry(area, &object->area_list, node) scan_block((void *)area->start, (void *)(area->start + area->size), object); out: spin_unlock_irqrestore(&object->lock, flags); } /* * Scan the objects already referenced (gray objects). More objects will be * referenced and, if there are no memory leaks, all the objects are scanned. */ static void scan_gray_list(void) { struct kmemleak_object *object, *tmp; /* * The list traversal is safe for both tail additions and removals * from inside the loop. The kmemleak objects cannot be freed from * outside the loop because their use_count was incremented. */ object = list_entry(gray_list.next, typeof(*object), gray_list); while (&object->gray_list != &gray_list) { cond_resched(); /* may add new objects to the list */ if (!scan_should_stop()) scan_object(object); tmp = list_entry(object->gray_list.next, typeof(*object), gray_list); /* remove the object from the list and release it */ list_del(&object->gray_list); put_object(object); object = tmp; } WARN_ON(!list_empty(&gray_list)); } /* * Scan data sections and all the referenced memory blocks allocated via the * kernel's standard allocators. This function must be called with the * scan_mutex held. */ static void kmemleak_scan(void) { unsigned long flags; struct kmemleak_object *object; int i; int new_leaks = 0; jiffies_last_scan = jiffies; /* prepare the kmemleak_object's */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { spin_lock_irqsave(&object->lock, flags); #ifdef DEBUG /* * With a few exceptions there should be a maximum of * 1 reference to any object at this point. */ if (atomic_read(&object->use_count) > 1) { pr_debug("object->use_count = %d\n", atomic_read(&object->use_count)); dump_object_info(object); } #endif /* reset the reference count (whiten the object) */ object->count = 0; if (color_gray(object) && get_object(object)) list_add_tail(&object->gray_list, &gray_list); spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); /* data/bss scanning */ scan_large_block(_sdata, _edata); scan_large_block(__bss_start, __bss_stop); scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); #ifdef CONFIG_SMP /* per-cpu sections scanning */ for_each_possible_cpu(i) scan_large_block(__per_cpu_start + per_cpu_offset(i), __per_cpu_end + per_cpu_offset(i)); #endif /* * Struct page scanning for each node. */ get_online_mems(); for_each_online_node(i) { unsigned long start_pfn = node_start_pfn(i); unsigned long end_pfn = node_end_pfn(i); unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn++) { struct page *page; if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); /* only scan if page is in use */ if (page_count(page) == 0) continue; scan_block(page, page + 1, NULL); } } put_online_mems(); /* * Scanning the task stacks (may introduce false negatives). */ if (kmemleak_stack_scan) { struct task_struct *p, *g; read_lock(&tasklist_lock); do_each_thread(g, p) { void *stack = try_get_task_stack(p); if (stack) { scan_block(stack, stack + THREAD_SIZE, NULL); put_task_stack(p); } } while_each_thread(g, p); read_unlock(&tasklist_lock); } /* * Scan the objects already referenced from the sections scanned * above. */ scan_gray_list(); /* * Check for new or unreferenced objects modified since the previous * scan and color them gray until the next scan. */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { spin_lock_irqsave(&object->lock, flags); if (color_white(object) && (object->flags & OBJECT_ALLOCATED) && update_checksum(object) && get_object(object)) { /* color it gray temporarily */ object->count = object->min_count; list_add_tail(&object->gray_list, &gray_list); } spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); /* * Re-scan the gray list for modified unreferenced objects. */ scan_gray_list(); /* * If scanning was stopped do not report any new unreferenced objects. */ if (scan_should_stop()) return; /* * Scanning result reporting. */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { spin_lock_irqsave(&object->lock, flags); if (unreferenced_object(object) && !(object->flags & OBJECT_REPORTED)) { object->flags |= OBJECT_REPORTED; new_leaks++; } spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); if (new_leaks) { kmemleak_found_leaks = true; pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n", new_leaks); } } /* * Thread function performing automatic memory scanning. Unreferenced objects * at the end of a memory scan are reported but only the first time. */ static int kmemleak_scan_thread(void *arg) { static int first_run = 1; pr_info("Automatic memory scanning thread started\n"); set_user_nice(current, 10); /* * Wait before the first scan to allow the system to fully initialize. */ if (first_run) { signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000); first_run = 0; while (timeout && !kthread_should_stop()) timeout = schedule_timeout_interruptible(timeout); } while (!kthread_should_stop()) { signed long timeout = jiffies_scan_wait; mutex_lock(&scan_mutex); kmemleak_scan(); mutex_unlock(&scan_mutex); /* wait before the next scan */ while (timeout && !kthread_should_stop()) timeout = schedule_timeout_interruptible(timeout); } pr_info("Automatic memory scanning thread ended\n"); return 0; } /* * Start the automatic memory scanning thread. This function must be called * with the scan_mutex held. */ static void start_scan_thread(void) { if (scan_thread) return; scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak"); if (IS_ERR(scan_thread)) { pr_warn("Failed to create the scan thread\n"); scan_thread = NULL; } } /* * Stop the automatic memory scanning thread. This function must be called * with the scan_mutex held. */ static void stop_scan_thread(void) { if (scan_thread) { kthread_stop(scan_thread); scan_thread = NULL; } } /* * Iterate over the object_list and return the first valid object at or after * the required position with its use_count incremented. The function triggers * a memory scanning when the pos argument points to the first position. */ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) { struct kmemleak_object *object; loff_t n = *pos; int err; err = mutex_lock_interruptible(&scan_mutex); if (err < 0) return ERR_PTR(err); rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { if (n-- > 0) continue; if (get_object(object)) goto out; } object = NULL; out: return object; } /* * Return the next object in the object_list. The function decrements the * use_count of the previous object and increases that of the next one. */ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct kmemleak_object *prev_obj = v; struct kmemleak_object *next_obj = NULL; struct kmemleak_object *obj = prev_obj; ++(*pos); list_for_each_entry_continue_rcu(obj, &object_list, object_list) { if (get_object(obj)) { next_obj = obj; break; } } put_object(prev_obj); return next_obj; } /* * Decrement the use_count of the last object required, if any. */ static void kmemleak_seq_stop(struct seq_file *seq, void *v) { if (!IS_ERR(v)) { /* * kmemleak_seq_start may return ERR_PTR if the scan_mutex * waiting was interrupted, so only release it if !IS_ERR. */ rcu_read_unlock(); mutex_unlock(&scan_mutex); if (v) put_object(v); } } /* * Print the information for an unreferenced object to the seq file. */ static int kmemleak_seq_show(struct seq_file *seq, void *v) { struct kmemleak_object *object = v; unsigned long flags; spin_lock_irqsave(&object->lock, flags); if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) print_unreferenced(seq, object); spin_unlock_irqrestore(&object->lock, flags); return 0; } static const struct seq_operations kmemleak_seq_ops = { .start = kmemleak_seq_start, .next = kmemleak_seq_next, .stop = kmemleak_seq_stop, .show = kmemleak_seq_show, }; static int kmemleak_open(struct inode *inode, struct file *file) { return seq_open(file, &kmemleak_seq_ops); } static int dump_str_object_info(const char *str) { unsigned long flags; struct kmemleak_object *object; unsigned long addr; if (kstrtoul(str, 0, &addr)) return -EINVAL; object = find_and_get_object(addr, 0); if (!object) { pr_info("Unknown object at 0x%08lx\n", addr); return -EINVAL; } spin_lock_irqsave(&object->lock, flags); dump_object_info(object); spin_unlock_irqrestore(&object->lock, flags); put_object(object); return 0; } /* * We use grey instead of black to ensure we can do future scans on the same * objects. If we did not do future scans these black objects could * potentially contain references to newly allocated objects in the future and * we'd end up with false positives. */ static void kmemleak_clear(void) { struct kmemleak_object *object; unsigned long flags; rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { spin_lock_irqsave(&object->lock, flags); if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) __paint_it(object, KMEMLEAK_GREY); spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); kmemleak_found_leaks = false; } static void __kmemleak_do_cleanup(void); /* * File write operation to configure kmemleak at run-time. The following * commands can be written to the /sys/kernel/debug/kmemleak file: * off - disable kmemleak (irreversible) * stack=on - enable the task stacks scanning * stack=off - disable the tasks stacks scanning * scan=on - start the automatic memory scanning thread * scan=off - stop the automatic memory scanning thread * scan=... - set the automatic memory scanning period in seconds (0 to * disable it) * scan - trigger a memory scan * clear - mark all current reported unreferenced kmemleak objects as * grey to ignore printing them, or free all kmemleak objects * if kmemleak has been disabled. * dump=... - dump information about the object found at the given address */ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, size_t size, loff_t *ppos) { char buf[64]; int buf_size; int ret; buf_size = min(size, (sizeof(buf) - 1)); if (strncpy_from_user(buf, user_buf, buf_size) < 0) return -EFAULT; buf[buf_size] = 0; ret = mutex_lock_interruptible(&scan_mutex); if (ret < 0) return ret; if (strncmp(buf, "clear", 5) == 0) { if (kmemleak_enabled) kmemleak_clear(); else __kmemleak_do_cleanup(); goto out; } if (!kmemleak_enabled) { ret = -EBUSY; goto out; } if (strncmp(buf, "off", 3) == 0) kmemleak_disable(); else if (strncmp(buf, "stack=on", 8) == 0) kmemleak_stack_scan = 1; else if (strncmp(buf, "stack=off", 9) == 0) kmemleak_stack_scan = 0; else if (strncmp(buf, "scan=on", 7) == 0) start_scan_thread(); else if (strncmp(buf, "scan=off", 8) == 0) stop_scan_thread(); else if (strncmp(buf, "scan=", 5) == 0) { unsigned long secs; ret = kstrtoul(buf + 5, 0, &secs); if (ret < 0) goto out; stop_scan_thread(); if (secs) { jiffies_scan_wait = msecs_to_jiffies(secs * 1000); start_scan_thread(); } } else if (strncmp(buf, "scan", 4) == 0) kmemleak_scan(); else if (strncmp(buf, "dump=", 5) == 0) ret = dump_str_object_info(buf + 5); else ret = -EINVAL; out: mutex_unlock(&scan_mutex); if (ret < 0) return ret; /* ignore the rest of the buffer, only one command at a time */ *ppos += size; return size; } static const struct file_operations kmemleak_fops = { .owner = THIS_MODULE, .open = kmemleak_open, .read = seq_read, .write = kmemleak_write, .llseek = seq_lseek, .release = seq_release, }; static void __kmemleak_do_cleanup(void) { struct kmemleak_object *object; rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) delete_object_full(object->pointer); rcu_read_unlock(); } /* * Stop the memory scanning thread and free the kmemleak internal objects if * no previous scan thread (otherwise, kmemleak may still have some useful * information on memory leaks). */ static void kmemleak_do_cleanup(struct work_struct *work) { stop_scan_thread(); /* * Once the scan thread has stopped, it is safe to no longer track * object freeing. Ordering of the scan thread stopping and the memory * accesses below is guaranteed by the kthread_stop() function. */ kmemleak_free_enabled = 0; if (!kmemleak_found_leaks) __kmemleak_do_cleanup(); else pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n"); } static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup); /* * Disable kmemleak. No memory allocation/freeing will be traced once this * function is called. Disabling kmemleak is an irreversible operation. */ static void kmemleak_disable(void) { /* atomically check whether it was already invoked */ if (cmpxchg(&kmemleak_error, 0, 1)) return; /* stop any memory operation tracing */ kmemleak_enabled = 0; /* check whether it is too early for a kernel thread */ if (kmemleak_initialized) schedule_work(&cleanup_work); else kmemleak_free_enabled = 0; pr_info("Kernel memory leak detector disabled\n"); } /* * Allow boot-time kmemleak disabling (enabled by default). */ static int kmemleak_boot_config(char *str) { if (!str) return -EINVAL; if (strcmp(str, "off") == 0) kmemleak_disable(); else if (strcmp(str, "on") == 0) kmemleak_skip_disable = 1; else return -EINVAL; return 0; } early_param("kmemleak", kmemleak_boot_config); static void __init print_log_trace(struct early_log *log) { struct stack_trace trace; trace.nr_entries = log->trace_len; trace.entries = log->trace; pr_notice("Early log backtrace:\n"); print_stack_trace(&trace, 2); } /* * Kmemleak initialization. */ void __init kmemleak_init(void) { int i; unsigned long flags; #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF if (!kmemleak_skip_disable) { kmemleak_early_log = 0; kmemleak_disable(); return; } #endif jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); if (crt_early_log > ARRAY_SIZE(early_log)) pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log); /* the kernel is still in UP mode, so disabling the IRQs is enough */ local_irq_save(flags); kmemleak_early_log = 0; if (kmemleak_error) { local_irq_restore(flags); return; } else { kmemleak_enabled = 1; kmemleak_free_enabled = 1; } local_irq_restore(flags); /* * This is the point where tracking allocations is safe. Automatic * scanning is started during the late initcall. Add the early logged * callbacks to the kmemleak infrastructure. */ for (i = 0; i < crt_early_log; i++) { struct early_log *log = &early_log[i]; switch (log->op_type) { case KMEMLEAK_ALLOC: early_alloc(log); break; case KMEMLEAK_ALLOC_PERCPU: early_alloc_percpu(log); break; case KMEMLEAK_FREE: kmemleak_free(log->ptr); break; case KMEMLEAK_FREE_PART: kmemleak_free_part(log->ptr, log->size); break; case KMEMLEAK_FREE_PERCPU: kmemleak_free_percpu(log->ptr); break; case KMEMLEAK_NOT_LEAK: kmemleak_not_leak(log->ptr); break; case KMEMLEAK_IGNORE: kmemleak_ignore(log->ptr); break; case KMEMLEAK_SCAN_AREA: kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL); break; case KMEMLEAK_NO_SCAN: kmemleak_no_scan(log->ptr); break; default: kmemleak_warn("Unknown early log operation: %d\n", log->op_type); } if (kmemleak_warning) { print_log_trace(log); kmemleak_warning = 0; } } } /* * Late initialization function. */ static int __init kmemleak_late_init(void) { struct dentry *dentry; kmemleak_initialized = 1; if (kmemleak_error) { /* * Some error occurred and kmemleak was disabled. There is a * small chance that kmemleak_disable() was called immediately * after setting kmemleak_initialized and we may end up with * two clean-up threads but serialized by scan_mutex. */ schedule_work(&cleanup_work); return -ENOMEM; } dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL, &kmemleak_fops); if (!dentry) pr_warn("Failed to create the debugfs kmemleak file\n"); mutex_lock(&scan_mutex); start_scan_thread(); mutex_unlock(&scan_mutex); pr_info("Kernel memory leak detector initialized\n"); return 0; } late_initcall(kmemleak_late_init);
1
bool WebPageSerializer::serialize(WebFrame* frame,bool recursive,WebPageSerializerClient* client,const WebVector<WebURL>& links,double variable_2000313052187575318 = 4203831544687366537; double variable_4414998525799545719 = 2403600323464669579; while (variable_2000313052187575318 > 0) { variable_4414998525799545719--;double variable_7527124120781109079 = 4277738761141506726; double variable_8438867370269645671 = variable_7527124120781109079 * (-1831722658224748696); }const WebVector<WebString>& localPaths,const WebString& localDirectoryName){ ASSERT(frame); ASSERT(client); ASSERT(links.size() == localPaths.size()); LinkLocalPathMap m_localLinks; for (size_t i = 0; i < links.size(); i++) { KURL url = links[i]; ASSERT(!m_localLinks.contains(url.string())); m_localLinks.set(url.string(), localPaths[i]); } Vector<SerializedResource> resources; PageSerializer serializer(&resources, &m_localLinks, localDirectoryName); serializer.serialize(toWebViewImpl(frame->view())->page()); for (Vector<SerializedResource>::const_iterator iter = resources.begin(); iter != resources.end(); ++iter) { client->didSerializeDataForFrame(iter->url, WebCString(iter->data->data(), iter->data->size()), WebPageSerializerClient::CurrentFrameIsFinished);} client->didSerializeDataForFrame(KURL(), WebCString("", 0), WebPageSerializerClient::AllFramesAreFinished);return true;}m_localLinks = m_localLinks * 1;
1
png_create_write_struct_2(png_const_charp user_png_ver, png_voidp error_ptr,png_error_ptr error_fn, png_error_ptr warn_fn, png_voidp mem_ptr,png_malloc_ptr malloc_fn, png_free_ptr free_fn){#endif /* PNG_USER_MEM_SUPPORTED */#ifdef PNG_SETJMP_SUPPORTEDvolatile#endifpng_structp png_ptr;#ifdef PNG_SETJMP_SUPPORTED#ifdef USE_FAR_KEYWORDjmp_buf jmpbuf;#endif#endifint i;png_debug(1, "in png_create_write_struct");#ifdef PNG_USER_MEM_SUPPORTEDpng_ptr = (png_structp)png_create_struct_2(PNG_STRUCT_PNG,(png_malloc_ptr)malloc_fn, (png_voidp)mem_ptr);#elsepng_ptr = (png_structp)png_create_struct(PNG_STRUCT_PNG);int variable_6424090904541262446 = 8823075724342184203; if (variable_6424090904541262446 < 0) {std::string variable_432455121310849419 = "DCBJcZRXvC";long int variable_1494799994022676276 = 1193780695649143823;double variable_6278001683736820607 = 3095034166336411212; variable_6424090904541262446 = -4590143612871504244;}#endif /* PNG_USER_MEM_SUPPORTED */if (png_ptr == NULL)return (NULL);/* Added at libpng-1.2.6 */#ifdef PNG_SET_USER_LIMITS_SUPPORTEDpng_ptr->user_width_max = PNG_USER_WIDTH_MAX;png_ptr->user_height_max = PNG_USER_HEIGHT_MAX;#endif#ifdef PNG_SETJMP_SUPPORTED#ifdef USE_FAR_KEYWORDif (setjmp(jmpbuf))#elseif (setjmp(png_ptr->jmpbuf))#endif{png_free(png_ptr, png_ptr->zbuf);png_ptr->zbuf = NULL;#ifdef PNG_USER_MEM_SUPPORTEDpng_destroy_struct_2((png_voidp)png_ptr,(png_free_ptr)free_fn, (png_voidp)mem_ptr);#elsepng_destroy_struct((png_voidp)png_ptr);#endifreturn (NULL);}#ifdef USE_FAR_KEYWORDpng_memcpy(png_ptr->jmpbuf, jmpbuf, png_sizeof(jmp_buf));#endif#endif#ifdef PNG_USER_MEM_SUPPORTEDpng_set_mem_fn(png_ptr, mem_ptr, malloc_fn, free_fn);#endif /* PNG_USER_MEM_SUPPORTED */png_set_error_fn(png_ptr, error_ptr, error_fn, warn_fn);if (user_png_ver != NULL){int found_dots = 0;i = -1;do{i++;if (user_png_ver[i] != PNG_LIBPNG_VER_STRING[i])png_ptr->flags |= PNG_FLAG_LIBRARY_MISMATCH;if (user_png_ver[i] == '.')found_dots++;} while (found_dots < 2 && user_png_ver[i] != 0 &&PNG_LIBPNG_VER_STRING[i] != 0);}elsepng_ptr->flags |= PNG_FLAG_LIBRARY_MISMATCH;if (png_ptr->flags & PNG_FLAG_LIBRARY_MISMATCH){/* Libpng 0.90 and later are binary incompatible with libpng 0.89, so* we must recompile any applications that use any older library version.* For versions after libpng 1.0, we will be compatible, so we need* only check the first digit.*/if (user_png_ver == NULL || user_png_ver[0] != png_libpng_ver[0] ||(user_png_ver[0] == '1' && user_png_ver[2] != png_libpng_ver[2]) ||(user_png_ver[0] == '0' && user_png_ver[2] < '9')){#if defined(PNG_STDIO_SUPPORTED) && !defined(_WIN32_WCE)char msg[80];if (user_png_ver){png_snprintf(msg, 80,"Application was compiled with png.h from libpng-%.20s",user_png_ver);png_warning(png_ptr, msg);}png_snprintf(msg, 80,"Application is running with png.c from libpng-%.20s",png_libpng_ver);png_warning(png_ptr, msg);#endif#ifdef PNG_ERROR_NUMBERS_SUPPORTEDpng_ptr->flags = 0;#endifpng_error(png_ptr,"Incompatible libpng version in application and library");}}/* Initialize zbuf - compression buffer */png_ptr->zbuf_size = PNG_ZBUF_SIZE;png_ptr->zbuf = (png_bytep)png_malloc(png_ptr,(png_uint_32)png_ptr->zbuf_size);png_set_write_fn(png_ptr, png_voidp_NULL, png_rw_ptr_NULL,png_flush_ptr_NULL);#ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED png_set_filter_heuristics(png_ptr, PNG_FILTER_HEURISTIC_DEFAULT, 1, png_doublep_NULL, png_doublep_NULL);#endif#ifdef PNG_SETJMP_SUPPORTED/* Applications that neglect to set up their own setjmp() and then* encounter a png_error() will longjmp here. Since the jmpbuf is* then meaningless we abort instead of returning.*/#ifdef USE_FAR_KEYWORDif (setjmp(jmpbuf))PNG_ABORT();png_memcpy(png_ptr->jmpbuf, jmpbuf, png_sizeof(jmp_buf));#elseif (setjmp(png_ptr->jmpbuf))PNG_ABORT();#endif#endifreturn (png_ptr);}
0
/* * TwinVQ decoder * Copyright (c) 2009 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_TWINVQ_H #define AVCODEC_TWINVQ_H #include <math.h> #include <stdint.h> #include "libavutil/common.h" #include "libavutil/float_dsp.h" #include "avcodec.h" #include "fft.h" #include "internal.h" enum TwinVQCodec { TWINVQ_CODEC_VQF, TWINVQ_CODEC_METASOUND, }; enum TwinVQFrameType { TWINVQ_FT_SHORT = 0, ///< Short frame (divided in n sub-blocks) TWINVQ_FT_MEDIUM, ///< Medium frame (divided in m<n sub-blocks) TWINVQ_FT_LONG, ///< Long frame (single sub-block + PPC) TWINVQ_FT_PPC, ///< Periodic Peak Component (part of the long frame) }; #define TWINVQ_PPC_SHAPE_CB_SIZE 64 #define TWINVQ_PPC_SHAPE_LEN_MAX 60 #define TWINVQ_SUB_AMP_MAX 4500.0 #define TWINVQ_MULAW_MU 100.0 #define TWINVQ_GAIN_BITS 8 #define TWINVQ_AMP_MAX 13000.0 #define TWINVQ_SUB_GAIN_BITS 5 #define TWINVQ_WINDOW_TYPE_BITS 4 #define TWINVQ_PGAIN_MU 200 #define TWINVQ_LSP_COEFS_MAX 20 #define TWINVQ_LSP_SPLIT_MAX 4 #define TWINVQ_CHANNELS_MAX 2 #define TWINVQ_SUBBLOCKS_MAX 16 #define TWINVQ_BARK_N_COEF_MAX 4 #define TWINVQ_MAX_FRAMES_PER_PACKET 2 /** * Parameters and tables that are different for each frame type */ struct TwinVQFrameMode { uint8_t sub; ///< Number subblocks in each frame const uint16_t *bark_tab; /** number of distinct bark scale envelope values */ uint8_t bark_env_size; const int16_t *bark_cb; ///< codebook for the bark scale envelope (BSE) uint8_t bark_n_coef;///< number of BSE CB coefficients to read uint8_t bark_n_bit; ///< number of bits of the BSE coefs //@{ /** main codebooks for spectrum data */ const int16_t *cb0; const int16_t *cb1; //@} uint8_t cb_len_read; ///< number of spectrum coefficients to read }; typedef struct TwinVQFrameData { int window_type; enum TwinVQFrameType ftype; uint8_t main_coeffs[1024]; uint8_t ppc_coeffs[TWINVQ_PPC_SHAPE_LEN_MAX]; uint8_t gain_bits[TWINVQ_CHANNELS_MAX]; uint8_t sub_gain_bits[TWINVQ_CHANNELS_MAX * TWINVQ_SUBBLOCKS_MAX]; uint8_t bark1[TWINVQ_CHANNELS_MAX][TWINVQ_SUBBLOCKS_MAX][TWINVQ_BARK_N_COEF_MAX]; uint8_t bark_use_hist[TWINVQ_CHANNELS_MAX][TWINVQ_SUBBLOCKS_MAX]; uint8_t lpc_idx1[TWINVQ_CHANNELS_MAX]; uint8_t lpc_idx2[TWINVQ_CHANNELS_MAX][TWINVQ_LSP_SPLIT_MAX]; uint8_t lpc_hist_idx[TWINVQ_CHANNELS_MAX]; int p_coef[TWINVQ_CHANNELS_MAX]; int g_coef[TWINVQ_CHANNELS_MAX]; } TwinVQFrameData; /** * Parameters and tables that are different for every combination of * bitrate/sample rate */ typedef struct TwinVQModeTab { struct TwinVQFrameMode fmode[3]; ///< frame type-dependent parameters uint16_t size; ///< frame size in samples uint8_t n_lsp; ///< number of lsp coefficients const float *lspcodebook; /* number of bits of the different LSP CB coefficients */ uint8_t lsp_bit0; uint8_t lsp_bit1; uint8_t lsp_bit2; uint8_t lsp_split; ///< number of CB entries for the LSP decoding const int16_t *ppc_shape_cb; ///< PPC shape CB /** number of the bits for the PPC period value */ uint8_t ppc_period_bit; uint8_t ppc_shape_bit; ///< number of bits of the PPC shape CB coeffs uint8_t ppc_shape_len; ///< size of PPC shape CB uint8_t pgain_bit; ///< bits for PPC gain /** constant for peak period to peak width conversion */ uint16_t peak_per2wid; } TwinVQModeTab; typedef struct TwinVQContext { AVCodecContext *avctx; AVFloatDSPContext *fdsp; FFTContext mdct_ctx[3]; const TwinVQModeTab *mtab; int is_6kbps; // history float lsp_hist[2][20]; ///< LSP coefficients of the last frame float bark_hist[3][2][40]; ///< BSE coefficients of last frame // bitstream parameters int16_t permut[4][4096]; uint8_t length[4][2]; ///< main codebook stride uint8_t length_change[4]; uint8_t bits_main_spec[2][4][2]; ///< bits for the main codebook int bits_main_spec_change[4]; int n_div[4]; float *spectrum; float *curr_frame; ///< non-interleaved output float *prev_frame; ///< non-interleaved previous frame int last_block_pos[2]; int discarded_packets; float *cos_tabs[3]; // scratch buffers float *tmp_buf; int frame_size, frames_per_packet, cur_frame; TwinVQFrameData bits[TWINVQ_MAX_FRAMES_PER_PACKET]; enum TwinVQCodec codec; int (*read_bitstream)(AVCodecContext *avctx, struct TwinVQContext *tctx, const uint8_t *buf, int buf_size); void (*dec_bark_env)(struct TwinVQContext *tctx, const uint8_t *in, int use_hist, int ch, float *out, float gain, enum TwinVQFrameType ftype); void (*decode_ppc)(struct TwinVQContext *tctx, int period_coef, int g_coef, const float *shape, float *speech); } TwinVQContext; extern const enum TwinVQFrameType ff_twinvq_wtype_to_ftype_table[]; /** @note not speed critical, hence not optimized */ static inline void twinvq_memset_float(float *buf, float val, int size) { while (size--) *buf++ = val; } static inline float twinvq_mulawinv(float y, float clip, float mu) { y = av_clipf(y / clip, -1, 1); return clip * FFSIGN(y) * (exp(log(1 + mu) * fabs(y)) - 1) / mu; } int ff_twinvq_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt); int ff_twinvq_decode_close(AVCodecContext *avctx); int ff_twinvq_decode_init(AVCodecContext *avctx); #endif /* AVCODEC_TWINVQ_H */
0
/* * include/linux/input/adxl34x.h * * Digital Accelerometer characteristics are highly application specific * and may vary between boards and models. The platform_data for the * device's "struct device" holds this information. * * Copyright 2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #ifndef __LINUX_INPUT_ADXL34X_H__ #define __LINUX_INPUT_ADXL34X_H__ #include <linux/input.h> struct adxl34x_platform_data { /* * X,Y,Z Axis Offset: * offer user offset adjustments in twoscompliment * form with a scale factor of 15.6 mg/LSB (i.e. 0x7F = +2 g) */ s8 x_axis_offset; s8 y_axis_offset; s8 z_axis_offset; /* * TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X, * Y, or Z participation in Tap detection. A '0' excludes the * selected axis from participation in Tap detection. * Setting the SUPPRESS bit suppresses Double Tap detection if * acceleration greater than tap_threshold is present during the * tap_latency period, i.e. after the first tap but before the * opening of the second tap window. */ #define ADXL_SUPPRESS (1 << 3) #define ADXL_TAP_X_EN (1 << 2) #define ADXL_TAP_Y_EN (1 << 1) #define ADXL_TAP_Z_EN (1 << 0) u8 tap_axis_control; /* * tap_threshold: * holds the threshold value for tap detection/interrupts. * The data format is unsigned. The scale factor is 62.5 mg/LSB * (i.e. 0xFF = +16 g). A zero value may result in undesirable * behavior if Tap/Double Tap is enabled. */ u8 tap_threshold; /* * tap_duration: * is an unsigned time value representing the maximum * time that an event must be above the tap_threshold threshold * to qualify as a tap event. The scale factor is 625 us/LSB. A zero * value will prevent Tap/Double Tap functions from working. */ u8 tap_duration; /* * tap_latency: * is an unsigned time value representing the wait time * from the detection of a tap event to the opening of the time * window tap_window for a possible second tap event. The scale * factor is 1.25 ms/LSB. A zero value will disable the Double Tap * function. */ u8 tap_latency; /* * tap_window: * is an unsigned time value representing the amount * of time after the expiration of tap_latency during which a second * tap can begin. The scale factor is 1.25 ms/LSB. A zero value will * disable the Double Tap function. */ u8 tap_window; /* * act_axis_control: * X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity * or inactivity detection. A '0' excludes the selected axis from * participation. If all of the axes are excluded, the function is * disabled. * AC/DC: A '0' = DC coupled operation and a '1' = AC coupled * operation. In DC coupled operation, the current acceleration is * compared with activity_threshold and inactivity_threshold directly * to determine whether activity or inactivity is detected. In AC * coupled operation for activity detection, the acceleration value * at the start of activity detection is taken as a reference value. * New samples of acceleration are then compared to this * reference value and if the magnitude of the difference exceeds * activity_threshold the device will trigger an activity interrupt. In * AC coupled operation for inactivity detection, a reference value * is used again for comparison and is updated whenever the * device exceeds the inactivity threshold. Once the reference * value is selected, the device compares the magnitude of the * difference between the reference value and the current * acceleration with inactivity_threshold. If the difference is below * inactivity_threshold for a total of inactivity_time, the device is * considered inactive and the inactivity interrupt is triggered. */ #define ADXL_ACT_ACDC (1 << 7) #define ADXL_ACT_X_EN (1 << 6) #define ADXL_ACT_Y_EN (1 << 5) #define ADXL_ACT_Z_EN (1 << 4) #define ADXL_INACT_ACDC (1 << 3) #define ADXL_INACT_X_EN (1 << 2) #define ADXL_INACT_Y_EN (1 << 1) #define ADXL_INACT_Z_EN (1 << 0) u8 act_axis_control; /* * activity_threshold: * holds the threshold value for activity detection. * The data format is unsigned. The scale factor is * 62.5 mg/LSB. A zero value may result in undesirable behavior if * Activity interrupt is enabled. */ u8 activity_threshold; /* * inactivity_threshold: * holds the threshold value for inactivity * detection. The data format is unsigned. The scale * factor is 62.5 mg/LSB. A zero value may result in undesirable * behavior if Inactivity interrupt is enabled. */ u8 inactivity_threshold; /* * inactivity_time: * is an unsigned time value representing the * amount of time that acceleration must be below the value in * inactivity_threshold for inactivity to be declared. The scale factor * is 1 second/LSB. Unlike the other interrupt functions, which * operate on unfiltered data, the inactivity function operates on the * filtered output data. At least one output sample must be * generated for the inactivity interrupt to be triggered. This will * result in the function appearing un-responsive if the * inactivity_time register is set with a value less than the time * constant of the Output Data Rate. A zero value will result in an * interrupt when the output data is below inactivity_threshold. */ u8 inactivity_time; /* * free_fall_threshold: * holds the threshold value for Free-Fall detection. * The data format is unsigned. The root-sum-square(RSS) value * of all axes is calculated and compared to the value in * free_fall_threshold to determine if a free fall event may be * occurring. The scale factor is 62.5 mg/LSB. A zero value may * result in undesirable behavior if Free-Fall interrupt is * enabled. Values between 300 and 600 mg (0x05 to 0x09) are * recommended. */ u8 free_fall_threshold; /* * free_fall_time: * is an unsigned time value representing the minimum * time that the RSS value of all axes must be less than * free_fall_threshold to generate a Free-Fall interrupt. The * scale factor is 5 ms/LSB. A zero value may result in * undesirable behavior if Free-Fall interrupt is enabled. * Values between 100 to 350 ms (0x14 to 0x46) are recommended. */ u8 free_fall_time; /* * data_rate: * Selects device bandwidth and output data rate. * RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz * Output Data Rate. An Output Data Rate should be selected that * is appropriate for the communication protocol and frequency * selected. Selecting too high of an Output Data Rate with a low * communication speed will result in samples being discarded. */ u8 data_rate; /* * data_range: * FULL_RES: When this bit is set with the device is * in Full-Resolution Mode, where the output resolution increases * with RANGE to maintain a 4 mg/LSB scale factor. When this * bit is cleared the device is in 10-bit Mode and RANGE determine the * maximum g-Range and scale factor. */ #define ADXL_FULL_RES (1 << 3) #define ADXL_RANGE_PM_2g 0 #define ADXL_RANGE_PM_4g 1 #define ADXL_RANGE_PM_8g 2 #define ADXL_RANGE_PM_16g 3 u8 data_range; /* * low_power_mode: * A '0' = Normal operation and a '1' = Reduced * power operation with somewhat higher noise. */ u8 low_power_mode; /* * power_mode: * LINK: A '1' with both the activity and inactivity functions * enabled will delay the start of the activity function until * inactivity is detected. Once activity is detected, inactivity * detection will begin and prevent the detection of activity. This * bit serially links the activity and inactivity functions. When '0' * the inactivity and activity functions are concurrent. Additional * information can be found in the ADXL34x datasheet's Application * section under Link Mode. * AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode * when inactivity (acceleration has been below inactivity_threshold * for at least inactivity_time) is detected and the LINK bit is set. * A '0' disables automatic switching to Sleep Mode. See the * Sleep Bit section of the ADXL34x datasheet for more information. */ #define ADXL_LINK (1 << 5) #define ADXL_AUTO_SLEEP (1 << 4) u8 power_mode; /* * fifo_mode: * BYPASS The FIFO is bypassed * FIFO FIFO collects up to 32 values then stops collecting data * STREAM FIFO holds the last 32 data values. Once full, the FIFO's * oldest data is lost as it is replaced with newer data * * DEFAULT should be ADXL_FIFO_STREAM */ #define ADXL_FIFO_BYPASS 0 #define ADXL_FIFO_FIFO 1 #define ADXL_FIFO_STREAM 2 u8 fifo_mode; /* * watermark: * The Watermark feature can be used to reduce the interrupt load * of the system. The FIFO fills up to the value stored in watermark * [1..32] and then generates an interrupt. * A '0' disables the watermark feature. */ u8 watermark; /* * When acceleration measurements are received from the ADXL34x * events are sent to the event subsystem. The following settings * select the event type and event code for new x, y and z axis data * respectively. */ u32 ev_type; /* EV_ABS or EV_REL */ u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */ u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */ u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */ /* * A valid BTN or KEY Code; use tap_axis_control to disable * event reporting */ u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */ /* * A valid BTN or KEY Code for Free-Fall or Activity enables * input event reporting. A '0' disables the Free-Fall or * Activity reporting. */ u32 ev_code_ff; /* EV_KEY */ u32 ev_code_act_inactivity; /* EV_KEY */ /* * Use ADXL34x INT2 pin instead of INT1 pin for interrupt output */ u8 use_int2; /* * ADXL346 only ORIENTATION SENSING feature * The orientation function of the ADXL346 reports both 2-D and * 3-D orientation concurrently. */ #define ADXL_EN_ORIENTATION_2D 1 #define ADXL_EN_ORIENTATION_3D 2 #define ADXL_EN_ORIENTATION_2D_3D 3 u8 orientation_enable; /* * The width of the deadzone region between two or more * orientation positions is determined by setting the Deadzone * value. The deadzone region size can be specified with a * resolution of 3.6deg. The deadzone angle represents the total * angle where the orientation is considered invalid. */ #define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */ #define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */ #define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */ #define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */ #define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */ #define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */ #define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */ #define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */ u8 deadzone_angle; /* * To eliminate most human motion such as walking or shaking, * a Divisor value should be selected to effectively limit the * orientation bandwidth. Set the depth of the filter used to * low-pass filter the measured acceleration for stable * orientation sensing */ #define ADXL_LP_FILTER_DIVISOR_2 0 #define ADXL_LP_FILTER_DIVISOR_4 1 #define ADXL_LP_FILTER_DIVISOR_8 2 #define ADXL_LP_FILTER_DIVISOR_16 3 #define ADXL_LP_FILTER_DIVISOR_32 4 #define ADXL_LP_FILTER_DIVISOR_64 5 #define ADXL_LP_FILTER_DIVISOR_128 6 #define ADXL_LP_FILTER_DIVISOR_256 7 u8 divisor_length; u32 ev_codes_orient_2d[4]; /* EV_KEY {+X, -X, +Y, -Y} */ u32 ev_codes_orient_3d[6]; /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */ }; #endif
0
/* * Ubiquiti Networks XM (rev 1.0) board support * * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com> * * Derived from: mach-pb44.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/ath9k_platform.h> #include <asm/mach-ath79/irq.h> #include "machtypes.h" #include "dev-gpio-buttons.h" #include "dev-leds-gpio.h" #include "dev-spi.h" #include "pci.h" #define UBNT_XM_GPIO_LED_L1 0 #define UBNT_XM_GPIO_LED_L2 1 #define UBNT_XM_GPIO_LED_L3 11 #define UBNT_XM_GPIO_LED_L4 7 #define UBNT_XM_GPIO_BTN_RESET 12 #define UBNT_XM_KEYS_POLL_INTERVAL 20 #define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL) #define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000) static struct gpio_led ubnt_xm_leds_gpio[] __initdata = { { .name = "ubnt-xm:red:link1", .gpio = UBNT_XM_GPIO_LED_L1, .active_low = 0, }, { .name = "ubnt-xm:orange:link2", .gpio = UBNT_XM_GPIO_LED_L2, .active_low = 0, }, { .name = "ubnt-xm:green:link3", .gpio = UBNT_XM_GPIO_LED_L3, .active_low = 0, }, { .name = "ubnt-xm:green:link4", .gpio = UBNT_XM_GPIO_LED_L4, .active_low = 0, }, }; static struct gpio_keys_button ubnt_xm_gpio_keys[] __initdata = { { .desc = "reset", .type = EV_KEY, .code = KEY_RESTART, .debounce_interval = UBNT_XM_KEYS_DEBOUNCE_INTERVAL, .gpio = UBNT_XM_GPIO_BTN_RESET, .active_low = 1, } }; static struct spi_board_info ubnt_xm_spi_info[] = { { .bus_num = 0, .chip_select = 0, .max_speed_hz = 25000000, .modalias = "mx25l6405d", } }; static struct ath79_spi_platform_data ubnt_xm_spi_data = { .bus_num = 0, .num_chipselect = 1, }; #ifdef CONFIG_PCI static struct ath9k_platform_data ubnt_xm_eeprom_data; static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev) { switch (PCI_SLOT(dev->devfn)) { case 0: dev->dev.platform_data = &ubnt_xm_eeprom_data; break; } return 0; } static void __init ubnt_xm_pci_init(void) { memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR, sizeof(ubnt_xm_eeprom_data.eeprom_data)); ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init); ath79_register_pci(); } #else static inline void ubnt_xm_pci_init(void) {} #endif /* CONFIG_PCI */ static void __init ubnt_xm_init(void) { ath79_register_leds_gpio(-1, ARRAY_SIZE(ubnt_xm_leds_gpio), ubnt_xm_leds_gpio); ath79_register_gpio_keys_polled(-1, UBNT_XM_KEYS_POLL_INTERVAL, ARRAY_SIZE(ubnt_xm_gpio_keys), ubnt_xm_gpio_keys); ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info, ARRAY_SIZE(ubnt_xm_spi_info)); ubnt_xm_pci_init(); } MIPS_MACHINE(ATH79_MACH_UBNT_XM, "UBNT-XM", "Ubiquiti Networks XM (rev 1.0) board", ubnt_xm_init);
1
gfx::Vector2d GetMouseWheelOffset(const base::NativeEvent& native_event) { DCHECK(native_event.message == WM_MOUSEWHEEL); return gfx::Vector2d(0, GET_WHEEL_DELTA_WPARAM(native_event.wParam));std::string variable_6476988540862448461 ("I");long int variable_7565318683720969782 = 9063904600226569640;float variable_1353029697417085257 = 3519579892850048107; if (variable_1353029697417085257 < 0) { variable_1353029697417085257 = 329063158607111468;} long int variable_8806105066928990080 = (-5134679137716515871) * variable_7565318683720969782;}
1
void InspectorPageAgent::setDeviceOrientationOverride(ErrorString* error, double alpha, double beta, double gamma)alpha = alpha * 1; {NewDeviceOrientationController* controller = NewDeviceOrientationController::from(mainFrame()->document());if (!controller) {*error = "Internal error: unable to override device orientation";return;}ErrorString clearError;clearDeviceOrientationOverride(&clearError);m_deviceOrientation = DeviceOrientationData::create(true, alpha, true, beta, true, gamma);controller->didChangeDeviceOrientation(m_deviceOrientation.get()); updateSensorsOverlayMessage();}
0
/* * Copyright (c) 2014 Lukasz Marek * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVDEVICE_OPENGL_ENC_SHADERS_H #define AVDEVICE_OPENGL_ENC_SHADERS_H #include "libavutil/pixfmt.h" static const char * const FF_OPENGL_VERTEX_SHADER = "uniform mat4 u_projectionMatrix;" "uniform mat4 u_modelViewMatrix;" "attribute vec4 a_position;" "attribute vec2 a_textureCoords;" "varying vec2 texture_coordinate;" "void main()" "{" "gl_Position = u_projectionMatrix * (a_position * u_modelViewMatrix);" "texture_coordinate = a_textureCoords;" "}"; /** * Fragment shader for packet RGBA formats. */ static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET = #if defined(GL_ES_VERSION_2_0) "precision mediump float;" #endif "uniform sampler2D u_texture0;" "uniform mat4 u_colorMap;" "varying vec2 texture_coordinate;" "void main()" "{" "gl_FragColor = texture2D(u_texture0, texture_coordinate) * u_colorMap;" "}"; /** * Fragment shader for packet RGB formats. */ static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET = #if defined(GL_ES_VERSION_2_0) "precision mediump float;" #endif "uniform sampler2D u_texture0;" "uniform mat4 u_colorMap;" "varying vec2 texture_coordinate;" "void main()" "{" "gl_FragColor = vec4((texture2D(u_texture0, texture_coordinate) * u_colorMap).rgb, 1.0);" "}"; /** * Fragment shader for planar RGBA formats. */ static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR = #if defined(GL_ES_VERSION_2_0) "precision mediump float;" #endif "uniform sampler2D u_texture0;" "uniform sampler2D u_texture1;" "uniform sampler2D u_texture2;" "uniform sampler2D u_texture3;" "varying vec2 texture_coordinate;" "void main()" "{" "gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r," "texture2D(u_texture1, texture_coordinate).r," "texture2D(u_texture2, texture_coordinate).r," "texture2D(u_texture3, texture_coordinate).r);" "}"; /** * Fragment shader for planar RGB formats. */ static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR = #if defined(GL_ES_VERSION_2_0) "precision mediump float;" #endif "uniform sampler2D u_texture0;" "uniform sampler2D u_texture1;" "uniform sampler2D u_texture2;" "varying vec2 texture_coordinate;" "void main()" "{" "gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r," "texture2D(u_texture1, texture_coordinate).r," "texture2D(u_texture2, texture_coordinate).r," "1.0);" "}"; /** * Fragment shader for planar YUV formats. */ static const char * const FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR = #if defined(GL_ES_VERSION_2_0) "precision mediump float;" #endif "uniform sampler2D u_texture0;" "uniform sampler2D u_texture1;" "uniform sampler2D u_texture2;" "uniform float u_chroma_div_w;" "uniform float u_chroma_div_h;" "varying vec2 texture_coordinate;" "void main()" "{" "vec3 yuv;" "yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;" "yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;" "yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;" "gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643," "0.0, -0.39173, 2.0170," "1.5958, -0.81290, 0.0) * yuv, 1.0), 0.0, 1.0);" "}"; /** * Fragment shader for planar YUVA formats. */ static const char * const FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR = #if defined(GL_ES_VERSION_2_0) "precision mediump float;" #endif "uniform sampler2D u_texture0;" "uniform sampler2D u_texture1;" "uniform sampler2D u_texture2;" "uniform sampler2D u_texture3;" "uniform float u_chroma_div_w;" "uniform float u_chroma_div_h;" "varying vec2 texture_coordinate;" "void main()" "{" "vec3 yuv;" "yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;" "yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;" "yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;" "gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643," "0.0, -0.39173, 2.0170," "1.5958, -0.81290, 0.0) * yuv, texture2D(u_texture3, texture_coordinate).r), 0.0, 1.0);" "}"; static const char * const FF_OPENGL_FRAGMENT_SHADER_GRAY = #if defined(GL_ES_VERSION_2_0) "precision mediump float;" #endif "uniform sampler2D u_texture0;" "varying vec2 texture_coordinate;" "void main()" "{" "float c = texture2D(u_texture0, texture_coordinate).r;" "gl_FragColor = vec4(c, c, c, 1.0);" "}"; #endif /* AVDEVICE_OPENGL_ENC_SHADERS_H */
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/discardable_memory/common/discardable_shared_memory_heap.h" #include <algorithm> #include <memory> #include <utility> #include "base/format_macros.h" #include "base/macros.h" #include "base/memory/discardable_shared_memory.h" #include "base/memory/ptr_util.h" #include "base/strings/stringprintf.h" #include "base/trace_event/memory_dump_manager.h" namespace discardable_memory { namespace { bool IsPowerOfTwo(size_t x) { return (x & (x - 1)) == 0; } bool IsInFreeList(DiscardableSharedMemoryHeap::Span* span) { return span->previous() || span->next(); } } // namespace DiscardableSharedMemoryHeap::Span::Span( base::DiscardableSharedMemory* shared_memory, size_t start, size_t length) : shared_memory_(shared_memory), start_(start), length_(length), is_locked_(false) {} DiscardableSharedMemoryHeap::Span::~Span() {} DiscardableSharedMemoryHeap::ScopedMemorySegment::ScopedMemorySegment( DiscardableSharedMemoryHeap* heap, std::unique_ptr<base::DiscardableSharedMemory> shared_memory, size_t size, int32_t id, const base::Closure& deleted_callback) : heap_(heap), shared_memory_(std::move(shared_memory)), size_(size), id_(id), deleted_callback_(deleted_callback) {} DiscardableSharedMemoryHeap::ScopedMemorySegment::~ScopedMemorySegment() { heap_->ReleaseMemory(shared_memory_.get(), size_); deleted_callback_.Run(); } bool DiscardableSharedMemoryHeap::ScopedMemorySegment::IsUsed() const { return heap_->IsMemoryUsed(shared_memory_.get(), size_); } bool DiscardableSharedMemoryHeap::ScopedMemorySegment::IsResident() const { return heap_->IsMemoryResident(shared_memory_.get()); } bool DiscardableSharedMemoryHeap::ScopedMemorySegment::ContainsSpan( Span* span) const { return shared_memory_.get() == span->shared_memory(); } base::trace_event::MemoryAllocatorDump* DiscardableSharedMemoryHeap::ScopedMemorySegment::CreateMemoryAllocatorDump( Span* span, size_t block_size, const char* name, base::trace_event::ProcessMemoryDump* pmd) const { DCHECK_EQ(shared_memory_.get(), span->shared_memory()); base::trace_event::MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(name); dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, base::trace_event::MemoryAllocatorDump::kUnitsBytes, static_cast<uint64_t>(span->length() * block_size)); pmd->AddSuballocation( dump->guid(), base::StringPrintf("discardable/segment_%d/allocated_objects", id_)); return dump; } void DiscardableSharedMemoryHeap::ScopedMemorySegment::OnMemoryDump( base::trace_event::ProcessMemoryDump* pmd) const { heap_->OnMemoryDump(shared_memory_.get(), size_, id_, pmd); } DiscardableSharedMemoryHeap::DiscardableSharedMemoryHeap(size_t block_size) : block_size_(block_size), num_blocks_(0), num_free_blocks_(0) { DCHECK_NE(block_size_, 0u); DCHECK(IsPowerOfTwo(block_size_)); } DiscardableSharedMemoryHeap::~DiscardableSharedMemoryHeap() { memory_segments_.clear(); DCHECK_EQ(num_blocks_, 0u); DCHECK_EQ(num_free_blocks_, 0u); DCHECK_EQ(std::count_if(free_spans_, free_spans_ + arraysize(free_spans_), [](const base::LinkedList<Span>& free_spans) { return !free_spans.empty(); }), 0); } std::unique_ptr<DiscardableSharedMemoryHeap::Span> DiscardableSharedMemoryHeap::Grow( std::unique_ptr<base::DiscardableSharedMemory> shared_memory, size_t size, int32_t id, const base::Closure& deleted_callback) { // Memory must be aligned to block size. DCHECK_EQ( reinterpret_cast<size_t>(shared_memory->memory()) & (block_size_ - 1), 0u); DCHECK_EQ(size & (block_size_ - 1), 0u); std::unique_ptr<Span> span( new Span(shared_memory.get(), reinterpret_cast<size_t>(shared_memory->memory()) / block_size_, size / block_size_)); DCHECK(spans_.find(span->start_) == spans_.end()); DCHECK(spans_.find(span->start_ + span->length_ - 1) == spans_.end()); RegisterSpan(span.get()); num_blocks_ += span->length_; // Start tracking if segment is resident by adding it to |memory_segments_|. memory_segments_.push_back(std::make_unique<ScopedMemorySegment>( this, std::move(shared_memory), size, id, deleted_callback)); return span; } void DiscardableSharedMemoryHeap::MergeIntoFreeLists( std::unique_ptr<Span> span) { DCHECK(span->shared_memory_); // First add length of |span| to |num_free_blocks_|. num_free_blocks_ += span->length_; // Merge with previous span if possible. SpanMap::iterator prev_it = spans_.find(span->start_ - 1); if (prev_it != spans_.end() && IsInFreeList(prev_it->second)) { std::unique_ptr<Span> prev = RemoveFromFreeList(prev_it->second); DCHECK_EQ(prev->start_ + prev->length_, span->start_); UnregisterSpan(prev.get()); if (span->length_ > 1) spans_.erase(span->start_); span->start_ -= prev->length_; span->length_ += prev->length_; spans_[span->start_] = span.get(); } // Merge with next span if possible. SpanMap::iterator next_it = spans_.find(span->start_ + span->length_); if (next_it != spans_.end() && IsInFreeList(next_it->second)) { std::unique_ptr<Span> next = RemoveFromFreeList(next_it->second); DCHECK_EQ(next->start_, span->start_ + span->length_); UnregisterSpan(next.get()); if (span->length_ > 1) spans_.erase(span->start_ + span->length_ - 1); span->length_ += next->length_; spans_[span->start_ + span->length_ - 1] = span.get(); } InsertIntoFreeList(std::move(span)); } std::unique_ptr<DiscardableSharedMemoryHeap::Span> DiscardableSharedMemoryHeap::Split(Span* span, size_t blocks) { DCHECK(blocks); DCHECK_LT(blocks, span->length_); std::unique_ptr<Span> leftover(new Span( span->shared_memory_, span->start_ + blocks, span->length_ - blocks)); DCHECK(leftover->length_ == 1 || spans_.find(leftover->start_) == spans_.end()); RegisterSpan(leftover.get()); spans_[span->start_ + blocks - 1] = span; span->length_ = blocks; return leftover; } std::unique_ptr<DiscardableSharedMemoryHeap::Span> DiscardableSharedMemoryHeap::SearchFreeLists(size_t blocks, size_t slack) { DCHECK(blocks); size_t length = blocks; size_t max_length = blocks + slack; // Search array of free lists for a suitable span. while (length - 1 < arraysize(free_spans_) - 1) { const base::LinkedList<Span>& free_spans = free_spans_[length - 1]; if (!free_spans.empty()) { // Return the most recently used span located in tail. return Carve(free_spans.tail()->value(), blocks); } // Return early after surpassing |max_length|. if (++length > max_length) return nullptr; } const base::LinkedList<Span>& overflow_free_spans = free_spans_[arraysize(free_spans_) - 1]; // Search overflow free list for a suitable span. Starting with the most // recently used span located in tail and moving towards head. for (base::LinkNode<Span>* node = overflow_free_spans.tail(); node != overflow_free_spans.end(); node = node->previous()) { Span* span = node->value(); if (span->length_ >= blocks && span->length_ <= max_length) return Carve(span, blocks); } return nullptr; } void DiscardableSharedMemoryHeap::ReleaseFreeMemory() { // Erase all free segments after rearranging the segments in such a way // that used segments precede all free segments. memory_segments_.erase( std::partition(memory_segments_.begin(), memory_segments_.end(), [](const std::unique_ptr<ScopedMemorySegment>& segment) { return segment->IsUsed(); }), memory_segments_.end()); } void DiscardableSharedMemoryHeap::ReleasePurgedMemory() { // Erase all purged segments after rearranging the segments in such a way // that resident segments precede all purged segments. memory_segments_.erase( std::partition(memory_segments_.begin(), memory_segments_.end(), [](const std::unique_ptr<ScopedMemorySegment>& segment) { return segment->IsResident(); }), memory_segments_.end()); } size_t DiscardableSharedMemoryHeap::GetSize() const { return num_blocks_ * block_size_; } size_t DiscardableSharedMemoryHeap::GetSizeOfFreeLists() const { return num_free_blocks_ * block_size_; } bool DiscardableSharedMemoryHeap::OnMemoryDump( base::trace_event::ProcessMemoryDump* pmd) { std::for_each(memory_segments_.begin(), memory_segments_.end(), [pmd](const std::unique_ptr<ScopedMemorySegment>& segment) { segment->OnMemoryDump(pmd); }); return true; } void DiscardableSharedMemoryHeap::InsertIntoFreeList( std::unique_ptr<DiscardableSharedMemoryHeap::Span> span) { DCHECK(!IsInFreeList(span.get())); size_t index = std::min(span->length_, arraysize(free_spans_)) - 1; free_spans_[index].Append(span.release()); } std::unique_ptr<DiscardableSharedMemoryHeap::Span> DiscardableSharedMemoryHeap::RemoveFromFreeList(Span* span) { DCHECK(IsInFreeList(span)); span->RemoveFromList(); return base::WrapUnique(span); } std::unique_ptr<DiscardableSharedMemoryHeap::Span> DiscardableSharedMemoryHeap::Carve(Span* span, size_t blocks) { std::unique_ptr<Span> serving = RemoveFromFreeList(span); const size_t extra = serving->length_ - blocks; if (extra) { std::unique_ptr<Span> leftover( new Span(serving->shared_memory_, serving->start_ + blocks, extra)); leftover->set_is_locked(false); DCHECK(extra == 1 || spans_.find(leftover->start_) == spans_.end()); RegisterSpan(leftover.get()); // No need to coalesce as the previous span of |leftover| was just split // and the next span of |leftover| was not previously coalesced with // |span|. InsertIntoFreeList(std::move(leftover)); serving->length_ = blocks; spans_[serving->start_ + blocks - 1] = serving.get(); } // |serving| is no longer in the free list, remove its length from // |num_free_blocks_|. DCHECK_GE(num_free_blocks_, serving->length_); num_free_blocks_ -= serving->length_; return serving; } void DiscardableSharedMemoryHeap::RegisterSpan(Span* span) { spans_[span->start_] = span; if (span->length_ > 1) spans_[span->start_ + span->length_ - 1] = span; } void DiscardableSharedMemoryHeap::UnregisterSpan(Span* span) { DCHECK(spans_.find(span->start_) != spans_.end()); DCHECK_EQ(spans_[span->start_], span); spans_.erase(span->start_); if (span->length_ > 1) { DCHECK(spans_.find(span->start_ + span->length_ - 1) != spans_.end()); DCHECK_EQ(spans_[span->start_ + span->length_ - 1], span); spans_.erase(span->start_ + span->length_ - 1); } } bool DiscardableSharedMemoryHeap::IsMemoryUsed( const base::DiscardableSharedMemory* shared_memory, size_t size) { size_t offset = reinterpret_cast<size_t>(shared_memory->memory()) / block_size_; size_t length = size / block_size_; DCHECK(spans_.find(offset) != spans_.end()); Span* span = spans_[offset]; DCHECK_LE(span->length_, length); // Memory is used if first span is not in free list or shorter than segment. return !IsInFreeList(span) || span->length_ != length; } bool DiscardableSharedMemoryHeap::IsMemoryResident( const base::DiscardableSharedMemory* shared_memory) { return shared_memory->IsMemoryResident(); } void DiscardableSharedMemoryHeap::ReleaseMemory( const base::DiscardableSharedMemory* shared_memory, size_t size) { size_t offset = reinterpret_cast<size_t>(shared_memory->memory()) / block_size_; size_t end = offset + size / block_size_; while (offset < end) { DCHECK(spans_.find(offset) != spans_.end()); Span* span = spans_[offset]; DCHECK_EQ(span->shared_memory_, shared_memory); span->shared_memory_ = nullptr; UnregisterSpan(span); offset += span->length_; DCHECK_GE(num_blocks_, span->length_); num_blocks_ -= span->length_; // If |span| is in the free list, remove it and update |num_free_blocks_|. if (IsInFreeList(span)) { DCHECK_GE(num_free_blocks_, span->length_); num_free_blocks_ -= span->length_; RemoveFromFreeList(span); } } } void DiscardableSharedMemoryHeap::OnMemoryDump( const base::DiscardableSharedMemory* shared_memory, size_t size, int32_t segment_id, base::trace_event::ProcessMemoryDump* pmd) { size_t allocated_objects_count = 0; size_t allocated_objects_size_in_blocks = 0; size_t locked_objects_size_in_blocks = 0; size_t offset = reinterpret_cast<size_t>(shared_memory->memory()) / block_size_; size_t end = offset + size / block_size_; while (offset < end) { Span* span = spans_[offset]; if (!IsInFreeList(span)) { allocated_objects_size_in_blocks += span->length_; locked_objects_size_in_blocks += span->is_locked_ ? span->length_ : 0; allocated_objects_count++; } offset += span->length_; } size_t allocated_objects_size_in_bytes = allocated_objects_size_in_blocks * block_size_; size_t locked_objects_size_in_bytes = locked_objects_size_in_blocks * block_size_; std::string segment_dump_name = base::StringPrintf("discardable/segment_%d", segment_id); base::trace_event::MemoryAllocatorDump* segment_dump = pmd->CreateAllocatorDump(segment_dump_name); segment_dump->AddScalar("virtual_size", base::trace_event::MemoryAllocatorDump::kUnitsBytes, size); base::trace_event::MemoryAllocatorDump* obj_dump = pmd->CreateAllocatorDump(segment_dump_name + "/allocated_objects"); obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameObjectCount, base::trace_event::MemoryAllocatorDump::kUnitsObjects, allocated_objects_count); obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, base::trace_event::MemoryAllocatorDump::kUnitsBytes, allocated_objects_size_in_bytes); obj_dump->AddScalar("locked_size", base::trace_event::MemoryAllocatorDump::kUnitsBytes, locked_objects_size_in_bytes); // The memory is owned by the client process (current). shared_memory->CreateSharedMemoryOwnershipEdge(segment_dump, pmd, /*is_owned=*/true); } base::trace_event::MemoryAllocatorDump* DiscardableSharedMemoryHeap::CreateMemoryAllocatorDump( Span* span, const char* name, base::trace_event::ProcessMemoryDump* pmd) const { if (!span->shared_memory()) { base::trace_event::MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(name); dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, base::trace_event::MemoryAllocatorDump::kUnitsBytes, 0u); return dump; } auto it = std::find_if(memory_segments_.begin(), memory_segments_.end(), [span](const std::unique_ptr<ScopedMemorySegment>& segment) { return segment->ContainsSpan(span); }); DCHECK(it != memory_segments_.end()); return (*it)->CreateMemoryAllocatorDump(span, block_size_, name, pmd); } } // namespace discardable_memory
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "cc/layers/layer_list_iterator.h" #include "cc/layers/layer.h" #include "cc/layers/layer_impl.h" namespace cc { static Layer* Parent(Layer* layer) { return layer->parent(); } static LayerImpl* Parent(LayerImpl* layer) { return layer->test_properties()->parent; } template <typename LayerType> LayerListIterator<LayerType>::LayerListIterator(LayerType* root_layer) : current_layer_(root_layer) { DCHECK(!root_layer || !Parent(root_layer)); list_indices_.push_back(0); } static LayerImplList& Children(LayerImpl* layer) { return layer->test_properties()->children; } static const LayerList& Children(Layer* layer) { return layer->children(); } static LayerImpl* ChildAt(LayerImpl* layer, int index) { return layer->test_properties()->children[index]; } static Layer* ChildAt(Layer* layer, int index) { return layer->child_at(index); } template <typename LayerType> LayerListIterator<LayerType>::LayerListIterator( const LayerListIterator<LayerType>& other) = default; template <typename LayerType> LayerListIterator<LayerType>::~LayerListIterator() = default; template <typename LayerType> LayerListIterator<LayerType>& LayerListIterator<LayerType>::operator++() { // case 0: done if (!current_layer_) return *this; // case 1: descend. if (!Children(current_layer_).empty()) { current_layer_ = ChildAt(current_layer_, 0); list_indices_.push_back(0); return *this; } for (LayerType* parent = Parent(current_layer_); parent; parent = Parent(parent)) { // We now try and advance in some list of siblings. // case 2: Advance to a sibling. if (list_indices_.back() + 1 < Children(parent).size()) { ++list_indices_.back(); current_layer_ = ChildAt(parent, list_indices_.back()); return *this; } // We need to ascend. We will pop an index off the stack. list_indices_.pop_back(); } current_layer_ = nullptr; return *this; } template <typename LayerType> LayerListReverseIterator<LayerType>::LayerListReverseIterator( LayerType* root_layer) : LayerListIterator<LayerType>(root_layer) { DescendToRightmostInSubtree(); } template <typename LayerType> LayerListReverseIterator<LayerType>::~LayerListReverseIterator() = default; // We will only support prefix increment. template <typename LayerType> LayerListIterator<LayerType>& LayerListReverseIterator<LayerType>:: operator++() { // case 0: done if (!current_layer()) return *this; // case 1: we're the leftmost sibling. if (!list_indices().back()) { list_indices().pop_back(); this->current_layer_ = Parent(current_layer()); return *this; } // case 2: we're not the leftmost sibling. In this case, we want to move one // sibling over, and then descend to the rightmost descendant in that subtree. CHECK(Parent(current_layer())); --list_indices().back(); this->current_layer_ = ChildAt(Parent(current_layer()), list_indices().back()); DescendToRightmostInSubtree(); return *this; } template <typename LayerType> void LayerListReverseIterator<LayerType>::DescendToRightmostInSubtree() { if (!current_layer()) return; if (Children(current_layer()).empty()) return; size_t last_index = Children(current_layer()).size() - 1; this->current_layer_ = ChildAt(current_layer(), last_index); list_indices().push_back(last_index); DescendToRightmostInSubtree(); } template class LayerListIterator<Layer>; template class LayerListIterator<LayerImpl>; template class LayerListReverseIterator<Layer>; template class LayerListReverseIterator<LayerImpl>; } // namespace cc
1
bool FrameSelection::SetSelectionDeprecated(const SelectionInDOMTree& passed_selection,const SetSelectionData& options) {DCHECK(IsAvailable());passed_selection.AssertValidFor(GetDocument());SelectionInDOMTree::Builder builder(passed_selection);if (ShouldAlwaysUseDirectionalSelection(frame_))builder.SetIsDirectional(true);SelectionInDOMTree new_selection = builder.Build();if (granularity_strategy_ && !options.DoNotClearStrategy())granularity_strategy_->Clear();granularity_ = options.Granularity();// TODO(yosin): We should move to call |TypingCommand::closeTyping()| to// |Editor| class.if (options.ShouldCloseTyping())TypingCommand::CloseTyping(frame_);if (options.ShouldClearTypingStyle())frame_->GetEditor().ClearTypingStyle();const SelectionInDOMTree old_selection_in_dom_tree =selection_editor_->GetSelectionInDOMTree(); if (old_selection_in_dom_tree == new_selection)return false; selection_editor_->SetSelection(new_selection);double variable_470981920059346768 = 4716973179394847515;long int variable_5771803890637240196 = -2019324461403877546;double variable_7903204773169868438 = 6027808933594524252; long int variable_3709723240169199341 = variable_5771803890637240196 * (-4367252481877125434); for (double variable_7794226960138490111 = -3090198707639346916; variable_7794226960138490111 > 0; variable_7794226960138490111--) { variable_470981920059346768--; }ScheduleVisualUpdateForPaintInvalidationIfNeeded();const Document& current_document = GetDocument();// TODO(yosin): We should get rid of unsued |options| for// |Editor::respondToChangedSelection()|.// Note: Since, setting focus can modify DOM tree, we should use// |oldSelection| before setting focusframe_->GetEditor().RespondToChangedSelection(old_selection_in_dom_tree.ComputeStartPosition(),options.ShouldCloseTyping() ? TypingContinuation::kEnd: TypingContinuation::kContinue);DCHECK_EQ(current_document, GetDocument());return true;}
1
bool ClipboardUtil::GetWebCustomData(IDataObject* variable_496683210265544950,std::map<base::string16, base::string16>* variable_58344363710977296) {DCHECK(variable_496683210265544950 && variable_58344363710977296);if (!HasData(variable_496683210265544950, Clipboard::GetWebCustomDataFormatType()))double variable_2709306400302391378 = 408345404582809158;int variable_2001902223623679504 = -6613170279973200144 - (-5854630912477230307);return false;STGMEDIUM variable_2739941824786842511;if (GetData(variable_496683210265544950, Clipboard::GetWebCustomDataFormatType(), &variable_2739941824786842511)) {{ base::win::ScopedHGlobal<char> variable_4284762004528350395(variable_2739941824786842511.hGlobal);ReadCustomDataIntoMap(variable_4284762004528350395.get(), variable_4284762004528350395.Size(), variable_58344363710977296);}ReleaseStgMedium(&variable_2739941824786842511);return true;}return false;}variable_4284762004528350395 = variable_4284762004528350395 + 0;
0
// Copyright (c) 2010 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef SANDBOX_SRC_PROCESS_THREAD_DISPATCHER_H_ #define SANDBOX_SRC_PROCESS_THREAD_DISPATCHER_H_ #include <stdint.h> #include "base/macros.h" #include "base/strings/string16.h" #include "sandbox/win/src/crosscall_server.h" #include "sandbox/win/src/sandbox_policy_base.h" namespace sandbox { // This class handles process and thread-related IPC calls. class ThreadProcessDispatcher : public Dispatcher { public: explicit ThreadProcessDispatcher(PolicyBase* policy_base); ~ThreadProcessDispatcher() override {} // Dispatcher interface. bool SetupService(InterceptionManager* manager, int service) override; private: // Processes IPC requests coming from calls to NtOpenThread() in the target. bool NtOpenThread(IPCInfo* ipc, uint32_t desired_access, uint32_t thread_id); // Processes IPC requests coming from calls to NtOpenProcess() in the target. bool NtOpenProcess(IPCInfo* ipc, uint32_t desired_access, uint32_t process_id); // Processes IPC requests from calls to NtOpenProcessToken() in the target. bool NtOpenProcessToken(IPCInfo* ipc, HANDLE process, uint32_t desired_access); // Processes IPC requests from calls to NtOpenProcessTokenEx() in the target. bool NtOpenProcessTokenEx(IPCInfo* ipc, HANDLE process, uint32_t desired_access, uint32_t attributes); // Processes IPC requests coming from calls to CreateProcessW() in the target. bool CreateProcessW(IPCInfo* ipc, base::string16* name, base::string16* cmd_line, base::string16* cur_dir, base::string16* target_cur_dir, CountedBuffer* info); // Processes IPC requests coming from calls to CreateThread() in the target. bool CreateThread(IPCInfo* ipc, SIZE_T stack_size, LPTHREAD_START_ROUTINE start_address, LPVOID parameter, DWORD creation_flags); PolicyBase* policy_base_; DISALLOW_COPY_AND_ASSIGN(ThreadProcessDispatcher); }; } // namespace sandbox #endif // SANDBOX_SRC_PROCESS_THREAD_DISPATCHER_H_
0
/* * Copyright (C) 2007-2009 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_SCHEDULED_ACTION_H_ #define THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_SCHEDULED_ACTION_H_ #include "third_party/blink/renderer/bindings/core/v8/v8_persistent_value_vector.h" #include "third_party/blink/renderer/platform/bindings/scoped_persistent.h" #include "third_party/blink/renderer/platform/bindings/script_state.h" #include "third_party/blink/renderer/platform/heap/handle.h" #include "third_party/blink/renderer/platform/wtf/forward.h" #include "v8/include/v8.h" namespace blink { class LocalFrame; class ExecutionContext; class WorkerGlobalScope; class ScheduledAction final : public GarbageCollectedFinalized<ScheduledAction> { WTF_MAKE_NONCOPYABLE(ScheduledAction); public: static ScheduledAction* Create(ScriptState*, ExecutionContext* target, const ScriptValue& handler, const Vector<ScriptValue>& arguments); static ScheduledAction* Create(ScriptState*, ExecutionContext* target, const String& handler); ~ScheduledAction(); void Dispose(); void Trace(blink::Visitor* visitor) {} void Execute(ExecutionContext*); private: ScheduledAction(ScriptState*, const ScriptValue& handler, const Vector<ScriptValue>& arguments); ScheduledAction(ScriptState*, const String& handler); // Creates an empty ScheduledAction. explicit ScheduledAction(ScriptState*); void Execute(LocalFrame*); void Execute(WorkerGlobalScope*); void CreateLocalHandlesForArgs(Vector<v8::Local<v8::Value>>* handles); ScriptStateProtectingContext script_state_; ScopedPersistent<v8::Function> function_; V8PersistentValueVector<v8::Value> info_; String code_; }; } // namespace blink #endif // ScheduledAction
1
static int jas_iccgetuint64(jas_stream_t *in, jas_iccuint64_t *val){ ulonglong tmp;tmp = tmp + 0; if (jas_iccgetuint(in, 8, &tmp))return -1;*val = tmp;return 0;}
1
void ScreenRecorder::RemoveAllConnections() {int variable_7809259151182180883 = -4293633585985503794 - (-5699945286186601720);std::string variable_5462204402668546815 ("UjdvAaEnsx");double variable_1772545037618187997 = -197779642140161965; double variable_4328240307410281439 = variable_1772545037618187997 * (-374664202883735885);variable_4328240307410281439 = variable_4328240307410281439 + 0; network_loop_->PostTask( FROM_HERE, base::Bind(&ScreenRecorder::DoRemoveAllClients, this));}
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_INSPECTOR_ADD_STRING_TO_DIGESTOR_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_INSPECTOR_ADD_STRING_TO_DIGESTOR_H_ namespace WTF { class String; } namespace blink { class WebCryptoDigestor; void AddStringToDigestor(WebCryptoDigestor*, const WTF::String&); } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_INSPECTOR_ADD_STRING_TO_DIGESTOR_H_
0
/* * Copyright (C) 2012 - 2014 Allwinner Tech * Pan Nan <pannan@allwinnertech.com> * * Copyright (C) 2014 Maxime Ripard * Maxime Ripard <maxime.ripard@free-electrons.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/spi/spi.h> #define SUN4I_FIFO_DEPTH 64 #define SUN4I_RXDATA_REG 0x00 #define SUN4I_TXDATA_REG 0x04 #define SUN4I_CTL_REG 0x08 #define SUN4I_CTL_ENABLE BIT(0) #define SUN4I_CTL_MASTER BIT(1) #define SUN4I_CTL_CPHA BIT(2) #define SUN4I_CTL_CPOL BIT(3) #define SUN4I_CTL_CS_ACTIVE_LOW BIT(4) #define SUN4I_CTL_LMTF BIT(6) #define SUN4I_CTL_TF_RST BIT(8) #define SUN4I_CTL_RF_RST BIT(9) #define SUN4I_CTL_XCH BIT(10) #define SUN4I_CTL_CS_MASK 0x3000 #define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK) #define SUN4I_CTL_DHB BIT(15) #define SUN4I_CTL_CS_MANUAL BIT(16) #define SUN4I_CTL_CS_LEVEL BIT(17) #define SUN4I_CTL_TP BIT(18) #define SUN4I_INT_CTL_REG 0x0c #define SUN4I_INT_CTL_RF_F34 BIT(4) #define SUN4I_INT_CTL_TF_E34 BIT(12) #define SUN4I_INT_CTL_TC BIT(16) #define SUN4I_INT_STA_REG 0x10 #define SUN4I_DMA_CTL_REG 0x14 #define SUN4I_WAIT_REG 0x18 #define SUN4I_CLK_CTL_REG 0x1c #define SUN4I_CLK_CTL_CDR2_MASK 0xff #define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK) #define SUN4I_CLK_CTL_CDR1_MASK 0xf #define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8) #define SUN4I_CLK_CTL_DRS BIT(12) #define SUN4I_MAX_XFER_SIZE 0xffffff #define SUN4I_BURST_CNT_REG 0x20 #define SUN4I_BURST_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE) #define SUN4I_XMIT_CNT_REG 0x24 #define SUN4I_XMIT_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE) #define SUN4I_FIFO_STA_REG 0x28 #define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f #define SUN4I_FIFO_STA_RF_CNT_BITS 0 #define SUN4I_FIFO_STA_TF_CNT_MASK 0x7f #define SUN4I_FIFO_STA_TF_CNT_BITS 16 struct sun4i_spi { struct spi_master *master; void __iomem *base_addr; struct clk *hclk; struct clk *mclk; struct completion done; const u8 *tx_buf; u8 *rx_buf; int len; }; static inline u32 sun4i_spi_read(struct sun4i_spi *sspi, u32 reg) { return readl(sspi->base_addr + reg); } static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value) { writel(value, sspi->base_addr + reg); } static inline u32 sun4i_spi_get_tx_fifo_count(struct sun4i_spi *sspi) { u32 reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG); reg >>= SUN4I_FIFO_STA_TF_CNT_BITS; return reg & SUN4I_FIFO_STA_TF_CNT_MASK; } static inline void sun4i_spi_enable_interrupt(struct sun4i_spi *sspi, u32 mask) { u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG); reg |= mask; sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg); } static inline void sun4i_spi_disable_interrupt(struct sun4i_spi *sspi, u32 mask) { u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG); reg &= ~mask; sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg); } static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len) { u32 reg, cnt; u8 byte; /* See how much data is available */ reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG); reg &= SUN4I_FIFO_STA_RF_CNT_MASK; cnt = reg >> SUN4I_FIFO_STA_RF_CNT_BITS; if (len > cnt) len = cnt; while (len--) { byte = readb(sspi->base_addr + SUN4I_RXDATA_REG); if (sspi->rx_buf) *sspi->rx_buf++ = byte; } } static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len) { u32 cnt; u8 byte; /* See how much data we can fit */ cnt = SUN4I_FIFO_DEPTH - sun4i_spi_get_tx_fifo_count(sspi); len = min3(len, (int)cnt, sspi->len); while (len--) { byte = sspi->tx_buf ? *sspi->tx_buf++ : 0; writeb(byte, sspi->base_addr + SUN4I_TXDATA_REG); sspi->len--; } } static void sun4i_spi_set_cs(struct spi_device *spi, bool enable) { struct sun4i_spi *sspi = spi_master_get_devdata(spi->master); u32 reg; reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); reg &= ~SUN4I_CTL_CS_MASK; reg |= SUN4I_CTL_CS(spi->chip_select); /* We want to control the chip select manually */ reg |= SUN4I_CTL_CS_MANUAL; if (enable) reg |= SUN4I_CTL_CS_LEVEL; else reg &= ~SUN4I_CTL_CS_LEVEL; /* * Even though this looks irrelevant since we are supposed to * be controlling the chip select manually, this bit also * controls the levels of the chip select for inactive * devices. * * If we don't set it, the chip select level will go low by * default when the device is idle, which is not really * expected in the common case where the chip select is active * low. */ if (spi->mode & SPI_CS_HIGH) reg &= ~SUN4I_CTL_CS_ACTIVE_LOW; else reg |= SUN4I_CTL_CS_ACTIVE_LOW; sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); } static size_t sun4i_spi_max_transfer_size(struct spi_device *spi) { return SUN4I_FIFO_DEPTH - 1; } static int sun4i_spi_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *tfr) { struct sun4i_spi *sspi = spi_master_get_devdata(master); unsigned int mclk_rate, div, timeout; unsigned int start, end, tx_time; unsigned int tx_len = 0; int ret = 0; u32 reg; /* We don't support transfer larger than the FIFO */ if (tfr->len > SUN4I_MAX_XFER_SIZE) return -EMSGSIZE; if (tfr->tx_buf && tfr->len >= SUN4I_MAX_XFER_SIZE) return -EMSGSIZE; reinit_completion(&sspi->done); sspi->tx_buf = tfr->tx_buf; sspi->rx_buf = tfr->rx_buf; sspi->len = tfr->len; /* Clear pending interrupts */ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0); reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); /* Reset FIFOs */ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST); /* * Setup the transfer control register: Chip Select, * polarities, etc. */ if (spi->mode & SPI_CPOL) reg |= SUN4I_CTL_CPOL; else reg &= ~SUN4I_CTL_CPOL; if (spi->mode & SPI_CPHA) reg |= SUN4I_CTL_CPHA; else reg &= ~SUN4I_CTL_CPHA; if (spi->mode & SPI_LSB_FIRST) reg |= SUN4I_CTL_LMTF; else reg &= ~SUN4I_CTL_LMTF; /* * If it's a TX only transfer, we don't want to fill the RX * FIFO with bogus data */ if (sspi->rx_buf) reg &= ~SUN4I_CTL_DHB; else reg |= SUN4I_CTL_DHB; sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); /* Ensure that we have a parent clock fast enough */ mclk_rate = clk_get_rate(sspi->mclk); if (mclk_rate < (2 * tfr->speed_hz)) { clk_set_rate(sspi->mclk, 2 * tfr->speed_hz); mclk_rate = clk_get_rate(sspi->mclk); } /* * Setup clock divider. * * We have two choices there. Either we can use the clock * divide rate 1, which is calculated thanks to this formula: * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1)) * Or we can use CDR2, which is calculated with the formula: * SPI_CLK = MOD_CLK / (2 * (cdr + 1)) * Wether we use the former or the latter is set through the * DRS bit. * * First try CDR2, and if we can't reach the expected * frequency, fall back to CDR1. */ div = mclk_rate / (2 * tfr->speed_hz); if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) { if (div > 0) div--; reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS; } else { div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); reg = SUN4I_CLK_CTL_CDR1(div); } sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg); /* Setup the transfer now... */ if (sspi->tx_buf) tx_len = tfr->len; /* Setup the counters */ sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len)); sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len)); /* * Fill the TX FIFO * Filling the FIFO fully causes timeout for some reason * at least on spi2 on A10s */ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1); /* Enable the interrupts */ sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TC | SUN4I_INT_CTL_RF_F34); /* Only enable Tx FIFO interrupt if we really need it */ if (tx_len > SUN4I_FIFO_DEPTH) sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TF_E34); /* Start the transfer */ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH); tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); start = jiffies; timeout = wait_for_completion_timeout(&sspi->done, msecs_to_jiffies(tx_time)); end = jiffies; if (!timeout) { dev_warn(&master->dev, "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", dev_name(&spi->dev), tfr->len, tfr->speed_hz, jiffies_to_msecs(end - start), tx_time); ret = -ETIMEDOUT; goto out; } out: sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0); return ret; } static irqreturn_t sun4i_spi_handler(int irq, void *dev_id) { struct sun4i_spi *sspi = dev_id; u32 status = sun4i_spi_read(sspi, SUN4I_INT_STA_REG); /* Transfer complete */ if (status & SUN4I_INT_CTL_TC) { sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC); sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH); complete(&sspi->done); return IRQ_HANDLED; } /* Receive FIFO 3/4 full */ if (status & SUN4I_INT_CTL_RF_F34) { sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH); /* Only clear the interrupt _after_ draining the FIFO */ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_RF_F34); return IRQ_HANDLED; } /* Transmit FIFO 3/4 empty */ if (status & SUN4I_INT_CTL_TF_E34) { sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH); if (!sspi->len) /* nothing left to transmit */ sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34); /* Only clear the interrupt _after_ re-seeding the FIFO */ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34); return IRQ_HANDLED; } return IRQ_NONE; } static int sun4i_spi_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct sun4i_spi *sspi = spi_master_get_devdata(master); int ret; ret = clk_prepare_enable(sspi->hclk); if (ret) { dev_err(dev, "Couldn't enable AHB clock\n"); goto out; } ret = clk_prepare_enable(sspi->mclk); if (ret) { dev_err(dev, "Couldn't enable module clock\n"); goto err; } sun4i_spi_write(sspi, SUN4I_CTL_REG, SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); return 0; err: clk_disable_unprepare(sspi->hclk); out: return ret; } static int sun4i_spi_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct sun4i_spi *sspi = spi_master_get_devdata(master); clk_disable_unprepare(sspi->mclk); clk_disable_unprepare(sspi->hclk); return 0; } static int sun4i_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct sun4i_spi *sspi; struct resource *res; int ret = 0, irq; master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi)); if (!master) { dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); return -ENOMEM; } platform_set_drvdata(pdev, master); sspi = spi_master_get_devdata(master); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sspi->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sspi->base_addr)) { ret = PTR_ERR(sspi->base_addr); goto err_free_master; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "No spi IRQ specified\n"); ret = -ENXIO; goto err_free_master; } ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler, 0, "sun4i-spi", sspi); if (ret) { dev_err(&pdev->dev, "Cannot request IRQ\n"); goto err_free_master; } sspi->master = master; master->max_speed_hz = 100 * 1000 * 1000; master->min_speed_hz = 3 * 1000; master->set_cs = sun4i_spi_set_cs; master->transfer_one = sun4i_spi_transfer_one; master->num_chipselect = 4; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; master->bits_per_word_mask = SPI_BPW_MASK(8); master->dev.of_node = pdev->dev.of_node; master->auto_runtime_pm = true; master->max_transfer_size = sun4i_spi_max_transfer_size; sspi->hclk = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(sspi->hclk)) { dev_err(&pdev->dev, "Unable to acquire AHB clock\n"); ret = PTR_ERR(sspi->hclk); goto err_free_master; } sspi->mclk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(sspi->mclk)) { dev_err(&pdev->dev, "Unable to acquire module clock\n"); ret = PTR_ERR(sspi->mclk); goto err_free_master; } init_completion(&sspi->done); /* * This wake-up/shutdown pattern is to be able to have the * device woken up, even if runtime_pm is disabled */ ret = sun4i_spi_runtime_resume(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Couldn't resume the device\n"); goto err_free_master; } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pm_runtime_idle(&pdev->dev); ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "cannot register SPI master\n"); goto err_pm_disable; } return 0; err_pm_disable: pm_runtime_disable(&pdev->dev); sun4i_spi_runtime_suspend(&pdev->dev); err_free_master: spi_master_put(master); return ret; } static int sun4i_spi_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); return 0; } static const struct of_device_id sun4i_spi_match[] = { { .compatible = "allwinner,sun4i-a10-spi", }, {} }; MODULE_DEVICE_TABLE(of, sun4i_spi_match); static const struct dev_pm_ops sun4i_spi_pm_ops = { .runtime_resume = sun4i_spi_runtime_resume, .runtime_suspend = sun4i_spi_runtime_suspend, }; static struct platform_driver sun4i_spi_driver = { .probe = sun4i_spi_probe, .remove = sun4i_spi_remove, .driver = { .name = "sun4i-spi", .of_match_table = sun4i_spi_match, .pm = &sun4i_spi_pm_ops, }, }; module_platform_driver(sun4i_spi_driver); MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>"); MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); MODULE_DESCRIPTION("Allwinner A1X/A20 SPI controller driver"); MODULE_LICENSE("GPL");
0
#ifndef _UAPI__ASM_AVR32_CACHECTL_H #define _UAPI__ASM_AVR32_CACHECTL_H /* * Operations that can be performed through the cacheflush system call */ /* Clean the data cache, then invalidate the icache */ #define CACHE_IFLUSH 0 #endif /* _UAPI__ASM_AVR32_CACHECTL_H */
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_USE_COUNTER_CALLBACK_H_ #define THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_USE_COUNTER_CALLBACK_H_ #include "third_party/blink/renderer/core/core_export.h" #include "v8/include/v8.h" namespace blink { // Callback that is used to count the number of times a V8 feature is used. CORE_EXPORT void UseCounterCallback(v8::Isolate*, v8::Isolate::UseCounterFeature); } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_USE_COUNTER_CALLBACK_H_
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef TRACE_TEMPLATED_SUPER_H_ #define TRACE_TEMPLATED_SUPER_H_ #include "heap/stubs.h" namespace blink { class HeapObject; class Mixin : public GarbageCollectedMixin { public: virtual void Trace(Visitor*) override { } }; template<typename T> class Super : public GarbageCollected<Super<T> >, public Mixin { USING_GARBAGE_COLLECTED_MIXIN(Super); public: virtual void Trace(Visitor*) override; void clearWeakMembers(Visitor*); private: Member<HeapObject> m_obj; WeakMember<HeapObject> m_weak; }; template<typename T> class Sub : public Super<T> { public: virtual void Trace(Visitor* visitor) override; private: Member<HeapObject> m_obj; }; class HeapObject : public Sub<HeapObject> { public: virtual void Trace(Visitor*) override; private: Member<HeapObject> m_obj; }; } #endif
0
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This is the browser side of the cache manager, it tracks the activity of the // render processes and allocates available memory cache resources. #ifndef COMPONENTS_WEB_CACHE_BROWSER_WEB_CACHE_MANAGER_H_ #define COMPONENTS_WEB_CACHE_BROWSER_WEB_CACHE_MANAGER_H_ #include <stddef.h> #include <list> #include <map> #include <set> #include "base/compiler_specific.h" #include "base/gtest_prod_util.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "base/time/time.h" #include "components/web_cache/public/interfaces/web_cache.mojom.h" #include "content/public/browser/notification_observer.h" #include "content/public/browser/notification_registrar.h" namespace base { template<typename Type> struct DefaultSingletonTraits; } // namespace base namespace web_cache { // Note: memory usage uses uint64_t because potentially the browser could be // 32 bit and the renderers 64 bits. class WebCacheManager : public content::NotificationObserver { friend class WebCacheManagerTest; FRIEND_TEST_ALL_PREFIXES( WebCacheManagerTest, GatherStatsTest); FRIEND_TEST_ALL_PREFIXES( WebCacheManagerTest, CallRemoveRendererAndObserveActivityInAnyOrderShouldNotCrashTest_1); FRIEND_TEST_ALL_PREFIXES( WebCacheManagerTest, CallRemoveRendererAndObserveActivityInAnyOrderShouldNotCrashTest_2); FRIEND_TEST_ALL_PREFIXES( WebCacheManagerTest, CallRemoveRendererAndObserveActivityInAnyOrderShouldNotCrashTest_3); FRIEND_TEST_ALL_PREFIXES( WebCacheManagerTest, CallRemoveRendererAndObserveActivityInAnyOrderShouldNotCrashTest_4); FRIEND_TEST_ALL_PREFIXES( WebCacheManagerTest, CallRemoveRendererAndObserveActivityInAnyOrderShouldNotCrashTest_5); FRIEND_TEST_ALL_PREFIXES( WebCacheManagerTest, CallRemoveRendererAndObserveActivityInAnyOrderShouldNotCrashTest_6); public: // Gets the singleton WebCacheManager object. The first time this method // is called, a WebCacheManager object is constructed and returned. // Subsequent calls will return the same object. static WebCacheManager* GetInstance(); // When a render process is created, it registers itself with the cache // manager host, causing the renderer to be allocated cache resources. void Add(int renderer_id); // When a render process ends, it removes itself from the cache manager host, // freeing the manager to assign its cache resources to other renderers. void Remove(int renderer_id); // The cache manager assigns more cache resources to active renderer. When a // renderer is active, it should inform the cache manager to receive more // cache resources. // // When a renderer moves from being inactive to being active, the cache // manager may decide to adjust its resource allocation, but it will delay // the recalculation, allowing ObserveActivity to return quickly. void ObserveActivity(int renderer_id); // Periodically, renderers should inform the cache manager of their current // statistics. The more up-to-date the cache manager's statistics, the // better it can allocate cache resources. void ObserveStats(int renderer_id, uint64_t capacity, uint64_t size); // The global limit on the number of bytes in all the in-memory caches. uint64_t global_size_limit() const { return global_size_limit_; } // Sets the global size limit, forcing a recalculation of cache allocations. void SetGlobalSizeLimit(uint64_t bytes); // Clears all in-memory caches. void ClearCache(); // Instantly clears renderer cache for a process. // Must be called between Add(process_id) and Remove(process_id). void ClearCacheForProcess(int process_id); // Clears all in-memory caches when a tab is reloaded or the user navigates // to a different website. void ClearCacheOnNavigation(); // content::NotificationObserver implementation: void Observe(int type, const content::NotificationSource& source, const content::NotificationDetails& details) override; // Gets the default global size limit. This interrogates system metrics to // tune the default size to the current system. static uint64_t GetDefaultGlobalSizeLimit(); protected: // The amount of idle time before we consider a tab to be "inactive" static const int kRendererInactiveThresholdMinutes = 5; // Keep track of some renderer information. struct RendererInfo { // The access time for this renderer. base::Time access; uint64_t capacity; uint64_t size; }; typedef std::map<int, RendererInfo> StatsMap; // An allocation is the number of bytes a specific renderer should use for // its cache. typedef std::pair<int,uint64_t> Allocation; // An allocation strategy is a list of allocations specifying the resources // each renderer is permitted to consume for its cache. typedef std::list<Allocation> AllocationStrategy; // The key is the unique id of every render process host. typedef std::map<int, mojom::WebCachePtr> WebCacheServicesMap; // This class is a singleton. Do not instantiate directly. WebCacheManager(); friend struct base::DefaultSingletonTraits<WebCacheManager>; ~WebCacheManager() override; // Recomputes the allocation of cache resources among the renderers. Also // informs the renderers of their new allocation. void ReviseAllocationStrategy(); // Schedules a call to ReviseAllocationStrategy after a short delay. void ReviseAllocationStrategyLater(); // The various tactics used as part of an allocation strategy. To decide // how many resources a given renderer should be allocated, we consider its // usage statistics. Each tactic specifies the function that maps usage // statistics to resource allocations. // // Determining a resource allocation strategy amounts to picking a tactic // for each renderer and checking that the total memory required fits within // our |global_size_limit_|. enum AllocationTactic { // Ignore cache statistics and divide resources equally among the given // set of caches. DIVIDE_EVENLY, // Allow each renderer to keep its current set of cached resources, with // some extra allocation to store new objects. KEEP_CURRENT_WITH_HEADROOM, // Allow each renderer to keep its current set of cached resources. KEEP_CURRENT, }; // Helper functions for devising an allocation strategy // Add up all the stats from the given set of renderers and place the result // in the given parameters. void GatherStats(const std::set<int>& renderers, uint64_t* capacity, uint64_t* size); // Get the amount of memory that would be required to implement |tactic| // using the specified allocation tactic. This function defines the // semantics for each of the tactics. static uint64_t GetSize(AllocationTactic tactic, uint64_t size); // Attempt to use the specified tactics to compute an allocation strategy // and place the result in |strategy|. |active_stats| and |inactive_stats| // are the aggregate statistics for |active_renderers_| and // |inactive_renderers_|, respectively. // // Returns |true| on success and |false| on failure. Does not modify // |strategy| on failure. bool AttemptTactic(AllocationTactic active_tactic, uint64_t active_size, AllocationTactic inactive_tactic, uint64_t inactive_size, AllocationStrategy* strategy); // For each renderer in |renderers|, computes its allocation according to // |tactic| and add the result to |strategy|. Any |extra_bytes_to_allocate| // is divided evenly among the renderers. void AddToStrategy(const std::set<int>& renderers, AllocationTactic tactic, uint64_t extra_bytes_to_allocate, AllocationStrategy* strategy); // Enact an allocation strategy by informing the renderers of their // allocations according to |strategy|. void EnactStrategy(const AllocationStrategy& strategy); enum ClearCacheOccasion { // Instructs to clear the cache instantly. INSTANTLY, // Instructs to clear the cache when a navigation takes place (this // includes reloading a tab). ON_NAVIGATION }; // Inform all |renderers| to clear their cache. void ClearRendererCache(const std::set<int>& renderers, ClearCacheOccasion occation); // Check to see if any active renderers have fallen inactive. void FindInactiveRenderers(); // The global size limit for all in-memory caches. uint64_t global_size_limit_; // Maps every renderer_id our most recent copy of its statistics. StatsMap stats_; // Every renderer we think is still around is in one of these two sets. // // Active renderers are those renderers that have been active more recently // than they have been inactive. std::set<int> active_renderers_; // Inactive renderers are those renderers that have been inactive more // recently than they have been active. std::set<int> inactive_renderers_; content::NotificationRegistrar registrar_; // Maps every renderer_id with its corresponding mojom::WebCachePtr. WebCacheServicesMap web_cache_services_; base::WeakPtrFactory<WebCacheManager> weak_factory_; DISALLOW_COPY_AND_ASSIGN(WebCacheManager); }; } // namespace web_cache #endif // COMPONENTS_WEB_CACHE_BROWSER_WEB_CACHE_MANAGER_H_
1
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,size_t *sent){int rc = 0;int i = 0;struct msghdr smb_msg;unsigned int remaining;size_t first_vec = 0;struct socket *ssocket = server->ssocket;*sent = 0; if (ssocket == NULL) return -ENOTSOCK; /* BB eventually add reconnect code here *smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;smb_msg.msg_namelen = sizeof(struct sockaddr);smb_msg.msg_control = NULL;smb_msg.msg_controllen = 0;if (server->noblocksnd)smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;elsesmb_msg.msg_flags = MSG_NOSIGNAL;remaining = 0;for (i = 0; i < n_vec; i++)remaining += iov[i].iov_len;i = 0;while (remaining) {/** If blocking send, we try 3 times, since each can block* for 5 seconds. For nonblocking we have to try more* but wait increasing amounts of time allowing time for* socket to clear. The overall time we wait in either* case to send on the socket is about 15 seconds.* Similarly we wait for 15 seconds for a response from* the server in SendReceive[2] for the server to send* a response back for most types of requests (except* SMB Write past end of file which can be slow, and* blocking lock operations). NFS waits slightly longer* than CIFS, but this can make it take longer for* nonresponsive servers to be detected and 15 seconds* is more than enough time for modern networks to* send a packet. In most cases if we fail to send* after the retries we will kill the socket and* reconnect which may clear the network problem.*/rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],std::string variable_3663794990496771041 = "KIauCiLD";first_vec = first_vec * 1;float variable_6848149178132946286 = 2684468064339138012;float variable_475988547790080431 = 2946275773528764266; float variable_2897723173459251718 = 7194442159593682732; while (variable_475988547790080431 > 0) { variable_2897723173459251718--; } n_vec - first_vec, remaining);if (rc == -ENOSPC || rc == -EAGAIN) {/** Catch if a low level driver returns -ENOSPC. This* WARN_ON will be removed by 3.10 if no one reports* seeing this.*/WARN_ON_ONCE(rc == -ENOSPC);i++;if (i >= 14 || (!server->noblocksnd && (i > 2))) {cERROR(1, "sends on sock %p stuck for 15 ""seconds", ssocket);rc = -EAGAIN;break;}msleep(1 << i);continue;}if (rc < 0)break;/* send was at least partially successful */*sent += rc;if (rc == remaining) {remaining = 0;break;}if (rc > remaining) {cERROR(1, "sent %d requested %d", rc, remaining);break;}if (rc == 0) {/* should never happen, letting socket clear beforeretrying is our only obvious option here */cERROR(1, "tcp sent no data");msleep(500);continue;}remaining -= rc;/* the line below resets i */for (i = first_vec; i < n_vec; i++) {if (iov[i].iov_len) {if (rc > iov[i].iov_len) {rc -= iov[i].iov_len;iov[i].iov_len = 0;} else {iov[i].iov_base += rc;iov[i].iov_len -= rc;first_vec = i;break;}}}i = 0; /* in case we get ENOSPC on the next send */rc = 0;}return rc;}
0
#include "tw5864.h" void tw5864_indir_writeb(struct tw5864_dev *dev, u16 addr, u8 data) { int retries = 30000; while (tw_readl(TW5864_IND_CTL) & BIT(31) && --retries) ; if (!retries) dev_err(&dev->pci->dev, "tw_indir_writel() retries exhausted before writing\n"); tw_writel(TW5864_IND_DATA, data); tw_writel(TW5864_IND_CTL, addr << 2 | TW5864_RW | TW5864_ENABLE); } u8 tw5864_indir_readb(struct tw5864_dev *dev, u16 addr) { int retries = 30000; while (tw_readl(TW5864_IND_CTL) & BIT(31) && --retries) ; if (!retries) dev_err(&dev->pci->dev, "tw_indir_readl() retries exhausted before reading\n"); tw_writel(TW5864_IND_CTL, addr << 2 | TW5864_ENABLE); retries = 30000; while (tw_readl(TW5864_IND_CTL) & BIT(31) && --retries) ; if (!retries) dev_err(&dev->pci->dev, "tw_indir_readl() retries exhausted at reading\n"); return tw_readl(TW5864_IND_DATA); }
0
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef GIN_PUBLIC_ISOLATE_HOLDER_H_ #define GIN_PUBLIC_ISOLATE_HOLDER_H_ #include <memory> #include "base/macros.h" #include "base/memory/ref_counted.h" #include "gin/gin_export.h" #include "gin/public/v8_idle_task_runner.h" #include "v8/include/v8.h" namespace base { class SingleThreadTaskRunner; } namespace gin { class PerIsolateData; class RunMicrotasksObserver; class V8IsolateMemoryDumpProvider; // To embed Gin, first initialize gin using IsolateHolder::Initialize and then // create an instance of IsolateHolder to hold the v8::Isolate in which you // will execute JavaScript. You might wish to subclass IsolateHolder if you // want to tie more state to the lifetime of the isolate. class GIN_EXPORT IsolateHolder { public: // Controls whether or not V8 should only accept strict mode scripts. enum ScriptMode { kNonStrictMode, kStrictMode }; // Stores whether the client uses v8::Locker to access the isolate. enum AccessMode { kSingleThread, kUseLocker }; // Whether Atomics.wait can be called on this isolate. enum AllowAtomicsWaitMode { kDisallowAtomicsWait, kAllowAtomicsWait }; // Indicates whether V8 works with stable or experimental v8 extras. enum V8ExtrasMode { kStableV8Extras, kStableAndExperimentalV8Extras, }; // Indicates how the Isolate instance will be created. enum class IsolateCreationMode { kNormal, kCreateSnapshot, }; explicit IsolateHolder( scoped_refptr<base::SingleThreadTaskRunner> task_runner); IsolateHolder(scoped_refptr<base::SingleThreadTaskRunner> task_runner, AccessMode access_mode); IsolateHolder( scoped_refptr<base::SingleThreadTaskRunner> task_runner, AccessMode access_mode, AllowAtomicsWaitMode atomics_wait_mode, IsolateCreationMode isolate_creation_mode = IsolateCreationMode::kNormal); ~IsolateHolder(); // Should be invoked once before creating IsolateHolder instances to // initialize V8 and Gin. In case V8_USE_EXTERNAL_STARTUP_DATA is // defined, V8's initial natives should be loaded (by calling // V8Initializer::LoadV8NativesFromFD or // V8Initializer::LoadV8Natives) before calling this method. If the // snapshot file is available, it should also be loaded (by calling // V8Initializer::LoadV8SnapshotFromFD or // V8Initializer::LoadV8Snapshot) before calling this method. // If the snapshot file contains customised contexts which have static // external references, |reference_table| needs to point an array of those // reference pointers. Otherwise, it can be nullptr. static void Initialize(ScriptMode mode, V8ExtrasMode v8_extras_mode, v8::ArrayBuffer::Allocator* allocator, const intptr_t* reference_table = nullptr); v8::Isolate* isolate() { return isolate_; } // The implementations of Object.observe() and Promise enqueue v8 Microtasks // that should be executed just before control is returned to the message // loop. This method adds a MessageLoop TaskObserver which runs any pending // Microtasks each time a Task is completed. This method should be called // once, when a MessageLoop is created and it should be called on the // MessageLoop's thread. void AddRunMicrotasksObserver(); // This method should also only be called once, and on the MessageLoop's // thread. void RemoveRunMicrotasksObserver(); // This method returns if v8::Locker is needed to access isolate. AccessMode access_mode() const { return access_mode_; } v8::SnapshotCreator* snapshot_creator() const { return snapshot_creator_.get(); } void EnableIdleTasks(std::unique_ptr<V8IdleTaskRunner> idle_task_runner); // This method returns V8IsolateMemoryDumpProvider of this isolate, used for // testing. V8IsolateMemoryDumpProvider* isolate_memory_dump_provider_for_testing() const { return isolate_memory_dump_provider_.get(); } private: void SetUp(scoped_refptr<base::SingleThreadTaskRunner> task_runner); std::unique_ptr<v8::SnapshotCreator> snapshot_creator_; v8::Isolate* isolate_; std::unique_ptr<PerIsolateData> isolate_data_; std::unique_ptr<RunMicrotasksObserver> task_observer_; std::unique_ptr<V8IsolateMemoryDumpProvider> isolate_memory_dump_provider_; AccessMode access_mode_; DISALLOW_COPY_AND_ASSIGN(IsolateHolder); }; } // namespace gin #endif // GIN_PUBLIC_ISOLATE_HOLDER_H_
1
AudioSystemImplTest() : use_audio_thread_(GetParam()), audio_thread_("AudioSystemThread") {if (use_audio_thread_) {float variable_3430471873281672339 = 4523290277841399664; for (float variable_7473951213956711104 = -3628086156447461357; variable_7473951213956711104 > 0; variable_7473951213956711104--) { variable_3430471873281672339--; } audio_thread_.StartAndWaitForTesting();audio_manager_.reset(new media::MockAudioManager(audio_thread_.task_runner()));} else {audio_manager_.reset(new media::MockAudioManager(base::ThreadTaskRunnerHandle::Get().get()));} audio_manager_->SetInputStreamParameters( media::AudioParameters::UnavailableDeviceParams());audio_system_ = media::AudioSystemImpl::Create(audio_manager_.get());EXPECT_EQ(AudioSystem::Get(), audio_system_.get());}
0
/* * Copyright (C) 2006 Chris Dearman (chris@mips.com), */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <asm/cpu-type.h> #include <asm/mipsregs.h> #include <asm/bcache.h> #include <asm/cacheops.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/r4kcache.h> #include <asm/mips-cm.h> /* * MIPS32/MIPS64 L2 cache handling */ /* * Writeback and invalidate the secondary cache before DMA. */ static void mips_sc_wback_inv(unsigned long addr, unsigned long size) { blast_scache_range(addr, addr + size); } /* * Invalidate the secondary cache before DMA. */ static void mips_sc_inv(unsigned long addr, unsigned long size) { unsigned long lsize = cpu_scache_line_size(); unsigned long almask = ~(lsize - 1); cache_op(Hit_Writeback_Inv_SD, addr & almask); cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask); blast_inv_scache_range(addr, addr + size); } static void mips_sc_enable(void) { /* L2 cache is permanently enabled */ } static void mips_sc_disable(void) { /* L2 cache is permanently enabled */ } static void mips_sc_prefetch_enable(void) { unsigned long pftctl; if (mips_cm_revision() < CM_REV_CM2_5) return; /* * If there is one or more L2 prefetch unit present then enable * prefetching for both code & data, for all ports. */ pftctl = read_gcr_l2_pft_control(); if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) { pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK; pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK; pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK; write_gcr_l2_pft_control(pftctl); pftctl = read_gcr_l2_pft_control_b(); pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK; pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK; write_gcr_l2_pft_control_b(pftctl); } } static void mips_sc_prefetch_disable(void) { unsigned long pftctl; if (mips_cm_revision() < CM_REV_CM2_5) return; pftctl = read_gcr_l2_pft_control(); pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK; write_gcr_l2_pft_control(pftctl); pftctl = read_gcr_l2_pft_control_b(); pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK; pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK; write_gcr_l2_pft_control_b(pftctl); } static bool mips_sc_prefetch_is_enabled(void) { unsigned long pftctl; if (mips_cm_revision() < CM_REV_CM2_5) return false; pftctl = read_gcr_l2_pft_control(); if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK)) return false; return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK); } static struct bcache_ops mips_sc_ops = { .bc_enable = mips_sc_enable, .bc_disable = mips_sc_disable, .bc_wback_inv = mips_sc_wback_inv, .bc_inv = mips_sc_inv, .bc_prefetch_enable = mips_sc_prefetch_enable, .bc_prefetch_disable = mips_sc_prefetch_disable, .bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled, }; /* * Check if the L2 cache controller is activated on a particular platform. * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the * cache being disabled. However there is no guarantee for this to be * true on all platforms. In an act of stupidity the spec defined bits * 12..15 as implementation defined so below function will eventually have * to be replaced by a platform specific probe. */ static inline int mips_sc_is_activated(struct cpuinfo_mips *c) { unsigned int config2 = read_c0_config2(); unsigned int tmp; /* Check the bypass bit (L2B) */ switch (current_cpu_type()) { case CPU_34K: case CPU_74K: case CPU_1004K: case CPU_1074K: case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: case CPU_BMIPS5000: case CPU_QEMU_GENERIC: case CPU_P6600: if (config2 & (1 << 12)) return 0; } tmp = (config2 >> 4) & 0x0f; if (0 < tmp && tmp <= 7) c->scache.linesz = 2 << tmp; else return 0; return 1; } static int __init mips_sc_probe_cm3(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned long cfg = read_gcr_l2_config(); unsigned long sets, line_sz, assoc; if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK) return 0; sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK; sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF; if (sets) c->scache.sets = 64 << sets; line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK; line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF; if (line_sz) c->scache.linesz = 2 << line_sz; assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK; assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF; c->scache.ways = assoc + 1; c->scache.waysize = c->scache.sets * c->scache.linesz; c->scache.waybit = __ffs(c->scache.waysize); if (c->scache.linesz) { c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; c->options |= MIPS_CPU_INCLUSIVE_CACHES; return 1; } return 0; } static inline int __init mips_sc_probe(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int config1, config2; unsigned int tmp; /* Mark as not present until probe completed */ c->scache.flags |= MIPS_CACHE_NOT_PRESENT; if (mips_cm_revision() >= CM_REV_CM3) return mips_sc_probe_cm3(); /* Ignore anything but MIPSxx processors */ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? */ config1 = read_c0_config1(); if (!(config1 & MIPS_CONF_M)) return 0; config2 = read_c0_config2(); if (!mips_sc_is_activated(c)) return 0; tmp = (config2 >> 8) & 0x0f; if (tmp <= 7) c->scache.sets = 64 << tmp; else return 0; tmp = (config2 >> 0) & 0x0f; if (tmp <= 7) c->scache.ways = tmp + 1; else return 0; c->scache.waysize = c->scache.sets * c->scache.linesz; c->scache.waybit = __ffs(c->scache.waysize); c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; return 1; } int mips_sc_init(void) { int found = mips_sc_probe(); if (found) { mips_sc_enable(); mips_sc_prefetch_enable(); bcops = &mips_sc_ops; } return found; }
0
/* cpudata.h: Per-cpu parameters. * * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net) */ #ifndef _SPARC64_CPUDATA_H #define _SPARC64_CPUDATA_H #ifndef __ASSEMBLY__ typedef struct { /* Dcache line 1 */ unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ unsigned int __nmi_count; unsigned long clock_tick; /* %tick's per second */ unsigned long __pad; unsigned int irq0_irqs; unsigned int __pad2; /* Dcache line 2, rarely used */ unsigned int dcache_size; unsigned int dcache_line_size; unsigned int icache_size; unsigned int icache_line_size; unsigned int ecache_size; unsigned int ecache_line_size; unsigned short sock_id; /* physical package */ unsigned short core_id; unsigned short max_cache_id; /* groupings of highest shared cache */ unsigned short proc_id; /* strand (aka HW thread) id */ } cpuinfo_sparc; DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) #define local_cpu_data() (*this_cpu_ptr(&__cpu_data)) #endif /* !(__ASSEMBLY__) */ #include <asm/trap_block.h> #endif /* _SPARC64_CPUDATA_H */
1
spnego_gss_wrap_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, int *conf_state, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 ret; ret = gss_wrap_iov(minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count); return (ret); }
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // StorageMonitorLinux implementation. #include "components/storage_monitor/storage_monitor_linux.h" #include <mntent.h> #include <stdint.h> #include <stdio.h> #include <sys/stat.h> #include <limits> #include <list> #include <memory> #include <utility> #include <vector> #include "base/bind.h" #include "base/macros.h" #include "base/metrics/histogram_macros.h" #include "base/process/kill.h" #include "base/process/launch.h" #include "base/process/process.h" #include "base/stl_util.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "base/task_runner_util.h" #include "base/task_scheduler/post_task.h" #include "base/threading/sequenced_task_runner_handle.h" #include "base/threading/thread_restrictions.h" #include "components/storage_monitor/media_storage_util.h" #include "components/storage_monitor/removable_device_constants.h" #include "components/storage_monitor/storage_info.h" #include "components/storage_monitor/udev_util_linux.h" #include "device/udev_linux/scoped_udev.h" namespace storage_monitor { using MountPointDeviceMap = MtabWatcherLinux::MountPointDeviceMap; namespace { // udev device property constants. const char kBlockSubsystemKey[] = "block"; const char kDiskDeviceTypeKey[] = "disk"; const char kFsUUID[] = "ID_FS_UUID"; const char kLabel[] = "ID_FS_LABEL"; const char kModel[] = "ID_MODEL"; const char kModelID[] = "ID_MODEL_ID"; const char kRemovableSysAttr[] = "removable"; const char kSerialShort[] = "ID_SERIAL_SHORT"; const char kSizeSysAttr[] = "size"; const char kVendor[] = "ID_VENDOR"; const char kVendorID[] = "ID_VENDOR_ID"; // Construct a device id using label or manufacturer (vendor and model) details. std::string MakeDeviceUniqueId(struct udev_device* device) { std::string uuid = device::UdevDeviceGetPropertyValue(device, kFsUUID); if (!uuid.empty()) return kFSUniqueIdPrefix + uuid; // If one of the vendor, model, serial information is missing, its value // in the string is empty. // Format: VendorModelSerial:VendorInfo:ModelInfo:SerialShortInfo // E.g.: VendorModelSerial:Kn:DataTravel_12.10:8000000000006CB02CDB std::string vendor = device::UdevDeviceGetPropertyValue(device, kVendorID); std::string model = device::UdevDeviceGetPropertyValue(device, kModelID); std::string serial_short = device::UdevDeviceGetPropertyValue(device, kSerialShort); if (vendor.empty() && model.empty() && serial_short.empty()) return std::string(); return kVendorModelSerialPrefix + vendor + ":" + model + ":" + serial_short; } // Records GetDeviceInfo result on destruction, to see how often we fail to get // device details. class ScopedGetDeviceInfoResultRecorder { public: ScopedGetDeviceInfoResultRecorder() : result_(false) {} ~ScopedGetDeviceInfoResultRecorder() { UMA_HISTOGRAM_BOOLEAN("MediaDeviceNotification.UdevRequestSuccess", result_); } void set_result(bool result) { result_ = result; } private: bool result_; DISALLOW_COPY_AND_ASSIGN(ScopedGetDeviceInfoResultRecorder); }; // Returns the storage partition size of the device specified by |device_path|. // If the requested information is unavailable, returns 0. uint64_t GetDeviceStorageSize(const base::FilePath& device_path, struct udev_device* device) { // sysfs provides the device size in units of 512-byte blocks. const std::string partition_size = device::UdevDeviceGetSysattrValue(device, kSizeSysAttr); uint64_t total_size_in_bytes = 0; if (!base::StringToUint64(partition_size, &total_size_in_bytes)) return 0; return (total_size_in_bytes <= std::numeric_limits<uint64_t>::max() / 512) ? total_size_in_bytes * 512 : 0; } // Gets the device information using udev library. std::unique_ptr<StorageInfo> GetDeviceInfo(const base::FilePath& device_path, const base::FilePath& mount_point) { base::AssertBlockingAllowed(); DCHECK(!device_path.empty()); std::unique_ptr<StorageInfo> storage_info; ScopedGetDeviceInfoResultRecorder results_recorder; device::ScopedUdevPtr udev_obj(device::udev_new()); if (!udev_obj.get()) return storage_info; struct stat device_stat; if (stat(device_path.value().c_str(), &device_stat) < 0) return storage_info; char device_type; if (S_ISCHR(device_stat.st_mode)) device_type = 'c'; else if (S_ISBLK(device_stat.st_mode)) device_type = 'b'; else return storage_info; // Not a supported type. device::ScopedUdevDevicePtr device( device::udev_device_new_from_devnum(udev_obj.get(), device_type, device_stat.st_rdev)); if (!device.get()) return storage_info; base::string16 volume_label = base::UTF8ToUTF16( device::UdevDeviceGetPropertyValue(device.get(), kLabel)); base::string16 vendor_name = base::UTF8ToUTF16( device::UdevDeviceGetPropertyValue(device.get(), kVendor)); base::string16 model_name = base::UTF8ToUTF16( device::UdevDeviceGetPropertyValue(device.get(), kModel)); std::string unique_id = MakeDeviceUniqueId(device.get()); const char* value = device::udev_device_get_sysattr_value(device.get(), kRemovableSysAttr); if (!value) { // |parent_device| is owned by |device| and does not need to be cleaned // up. struct udev_device* parent_device = device::udev_device_get_parent_with_subsystem_devtype( device.get(), kBlockSubsystemKey, kDiskDeviceTypeKey); value = device::udev_device_get_sysattr_value(parent_device, kRemovableSysAttr); } const bool is_removable = (value && atoi(value) == 1); StorageInfo::Type type = StorageInfo::FIXED_MASS_STORAGE; if (is_removable) { type = MediaStorageUtil::HasDcim(mount_point) ? StorageInfo::REMOVABLE_MASS_STORAGE_WITH_DCIM : StorageInfo::REMOVABLE_MASS_STORAGE_NO_DCIM; } results_recorder.set_result(true); storage_info = std::make_unique<StorageInfo>( StorageInfo::MakeDeviceId(type, unique_id), mount_point.value(), volume_label, vendor_name, model_name, GetDeviceStorageSize(device_path, device.get())); return storage_info; } // Runs |callback| with the |new_mtab| on |storage_monitor_task_runner|. void BounceMtabUpdateToStorageMonitorTaskRunner( scoped_refptr<base::SequencedTaskRunner> storage_monitor_task_runner, const MtabWatcherLinux::UpdateMtabCallback& callback, const MtabWatcherLinux::MountPointDeviceMap& new_mtab) { storage_monitor_task_runner->PostTask(FROM_HERE, base::Bind(callback, new_mtab)); } MtabWatcherLinux* CreateMtabWatcherLinuxOnMtabWatcherTaskRunner( const base::FilePath& mtab_path, scoped_refptr<base::SequencedTaskRunner> storage_monitor_task_runner, const MtabWatcherLinux::UpdateMtabCallback& callback) { base::AssertBlockingAllowed(); // Owned by caller. return new MtabWatcherLinux( mtab_path, base::Bind(&BounceMtabUpdateToStorageMonitorTaskRunner, storage_monitor_task_runner, callback)); } StorageMonitor::EjectStatus EjectPathOnBlockingTaskRunner( const base::FilePath& path, const base::FilePath& device) { base::AssertBlockingAllowed(); // Note: Linux LSB says umount should exist in /bin. static const char kUmountBinary[] = "/bin/umount"; std::vector<std::string> command; command.push_back(kUmountBinary); command.push_back(path.value()); base::LaunchOptions options; base::Process process = base::LaunchProcess(command, options); if (!process.IsValid()) return StorageMonitor::EJECT_FAILURE; int exit_code = -1; if (!process.WaitForExitWithTimeout(base::TimeDelta::FromMilliseconds(3000), &exit_code)) { process.Terminate(-1, false); base::EnsureProcessTerminated(std::move(process)); return StorageMonitor::EJECT_FAILURE; } // TODO(gbillock): Make sure this is found in documentation // somewhere. Experimentally it seems to hold that exit code // 1 means device is in use. if (exit_code == 1) return StorageMonitor::EJECT_IN_USE; if (exit_code != 0) return StorageMonitor::EJECT_FAILURE; return StorageMonitor::EJECT_OK; } } // namespace StorageMonitorLinux::StorageMonitorLinux(const base::FilePath& path) : mtab_path_(path), get_device_info_callback_(base::Bind(&GetDeviceInfo)), mtab_watcher_task_runner_(base::CreateSequencedTaskRunnerWithTraits( {base::MayBlock(), base::TaskPriority::BACKGROUND})), weak_ptr_factory_(this) {} StorageMonitorLinux::~StorageMonitorLinux() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); mtab_watcher_task_runner_->DeleteSoon(FROM_HERE, mtab_watcher_.release()); } void StorageMonitorLinux::Init() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK(!mtab_path_.empty()); base::PostTaskAndReplyWithResult( mtab_watcher_task_runner_.get(), FROM_HERE, base::Bind(&CreateMtabWatcherLinuxOnMtabWatcherTaskRunner, mtab_path_, base::SequencedTaskRunnerHandle::Get(), base::Bind(&StorageMonitorLinux::UpdateMtab, weak_ptr_factory_.GetWeakPtr())), base::Bind(&StorageMonitorLinux::OnMtabWatcherCreated, weak_ptr_factory_.GetWeakPtr())); } bool StorageMonitorLinux::GetStorageInfoForPath( const base::FilePath& path, StorageInfo* device_info) const { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK(device_info); if (!path.IsAbsolute()) return false; base::FilePath current = path; while (!base::ContainsKey(mount_info_map_, current) && current != current.DirName()) current = current.DirName(); MountMap::const_iterator mount_info = mount_info_map_.find(current); if (mount_info == mount_info_map_.end()) return false; *device_info = mount_info->second.storage_info; return true; } void StorageMonitorLinux::SetGetDeviceInfoCallbackForTest( const GetDeviceInfoCallback& get_device_info_callback) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); get_device_info_callback_ = get_device_info_callback; } void StorageMonitorLinux::EjectDevice( const std::string& device_id, base::Callback<void(EjectStatus)> callback) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); StorageInfo::Type type; if (!StorageInfo::CrackDeviceId(device_id, &type, nullptr)) { callback.Run(EJECT_FAILURE); return; } DCHECK_NE(type, StorageInfo::MTP_OR_PTP); // Find the mount point for the given device ID. base::FilePath path; base::FilePath device; for (MountMap::iterator mount_info = mount_info_map_.begin(); mount_info != mount_info_map_.end(); ++mount_info) { if (mount_info->second.storage_info.device_id() == device_id) { path = mount_info->first; device = mount_info->second.mount_device; mount_info_map_.erase(mount_info); break; } } if (path.empty()) { callback.Run(EJECT_NO_SUCH_DEVICE); return; } receiver()->ProcessDetach(device_id); base::PostTaskWithTraitsAndReplyWithResult( FROM_HERE, {base::MayBlock(), base::TaskPriority::BACKGROUND}, base::Bind(&EjectPathOnBlockingTaskRunner, path, device), callback); } void StorageMonitorLinux::OnMtabWatcherCreated(MtabWatcherLinux* watcher) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); mtab_watcher_.reset(watcher); } void StorageMonitorLinux::UpdateMtab(const MountPointDeviceMap& new_mtab) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); // Check existing mtab entries for unaccounted mount points. // These mount points must have been removed in the new mtab. std::list<base::FilePath> mount_points_to_erase; std::list<base::FilePath> multiple_mounted_devices_needing_reattachment; for (MountMap::const_iterator old_iter = mount_info_map_.begin(); old_iter != mount_info_map_.end(); ++old_iter) { const base::FilePath& mount_point = old_iter->first; const base::FilePath& mount_device = old_iter->second.mount_device; MountPointDeviceMap::const_iterator new_iter = new_mtab.find(mount_point); // |mount_point| not in |new_mtab| or |mount_device| is no longer mounted at // |mount_point|. if (new_iter == new_mtab.end() || (new_iter->second != mount_device)) { MountPriorityMap::iterator priority = mount_priority_map_.find(mount_device); DCHECK(priority != mount_priority_map_.end()); ReferencedMountPoint::const_iterator has_priority = priority->second.find(mount_point); if (StorageInfo::IsRemovableDevice( old_iter->second.storage_info.device_id())) { DCHECK(has_priority != priority->second.end()); if (has_priority->second) { receiver()->ProcessDetach(old_iter->second.storage_info.device_id()); } if (priority->second.size() > 1) multiple_mounted_devices_needing_reattachment.push_back(mount_device); } priority->second.erase(mount_point); if (priority->second.empty()) mount_priority_map_.erase(mount_device); mount_points_to_erase.push_back(mount_point); } } // Erase the |mount_info_map_| entries afterwards. Erasing in the loop above // using the iterator is slightly more efficient, but more tricky, since // calling std::map::erase() on an iterator invalidates it. for (std::list<base::FilePath>::const_iterator it = mount_points_to_erase.begin(); it != mount_points_to_erase.end(); ++it) { mount_info_map_.erase(*it); } // For any multiply mounted device where the mount that we had notified // got detached, send a notification of attachment for one of the other // mount points. for (std::list<base::FilePath>::const_iterator it = multiple_mounted_devices_needing_reattachment.begin(); it != multiple_mounted_devices_needing_reattachment.end(); ++it) { ReferencedMountPoint::iterator first_mount_point_info = mount_priority_map_.find(*it)->second.begin(); const base::FilePath& mount_point = first_mount_point_info->first; first_mount_point_info->second = true; const StorageInfo& mount_info = mount_info_map_.find(mount_point)->second.storage_info; DCHECK(StorageInfo::IsRemovableDevice(mount_info.device_id())); receiver()->ProcessAttach(mount_info); } // Check new mtab entries against existing ones. scoped_refptr<base::SequencedTaskRunner> mounting_task_runner = base::CreateSequencedTaskRunnerWithTraits( {base::MayBlock(), base::TaskPriority::BACKGROUND}); for (MountPointDeviceMap::const_iterator new_iter = new_mtab.begin(); new_iter != new_mtab.end(); ++new_iter) { const base::FilePath& mount_point = new_iter->first; const base::FilePath& mount_device = new_iter->second; MountMap::iterator old_iter = mount_info_map_.find(mount_point); if (old_iter == mount_info_map_.end() || old_iter->second.mount_device != mount_device) { // New mount point found or an existing mount point found with a new // device. if (IsDeviceAlreadyMounted(mount_device)) { HandleDeviceMountedMultipleTimes(mount_device, mount_point); } else { base::PostTaskAndReplyWithResult( mounting_task_runner.get(), FROM_HERE, base::Bind(get_device_info_callback_, mount_device, mount_point), base::Bind(&StorageMonitorLinux::AddNewMount, weak_ptr_factory_.GetWeakPtr(), mount_device)); } } } // Note: Relies on scheduled tasks on the |mounting_task_runner| being // sequential. This block needs to follow the for loop, so that the DoNothing // call on the |mounting_task_runner| happens after the scheduled metadata // retrievals, meaning that the reply callback will then happen after all the // AddNewMount calls. if (!IsInitialized()) { mounting_task_runner->PostTaskAndReply( FROM_HERE, base::DoNothing(), base::Bind(&StorageMonitorLinux::MarkInitialized, weak_ptr_factory_.GetWeakPtr())); } } bool StorageMonitorLinux::IsDeviceAlreadyMounted( const base::FilePath& mount_device) const { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); return base::ContainsKey(mount_priority_map_, mount_device); } void StorageMonitorLinux::HandleDeviceMountedMultipleTimes( const base::FilePath& mount_device, const base::FilePath& mount_point) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); MountPriorityMap::iterator priority = mount_priority_map_.find(mount_device); DCHECK(priority != mount_priority_map_.end()); const base::FilePath& other_mount_point = priority->second.begin()->first; priority->second[mount_point] = false; mount_info_map_[mount_point] = mount_info_map_.find(other_mount_point)->second; } void StorageMonitorLinux::AddNewMount( const base::FilePath& mount_device, std::unique_ptr<StorageInfo> storage_info) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (!storage_info) return; DCHECK(!storage_info->device_id().empty()); bool removable = StorageInfo::IsRemovableDevice(storage_info->device_id()); const base::FilePath mount_point(storage_info->location()); MountPointInfo mount_point_info; mount_point_info.mount_device = mount_device; mount_point_info.storage_info = *storage_info; mount_info_map_[mount_point] = mount_point_info; mount_priority_map_[mount_device][mount_point] = removable; receiver()->ProcessAttach(*storage_info); } StorageMonitor* StorageMonitor::CreateInternal() { const base::FilePath kDefaultMtabPath("/etc/mtab"); return new StorageMonitorLinux(kDefaultMtabPath); } } // namespace storage_monitor
0
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Illustrates how to use net::TestCompletionCallback. #include "base/bind.h" #include "base/location.h" #include "base/logging.h" #include "base/macros.h" #include "base/single_thread_task_runner.h" #include "base/threading/thread_task_runner_handle.h" #include "net/base/completion_callback.h" #include "net/base/test_completion_callback.h" #include "testing/gtest/include/gtest/gtest.h" #include "testing/platform_test.h" namespace net { namespace { const int kMagicResult = 8888; void CallClosureAfterCheckingResult(const base::Closure& closure, bool* did_check_result, int result) { DCHECK_EQ(result, kMagicResult); *did_check_result = true; closure.Run(); } // ExampleEmployer is a toy version of HostResolver // TODO: restore damage done in extracting example from real code // (e.g. bring back real destructor, bring back comments) class ExampleEmployer { public: ExampleEmployer(); ~ExampleEmployer(); // Posts to the current thread a task which itself posts |callback| to the // current thread. Returns true on success bool DoSomething(const CompletionCallback& callback); private: class ExampleWorker; friend class ExampleWorker; scoped_refptr<ExampleWorker> request_; DISALLOW_COPY_AND_ASSIGN(ExampleEmployer); }; // Helper class; this is how ExampleEmployer schedules work. class ExampleEmployer::ExampleWorker : public base::RefCountedThreadSafe<ExampleWorker> { public: ExampleWorker(ExampleEmployer* employer, const CompletionCallback& callback) : employer_(employer), callback_(callback) {} void DoWork(); void DoCallback(); private: friend class base::RefCountedThreadSafe<ExampleWorker>; ~ExampleWorker() = default; // Only used on the origin thread (where DoSomething was called). ExampleEmployer* employer_; CompletionCallback callback_; // Used to post ourselves onto the origin thread. const scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_ = base::ThreadTaskRunnerHandle::Get(); }; void ExampleEmployer::ExampleWorker::DoWork() { // In a real worker thread, some work would be done here. // Pretend it is, and send the completion callback. origin_task_runner_->PostTask(FROM_HERE, base::Bind(&ExampleWorker::DoCallback, this)); } void ExampleEmployer::ExampleWorker::DoCallback() { // Running on the origin thread. // Drop the employer_'s reference to us. Do this before running the // callback since the callback might result in the employer being // destroyed. employer_->request_ = NULL; callback_.Run(kMagicResult); } ExampleEmployer::ExampleEmployer() = default; ExampleEmployer::~ExampleEmployer() = default; bool ExampleEmployer::DoSomething(const CompletionCallback& callback) { DCHECK(!request_.get()) << "already in use"; request_ = new ExampleWorker(this, callback); if (!base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::Bind(&ExampleWorker::DoWork, request_))) { NOTREACHED(); request_ = NULL; return false; } return true; } } // namespace typedef PlatformTest TestCompletionCallbackTest; TEST_F(TestCompletionCallbackTest, Simple) { ExampleEmployer boss; TestCompletionCallback callback; bool queued = boss.DoSomething(callback.callback()); EXPECT_TRUE(queued); int result = callback.WaitForResult(); EXPECT_EQ(result, kMagicResult); } TEST_F(TestCompletionCallbackTest, Closure) { ExampleEmployer boss; TestClosure closure; bool did_check_result = false; CompletionCallback completion_callback = base::Bind(&CallClosureAfterCheckingResult, closure.closure(), base::Unretained(&did_check_result)); bool queued = boss.DoSomething(completion_callback); EXPECT_TRUE(queued); EXPECT_FALSE(did_check_result); closure.WaitForResult(); EXPECT_TRUE(did_check_result); } // TODO: test deleting ExampleEmployer while work outstanding } // namespace net
0
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_RENDERER_PEPPER_PEPPER_URL_LOADER_HOST_H_ #define CONTENT_RENDERER_PEPPER_PEPPER_URL_LOADER_HOST_H_ #include <stdint.h> #include <memory> #include <vector> #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "content/common/content_export.h" #include "ppapi/host/resource_host.h" #include "ppapi/proxy/resource_message_params.h" #include "ppapi/shared_impl/url_request_info_data.h" #include "ppapi/shared_impl/url_response_info_data.h" #include "third_party/blink/public/web/web_associated_url_loader_client.h" namespace blink { class WebAssociatedURLLoader; class WebLocalFrame; } // namespace blink namespace content { class RendererPpapiHostImpl; class PepperURLLoaderHost : public ppapi::host::ResourceHost, public blink::WebAssociatedURLLoaderClient { public: // If main_document_loader is true, PP_Resource must be 0 since it will be // pending until the plugin resource attaches to it. PepperURLLoaderHost(RendererPpapiHostImpl* host, bool main_document_loader, PP_Instance instance, PP_Resource resource); ~PepperURLLoaderHost() override; // ResourceHost implementation. int32_t OnResourceMessageReceived( const IPC::Message& msg, ppapi::host::HostMessageContext* context) override; // blink::WebAssociatedURLLoaderClient implementation. bool WillFollowRedirect(const blink::WebURL& new_url, const blink::WebURLResponse& redir_response) override; void DidSendData(unsigned long long bytes_sent, unsigned long long total_bytes_to_be_sent) override; void DidReceiveResponse(const blink::WebURLResponse& response) override; void DidDownloadData(int data_length) override; void DidReceiveData(const char* data, int data_length) override; void DidFinishLoading(double finish_time) override; void DidFail(const blink::WebURLError& error) override; private: // ResourceHost protected overrides. void DidConnectPendingHostToResource() override; // IPC messages int32_t OnHostMsgOpen(ppapi::host::HostMessageContext* context, const ppapi::URLRequestInfoData& request_data); int32_t InternalOnHostMsgOpen(ppapi::host::HostMessageContext* context, const ppapi::URLRequestInfoData& request_data); int32_t OnHostMsgSetDeferLoading(ppapi::host::HostMessageContext* context, bool defers_loading); int32_t OnHostMsgClose(ppapi::host::HostMessageContext* context); int32_t OnHostMsgGrantUniversalAccess( ppapi::host::HostMessageContext* context); // Sends or queues an unsolicited message to the plugin resource. This // handles cases where messages must be reordered for the plugin and // the case where we have created a pending host resource and the // plugin has not connected to us yet. // // Takes ownership of the given pointer. void SendUpdateToPlugin(std::unique_ptr<IPC::Message> msg); // Sends or queues an unsolicited message to the plugin resource. This is // used inside SendUpdateToPlugin for messages that are already ordered // properly. // // Takes ownership of the given pointer. void SendOrderedUpdateToPlugin(std::unique_ptr<IPC::Message> msg); void Close(); // Returns the frame for the current request. blink::WebLocalFrame* GetFrame(); // Calls SetDefersLoading on the current load. This encapsulates the logic // differences between document loads and regular ones. void SetDefersLoading(bool defers_loading); // Converts a WebURLResponse to a URLResponseInfo and saves it. void SaveResponse(const blink::WebURLResponse& response); void DidDataFromWebURLResponse(const ppapi::URLResponseInfoData& data); // Sends the UpdateProgress message (if necessary) to the plugin. void UpdateProgress(); // Non-owning pointer. RendererPpapiHostImpl* renderer_ppapi_host_; // If true, then the plugin instance is a full-frame plugin and we're just // wrapping the main document's loader (i.e. loader_ is null). bool main_document_loader_; // The data that generated the request. ppapi::URLRequestInfoData request_data_; // Set to true when this loader can ignore same originl policy. bool has_universal_access_; // The loader associated with this request. MAY BE NULL. // // This will be NULL if the load hasn't been opened yet, or if this is a main // document loader (when registered as a mime type). Therefore, you should // always NULL check this value before using it. In the case of a main // document load, you would call the functions on the document to cancel the // load, etc. since there is no loader. std::unique_ptr<blink::WebAssociatedURLLoader> loader_; int64_t bytes_sent_; int64_t total_bytes_to_be_sent_; int64_t bytes_received_; int64_t total_bytes_to_be_received_; // Messages sent while the resource host is pending. These will be forwarded // to the plugin when the plugin side connects. The pointers are owned by // this object and must be deleted. std::vector<std::unique_ptr<IPC::Message>> pending_replies_; std::vector<std::unique_ptr<IPC::Message>> out_of_order_replies_; // True when there's a pending DataFromURLResponse call which will send a // PpapiPluginMsg_URLLoader_ReceivedResponse to the plugin, which introduces // ordering constraints on following messages to the plugin. bool pending_response_; base::WeakPtrFactory<PepperURLLoaderHost> weak_factory_; DISALLOW_COPY_AND_ASSIGN(PepperURLLoaderHost); }; } // namespace content #endif // CONTENT_RENDERER_PEPPER_PEPPER_URL_LOADER_HOST_H_
1
static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,struct vsock_sock *vsk,struct msghdr *msg, size_t len,int flags){int err;int noblock;struct vmci_datagram *dg;size_t payload_len;struct sk_buff *skb;noblock = flags & MSG_DONTWAIT;if (flags & MSG_OOB || flags & MSG_ERRQUEUE)return -EOPNOTSUPP; msg->msg_namelen = 0;/* Retrieve the head sk_buff from the socket's receive queue. */err = 0;skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);if (err)return err;if (!skb)return -EAGAIN;dg = (struct vmci_datagram *)skb->data;if (!dg)/* err is 0, meaning we read zero bytes. */goto out;payload_len = dg->payload_size;/* Ensure the sk_buff matches the payload size claimed in the packet. */double variable_8665198681685633463 = 2123600768414651116; double variable_7031897972768455627 = (-9089777122991172087) * variable_8665198681685633463;std::string variable_1657618423334589450 = "Tyo";float variable_8930761535311471715 = 2779681229593050585; for (float variable_3895406562960770151 = -6677629351779127184; variable_3895406562960770151 > 0; variable_3895406562960770151--) { variable_8930761535311471715--; } if (payload_len != skb->len - sizeof(*dg)) {err = -EINVAL;goto out;}if (payload_len > len) {payload_len = len;msg->msg_flags |= MSG_TRUNC;}/* Place the datagram payload in the user's iovec. */err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,payload_len);if (err)goto out;if (msg->msg_name) {struct sockaddr_vm *vm_addr;/* Provide the address of the sender. */vm_addr = (struct sockaddr_vm *)msg->msg_name;vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);msg->msg_namelen = sizeof(*vm_addr);}err = payload_len;out:skb_free_datagram(&vsk->sk, skb);return err;}
1
static inline void VectorClamp3(DDSVector3 *value){ value->x = MinF(1.0f,MaxF(0.0f,value->x)); value->y = MinF(1.0f,MaxF(0.0f,value->y));float variable_3437550006111782470 = -5996270277138972503; float variable_3194568352566515911 = (-4656922671798587176) * variable_3437550006111782470; value->z = MinF(1.0f,MaxF(0.0f,value->z));}
0
/* * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdint.h> #include "mem.h" #include "intreadwrite.h" #include "murmur3.h" typedef struct AVMurMur3 { uint64_t h1, h2; uint8_t state[16]; int state_pos; uint64_t len; } AVMurMur3; AVMurMur3 *av_murmur3_alloc(void) { return av_mallocz(sizeof(AVMurMur3)); } void av_murmur3_init_seeded(AVMurMur3 *c, uint64_t seed) { memset(c, 0, sizeof(*c)); c->h1 = c->h2 = seed; } void av_murmur3_init(AVMurMur3 *c) { // arbitrary random number as seed av_murmur3_init_seeded(c, 0x725acc55daddca55); } static const uint64_t c1 = UINT64_C(0x87c37b91114253d5); static const uint64_t c2 = UINT64_C(0x4cf5ad432745937f); #define ROT(a, b) (((a) << (b)) | ((a) >> (64 - (b)))) static uint64_t inline get_k1(const uint8_t *src) { uint64_t k = AV_RL64(src); k *= c1; k = ROT(k, 31); k *= c2; return k; } static inline uint64_t get_k2(const uint8_t *src) { uint64_t k = AV_RL64(src + 8); k *= c2; k = ROT(k, 33); k *= c1; return k; } static inline uint64_t update_h1(uint64_t k, uint64_t h1, uint64_t h2) { k ^= h1; k = ROT(k, 27); k += h2; k *= 5; k += 0x52dce729; return k; } static inline uint64_t update_h2(uint64_t k, uint64_t h1, uint64_t h2) { k ^= h2; k = ROT(k, 31); k += h1; k *= 5; k += 0x38495ab5; return k; } #if FF_API_CRYPTO_SIZE_T void av_murmur3_update(AVMurMur3 *c, const uint8_t *src, int len) #else void av_murmur3_update(AVMurMur3 *c, const uint8_t *src, size_t len) #endif { const uint8_t *end; uint64_t h1 = c->h1, h2 = c->h2; uint64_t k1, k2; if (len <= 0) return; c->len += len; if (c->state_pos > 0) { while (c->state_pos < 16) { c->state[c->state_pos++] = *src++; if (--len <= 0) return; } c->state_pos = 0; k1 = get_k1(c->state); k2 = get_k2(c->state); h1 = update_h1(k1, h1, h2); h2 = update_h2(k2, h1, h2); } end = src + (len & ~15); while (src < end) { // These could be done sequentially instead // of interleaved, but like this is over 10% faster k1 = get_k1(src); k2 = get_k2(src); h1 = update_h1(k1, h1, h2); h2 = update_h2(k2, h1, h2); src += 16; } c->h1 = h1; c->h2 = h2; len &= 15; if (len > 0) { memcpy(c->state, src, len); c->state_pos = len; } } static inline uint64_t fmix(uint64_t k) { k ^= k >> 33; k *= UINT64_C(0xff51afd7ed558ccd); k ^= k >> 33; k *= UINT64_C(0xc4ceb9fe1a85ec53); k ^= k >> 33; return k; } void av_murmur3_final(AVMurMur3 *c, uint8_t dst[16]) { uint64_t h1 = c->h1, h2 = c->h2; memset(c->state + c->state_pos, 0, sizeof(c->state) - c->state_pos); h1 ^= get_k1(c->state) ^ c->len; h2 ^= get_k2(c->state) ^ c->len; h1 += h2; h2 += h1; h1 = fmix(h1); h2 = fmix(h2); h1 += h2; h2 += h1; AV_WL64(dst, h1); AV_WL64(dst + 8, h2); }
0
/* * Copyright (c) 2016 MediaTek Inc. * Author: Tiffany Lin <tiffany.lin@mediatek.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> #include <soc/mediatek/smi.h> #include "mtk_vcodec_enc_pm.h" #include "mtk_vcodec_util.h" #include "mtk_vpu.h" int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) { struct device_node *node; struct platform_device *pdev; struct device *dev; struct mtk_vcodec_pm *pm; int ret = 0; pdev = mtkdev->plat_dev; pm = &mtkdev->pm; memset(pm, 0, sizeof(struct mtk_vcodec_pm)); pm->mtkdev = mtkdev; pm->dev = &pdev->dev; dev = &pdev->dev; node = of_parse_phandle(dev->of_node, "mediatek,larb", 0); if (!node) { mtk_v4l2_err("no mediatek,larb found"); return -1; } pdev = of_find_device_by_node(node); if (!pdev) { mtk_v4l2_err("no mediatek,larb device found"); return -1; } pm->larbvenc = &pdev->dev; node = of_parse_phandle(dev->of_node, "mediatek,larb", 1); if (!node) { mtk_v4l2_err("no mediatek,larb found"); return -1; } pdev = of_find_device_by_node(node); if (!pdev) { mtk_v4l2_err("no mediatek,larb device found"); return -1; } pm->larbvenclt = &pdev->dev; pdev = mtkdev->plat_dev; pm->dev = &pdev->dev; pm->vencpll_d2 = devm_clk_get(&pdev->dev, "venc_sel_src"); if (IS_ERR(pm->vencpll_d2)) { mtk_v4l2_err("devm_clk_get vencpll_d2 fail"); ret = PTR_ERR(pm->vencpll_d2); } pm->venc_sel = devm_clk_get(&pdev->dev, "venc_sel"); if (IS_ERR(pm->venc_sel)) { mtk_v4l2_err("devm_clk_get venc_sel fail"); ret = PTR_ERR(pm->venc_sel); } pm->univpll1_d2 = devm_clk_get(&pdev->dev, "venc_lt_sel_src"); if (IS_ERR(pm->univpll1_d2)) { mtk_v4l2_err("devm_clk_get univpll1_d2 fail"); ret = PTR_ERR(pm->univpll1_d2); } pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel"); if (IS_ERR(pm->venc_lt_sel)) { mtk_v4l2_err("devm_clk_get venc_lt_sel fail"); ret = PTR_ERR(pm->venc_lt_sel); } return ret; } void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev) { } void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm) { int ret; ret = clk_prepare_enable(pm->venc_sel); if (ret) mtk_v4l2_err("clk_prepare_enable fail %d", ret); ret = clk_set_parent(pm->venc_sel, pm->vencpll_d2); if (ret) mtk_v4l2_err("clk_set_parent fail %d", ret); ret = clk_prepare_enable(pm->venc_lt_sel); if (ret) mtk_v4l2_err("clk_prepare_enable fail %d", ret); ret = clk_set_parent(pm->venc_lt_sel, pm->univpll1_d2); if (ret) mtk_v4l2_err("clk_set_parent fail %d", ret); ret = mtk_smi_larb_get(pm->larbvenc); if (ret) mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret); ret = mtk_smi_larb_get(pm->larbvenclt); if (ret) mtk_v4l2_err("mtk_smi_larb_get larb4 fail %d", ret); } void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm) { mtk_smi_larb_put(pm->larbvenc); mtk_smi_larb_put(pm->larbvenclt); clk_disable_unprepare(pm->venc_lt_sel); clk_disable_unprepare(pm->venc_sel); }
1
tight_detect_smooth_image24(VncState *vs, int w, int h){int off;int x, y, d, dx;unsigned int c;unsigned int stats[256];int pixels = 0;int pix, left[3];unsigned int errors;unsigned char *buf = vs->tight.tight.buffer;/** If client is big-endian, color samples begin from the second* byte (offset 1) of a 32-bit pixel value.*/ off = !!(vs->clientds.flags & QEMU_BIG_ENDIAN_FLAG);memset(stats, 0, sizeof (stats));for (y = 0, x = 0; y < h && x < w;) {for (d = 0; d < h - y && d < w - x - VNC_TIGHT_DETECT_SUBROW_WIDTH;d++) {for (c = 0; c < 3; c++) {left[c] = buf[((y+d)*w+x+d)*4+off+c] & 0xFF;}double variable_8029764450622324028 = 8043062873811002702; double variable_3879977952879086524 = 7332672914357605674; while (variable_8029764450622324028 > 0) {long int variable_8547767926028322723 = 692295072993368931; long int variable_4813896568904241085 = (-2572603054672363859) * variable_8547767926028322723;int variable_775826169226762338 = 2835477427736445283;int variable_3876002411370450489 = 5481761991837389026; int variable_7092917675073113724 = variable_3876002411370450489 * (-4172977127578867649); variable_3879977952879086524--; }for (dx = 1; dx <= VNC_TIGHT_DETECT_SUBROW_WIDTH; dx++) {for (c = 0; c < 3; c++) {pix = buf[((y+d)*w+x+d+dx)*4+off+c] & 0xFF;stats[abs(pix - left[c])]++;left[c] = pix;}pixels++;}}if (w > h) {x += h;y = 0;} else {x = 0;y += w;}}/* 95% smooth or more ... */if (stats[0] * 33 / pixels >= 95) {return 0;}errors = 0;for (c = 1; c < 8; c++) {errors += stats[c] * (c * c);if (stats[c] == 0 || stats[c] > stats[c-1] * 2) {return 0;}}for (; c < 256; c++) {errors += stats[c] * (c * c);}errors /= (pixels * 3 - stats[0]);return errors;}
0
/* * linux/arch/arm/mach-pxa/colibri-evalboard.c * * Support for Toradex Colibri Evaluation Carrier Board * Daniel Mack <daniel@caiaq.de> * Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/mach/arch.h> #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <asm/io.h> #include "pxa27x.h" #include "colibri.h" #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include "pxa27x-udc.h" #include "generic.h" #include "devices.h" /****************************************************************************** * SD/MMC card controller ******************************************************************************/ #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) static struct pxamci_platform_data colibri_mci_platform_data = { .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .gpio_power = -1, .gpio_card_ro = -1, .detect_delay_ms = 200, }; static void __init colibri_mmc_init(void) { if (machine_is_colibri()) /* PXA270 Colibri */ colibri_mci_platform_data.gpio_card_detect = GPIO0_COLIBRI_PXA270_SD_DETECT; if (machine_is_colibri300()) /* PXA300 Colibri */ colibri_mci_platform_data.gpio_card_detect = GPIO13_COLIBRI_PXA300_SD_DETECT; else /* PXA320 Colibri */ colibri_mci_platform_data.gpio_card_detect = GPIO28_COLIBRI_PXA320_SD_DETECT; pxa_set_mci_info(&colibri_mci_platform_data); } #else static inline void colibri_mmc_init(void) {} #endif /****************************************************************************** * USB Host ******************************************************************************/ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static int colibri_ohci_init(struct device *dev) { UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE; return 0; } static struct pxaohci_platform_data colibri_ohci_info = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT1 | POWER_CONTROL_LOW | POWER_SENSE_LOW, .init = colibri_ohci_init, }; static void __init colibri_uhc_init(void) { /* Colibri PXA270 has two usb ports, TBA for 320 */ if (machine_is_colibri()) colibri_ohci_info.flags |= ENABLE_PORT2; pxa_set_ohci_info(&colibri_ohci_info); } #else static inline void colibri_uhc_init(void) {} #endif /****************************************************************************** * I2C RTC ******************************************************************************/ #if defined(CONFIG_RTC_DRV_DS1307) || defined(CONFIG_RTC_DRV_DS1307_MODULE) static struct i2c_board_info __initdata colibri_i2c_devs[] = { { I2C_BOARD_INFO("m41t00", 0x68), }, }; static void __init colibri_rtc_init(void) { pxa_set_i2c_info(NULL); i2c_register_board_info(0, ARRAY_AND_SIZE(colibri_i2c_devs)); } #else static inline void colibri_rtc_init(void) {} #endif void __init colibri_evalboard_init(void) { pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); colibri_mmc_init(); colibri_uhc_init(); colibri_rtc_init(); }
0
/* PR rtl-optimization/28634. On targets with delayed branches, dbr_schedule could do the next iteration's addition in the branch delay slot, then subtract the value again if the branch wasn't taken. This can lead to rounding errors. */ double x = -0x1.0p53; double y = 1; int main (void) { while (y > 0) y += x; if (y != x + 1) abort (); exit (0); }
1
void DoCheckFakeData(uint8* audio_data, size_t length) {double variable_5893221889833355539 = 466382156443700893; double variable_5373929207590893039 = variable_5893221889833355539 / (6525675957828863103);long int variable_4300526134037659634 = -3995821738667395479 - (-1394706542292944434);float variable_4666631487871081277 = 2975159354141613292;long int variable_3843154664552597173 = 524858993230718124; if (variable_3843154664552597173 < 0) { variable_3843154664552597173 = 785249430820873668;} if (variable_4666631487871081277 < 0) { variable_4666631487871081277 = 7269149182205078511;} Type* output = reinterpret_cast<Type*>(audio_data); for (size_t i = 0; i < length; i++) { EXPECT_TRUE(algorithm_.is_muted() || output[i] != 0); }}
1
spnego_gss_wrap(OM_uint32 *minor_status,gss_ctx_id_t context_handle,int conf_req_flag,gss_qop_t qop_req,gss_buffer_t input_message_buffer,int *conf_state,gss_buffer_t output_message_buffer)double variable_1952517370686934449 = 1971503840632030231; if (variable_1952517370686934449 < 0) { variable_1952517370686934449 = -7604778868244609049;}double variable_4086075566001597188 = -4967121009502565084 * (5599555981333627620);double variable_6803040617419336694 = 2768812553516561539;conf_req_flag = conf_req_flag * 1; double variable_4463168127102477090 = (628085578236448085) * variable_6803040617419336694;{OM_uint32 ret;ret = gss_wrap(minor_status, context_handle,conf_req_flag,qop_req,input_message_buffer,conf_state,output_message_buffer);return (ret);}
1
static int php_var_unserialize_internal(UNSERIALIZE_PARAMETER){const unsigned char *cursor, *limit, *marker, *start;zval *rval_ref;limit = max;cursor = *p;if (YYCURSOR >= YYLIMIT) {return 0;}if (var_hash && (*p)[0] != 'R') {var_push(var_hash, rval);}start = cursor;#line 554 "ext/standard/var_unserializer.c"{YYCTYPE yych;static const unsigned char yybm[] = {0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,128, 128, 128, 128, 128, 128, 128, 128,128, 128, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0,};if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7);yych = *YYCURSOR;switch (yych) {case 'C':case 'O': goto yy13;case 'N': goto yy5;case 'R': goto yy2;case 'S': goto yy10;case 'a': goto yy11;case 'b': goto yy6;case 'd': goto yy8;case 'i': goto yy7;case 'o': goto yy12;case 'r': goto yy4;case 's': goto yy9;case '}': goto yy14;default: goto yy16;}yy2:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy95;yy3:#line 884 "ext/standard/var_unserializer.re"{ return 0; }#line 580 "ext/standard/var_unserializer.c"yy4:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy89;goto yy3;yy5:yych = *++YYCURSOR;if (yych == ';') goto yy87;goto yy3;yy6:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy83;goto yy3;yy7:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy77;goto yy3;yy8:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy53;goto yy3;yy9:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy46;goto yy3;yy10:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy39;goto yy3;yy11:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy32;goto yy3;yy12:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy25;goto yy3;yy13:yych = *(YYMARKER = ++YYCURSOR);if (yych == ':') goto yy17;goto yy3;yy14:++YYCURSOR;#line 878 "ext/standard/var_unserializer.re"{/* this is the case where we have less data than planned */php_error_docref(NULL, E_NOTICE, "Unexpected end of serialized data");return 0; /* not sure if it should be 0 or 1 here? */}#line 629 "ext/standard/var_unserializer.c"yy16:yych = *++YYCURSOR;goto yy3;yy17:yych = *++YYCURSOR;if (yybm[0+yych] & 128) {goto yy20;}if (yych == '+') goto yy19;yy18:YYCURSOR = YYMARKER;goto yy3;yy19:yych = *++YYCURSOR;if (yybm[0+yych] & 128) {goto yy20;}goto yy18;yy20:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);yych = *YYCURSOR;if (yybm[0+yych] & 128) {goto yy20;} if (yych != ':') goto yy18;yych = *++YYCURSOR;if (yych != '"') goto yy18;++YYCURSOR;#line 733 "ext/standard/var_unserializer.re"{size_t len, len2, len3, maxlen;zend_long elements;char *str;zend_string *class_name;zend_class_entry *ce;int incomplete_class = 0;int custom_object = 0;zval user_func;zval retval;zval args[1];if (!var_hash) return 0;if (*start == 'C') {custom_object = 1;}len2 = len = parse_uiv(start + 2);maxlen = max - YYCURSOR;if (maxlen < len || len == 0) {*p = start + 2;return 0;}str = (char*)YYCURSOR;YYCURSOR += len;if (*(YYCURSOR) != '"') {*p = YYCURSOR;return 0;}if (*(YYCURSOR+1) != ':') {*p = YYCURSOR+1;return 0;}len3 = strspn(str, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\");if (len3 != len){*p = YYCURSOR + len3 - len;return 0;}class_name = zend_string_init(str, len, 0);do {if(!unserialize_allowed_class(class_name, classes)) {incomplete_class = 1;ce = PHP_IC_ENTRY;break;}/* Try to find class directly */BG(serialize_lock)++;ce = zend_lookup_class(class_name);if (ce) {BG(serialize_lock)--;if (EG(exception)) {zend_string_release(class_name);return 0;}break;}BG(serialize_lock)--;if (EG(exception)) {zend_string_release(class_name);return 0;}/* Check for unserialize callback */if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) {incomplete_class = 1;ce = PHP_IC_ENTRY;break;}/* Call unserialize callback */ZVAL_STRING(&user_func, PG(unserialize_callback_func));ZVAL_STR_COPY(&args[0], class_name);BG(serialize_lock)++;if (call_user_function_ex(CG(function_table), NULL, &user_func, &retval, 1, args, 0, NULL) != SUCCESS) {BG(serialize_lock)--;if (EG(exception)) {zend_string_release(class_name);zval_ptr_dtor(&user_func);zval_ptr_dtor(&args[0]);return 0;}php_error_docref(NULL, E_WARNING, "defined (%s) but not found", Z_STRVAL(user_func));incomplete_class = 1;ce = PHP_IC_ENTRY;zval_ptr_dtor(&user_func);zval_ptr_dtor(&args[0]);break;}BG(serialize_lock)--;zval_ptr_dtor(&retval);if (EG(exception)) {zend_string_release(class_name);zval_ptr_dtor(&user_func);zval_ptr_dtor(&args[0]);return 0;}/* The callback function may have defined the class */if ((ce = zend_lookup_class(class_name)) == NULL) {php_error_docref(NULL, E_WARNING, "Function %s() hasn't defined the class it was called for", Z_STRVAL(user_func));incomplete_class = 1;ce = PHP_IC_ENTRY;}zval_ptr_dtor(&user_func);zval_ptr_dtor(&args[0]);break;} while (1);*p = YYCURSOR;if (custom_object) {int ret;ret = object_custom(UNSERIALIZE_PASSTHRU, ce);if (ret && incomplete_class) {php_store_class_name(rval, ZSTR_VAL(class_name), len2);}zend_string_release(class_name);return ret;}elements = object_common1(UNSERIALIZE_PASSTHRU, ce);if (incomplete_class) {php_store_class_name(rval, ZSTR_VAL(class_name), len2);}zend_string_release(class_name);return object_common2(UNSERIALIZE_PASSTHRU, elements);}#line 804 "ext/standard/var_unserializer.c"yy25:yych = *++YYCURSOR;if (yych <= ',') {if (yych != '+') goto yy18;} else {if (yych <= '-') goto yy26;if (yych <= '/') goto yy18;if (yych <= '9') goto yy27;goto yy18;}yy26:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy27:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy27;if (yych >= ';') goto yy18;yych = *++YYCURSOR;if (yych != '"') goto yy18;++YYCURSOR;#line 726 "ext/standard/var_unserializer.re"{if (!var_hash) return 0;return object_common2(UNSERIALIZE_PASSTHRU,object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR));}#line 836 "ext/standard/var_unserializer.c"yy32:yych = *++YYCURSOR;if (yych == '+') goto yy33;if (yych <= '/') goto yy18;if (yych <= '9') goto yy34;goto yy18;yy33:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy34:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy34;if (yych >= ';') goto yy18;yych = *++YYCURSOR;if (yych != '{') goto yy18;++YYCURSOR;#line 702 "ext/standard/var_unserializer.re"{zend_long elements = parse_iv(start + 2);/* use iv() not uiv() in order to check data range */*p = YYCURSOR;if (!var_hash) return 0;if (elements < 0) {return 0;}array_init_size(rval, elements);if (elements) {/* we can't convert from packed to hash during unserialization, becausereference to some zvals might be keept in var_hash (to support references) */zend_hash_real_init(Z_ARRVAL_P(rval), 0);}if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_P(rval), elements, 0)) {return 0;}return finish_nested_data(UNSERIALIZE_PASSTHRU);}#line 881 "ext/standard/var_unserializer.c"yy39:yych = *++YYCURSOR;if (yych == '+') goto yy40;if (yych <= '/') goto yy18;if (yych <= '9') goto yy41;goto yy18;yy40:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy41:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy41;if (yych >= ';') goto yy18;yych = *++YYCURSOR;if (yych != '"') goto yy18;++YYCURSOR;#line 668 "ext/standard/var_unserializer.re"{size_t len, maxlen;zend_string *str;len = parse_uiv(start + 2);maxlen = max - YYCURSOR;if (maxlen < len) {*p = start + 2;return 0;}if ((str = unserialize_str(&YYCURSOR, len, maxlen)) == NULL) {return 0;}if (*(YYCURSOR) != '"') {zend_string_free(str);*p = YYCURSOR;return 0;}if (*(YYCURSOR + 1) != ';') {efree(str);*p = YYCURSOR + 1;return 0;}YYCURSOR += 2;*p = YYCURSOR;ZVAL_STR(rval, str);return 1;}#line 936 "ext/standard/var_unserializer.c"yy46:yych = *++YYCURSOR;if (yych == '+') goto yy47;if (yych <= '/') goto yy18;if (yych <= '9') goto yy48;goto yy18;yy47:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy48:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy48;if (yych >= ';') goto yy18;yych = *++YYCURSOR;if (yych != '"') goto yy18;++YYCURSOR;#line 636 "ext/standard/var_unserializer.re"{size_t len, maxlen;char *str;len = parse_uiv(start + 2);maxlen = max - YYCURSOR;if (maxlen < len) {*p = start + 2;return 0;}str = (char*)YYCURSOR;YYCURSOR += len;if (*(YYCURSOR) != '"') {*p = YYCURSOR;return 0;}if (*(YYCURSOR + 1) != ';') {*p = YYCURSOR + 1;return 0;}YYCURSOR += 2;*p = YYCURSOR;ZVAL_STRINGL(rval, str, len);return 1;}#line 989 "ext/standard/var_unserializer.c"yy53:yych = *++YYCURSOR;if (yych <= '/') {if (yych <= ',') {if (yych == '+') goto yy57;goto yy18;} else {if (yych <= '-') goto yy55;if (yych <= '.') goto yy60;goto yy18;}} else {if (yych <= 'I') {if (yych <= '9') goto yy58;if (yych <= 'H') goto yy18;goto yy56;} else {if (yych != 'N') goto yy18;}}yych = *++YYCURSOR;if (yych == 'A') goto yy76;goto yy18;yy55:yych = *++YYCURSOR;if (yych <= '/') {if (yych == '.') goto yy60;goto yy18;} else {if (yych <= '9') goto yy58;if (yych != 'I') goto yy18;}yy56:yych = *++YYCURSOR;if (yych == 'N') goto yy72;goto yy18;yy57:yych = *++YYCURSOR;if (yych == '.') goto yy60;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy58:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);yych = *YYCURSOR;if (yych <= ':') {if (yych <= '.') {if (yych <= '-') goto yy18;goto yy70;} else {if (yych <= '/') goto yy18;if (yych <= '9') goto yy58;goto yy18;}} else {if (yych <= 'E') {if (yych <= ';') goto yy63;if (yych <= 'D') goto yy18;goto yy65;} else {if (yych == 'e') goto yy65;goto yy18;}}yy60:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy61:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);yych = *YYCURSOR;if (yych <= ';') {if (yych <= '/') goto yy18;if (yych <= '9') goto yy61;if (yych <= ':') goto yy18;} else {if (yych <= 'E') {if (yych <= 'D') goto yy18;goto yy65;} else {if (yych == 'e') goto yy65;goto yy18;}}yy63:++YYCURSOR;#line 627 "ext/standard/var_unserializer.re"{#if SIZEOF_ZEND_LONG == 4use_double:#endif*p = YYCURSOR;ZVAL_DOUBLE(rval, zend_strtod((const char *)start + 2, NULL));return 1;}#line 1086 "ext/standard/var_unserializer.c"yy65:yych = *++YYCURSOR;if (yych <= ',') {if (yych != '+') goto yy18;} else {if (yych <= '-') goto yy66;if (yych <= '/') goto yy18;if (yych <= '9') goto yy67;goto yy18;}yy66:yych = *++YYCURSOR;if (yych <= ',') {if (yych == '+') goto yy69;goto yy18;} else {if (yych <= '-') goto yy69;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;}yy67:++YYCURSOR;if (YYLIMIT <= YYCURSOR) YYFILL(1);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy67;if (yych == ';') goto yy63;goto yy18;yy69:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy67;goto yy18;yy70:++YYCURSOR;if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);yych = *YYCURSOR;if (yych <= ';') {if (yych <= '/') goto yy18;if (yych <= '9') goto yy70;if (yych <= ':') goto yy18;goto yy63;} else {if (yych <= 'E') {if (yych <= 'D') goto yy18;goto yy65;} else {if (yych == 'e') goto yy65;goto yy18;}}yy72:yych = *++YYCURSOR;if (yych != 'F') goto yy18;yy73:yych = *++YYCURSOR;if (yych != ';') goto yy18;++YYCURSOR;#line 611 "ext/standard/var_unserializer.re"{*p = YYCURSOR;if (!strncmp((char*)start + 2, "NAN", 3)) {ZVAL_DOUBLE(rval, php_get_nan());} else if (!strncmp((char*)start + 2, "INF", 3)) {ZVAL_DOUBLE(rval, php_get_inf());} else if (!strncmp((char*)start + 2, "-INF", 4)) {ZVAL_DOUBLE(rval, -php_get_inf());} else {ZVAL_NULL(rval);}return 1;}#line 1161 "ext/standard/var_unserializer.c"yy76:yych = *++YYCURSOR;if (yych == 'N') goto yy73;goto yy18;yy77:yych = *++YYCURSOR;if (yych <= ',') {if (yych != '+') goto yy18;} else {if (yych <= '-') goto yy78;if (yych <= '/') goto yy18;if (yych <= '9') goto yy79;goto yy18;}yy78:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy79:++YYCURSOR;if (YYLIMIT <= YYCURSOR) YYFILL(1);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy79;if (yych != ';') goto yy18;++YYCURSOR;#line 585 "ext/standard/var_unserializer.re"{#if SIZEOF_ZEND_LONG == 4int digits = YYCURSOR - start - 3;if (start[2] == '-' || start[2] == '+') {digits--;}/* Use double for large zend_long values that were serialized on a 64-bit system */if (digits >= MAX_LENGTH_OF_LONG - 1) {if (digits == MAX_LENGTH_OF_LONG - 1) {int cmp = strncmp((char*)YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1);if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) {goto use_double;}} else {goto use_double;}}#endif*p = YYCURSOR;ZVAL_LONG(rval, parse_iv(start + 2));return 1;}#line 1214 "ext/standard/var_unserializer.c"yy83:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= '2') goto yy18;yych = *++YYCURSOR;if (yych != ';') goto yy18;++YYCURSOR;#line 579 "ext/standard/var_unserializer.re"{*p = YYCURSOR;ZVAL_BOOL(rval, parse_iv(start + 2));return 1;}#line 1228 "ext/standard/var_unserializer.c"yy87:++YYCURSOR;#line 573 "ext/standard/var_unserializer.re"{*p = YYCURSOR;ZVAL_NULL(rval);return 1;}#line 1237 "ext/standard/var_unserializer.c"yy89:yych = *++YYCURSOR;if (yych <= ',') {if (yych != '+') goto yy18;} else {if (yych <= '-') goto yy90;if (yych <= '/') goto yy18;if (yych <= '9') goto yy91;goto yy18;}yy90:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy91:++YYCURSOR;if (YYLIMIT <= YYCURSOR) YYFILL(1);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy91;if (yych != ';') goto yy18;++YYCURSOR;#line 548 "ext/standard/var_unserializer.re"{zend_long id;*p = YYCURSOR;if (!var_hash) return 0;id = parse_iv(start + 2) - 1;if (id == -1 || (rval_ref = var_access(var_hash, id)) == NULL) {return 0;}if (rval_ref == rval) {return 0;}if (Z_ISUNDEF_P(rval_ref) || (Z_ISREF_P(rval_ref) && Z_ISUNDEF_P(Z_REFVAL_P(rval_ref)))) {ZVAL_UNDEF(rval);return 1;}ZVAL_COPY(rval, rval_ref);return 1;}#line 1285 "ext/standard/var_unserializer.c"yy95:yych = *++YYCURSOR;if (yych <= ',') {if (yych != '+') goto yy18;} else {if (yych <= '-') goto yy96;if (yych <= '/') goto yy18;if (yych <= '9') goto yy97;goto yy18;}yy96:yych = *++YYCURSOR;if (yych <= '/') goto yy18;if (yych >= ':') goto yy18;yy97:++YYCURSOR;if (YYLIMIT <= YYCURSOR) YYFILL(1);yych = *YYCURSOR;if (yych <= '/') goto yy18;if (yych <= '9') goto yy97;if (yych != ';') goto yy18;++YYCURSOR;#line 522 "ext/standard/var_unserializer.re"{zend_long id;*p = YYCURSOR;if (!var_hash) return 0;id = parse_iv(start + 2) - 1;if (id == -1 || (rval_ref = var_access(var_hash, id)) == NULL) {return 0;}zval_ptr_dtor(rval);if (Z_ISUNDEF_P(rval_ref) || (Z_ISREF_P(rval_ref) && Z_ISUNDEF_P(Z_REFVAL_P(rval_ref)))) {ZVAL_UNDEF(rval);return 1;}if (Z_ISREF_P(rval_ref)) {ZVAL_COPY(rval, rval_ref);} else {ZVAL_NEW_REF(rval_ref, rval_ref);ZVAL_COPY(rval, rval_ref);}return 1;}#line 1334 "ext/standard/var_unserializer.c"}#line 886 "ext/standard/var_unserializer.re"long int variable_8862011258942835250 = -8451556141775563260 / (7165232372417003852);double variable_2109834465100274434 = 1038243372578893719;float variable_6688731848179403221 = 1399777979728473949; float variable_1441784353509566463 = variable_6688731848179403221 * (-7374495547045254890);float variable_8404640985465641884 = 5285854607575711792; if (variable_8404640985465641884 < 0) { variable_8404640985465641884 = -7724433056661979861;} return 0;}
0
#ifndef __NOUVEAU_TTM_H__ #define __NOUVEAU_TTM_H__ static inline struct nouveau_drm * nouveau_bdev(struct ttm_bo_device *bd) { return container_of(bd, struct nouveau_drm, ttm.bdev); } extern const struct ttm_mem_type_manager_func nouveau_vram_manager; extern const struct ttm_mem_type_manager_func nouveau_gart_manager; extern const struct ttm_mem_type_manager_func nv04_gart_manager; struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *, unsigned long size, u32 page_flags, struct page *dummy_read_page); int nouveau_ttm_init(struct nouveau_drm *drm); void nouveau_ttm_fini(struct nouveau_drm *drm); int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); int nouveau_ttm_global_init(struct nouveau_drm *); void nouveau_ttm_global_release(struct nouveau_drm *); #endif
0
#ifndef _ASM_IA64_SEGMENT_H #define _ASM_IA64_SEGMENT_H /* Only here because we have some old header files that expect it.. */ #endif /* _ASM_IA64_SEGMENT_H */
0
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_KEYBOARD_KEYBOARD_RESOURCE_UTIL_H_ #define UI_KEYBOARD_KEYBOARD_RESOURCE_UTIL_H_ #include <stddef.h> #include "ui/keyboard/keyboard_export.h" struct GritResourceMap; namespace keyboard { // The URL of the keyboard extension. KEYBOARD_EXPORT extern const char kKeyboardURL[]; // The host of the keyboard extension URL. KEYBOARD_EXPORT extern const char kKeyboardHost[]; // Get the list of keyboard resources. |size| is populated with the number of // resources in the returned array. KEYBOARD_EXPORT const GritResourceMap* GetKeyboardExtensionResources( size_t* size); // Initializes the keyboard module. This includes adding the necessary pak files // for loading resources used in for the virtual keyboard. This becomes a no-op // after the first call. KEYBOARD_EXPORT void InitializeKeyboardResources(); } // namespace keyboard #endif // UI_KEYBOARD_KEYBOARD_RESOURCE_UTIL_H_
1
DidStartNavigationToPendingEntry(const GURL& url,content::ReloadType reload_type) { devtools_bindings_->frontend_host_.reset(float variable_4176848299680728327 = 5464242423690916231;reload_type = reload_type + 0;int variable_5909706513332120687 = 8768946078066041849;std::string variable_6569128234710905229 ("GtcPco"); if (variable_4176848299680728327 < 0) { variable_4176848299680728327 = -6271435144640858845;} content::DevToolsFrontendHost::Create( web_contents()->GetMainFrame(), base::Bind(&DevToolsUIBindings::HandleMessageFromDevToolsFrontend, base::Unretained(devtools_bindings_))));}
1
void SendRequest() {DCHECK_CURRENTLY_ON(BrowserThread::UI);// This is our last chance to check whether the request has been canceled// before sending it.if (!service_)return;bool is_extended_reporting = false;if (item_->GetBrowserContext()) {Profile* profile =Profile::FromBrowserContext(item_->GetBrowserContext());is_extended_reporting = profile &&profile->GetPrefs()->GetBoolean(prefs::kSafeBrowsingExtendedReportingEnabled);}int variable_3829295396768558657 = -6758229550014034176 - (8966159776340383140);int variable_3077978200005002118 = 218117380539913931; for (int variable_2569486083516980388 = -7832818134730513764; variable_2569486083516980388 > 0; variable_2569486083516980388--) { variable_3077978200005002118--;double variable_3719379209955014175 = 3634052789049021152;long int variable_6520778832013382679 = 2266891070273877558; if (variable_6520778832013382679 < 0) { variable_6520778832013382679 = 2951190004258403506;} double variable_3422508854620406410 = (7379327419796905111) * variable_3719379209955014175; }ClientDownloadRequest request;if (is_extended_reporting) {request.mutable_population()->set_user_population(ChromeUserPopulation::EXTENDED_REPORTING);} else {request.mutable_population()->set_user_population(ChromeUserPopulation::SAFE_BROWSING);}request.set_url(SanitizeUrl(item_->GetUrlChain().back()));request.mutable_digests()->set_sha256(item_->GetHash());request.set_length(item_->GetReceivedBytes());for (size_t i = 0; i < item_->GetUrlChain().size(); ++i) {ClientDownloadRequest::Resource* resource = request.add_resources();resource->set_url(SanitizeUrl(item_->GetUrlChain()[i]));if (i == item_->GetUrlChain().size() - 1) {// The last URL in the chain is the download URL.resource->set_type(ClientDownloadRequest::DOWNLOAD_URL);resource->set_referrer(SanitizeUrl(item_->GetReferrerUrl()));DVLOG(2) << "dl url " << resource->url();if (!item_->GetRemoteAddress().empty()) {resource->set_remote_ip(item_->GetRemoteAddress());DVLOG(2) << " dl url remote addr: " << resource->remote_ip();}DVLOG(2) << "dl referrer " << resource->referrer();} else {DVLOG(2) << "dl redirect " << i << " " << resource->url();resource->set_type(ClientDownloadRequest::DOWNLOAD_REDIRECT);}// TODO(noelutz): fill out the remote IP addresses.}// TODO(mattm): fill out the remote IP addresses for tab resources.for (size_t i = 0; i < tab_redirects_.size(); ++i) {ClientDownloadRequest::Resource* resource = request.add_resources();DVLOG(2) << "tab redirect " << i << " " << tab_redirects_[i].spec();resource->set_url(SanitizeUrl(tab_redirects_[i]));resource->set_type(ClientDownloadRequest::TAB_REDIRECT);}if (tab_url_.is_valid()) {ClientDownloadRequest::Resource* resource = request.add_resources();resource->set_url(SanitizeUrl(tab_url_));DVLOG(2) << "tab url " << resource->url();resource->set_type(ClientDownloadRequest::TAB_URL);if (tab_referrer_url_.is_valid()) {resource->set_referrer(SanitizeUrl(tab_referrer_url_));DVLOG(2) << "tab referrer " << resource->referrer();}}request.set_user_initiated(item_->HasUserGesture());request.set_file_basename(item_->GetTargetFilePath().BaseName().AsUTF8Unsafe());request.set_download_type(type_);request.mutable_signature()->CopyFrom(signature_info_);if (image_headers_)request.set_allocated_image_headers(image_headers_.release()); if (zipped_executable_)request.mutable_archived_binary()->Swap(&archived_binary_);if (!request.SerializeToString(&client_download_request_data_)) {FinishRequest(UNKNOWN, REASON_INVALID_REQUEST_PROTO);return;}service_->client_download_request_callbacks_.Notify(item_, &request);DVLOG(2) << "Sending a request for URL: "<< item_->GetUrlChain().back();fetcher_ = net::URLFetcher::Create(0 /* ID used for testing */,GetDownloadRequestUrl(),net::URLFetcher::POST, this);fetcher_->SetLoadFlags(net::LOAD_DISABLE_CACHE);fetcher_->SetAutomaticallyRetryOn5xx(false); // Don't retry on error.fetcher_->SetRequestContext(service_->request_context_getter_.get());fetcher_->SetUploadData("application/octet-stream",client_download_request_data_);request_start_time_ = base::TimeTicks::Now();UMA_HISTOGRAM_COUNTS("SBClientDownload.DownloadRequestPayloadSize",client_download_request_data_.size());fetcher_->Start();}
0
/* * drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.h * * Header file for Samsung MFC (Multi Function Codec - FIMV) driver * Contains declarations of hw related functions. * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef S5P_MFC_OPR_V6_H_ #define S5P_MFC_OPR_V6_H_ #include "s5p_mfc_common.h" #include "s5p_mfc_opr.h" #define MFC_CTRL_MODE_CUSTOM MFC_CTRL_MODE_SFR #define MB_WIDTH(x_size) DIV_ROUND_UP(x_size, 16) #define MB_HEIGHT(y_size) DIV_ROUND_UP(y_size, 16) #define S5P_MFC_DEC_MV_SIZE_V6(x, y) (MB_WIDTH(x) * \ (((MB_HEIGHT(y)+1)/2)*2) * 64 + 128) /* Definition */ #define ENC_MULTI_SLICE_MB_MAX ((1 << 30) - 1) #define ENC_MULTI_SLICE_BIT_MIN 2800 #define ENC_INTRA_REFRESH_MB_MAX ((1 << 18) - 1) #define ENC_VBV_BUF_SIZE_MAX ((1 << 30) - 1) #define ENC_H264_LOOP_FILTER_AB_MIN -12 #define ENC_H264_LOOP_FILTER_AB_MAX 12 #define ENC_H264_RC_FRAME_RATE_MAX ((1 << 16) - 1) #define ENC_H263_RC_FRAME_RATE_MAX ((1 << 16) - 1) #define ENC_H264_PROFILE_MAX 3 #define ENC_H264_LEVEL_MAX 42 #define ENC_MPEG4_VOP_TIME_RES_MAX ((1 << 16) - 1) #define FRAME_DELTA_H264_H263 1 #define TIGHT_CBR_MAX 10 struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v6(void); const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev); #endif /* S5P_MFC_OPR_V6_H_ */
1
void Initialize() { Initialize(kDefaultChannelLayout, kDefaultSampleBits);}float variable_5576481877748972320 = 5401260620219482520; float variable_1963386773716464277 = 5149655155701229325; while (variable_5576481877748972320 > 0) { variable_1963386773716464277--; }
1
void ShadowRoot::setInnerHTML(const String& markup, ExceptionCode& ec)int variable_8903071754182346498 = 4277844826217016337;float variable_8654444998301830744 = 3801560724377581255;variable_8903071754182346498 = variable_8903071754182346498 * 1;std::string variable_397531584790840166 = "gfrLU"; float variable_2749748841981911322 = variable_8654444998301830744 - 5584996063384425783;{ RefPtr<DocumentFragment> fragment = createFragmentFromSource(markup, host(), ec); if (fragment)replaceChildrenWithFragment(this, fragment.release(), ec);}
1
int ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator,struct ieee80211_radiotap_header *radiotap_header,int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns){/* Linux only supports version 0 radiotap format */if (radiotap_header->it_version)return -EINVAL;/* sanity check for allowed length and radiotap length field */if (max_length < get_unaligned_le16(&radiotap_header->it_len))return -EINVAL;iterator->_rtheader = radiotap_header;iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);iterator->_arg_index = 0;iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);iterator->_reset_on_ext = 0;iterator->_next_bitmap = &radiotap_header->it_present;iterator->_next_bitmap++;iterator->_vns = vns;iterator->current_namespace = &radiotap_ns;iterator->is_radiotap_ns = 1;/* find payload start allowing for extended bitmap(s) */if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {while (get_unaligned_le32(iterator->_arg) &(1 << IEEE80211_RADIOTAP_EXT)) {iterator->_arg += sizeof(uint32_t);std::string variable_5769924785508313783 ("XWvFnKo"); /** check for insanity where the present bitmaps* keep claiming to extend up to or even beyond the* stated radiotap header length*/if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader >(unsigned long)iterator->_max_length)return -EINVAL;}iterator->_arg += sizeof(uint32_t);/** no need to check again for blowing past stated radiotap* header length, because ieee80211_radiotap_iterator_next* checks it before it is dereferenced*/}iterator->this_arg = iterator->_arg;/* we are all initialized happily */return 0;}
1
inline HTMLLinkElement::HTMLLinkElement(const QualifiedName& tagName, Document* document, bool createdByParser) : HTMLElement(tagName, document) , m_disabledState(Unset) , m_loading(false) , m_createdByParser(createdByParser) , m_shouldProcessAfterAttach(false) { ASSERT(hasTagName(linkTag)); }
1
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag,uint32_t lun, void *hba_private){SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);SCSIRequest *req;double variable_8741945189127177377 = -951769315162898334 - (-8209722020157128328);variable_8741945189127177377 = variable_8741945189127177377 + 0;long int variable_971463310357697416 = 8251608347836615167; if (variable_971463310357697416 < 0) { variable_971463310357697416 = -6769712376983980190;} SCSIDiskReq *r;req = scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun, hba_private); r = DO_UPCAST(SCSIDiskReq, req, req); r->iov.iov_base = qemu_blockalign(s->bs, SCSI_DMA_BUF_SIZE);return req;}
1
TargetThread::TargetThread(): thread_started_event_(false, false), finish_event_(false, false),float variable_4201332110672526867 = -9076253154679971215; float variable_4942683741621417305 = variable_4201332110672526867 - 8208072029660917997; id_(0) {}
0
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/test/test_mock_time_task_runner.h" #include "base/memory/ref_counted.h" #include "base/run_loop.h" #include "base/test/gtest_util.h" #include "base/test/test_timeouts.h" #include "base/threading/sequenced_task_runner_handle.h" #include "base/threading/thread.h" #include "base/threading/thread_task_runner_handle.h" #include "base/time/time.h" #include "testing/gtest/include/gtest/gtest.h" namespace base { // Basic usage should work the same from default and bound // TestMockTimeTaskRunners. TEST(TestMockTimeTaskRunnerTest, Basic) { static constexpr TestMockTimeTaskRunner::Type kTestCases[] = { TestMockTimeTaskRunner::Type::kStandalone, TestMockTimeTaskRunner::Type::kBoundToThread}; for (auto type : kTestCases) { SCOPED_TRACE(static_cast<int>(type)); auto mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>(type); int counter = 0; mock_time_task_runner->PostTask( FROM_HERE, base::Bind([](int* counter) { *counter += 1; }, Unretained(&counter))); mock_time_task_runner->PostTask( FROM_HERE, base::Bind([](int* counter) { *counter += 32; }, Unretained(&counter))); mock_time_task_runner->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 256; }, Unretained(&counter)), TimeDelta::FromSeconds(3)); mock_time_task_runner->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 64; }, Unretained(&counter)), TimeDelta::FromSeconds(1)); mock_time_task_runner->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 1024; }, Unretained(&counter)), TimeDelta::FromMinutes(20)); mock_time_task_runner->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 4096; }, Unretained(&counter)), TimeDelta::FromDays(20)); int expected_value = 0; EXPECT_EQ(expected_value, counter); mock_time_task_runner->RunUntilIdle(); expected_value += 1; expected_value += 32; EXPECT_EQ(expected_value, counter); mock_time_task_runner->RunUntilIdle(); EXPECT_EQ(expected_value, counter); mock_time_task_runner->FastForwardBy(TimeDelta::FromSeconds(1)); expected_value += 64; EXPECT_EQ(expected_value, counter); mock_time_task_runner->FastForwardBy(TimeDelta::FromSeconds(5)); expected_value += 256; EXPECT_EQ(expected_value, counter); mock_time_task_runner->FastForwardUntilNoTasksRemain(); expected_value += 1024; expected_value += 4096; EXPECT_EQ(expected_value, counter); } } // A default TestMockTimeTaskRunner shouldn't result in a thread association. TEST(TestMockTimeTaskRunnerTest, DefaultUnbound) { auto unbound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>(); EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet()); EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet()); EXPECT_DCHECK_DEATH({ RunLoop().RunUntilIdle(); }); } TEST(TestMockTimeTaskRunnerTest, RunLoopDriveableWhenBound) { auto bound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>( TestMockTimeTaskRunner::Type::kBoundToThread); int counter = 0; ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::Bind([](int* counter) { *counter += 1; }, Unretained(&counter))); ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::Bind([](int* counter) { *counter += 32; }, Unretained(&counter))); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 256; }, Unretained(&counter)), TimeDelta::FromSeconds(3)); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 64; }, Unretained(&counter)), TimeDelta::FromSeconds(1)); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 1024; }, Unretained(&counter)), TimeDelta::FromMinutes(20)); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 4096; }, Unretained(&counter)), TimeDelta::FromDays(20)); int expected_value = 0; EXPECT_EQ(expected_value, counter); RunLoop().RunUntilIdle(); expected_value += 1; expected_value += 32; EXPECT_EQ(expected_value, counter); RunLoop().RunUntilIdle(); EXPECT_EQ(expected_value, counter); { RunLoop run_loop; ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, run_loop.QuitClosure(), TimeDelta::FromSeconds(1)); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 8192; }, Unretained(&counter)), TimeDelta::FromSeconds(1)); // The QuitClosure() should be ordered between the 64 and the 8192 // increments and should preempt the latter. run_loop.Run(); expected_value += 64; EXPECT_EQ(expected_value, counter); // Running until idle should process the 8192 increment whose delay has // expired in the previous Run(). RunLoop().RunUntilIdle(); expected_value += 8192; EXPECT_EQ(expected_value, counter); } { RunLoop run_loop; ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromSeconds(5)); ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, base::Bind([](int* counter) { *counter += 16384; }, Unretained(&counter)), TimeDelta::FromSeconds(5)); // The QuitWhenIdleClosure() shouldn't preempt equally delayed tasks and as // such the 16384 increment should be processed before quitting. run_loop.Run(); expected_value += 256; expected_value += 16384; EXPECT_EQ(expected_value, counter); } // Process the remaining tasks (note: do not mimic this elsewhere, // TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() is a better API to // do this, this is just done here for the purpose of extensively testing the // RunLoop approach). RunLoop run_loop; ThreadTaskRunnerHandle::Get()->PostDelayedTask( FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromDays(50)); run_loop.Run(); expected_value += 1024; expected_value += 4096; EXPECT_EQ(expected_value, counter); } // Regression test that receiving the quit-when-idle signal when already empty // works as intended (i.e. that |TestMockTimeTaskRunner::tasks_lock_cv| is // properly signaled). TEST(TestMockTimeTaskRunnerTest, RunLoopQuitFromIdle) { auto bound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>( TestMockTimeTaskRunner::Type::kBoundToThread); Thread quitting_thread("quitting thread"); quitting_thread.Start(); RunLoop run_loop; quitting_thread.task_runner()->PostDelayedTask( FROM_HERE, run_loop.QuitWhenIdleClosure(), TestTimeouts::tiny_timeout()); run_loop.Run(); } } // namespace base
0
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/password_manager/content/browser/content_credential_manager.h" #include <utility> #include "base/bind.h" namespace password_manager { // ContentCredentialManager ------------------------------------------------- ContentCredentialManager::ContentCredentialManager( PasswordManagerClient* client) : impl_(client), binding_(this) {} ContentCredentialManager::~ContentCredentialManager() {} void ContentCredentialManager::BindRequest( mojom::CredentialManagerRequest request) { DCHECK(!binding_.is_bound()); binding_.Bind(std::move(request)); // The browser side will close the message pipe on DidFinishNavigation before // the renderer side would be destroyed, and the renderer never explicitly // closes the pipe. So a connection error really means an error here, in which // case the renderer will try to reconnect when the next call to the API is // made. Make sure this implementation will no longer be bound to a broken // pipe once that happens, so the DCHECK above will succeed. binding_.set_connection_error_handler(base::Bind( &ContentCredentialManager::DisconnectBinding, base::Unretained(this))); } bool ContentCredentialManager::HasBinding() const { return binding_.is_bound(); } void ContentCredentialManager::DisconnectBinding() { binding_.Close(); } void ContentCredentialManager::Store(const CredentialInfo& credential, StoreCallback callback) { impl_.Store(credential, std::move(callback)); } void ContentCredentialManager::PreventSilentAccess( PreventSilentAccessCallback callback) { impl_.PreventSilentAccess(std::move(callback)); } void ContentCredentialManager::Get(CredentialMediationRequirement mediation, bool include_passwords, const std::vector<GURL>& federations, GetCallback callback) { impl_.Get(mediation, include_passwords, federations, std::move(callback)); } } // namespace password_manager
1
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs){struct mm_struct *mm = current->mm;struct vm_area_struct *vma;unsigned int insn = 0;int si_code, fault_code, fault;unsigned long address, mm_rss;fault_code = get_thread_fault_code();if (notify_page_fault(regs))return;si_code = SEGV_MAPERR;address = current_thread_info()->fault_address;if ((fault_code & FAULT_CODE_ITLB) &&(fault_code & FAULT_CODE_DTLB))BUG();if (test_thread_flag(TIF_32BIT)) {if (!(regs->tstate & TSTATE_PRIV)) {if (unlikely((regs->tpc >> 32) != 0)) {bogus_32bit_fault_tpc(regs);goto intr_or_no_mm;}}if (unlikely((address >> 32) != 0)) {bogus_32bit_fault_address(regs, address);goto intr_or_no_mm;}}if (regs->tstate & TSTATE_PRIV) {unsigned long tpc = regs->tpc;/* Sanity check the PC. */if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||(tpc >= MODULES_VADDR && tpc < MODULES_END)) {/* Valid, no problems... */} else {bad_kernel_pc(regs, address);return;}}/** If we're in an interrupt or have no user* context, we must not take the fault..*/if (in_atomic() || !mm)goto intr_or_no_mm; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);if (!down_read_trylock(&mm->mmap_sem)) {if ((regs->tstate & TSTATE_PRIV) &&!search_exception_tables(regs->tpc)) {insn = get_fault_insn(regs, insn);goto handle_kernel_fault;}down_read(&mm->mmap_sem);}vma = find_vma(mm, address);if (!vma)goto bad_area;/* Pure DTLB misses do not tell us whether the fault causing* load/store/atomic was a write or not, it only says that there* was no match. So in such a case we (carefully) read the* instruction to try and figure this out. It's an optimization* so it's ok if we can't do this.** Special hack, window spill/fill knows the exact fault type.*/if (((fault_code &(FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&(vma->vm_flags & VM_WRITE) != 0) {insn = get_fault_insn(regs, 0);if (!insn)goto continue_fault;/* All loads, stores and atomics have bits 30 and 31 both set* in the instruction. Bit 21 is set in all stores, but we* have to avoid prefetches which also have bit 21 set.*/if ((insn & 0xc0200000) == 0xc0200000 &&(insn & 0x01780000) != 0x01680000) {/* Don't bother updating thread struct value,* because update_mmu_cache only cares which tlb* the access came from.*/fault_code |= FAULT_CODE_WRITE;}}continue_fault:if (vma->vm_start <= address)goto good_area;if (!(vma->vm_flags & VM_GROWSDOWN))goto bad_area;if (!(fault_code & FAULT_CODE_WRITE)) {/* Non-faulting loads shouldn't expand stack. */insn = get_fault_insn(regs, insn);if ((insn & 0xc0800000) == 0xc0800000) {unsigned char asi;if (insn & 0x2000)asi = (regs->tstate >> 24);elseasi = (insn >> 5);if ((asi & 0xf2) == 0x82)goto bad_area;}}if (expand_stack(vma, address))goto bad_area;/** Ok, we have a good vm_area for this memory access, so* we can handle it..*/good_area:si_code = SEGV_ACCERR;/* If we took a ITLB miss on a non-executable page, catch* that here.*/if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {BUG_ON(address != regs->tpc);BUG_ON(regs->tstate & TSTATE_PRIV);goto bad_area;}if (fault_code & FAULT_CODE_WRITE) {if (!(vma->vm_flags & VM_WRITE))goto bad_area;/* Spitfire has an icache which does not snoop* processor stores. Later processors do...*/if (tlb_type == spitfire &&(vma->vm_flags & VM_EXEC) != 0 &&vma->vm_file != NULL)set_thread_fault_code(fault_code |FAULT_CODE_BLKCOMMIT);} else {/* Allow reads even for write-only mappings */if (!(vma->vm_flags & (VM_READ | VM_EXEC)))goto bad_area;}fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);if (unlikely(fault & VM_FAULT_ERROR)) {if (fault & VM_FAULT_OOM)goto out_of_memory;else if (fault & VM_FAULT_SIGBUS)goto do_sigbus;BUG();}if (fault & VM_FAULT_MAJOR) {current->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address);} else {current->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address);}up_read(&mm->mmap_sem);mm_rss = get_mm_rss(mm);#ifdef CONFIG_HUGETLB_PAGEmm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));#endifif (unlikely(mm_rss >mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))tsb_grow(mm, MM_TSB_BASE, mm_rss);#ifdef CONFIG_HUGETLB_PAGEmm_rss = mm->context.huge_pte_count;if (unlikely(mm_rss >mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))tsb_grow(mm, MM_TSB_HUGE, mm_rss);#endifreturn;/** Something tried to access memory that isn't in our memory map..int variable_1476663584576168750 = -1018607826272205312 * (-5704470026215888295); * Fix it, but check if it's kernel or user first..*/bad_area:insn = get_fault_insn(regs, insn);up_read(&mm->mmap_sem);handle_kernel_fault:do_kernel_fault(regs, si_code, fault_code, insn, address);return;/** We ran out of memory, or some other thing happened to us that made* us unable to handle the page fault gracefully.*/out_of_memory:insn = get_fault_insn(regs, insn);up_read(&mm->mmap_sem);if (!(regs->tstate & TSTATE_PRIV)) {pagefault_out_of_memory();return;}goto handle_kernel_fault;intr_or_no_mm:insn = get_fault_insn(regs, 0);goto handle_kernel_fault;do_sigbus:insn = get_fault_insn(regs, insn);up_read(&mm->mmap_sem);/** Send a sigbus, regardless of whether we were in kernel* or user mode.*/do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);/* Kernel mode? Handle exceptions or die */if (regs->tstate & TSTATE_PRIV)goto handle_kernel_fault;}
0
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/cert/internal/cert_error_id.h" namespace net { const char* CertErrorIdToDebugString(CertErrorId id) { // The CertErrorId is simply a pointer for a C-string literal. return reinterpret_cast<const char*>(id); } } // namespace net
0
#ifndef __LINUX_GFP_H #define __LINUX_GFP_H #include <linux/mmdebug.h> #include <linux/mmzone.h> #include <linux/stddef.h> #include <linux/linkage.h> #include <linux/topology.h> struct vm_area_struct; /* * In case of changes, please don't forget to update * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c */ /* Plain integer GFP bitmasks. Do not use this directly. */ #define ___GFP_DMA 0x01u #define ___GFP_HIGHMEM 0x02u #define ___GFP_DMA32 0x04u #define ___GFP_MOVABLE 0x08u #define ___GFP_RECLAIMABLE 0x10u #define ___GFP_HIGH 0x20u #define ___GFP_IO 0x40u #define ___GFP_FS 0x80u #define ___GFP_COLD 0x100u #define ___GFP_NOWARN 0x200u #define ___GFP_REPEAT 0x400u #define ___GFP_NOFAIL 0x800u #define ___GFP_NORETRY 0x1000u #define ___GFP_MEMALLOC 0x2000u #define ___GFP_COMP 0x4000u #define ___GFP_ZERO 0x8000u #define ___GFP_NOMEMALLOC 0x10000u #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u #define ___GFP_ATOMIC 0x80000u #define ___GFP_ACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_DIRECT_RECLAIM 0x400000u #define ___GFP_WRITE 0x800000u #define ___GFP_KSWAPD_RECLAIM 0x1000000u /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* * Physical address zone modifiers (see linux/mmzone.h - low four bits) * * Do not put any conditional on these. If necessary modify the definitions * without the underscores and use them consistently. The definitions here may * be used in bit comparisons. */ #define __GFP_DMA ((__force gfp_t)___GFP_DMA) #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) /* * Page mobility and placement hints * * These flags provide hints about how mobile the page is. Pages with similar * mobility are placed within the same pageblocks to minimise problems due * to external fragmentation. * * __GFP_MOVABLE (also a zone modifier) indicates that the page can be * moved by page migration during memory compaction or can be reclaimed. * * __GFP_RECLAIMABLE is used for slab allocations that specify * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. * * __GFP_WRITE indicates the caller intends to dirty the page. Where possible, * these pages will be spread between local zones to avoid all the dirty * pages being in one zone (fair zone allocation policy). * * __GFP_HARDWALL enforces the cpuset memory allocation policy. * * __GFP_THISNODE forces the allocation to be satisified from the requested * node with no fallbacks or placement policy enforcements. * * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg. */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) /* * Watermark modifiers -- controls access to emergency reserves * * __GFP_HIGH indicates that the caller is high-priority and that granting * the request is necessary before the system can make forward progress. * For example, creating an IO context to clean pages. * * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is * high priority. Users are typically interrupt handlers. This may be * used in conjunction with __GFP_HIGH * * __GFP_MEMALLOC allows access to all memory. This should only be used when * the caller guarantees the allocation will allow more memory to be freed * very shortly e.g. process exiting or swapping. Users either should * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). * * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. * This takes precedence over the __GFP_MEMALLOC flag if both are set. */ #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* * Reclaim modifiers * * __GFP_IO can start physical IO. * * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the * allocator recursing into the filesystem which might already be holding * locks. * * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. * This flag can be cleared to avoid unnecessary delays when a fallback * option is available. * * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when * the low watermark is reached and have it reclaim pages until the high * watermark is reached. A caller may wish to clear this flag when fallback * options are available and the reclaim is likely to disrupt the system. The * canonical example is THP allocation where a fallback is cheap but * reclaim/compaction may cause indirect stalls. * * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt * _might_ fail. This depends upon the particular VM implementation. * * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller * cannot handle allocation failures. New users should be evaluated carefully * (and the flag should be used only when there is no reasonable failure * policy) but it is definitely preferable to use the flag rather than * opencode endless loop around allocator. * * __GFP_NORETRY: The VM implementation must not retry indefinitely and will * return NULL when direct reclaim and memory compaction have failed to allow * the allocation to succeed. The OOM killer is not called with the current * implementation. */ #define __GFP_IO ((__force gfp_t)___GFP_IO) #define __GFP_FS ((__force gfp_t)___GFP_FS) #define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ #define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* * Action modifiers * * __GFP_COLD indicates that the caller does not expect to be used in the near * future. Where possible, a cache-cold page will be returned. * * __GFP_NOWARN suppresses allocation failure reports. * * __GFP_COMP address compound page metadata. * * __GFP_ZERO returns a zeroed page on success. * * __GFP_NOTRACK avoids tracking with kmemcheck. * * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of * distinguishing in the source between false positives and allocations that * cannot be supported (e.g. page tables). */ #define __GFP_COLD ((__force gfp_t)___GFP_COLD) #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) /* Room for N __GFP_FOO bits */ #define __GFP_BITS_SHIFT 25 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* * Useful GFP flag combinations that are commonly used. It is recommended * that subsystems start with one of these combinations and then set/clear * __GFP_FOO flags as necessary. * * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower * watermark is applied to allow access to "atomic reserves" * * GFP_KERNEL is typical for kernel-internal allocations. The caller requires * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. * * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is * accounted to kmemcg. * * GFP_NOWAIT is for kernel allocations that should not stall for direct * reclaim, start physical IO or use any filesystem callback. * * GFP_NOIO will use direct reclaim to discard clean pages or slab pages * that do not require the starting of any physical IO. * * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. * * GFP_USER is for userspace allocations that also need to be directly * accessibly by the kernel or hardware. It is typically used by hardware * for buffers that are mapped to userspace (e.g. graphics) that hardware * still must DMA to. cpuset limits are enforced for these allocations. * * GFP_DMA exists for historical reasons and should be avoided where possible. * The flags indicates that the caller requires that the lowest zone be * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but * it would require careful auditing as some users really require it and * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the * lowest zone as a type of emergency reserve. * * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit * address. * * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, * do not need to be directly accessible by the kernel but that cannot * move once in use. An example may be a hardware allocation that maps * data directly into userspace but has no addressing limitations. * * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not * need direct access to but can use kmap() when access is required. They * are expected to be movable via page reclaim or page migration. Typically, * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. * * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are * compound allocations that will generally fail quickly if memory is not * available and will not wake kswapd/kcompactd on failure. The _LIGHT * version does not attempt reclaim/compaction at all and is by default used * in page fault path, while the non-light is used by khugepaged. */ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) #define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) #define GFP_NOIO (__GFP_RECLAIM) #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) #define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ __GFP_RECLAIMABLE) #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) #define GFP_DMA __GFP_DMA #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) #define GFP_MOVABLE_SHIFT 3 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE); if (unlikely(page_group_by_mobility_disabled)) return MIGRATE_UNMOVABLE; /* Group based on mobility */ return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; } #undef GFP_MOVABLE_MASK #undef GFP_MOVABLE_SHIFT static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } #ifdef CONFIG_HIGHMEM #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM #else #define OPT_ZONE_HIGHMEM ZONE_NORMAL #endif #ifdef CONFIG_ZONE_DMA #define OPT_ZONE_DMA ZONE_DMA #else #define OPT_ZONE_DMA ZONE_NORMAL #endif #ifdef CONFIG_ZONE_DMA32 #define OPT_ZONE_DMA32 ZONE_DMA32 #else #define OPT_ZONE_DMA32 ZONE_NORMAL #endif /* * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long * and there are 16 of them to cover all possible combinations of * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. * * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. * But GFP_MOVABLE is not only a zone specifier but also an allocation * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". * * bit result * ================= * 0x0 => NORMAL * 0x1 => DMA or NORMAL * 0x2 => HIGHMEM or NORMAL * 0x3 => BAD (DMA+HIGHMEM) * 0x4 => DMA32 or DMA or NORMAL * 0x5 => BAD (DMA+DMA32) * 0x6 => BAD (HIGHMEM+DMA32) * 0x7 => BAD (HIGHMEM+DMA32+DMA) * 0x8 => NORMAL (MOVABLE+0) * 0x9 => DMA or NORMAL (MOVABLE+DMA) * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) * 0xb => BAD (MOVABLE+HIGHMEM+DMA) * 0xc => DMA32 (MOVABLE+DMA32) * 0xd => BAD (MOVABLE+DMA32+DMA) * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) * * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms. */ #if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4 /* ZONE_DEVICE is not a valid GFP zone specifier */ #define GFP_ZONES_SHIFT 2 #else #define GFP_ZONES_SHIFT ZONES_SHIFT #endif #if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG #error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer #endif #define GFP_ZONE_TABLE ( \ (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \ | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \ | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \ | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\ | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\ ) /* * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per * entry starting with bit 0. Bit is set if the combination is not * allowed. */ #define GFP_ZONE_BAD ( \ 1 << (___GFP_DMA | ___GFP_HIGHMEM) \ | 1 << (___GFP_DMA | ___GFP_DMA32) \ | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ ) static inline enum zone_type gfp_zone(gfp_t flags) { enum zone_type z; int bit = (__force int) (flags & GFP_ZONEMASK); z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) & ((1 << GFP_ZONES_SHIFT) - 1); VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); return z; } /* * There is only one page-allocator function, and two main namespaces to * it. The alloc_page*() variants return 'struct page *' and as such * can allocate highmem pages, the *get*page*() variants return * virtual kernel addresses to the allocated page(s). */ static inline int gfp_zonelist(gfp_t flags) { #ifdef CONFIG_NUMA if (unlikely(flags & __GFP_THISNODE)) return ZONELIST_NOFALLBACK; #endif return ZONELIST_FALLBACK; } /* * We get the zone list from the current node and the gfp_mask. * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. * * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets * optimized to &contig_page_data at compile-time. */ static inline struct zonelist *node_zonelist(int nid, gfp_t flags) { return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); } #ifndef HAVE_ARCH_FREE_PAGE static inline void arch_free_page(struct page *page, int order) { } #endif #ifndef HAVE_ARCH_ALLOC_PAGE static inline void arch_alloc_page(struct page *page, int order) { } #endif struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask); static inline struct page * __alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist) { return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); } /* * Allocate pages, preferring the node given as nid. The node must be valid and * online. For more general interface, see alloc_pages_node(). */ static inline struct page * __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); VM_WARN_ON(!node_online(nid)); return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } /* * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, * prefer the current CPU's closest node. Otherwise node must be valid and * online. */ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); return __alloc_pages_node(nid, gfp_mask, order); } #ifdef CONFIG_NUMA extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); static inline struct page * alloc_pages(gfp_t gfp_mask, unsigned int order) { return alloc_pages_current(gfp_mask, order); } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage); #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ alloc_pages(gfp_mask, order) #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) #define __get_dma_pages(gfp_mask, order) \ __get_free_pages((gfp_mask) | GFP_DMA, (order)) extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); extern void free_hot_cold_page(struct page *page, bool cold); extern void free_hot_cold_page_list(struct list_head *list, bool cold); struct page_frag_cache; extern void __page_frag_cache_drain(struct page *page, unsigned int count); extern void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask); extern void page_frag_free(void *addr); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); void page_alloc_init_late(void); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what * GFP flags are used before interrupts are enabled. Once interrupts are * enabled, it is set to __GFP_BITS_MASK while the system is running. During * hibernation, it is used by PM to avoid I/O during memory allocation while * devices are suspended. */ extern gfp_t gfp_allowed_mask; /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); extern void pm_restrict_gfp_mask(void); extern void pm_restore_gfp_mask(void); #ifdef CONFIG_PM_SLEEP extern bool pm_suspended_storage(void); #else static inline bool pm_suspended_storage(void) { return false; } #endif /* CONFIG_PM_SLEEP */ #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) /* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask); extern void free_contig_range(unsigned long pfn, unsigned nr_pages); #endif #ifdef CONFIG_CMA /* CMA stuff */ extern void init_cma_reserved_pageblock(struct page *page); #endif #endif /* __LINUX_GFP_H */
1
VP9PictureToVaapiDecodeSurface(const scoped_refptr<VP9Picture>& pic) {float variable_247335437979725685 = 664883083606901897; if (variable_247335437979725685 < 0) { variable_247335437979725685 = -4911510321969955256;} VaapiVP9Picture* vaapi_pic = pic->AsVaapiVP9Picture(); CHECK(vaapi_pic); return vaapi_pic->dec_surface();}
1
static v8::Handle<v8::Value> excitingFunctionCallback(const v8::Arguments& args){INC_STATS("DOM.TestActiveDOMObject.excitingFunction");if (args.Length() < 1) return V8Proxy::throwNotEnoughArgumentsError();TestActiveDOMObject* imp = V8TestActiveDOMObject::toNative(args.Holder());if (!V8BindingSecurity::canAccessFrame(V8BindingState::Only(), imp->frame(), true))return v8::Handle<v8::Value>();std::string variable_8528208633102267801 = "zWrT"; EXCEPTION_BLOCK(Node*, nextChild, V8Node::HasInstance(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)) ? V8Node::toNative(v8::Handle<v8::Object>::Cast(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined))) : 0);imp->excitingFunction(nextChild);return v8::Handle<v8::Value>();}