Dataset schema. Each column is listed with its dtype and the viewer's summary
statistic: a value range for integers, a string-length range for strings, or a
count of distinct classes for categorical strings.

column                   dtype   summary
-----------------------  ------  -------------------
file_name                int64   values 0 .. 72.3k
vulnerable_line_numbers  string  lengths 1 .. 1.06k
dataset_type             string  1 class
commit_hash              string  lengths 40 .. 44
unique_id                int64   values 0 .. 271k
project                  string  10 classes
target                   int64   values 0 .. 1
repo_url                 string  10 classes
date                     string  lengths 25 .. 25
code                     string  lengths 0 .. 20.4M
CVE                      string  lengths 13 .. 43
CWE                      string  lengths 6 .. 8
commit_link              string  lengths 73 .. 97
severity                 string  4 classes
__index_level_0__        int64   values 0 .. 124k
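The rows below follow this schema, one record per sample, with the flattened
source file stored in the `code` field. As a minimal sketch of how such a dump
is typically consumed (not this dataset's documented loader), the snippet below
pulls a dataset with these columns via the `datasets` library and tallies the
binary `target` labels; the repository id and split name are hypothetical
placeholders, since the dump does not name the dataset.

```python
# Minimal sketch, assuming a Hugging Face-style dataset with the schema above.
# "example/vuln-commits" and the split name are hypothetical placeholders;
# the field names (project, commit_hash, target, code) come from the schema.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("example/vuln-commits", split="train")

row = ds[0]
print(row["project"], row["commit_hash"], row["target"])
print(row["code"][:120])   # first characters of the stored source file

# target is 0/1: tally non-vulnerable vs. vulnerable samples
print(Counter(ds["target"]))
```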

Row: project linux | target 0 | dataset_type train_val | file_name 3,085
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
unique_id: 168,080 | __index_level_0__: 76,428
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```c
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
    struct inode *inode, *toput_inode = NULL;

    spin_lock(&sb->s_inode_list_lock);
    list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
        spin_lock(&inode->i_lock);
        if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
            (inode->i_mapping->nrpages == 0)) {
            spin_unlock(&inode->i_lock);
            continue;
        }
        __iget(inode);
        spin_unlock(&inode->i_lock);
        spin_unlock(&sb->s_inode_list_lock);

        invalidate_mapping_pages(inode->i_mapping, 0, -1);
        iput(toput_inode);
        toput_inode = inode;

        spin_lock(&sb->s_inode_list_lock);
    }
    spin_unlock(&sb->s_inode_list_lock);
    iput(toput_inode);
}

int drop_caches_sysctl_handler(struct ctl_table *table, int write,
    void __user *buffer, size_t *length, loff_t *ppos)
{
    int ret;

    ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
    if (ret)
        return ret;
    if (write) {
        static int stfu;

        if (sysctl_drop_caches & 1) {
            iterate_supers(drop_pagecache_sb, NULL);
            count_vm_event(DROP_PAGECACHE);
        }
        if (sysctl_drop_caches & 2) {
            drop_slab();
            count_vm_event(DROP_SLAB);
        }
        if (!stfu) {
            pr_info("%s (%d): drop_caches: %d\n",
                current->comm, task_pid_nr(current),
                sysctl_drop_caches);
        }
        stfu |= sysctl_drop_caches & 4;
    }
    return 0;
}
```

Row: project linux | target 0 | dataset_type train_val | file_name 27,267
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
unique_id: 192,262 | __index_level_0__: 100,609
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```c
/*
 * aio_aio12_8.c
 * Driver for Access I/O Products PC-104 AIO12-8 Analog I/O Board
 * Copyright (C) 2006 C&C Technologies, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Driver: aio_aio12_8
 * Description: Access I/O Products PC-104 AIO12-8 Analog I/O Board
 * Author: Pablo Mejia <pablo.mejia@cctechnol.com>
 * Devices: [Access I/O] PC-104 AIO12-8 (aio_aio12_8),
 *   [Access I/O] PC-104 AI12-8 (aio_ai12_8),
 *   [Access I/O] PC-104 AO12-4 (aio_ao12_4)
 * Status: experimental
 *
 * Configuration Options:
 *   [0] - I/O port base address
 *
 * Notes:
 *   Only synchronous operations are supported.
 */

#include <linux/module.h>
#include "../comedidev.h"

#include "comedi_8254.h"
#include "8255.h"

/*
 * Register map
 */
#define AIO12_8_STATUS_REG		0x00
#define AIO12_8_STATUS_ADC_EOC		BIT(7)
#define AIO12_8_STATUS_PORT_C_COS	BIT(6)
#define AIO12_8_STATUS_IRQ_ENA		BIT(2)
#define AIO12_8_INTERRUPT_REG		0x01
#define AIO12_8_INTERRUPT_ADC		BIT(7)
#define AIO12_8_INTERRUPT_COS		BIT(6)
#define AIO12_8_INTERRUPT_COUNTER1	BIT(5)
#define AIO12_8_INTERRUPT_PORT_C3	BIT(4)
#define AIO12_8_INTERRUPT_PORT_C0	BIT(3)
#define AIO12_8_INTERRUPT_ENA		BIT(2)
#define AIO12_8_ADC_REG			0x02
#define AIO12_8_ADC_MODE(x)		(((x) & 0x3) << 6)
#define AIO12_8_ADC_MODE_NORMAL		AIO12_8_ADC_MODE(0)
#define AIO12_8_ADC_MODE_INT_CLK	AIO12_8_ADC_MODE(1)
#define AIO12_8_ADC_MODE_STANDBY	AIO12_8_ADC_MODE(2)
#define AIO12_8_ADC_MODE_POWERDOWN	AIO12_8_ADC_MODE(3)
#define AIO12_8_ADC_ACQ(x)		(((x) & 0x1) << 5)
#define AIO12_8_ADC_ACQ_3USEC		AIO12_8_ADC_ACQ(0)
#define AIO12_8_ADC_ACQ_PROGRAM		AIO12_8_ADC_ACQ(1)
#define AIO12_8_ADC_RANGE(x)		((x) << 3)
#define AIO12_8_ADC_CHAN(x)		((x) << 0)
#define AIO12_8_DAC_REG(x)		(0x04 + (x) * 2)
#define AIO12_8_8254_BASE_REG		0x0c
#define AIO12_8_8255_BASE_REG		0x10
#define AIO12_8_DIO_CONTROL_REG		0x14
#define AIO12_8_DIO_CONTROL_TST		BIT(0)
#define AIO12_8_ADC_TRIGGER_REG		0x15
#define AIO12_8_ADC_TRIGGER_RANGE(x)	((x) << 3)
#define AIO12_8_ADC_TRIGGER_CHAN(x)	((x) << 0)
#define AIO12_8_TRIGGER_REG		0x16
#define AIO12_8_TRIGGER_ADTRIG		BIT(1)
#define AIO12_8_TRIGGER_DACTRIG		BIT(0)
#define AIO12_8_COS_REG			0x17
#define AIO12_8_DAC_ENABLE_REG		0x18
#define AIO12_8_DAC_ENABLE_REF_ENA	BIT(0)

static const struct comedi_lrange aio_aio12_8_range = {
    4, {
        UNI_RANGE(5),
        BIP_RANGE(5),
        UNI_RANGE(10),
        BIP_RANGE(10)
    }
};

struct aio12_8_boardtype {
    const char *name;
    unsigned int has_ai:1;
    unsigned int has_ao:1;
};

static const struct aio12_8_boardtype board_types[] = {
    {
        .name    = "aio_aio12_8",
        .has_ai  = 1,
        .has_ao  = 1,
    }, {
        .name    = "aio_ai12_8",
        .has_ai  = 1,
    }, {
        .name    = "aio_ao12_4",
        .has_ao  = 1,
    },
};

static int aio_aio12_8_ai_eoc(struct comedi_device *dev,
                              struct comedi_subdevice *s,
                              struct comedi_insn *insn,
                              unsigned long context)
{
    unsigned int status;

    status = inb(dev->iobase + AIO12_8_STATUS_REG);
    if (status & AIO12_8_STATUS_ADC_EOC)
        return 0;
    return -EBUSY;
}

static int aio_aio12_8_ai_read(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               struct comedi_insn *insn,
                               unsigned int *data)
{
    unsigned int chan = CR_CHAN(insn->chanspec);
    unsigned int range = CR_RANGE(insn->chanspec);
    unsigned int val;
    unsigned char control;
    int ret;
    int i;

    /*
     * Setup the control byte for internal 2MHz clock, 3uS conversion,
     * at the desired range of the requested channel.
     */
    control = AIO12_8_ADC_MODE_NORMAL | AIO12_8_ADC_ACQ_3USEC |
              AIO12_8_ADC_RANGE(range) | AIO12_8_ADC_CHAN(chan);

    /* Read status to clear EOC latch */
    inb(dev->iobase + AIO12_8_STATUS_REG);

    for (i = 0; i < insn->n; i++) {
        /* Setup and start conversion */
        outb(control, dev->iobase + AIO12_8_ADC_REG);

        /* Wait for conversion to complete */
        ret = comedi_timeout(dev, s, insn, aio_aio12_8_ai_eoc, 0);
        if (ret)
            return ret;

        val = inw(dev->iobase + AIO12_8_ADC_REG) & s->maxdata;

        /* munge bipolar 2's complement data to offset binary */
        if (comedi_range_is_bipolar(s, range))
            val = comedi_offset_munge(s, val);

        data[i] = val;
    }

    return insn->n;
}

static int aio_aio12_8_ao_insn_write(struct comedi_device *dev,
                                     struct comedi_subdevice *s,
                                     struct comedi_insn *insn,
                                     unsigned int *data)
{
    unsigned int chan = CR_CHAN(insn->chanspec);
    unsigned int val = s->readback[chan];
    int i;

    /* enable DACs */
    outb(AIO12_8_DAC_ENABLE_REF_ENA, dev->iobase + AIO12_8_DAC_ENABLE_REG);

    for (i = 0; i < insn->n; i++) {
        val = data[i];
        outw(val, dev->iobase + AIO12_8_DAC_REG(chan));
    }
    s->readback[chan] = val;

    return insn->n;
}

static int aio_aio12_8_counter_insn_config(struct comedi_device *dev,
                                           struct comedi_subdevice *s,
                                           struct comedi_insn *insn,
                                           unsigned int *data)
{
    unsigned int chan = CR_CHAN(insn->chanspec);

    switch (data[0]) {
    case INSN_CONFIG_GET_CLOCK_SRC:
        /*
         * Channels 0 and 2 have external clock sources.
         * Channel 1 has a fixed 1 MHz clock source.
         */
        data[0] = 0;
        data[1] = (chan == 1) ? I8254_OSC_BASE_1MHZ : 0;
        break;
    default:
        return -EINVAL;
    }

    return insn->n;
}

static int aio_aio12_8_attach(struct comedi_device *dev,
                              struct comedi_devconfig *it)
{
    const struct aio12_8_boardtype *board = dev->board_ptr;
    struct comedi_subdevice *s;
    int ret;

    ret = comedi_request_region(dev, it->options[0], 32);
    if (ret)
        return ret;

    dev->pacer = comedi_8254_init(dev->iobase + AIO12_8_8254_BASE_REG,
                                  0, I8254_IO8, 0);
    if (!dev->pacer)
        return -ENOMEM;

    ret = comedi_alloc_subdevices(dev, 4);
    if (ret)
        return ret;

    /* Analog Input subdevice */
    s = &dev->subdevices[0];
    if (board->has_ai) {
        s->type         = COMEDI_SUBD_AI;
        s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
        s->n_chan       = 8;
        s->maxdata      = 0x0fff;
        s->range_table  = &aio_aio12_8_range;
        s->insn_read    = aio_aio12_8_ai_read;
    } else {
        s->type = COMEDI_SUBD_UNUSED;
    }

    /* Analog Output subdevice */
    s = &dev->subdevices[1];
    if (board->has_ao) {
        s->type         = COMEDI_SUBD_AO;
        s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
        s->n_chan       = 4;
        s->maxdata      = 0x0fff;
        s->range_table  = &aio_aio12_8_range;
        s->insn_write   = aio_aio12_8_ao_insn_write;

        ret = comedi_alloc_subdev_readback(s);
        if (ret)
            return ret;
    } else {
        s->type = COMEDI_SUBD_UNUSED;
    }

    /* Digital I/O subdevice (8255) */
    s = &dev->subdevices[2];
    ret = subdev_8255_init(dev, s, NULL, AIO12_8_8255_BASE_REG);
    if (ret)
        return ret;

    /* Counter subdevice (8254) */
    s = &dev->subdevices[3];
    comedi_8254_subdevice_init(s, dev->pacer);

    dev->pacer->insn_config = aio_aio12_8_counter_insn_config;

    return 0;
}

static struct comedi_driver aio_aio12_8_driver = {
    .driver_name = "aio_aio12_8",
    .module      = THIS_MODULE,
    .attach      = aio_aio12_8_attach,
    .detach      = comedi_legacy_detach,
    .board_name  = &board_types[0].name,
    .num_names   = ARRAY_SIZE(board_types),
    .offset      = sizeof(struct aio12_8_boardtype),
};
module_comedi_driver(aio_aio12_8_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for Access I/O AIO12-8 Analog I/O Board");
MODULE_LICENSE("GPL");
```

Row: project Chrome | target 0 | dataset_type train_val | file_name 24,836
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
unique_id: 24,836 | __index_level_0__: 21,699
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```cpp
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef EXTENSIONS_RENDERER_USER_SCRIPT_SET_MANAGER_H_
#define EXTENSIONS_RENDERER_USER_SCRIPT_SET_MANAGER_H_

#include <map>
#include <set>
#include <string>
#include <vector>

#include "base/macros.h"
#include "base/memory/shared_memory.h"
#include "base/observer_list.h"
#include "content/public/renderer/render_thread_observer.h"
#include "extensions/common/extension.h"
#include "extensions/common/user_script.h"
#include "extensions/renderer/user_script_set.h"

namespace content {
class RenderFrame;
}

namespace IPC {
class Message;
}

namespace extensions {

class ScriptInjection;

// Manager for separate UserScriptSets, one for each shared memory region.
// Regions are organized as follows:
// static_scripts -- contains all extensions' scripts that are statically
//                   declared in the extension manifest.
// programmatic_scripts -- one region per host (extension or WebUI) containing
//                         only programmatically-declared scripts, instantiated
//                         when an extension first creates a declarative rule
//                         that would, if triggered, request a script injection.
class UserScriptSetManager : public content::RenderThreadObserver {
 public:
  // Like a UserScriptSet::Observer, but automatically subscribes to all sets
  // associated with the manager.
  class Observer {
   public:
    virtual void OnUserScriptsUpdated(
        const std::set<HostID>& changed_hosts) = 0;
  };

  UserScriptSetManager();
  ~UserScriptSetManager() override;

  void AddObserver(Observer* observer);
  void RemoveObserver(Observer* observer);

  // Looks up the script injection associated with |script_id| and
  // |extension_id| in the context of the given |web_frame|, |tab_id|,
  // and |url|.
  std::unique_ptr<ScriptInjection> GetInjectionForDeclarativeScript(
      int script_id,
      content::RenderFrame* render_frame,
      int tab_id,
      const GURL& url,
      const std::string& extension_id);

  // Append all injections from |static_scripts| and each of
  // |programmatic_scripts_| to |injections|.
  void GetAllInjections(
      std::vector<std::unique_ptr<ScriptInjection>>* injections,
      content::RenderFrame* render_frame,
      int tab_id,
      UserScript::RunLocation run_location);

  // Get active extension IDs from |static_scripts| and each of
  // |programmatic_scripts_|.
  void GetAllActiveExtensionIds(std::set<std::string>* ids) const;

  const UserScriptSet* static_scripts() const { return &static_scripts_; }

  void set_activity_logging_enabled(bool enabled) {
    activity_logging_enabled_ = enabled;
  }

 private:
  // Map for per-extension sets that may be defined programmatically.
  using UserScriptSetMap = std::map<HostID, std::unique_ptr<UserScriptSet>>;

  // content::RenderThreadObserver implementation.
  bool OnControlMessageReceived(const IPC::Message& message) override;

  UserScriptSet* GetProgrammaticScriptsByHostID(const HostID& host_id);

  // Handle the UpdateUserScripts extension message.
  void OnUpdateUserScripts(base::SharedMemoryHandle shared_memory,
                           const HostID& host_id,
                           const std::set<HostID>& changed_hosts,
                           bool whitelisted_only);

  // Scripts statically defined in extension manifests.
  UserScriptSet static_scripts_;

  // Scripts programmatically-defined through API calls (initialized and stored
  // per-extension).
  UserScriptSetMap programmatic_scripts_;

  // Whether or not dom activity should be logged for injected scripts.
  bool activity_logging_enabled_;

  // The associated observers.
  base::ObserverList<Observer> observers_;

  DISALLOW_COPY_AND_ASSIGN(UserScriptSetManager);
};

}  // namespace extensions

#endif  // EXTENSIONS_RENDERER_USER_SCRIPT_SET_MANAGER_H_
```

Row: project Chrome | target 0 | dataset_type train_val | file_name 51,112
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
unique_id: 51,112 | __index_level_0__: 47,975
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```cpp
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef UI_EVENTS_EVENT_DISPATCHER_H_
#define UI_EVENTS_EVENT_DISPATCHER_H_

#include "base/auto_reset.h"
#include "base/macros.h"
#include "ui/events/event.h"
#include "ui/events/event_constants.h"
#include "ui/events/event_handler.h"
#include "ui/events/events_export.h"

namespace ui {

class EventDispatcher;
class EventTarget;

struct EventDispatchDetails {
  EventDispatchDetails()
      : dispatcher_destroyed(false),
        target_destroyed(false) {}
  bool dispatcher_destroyed;
  bool target_destroyed;
};

class EVENTS_EXPORT EventDispatcherDelegate {
 public:
  EventDispatcherDelegate();
  virtual ~EventDispatcherDelegate();

  // Returns whether an event can still be dispatched to a target. (e.g.
  // during event dispatch, one of the handlers may have destroyed the target,
  // in which case the event can no longer be dispatched to the target).
  virtual bool CanDispatchToTarget(EventTarget* target) = 0;

  // Returns the event being dispatched (or NULL if no event is being
  // dispatched).
  Event* current_event();

  // Dispatches |event| to |target|. This calls |PreDispatchEvent()| before
  // dispatching the event, and |PostDispatchEvent()| after the event has been
  // dispatched.
  EventDispatchDetails DispatchEvent(EventTarget* target, Event* event)
      WARN_UNUSED_RESULT;

 protected:
  // This is called once a target has been determined for an event, right
  // before the event is dispatched to the target. This function may modify
  // |event| to prepare it for dispatch (e.g. update event flags, location
  // etc.).
  virtual EventDispatchDetails PreDispatchEvent(EventTarget* target,
                                                Event* event)
      WARN_UNUSED_RESULT;

  // This is called right after the event dispatch is completed.
  // |target| is NULL if the target was deleted during dispatch.
  virtual EventDispatchDetails PostDispatchEvent(EventTarget* target,
                                                 const Event& event)
      WARN_UNUSED_RESULT;

 private:
  // Dispatches the event to the target.
  EventDispatchDetails DispatchEventToTarget(EventTarget* target,
                                             Event* event) WARN_UNUSED_RESULT;

  EventDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(EventDispatcherDelegate);
};

// Dispatches events to appropriate targets.
class EVENTS_EXPORT EventDispatcher {
 public:
  explicit EventDispatcher(EventDispatcherDelegate* delegate);
  virtual ~EventDispatcher();

  void ProcessEvent(EventTarget* target, Event* event);

  const Event* current_event() const { return current_event_; }
  Event* current_event() { return current_event_; }

  bool delegate_destroyed() const { return !delegate_; }

  void OnHandlerDestroyed(EventHandler* handler);
  void OnDispatcherDelegateDestroyed();

 private:
  void DispatchEventToEventHandlers(EventHandlerList* list, Event* event);

  // Dispatches an event, and makes sure it sets ER_CONSUMED on the
  // event-handling result if the dispatcher itself has been destroyed during
  // dispatching the event to the event handler.
  void DispatchEvent(EventHandler* handler, Event* event);

  EventDispatcherDelegate* delegate_;

  Event* current_event_;

  EventHandlerList handler_list_;

  DISALLOW_COPY_AND_ASSIGN(EventDispatcher);
};

}  // namespace ui

#endif  // UI_EVENTS_EVENT_DISPATCHER_H_
```

Row: project openssl | target 0 | dataset_type train_val | file_name 956
commit_hash: c536b6be1a72aefd632d5530106a67c516cb9f4b
repo_url: https://github.com/openssl/openssl
date: 2016-09-22 23:12:38+01:00
unique_id: 257,343 | __index_level_0__: 118,788
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```c
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <stdio.h>
#include "internal/cryptlib.h"
#include <openssl/evp.h>
#include <openssl/objects.h>
#include "internal/evp_int.h"

static int null_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                         const unsigned char *iv, int enc);
static int null_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                       const unsigned char *in, size_t inl);
static const EVP_CIPHER n_cipher = {
    NID_undef,
    1, 0, 0, 0,
    null_init_key,
    null_cipher,
    NULL,
    0,
    NULL,
    NULL,
    NULL,
    NULL
};

const EVP_CIPHER *EVP_enc_null(void)
{
    return (&n_cipher);
}

static int null_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                         const unsigned char *iv, int enc)
{
    return 1;
}

static int null_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                       const unsigned char *in, size_t inl)
{
    if (in != out)
        memcpy(out, in, inl);
    return 1;
}
```

Row: project Chrome | target 0 | dataset_type train_val | file_name 48,663
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
unique_id: 48,663 | __index_level_0__: 45,526
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```cpp
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/views/animation/ink_drop_highlight.h"

#include <memory>
#include <string>
#include <utility>

#include "third_party/skia/include/core/SkColor.h"
#include "ui/compositor/callback_layer_animation_observer.h"
#include "ui/compositor/layer.h"
#include "ui/compositor/layer_animation_sequence.h"
#include "ui/compositor/scoped_layer_animation_settings.h"
#include "ui/gfx/animation/animation.h"
#include "ui/gfx/geometry/insets.h"
#include "ui/views/animation/ink_drop_highlight_observer.h"
#include "ui/views/animation/ink_drop_painted_layer_delegates.h"
#include "ui/views/animation/ink_drop_util.h"

namespace views {

namespace {

// The opacity of the highlight when it is not visible.
const float kHiddenOpacity = 0.0f;

}  // namespace

std::string ToString(InkDropHighlight::AnimationType animation_type) {
  switch (animation_type) {
    case InkDropHighlight::FADE_IN:
      return std::string("FADE_IN");
    case InkDropHighlight::FADE_OUT:
      return std::string("FADE_OUT");
  }
  NOTREACHED()
      << "Should never be reached but is necessary for some compilers.";
  return std::string("UNKNOWN");
}

InkDropHighlight::InkDropHighlight(
    const gfx::PointF& center_point,
    std::unique_ptr<BasePaintedLayerDelegate> layer_delegate)
    : center_point_(center_point),
      visible_opacity_(1.f),
      last_animation_initiated_was_fade_in_(false),
      layer_delegate_(std::move(layer_delegate)),
      layer_(new ui::Layer()),
      observer_(nullptr) {
  const gfx::RectF painted_bounds = layer_delegate_->GetPaintedBounds();
  size_ = explode_size_ = painted_bounds.size();

  layer_->SetBounds(gfx::ToEnclosingRect(painted_bounds));
  layer_->SetFillsBoundsOpaquely(false);
  layer_->set_delegate(layer_delegate_.get());
  layer_->SetVisible(false);
  layer_->SetMasksToBounds(false);
  layer_->set_name("InkDropHighlight:layer");
}

InkDropHighlight::InkDropHighlight(const gfx::SizeF& size,
                                   int corner_radius,
                                   const gfx::PointF& center_point,
                                   SkColor color)
    : InkDropHighlight(
          center_point,
          std::unique_ptr<BasePaintedLayerDelegate>(
              new RoundedRectangleLayerDelegate(color, size, corner_radius))) {
  visible_opacity_ = 0.128f;
  layer_->SetOpacity(visible_opacity_);
}

InkDropHighlight::InkDropHighlight(const gfx::Size& size,
                                   int corner_radius,
                                   const gfx::PointF& center_point,
                                   SkColor color)
    : InkDropHighlight(gfx::SizeF(size), corner_radius, center_point, color) {}

InkDropHighlight::~InkDropHighlight() {
  // Explicitly aborting all the animations ensures all callbacks are invoked
  // while this instance still exists.
  layer_->GetAnimator()->AbortAllAnimations();
}

bool InkDropHighlight::IsFadingInOrVisible() const {
  return last_animation_initiated_was_fade_in_;
}

void InkDropHighlight::FadeIn(const base::TimeDelta& duration) {
  layer_->SetOpacity(kHiddenOpacity);
  layer_->SetVisible(true);
  AnimateFade(FADE_IN, duration, size_, size_);
}

void InkDropHighlight::FadeOut(const base::TimeDelta& duration, bool explode) {
  AnimateFade(FADE_OUT, duration, size_, explode ? explode_size_ : size_);
}

test::InkDropHighlightTestApi* InkDropHighlight::GetTestApi() {
  return nullptr;
}

void InkDropHighlight::AnimateFade(AnimationType animation_type,
                                   const base::TimeDelta& duration,
                                   const gfx::SizeF& initial_size,
                                   const gfx::SizeF& target_size) {
  const base::TimeDelta effective_duration =
      gfx::Animation::ShouldRenderRichAnimation() ? duration
                                                  : base::TimeDelta();
  last_animation_initiated_was_fade_in_ = animation_type == FADE_IN;

  layer_->SetTransform(CalculateTransform(initial_size));

  // The |animation_observer| will be destroyed when the
  // AnimationStartedCallback() returns true.
  ui::CallbackLayerAnimationObserver* animation_observer =
      new ui::CallbackLayerAnimationObserver(
          base::Bind(&InkDropHighlight::AnimationStartedCallback,
                     base::Unretained(this), animation_type),
          base::Bind(&InkDropHighlight::AnimationEndedCallback,
                     base::Unretained(this), animation_type));

  ui::LayerAnimator* animator = layer_->GetAnimator();
  ui::ScopedLayerAnimationSettings animation(animator);
  animation.SetTweenType(gfx::Tween::EASE_IN_OUT);
  animation.SetPreemptionStrategy(
      ui::LayerAnimator::IMMEDIATELY_ANIMATE_TO_NEW_TARGET);

  std::unique_ptr<ui::LayerAnimationElement> opacity_element =
      ui::LayerAnimationElement::CreateOpacityElement(
          animation_type == FADE_IN ? visible_opacity_ : kHiddenOpacity,
          effective_duration);
  ui::LayerAnimationSequence* opacity_sequence =
      new ui::LayerAnimationSequence(std::move(opacity_element));
  opacity_sequence->AddObserver(animation_observer);
  animator->StartAnimation(opacity_sequence);

  if (initial_size != target_size) {
    std::unique_ptr<ui::LayerAnimationElement> transform_element =
        ui::LayerAnimationElement::CreateTransformElement(
            CalculateTransform(target_size), effective_duration);
    ui::LayerAnimationSequence* transform_sequence =
        new ui::LayerAnimationSequence(std::move(transform_element));
    transform_sequence->AddObserver(animation_observer);
    animator->StartAnimation(transform_sequence);
  }

  animation_observer->SetActive();
}

gfx::Transform InkDropHighlight::CalculateTransform(
    const gfx::SizeF& size) const {
  gfx::Transform transform;
  transform.Translate(center_point_.x(), center_point_.y());
  // TODO(bruthig): Fix the InkDropHighlight to work well when initialized with
  // a (0x0) size. See https://crbug.com/661618.
  transform.Scale(size_.width() == 0 ? 0 : size.width() / size_.width(),
                  size_.height() == 0 ? 0 : size.height() / size_.height());

  gfx::Vector2dF layer_offset = layer_delegate_->GetCenteringOffset();
  transform.Translate(-layer_offset.x(), -layer_offset.y());

  // Add subpixel correction to the transform.
  transform.ConcatTransform(
      GetTransformSubpixelCorrection(transform, layer_->device_scale_factor()));

  return transform;
}

void InkDropHighlight::AnimationStartedCallback(
    AnimationType animation_type,
    const ui::CallbackLayerAnimationObserver& observer) {
  if (observer_)
    observer_->AnimationStarted(animation_type);
}

bool InkDropHighlight::AnimationEndedCallback(
    AnimationType animation_type,
    const ui::CallbackLayerAnimationObserver& observer) {
  // AnimationEndedCallback() may be invoked when this is being destroyed and
  // |layer_| may be null.
  if (animation_type == FADE_OUT && layer_)
    layer_->SetVisible(false);

  if (observer_) {
    observer_->AnimationEnded(animation_type,
                              observer.aborted_count()
                                  ? InkDropAnimationEndedReason::PRE_EMPTED
                                  : InkDropAnimationEndedReason::SUCCESS);
  }
  return true;
}

}  // namespace views
```

Row: project Chrome | target 0 | dataset_type train_val | file_name 9,723
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
unique_id: 9,723 | __index_level_0__: 6,586
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```cpp
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chromecast/base/bitstream_audio_codecs.h"

#include <vector>

#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"

namespace chromecast {
namespace {

const char* BitstreamAudioCodecToString(int codec) {
  switch (codec) {
    case kBitstreamAudioCodecNone:
      return "None";
    case kBitstreamAudioCodecAc3:
      return "AC3";
    case kBitstreamAudioCodecDts:
      return "DTS";
    case kBitstreamAudioCodecDtsHd:
      return "DTS-HD";
    case kBitstreamAudioCodecEac3:
      return "EAC3";
    case kBitstreamAudioCodecPcmSurround:
      return "PCM";
    default:
      return "";
  }
}

}  // namespace

std::string BitstreamAudioCodecsToString(int codecs) {
  std::string codec_string = BitstreamAudioCodecToString(codecs);
  if (!codec_string.empty()) {
    return codec_string;
  }
  std::vector<base::StringPiece> codec_strings;
  for (int codec :
       {kBitstreamAudioCodecAc3, kBitstreamAudioCodecDts,
        kBitstreamAudioCodecDtsHd, kBitstreamAudioCodecEac3,
        kBitstreamAudioCodecPcmSurround}) {
    if ((codec & codecs) != 0) {
      codec_strings.push_back(BitstreamAudioCodecToString(codec));
    }
  }
  return "[" + base::JoinString(codec_strings, ", ") + "]";
}

}  // namespace chromecast
```

Row: project Chrome | target 0 | dataset_type train_val | file_name 61,847
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
unique_id: 61,847 | __index_level_0__: 58,710
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```cpp
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <utility>

#include "base/location.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/rand_util.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "chrome/browser/devtools/device/android_device_manager.h"
#include "content/public/browser/browser_thread.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/server/web_socket_encoder.h"
#include "net/socket/stream_socket.h"
#include "net/traffic_annotation/network_traffic_annotation.h"

using content::BrowserThread;
using net::WebSocket;

namespace {

const int kBufferSize = 16 * 1024;
const char kCloseResponse[] = "\x88\x80\x2D\x0E\x1E\xFA";

net::NetworkTrafficAnnotationTag kAndroidWebSocketTrafficAnnotation =
    net::DefineNetworkTrafficAnnotation("android_web_socket", R"(
      semantics {
        sender: "Android Web Socket"
        description:
          "Remote debugging is supported over existing ADB (Android Debug "
          "Bridge) connection, in addition to raw USB connection. This "
          "socket talks to the local ADB daemon which routes debugging "
          "traffic to a remote device."
        trigger:
          "A user connects to an Android device using remote debugging."
        data: "Any data required for remote debugging."
        destination: LOCAL
      }
      policy {
        cookies_allowed: NO
        setting:
          "To use adb with a device connected over USB, you must enable USB "
          "debugging in the device system settings, under Developer options."
        policy_exception_justification:
          "This is not a network request and is only used for remote "
          "debugging."
      })");

}  // namespace

class AndroidDeviceManager::AndroidWebSocket::WebSocketImpl {
 public:
  WebSocketImpl(
      scoped_refptr<base::SingleThreadTaskRunner> response_task_runner,
      base::WeakPtr<AndroidWebSocket> weak_socket,
      const std::string& extensions,
      const std::string& body_head,
      std::unique_ptr<net::StreamSocket> socket)
      : response_task_runner_(response_task_runner),
        weak_socket_(weak_socket),
        socket_(std::move(socket)),
        encoder_(net::WebSocketEncoder::CreateClient(extensions)),
        response_buffer_(body_head),
        weak_factory_(this) {
    thread_checker_.DetachFromThread();
  }

  void StartListening() {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK(socket_);

    scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));

    if (response_buffer_.size() > 0)
      ProcessResponseBuffer(buffer);
    else
      Read(buffer);
  }

  void SendFrame(const std::string& message) {
    DCHECK(thread_checker_.CalledOnValidThread());
    if (!socket_)
      return;
    int mask = base::RandInt(0, 0x7FFFFFFF);
    std::string encoded_frame;
    encoder_->EncodeFrame(message, mask, &encoded_frame);
    SendData(encoded_frame);
  }

  base::WeakPtr<WebSocketImpl> GetWeakPtr() {
    return weak_factory_.GetWeakPtr();
  }

 private:
  void Read(scoped_refptr<net::IOBuffer> io_buffer) {
    int result = socket_->Read(io_buffer.get(), kBufferSize,
                               base::Bind(&WebSocketImpl::OnBytesRead,
                                          weak_factory_.GetWeakPtr(),
                                          io_buffer));
    if (result != net::ERR_IO_PENDING)
      OnBytesRead(io_buffer, result);
  }

  void OnBytesRead(scoped_refptr<net::IOBuffer> io_buffer, int result) {
    DCHECK(thread_checker_.CalledOnValidThread());
    if (result <= 0) {
      Disconnect();
      return;
    }
    response_buffer_.append(io_buffer->data(), result);

    ProcessResponseBuffer(io_buffer);
  }

  void ProcessResponseBuffer(scoped_refptr<net::IOBuffer> io_buffer) {
    int bytes_consumed;
    std::string output;
    WebSocket::ParseResult parse_result = encoder_->DecodeFrame(
        response_buffer_, &bytes_consumed, &output);

    while (parse_result == WebSocket::FRAME_OK) {
      response_buffer_ = response_buffer_.substr(bytes_consumed);
      response_task_runner_->PostTask(
          FROM_HERE, base::BindOnce(&AndroidWebSocket::OnFrameRead,
                                    weak_socket_, output));
      parse_result = encoder_->DecodeFrame(
          response_buffer_, &bytes_consumed, &output);
    }

    if (parse_result == WebSocket::FRAME_CLOSE)
      SendData(kCloseResponse);
    if (parse_result == WebSocket::FRAME_ERROR) {
      Disconnect();
      return;
    }
    Read(io_buffer);
  }

  void SendData(const std::string& data) {
    request_buffer_ += data;
    if (request_buffer_.length() == data.length())
      SendPendingRequests(0);
  }

  void SendPendingRequests(int result) {
    DCHECK(thread_checker_.CalledOnValidThread());
    if (result < 0) {
      Disconnect();
      return;
    }
    request_buffer_ = request_buffer_.substr(result);
    if (request_buffer_.empty())
      return;

    scoped_refptr<net::StringIOBuffer> buffer =
        new net::StringIOBuffer(request_buffer_);
    result = socket_->Write(buffer.get(), buffer->size(),
                            base::Bind(&WebSocketImpl::SendPendingRequests,
                                       weak_factory_.GetWeakPtr()),
                            kAndroidWebSocketTrafficAnnotation);
    if (result != net::ERR_IO_PENDING)
      SendPendingRequests(result);
  }

  void Disconnect() {
    DCHECK(thread_checker_.CalledOnValidThread());
    socket_.reset();
    response_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&AndroidWebSocket::OnSocketClosed, weak_socket_));
  }

  scoped_refptr<base::SingleThreadTaskRunner> response_task_runner_;
  base::WeakPtr<AndroidWebSocket> weak_socket_;
  std::unique_ptr<net::StreamSocket> socket_;
  std::unique_ptr<net::WebSocketEncoder> encoder_;
  std::string response_buffer_;
  std::string request_buffer_;
  base::ThreadChecker thread_checker_;
  DISALLOW_COPY_AND_ASSIGN(WebSocketImpl);
  base::WeakPtrFactory<WebSocketImpl> weak_factory_;
};

AndroidDeviceManager::AndroidWebSocket::AndroidWebSocket(
    scoped_refptr<Device> device,
    const std::string& socket_name,
    const std::string& path,
    Delegate* delegate)
    : device_(device),
      socket_impl_(nullptr, base::OnTaskRunnerDeleter(device->task_runner_)),
      delegate_(delegate),
      weak_factory_(this) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  DCHECK(delegate_);
  DCHECK(device_);
  device_->HttpUpgrade(
      socket_name, path, net::WebSocketEncoder::kClientExtensions,
      base::Bind(&AndroidWebSocket::Connected, weak_factory_.GetWeakPtr()));
}

AndroidDeviceManager::AndroidWebSocket::~AndroidWebSocket() = default;

void AndroidDeviceManager::AndroidWebSocket::SendFrame(
    const std::string& message) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  DCHECK(socket_impl_);
  DCHECK(device_);
  device_->task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebSocketImpl::SendFrame, socket_impl_->GetWeakPtr(),
                     message));
}

void AndroidDeviceManager::AndroidWebSocket::Connected(
    int result,
    const std::string& extensions,
    const std::string& body_head,
    std::unique_ptr<net::StreamSocket> socket) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  if (result != net::OK || !socket.get()) {
    OnSocketClosed();
    return;
  }
  socket_impl_.reset(new WebSocketImpl(base::ThreadTaskRunnerHandle::Get(),
                                       weak_factory_.GetWeakPtr(),
                                       extensions, body_head,
                                       std::move(socket)));
  device_->task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&WebSocketImpl::StartListening,
                     socket_impl_->GetWeakPtr()));
  delegate_->OnSocketOpened();
}

void AndroidDeviceManager::AndroidWebSocket::OnFrameRead(
    const std::string& message) {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  delegate_->OnFrameRead(message);
}

void AndroidDeviceManager::AndroidWebSocket::OnSocketClosed() {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  delegate_->OnSocketClosed();
}

AndroidDeviceManager::AndroidWebSocket*
AndroidDeviceManager::Device::CreateWebSocket(
    const std::string& socket_name,
    const std::string& path,
    AndroidWebSocket::Delegate* delegate) {
  return new AndroidWebSocket(this, socket_name, path, delegate);
}
```

Row: project Chrome | target 0 | dataset_type train_val | file_name 61,589
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
unique_id: 61,589 | __index_level_0__: 58,452
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```cpp
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_MEDIA_GALLERIES_GALLERY_WATCH_MANAGER_H_
#define CHROME_BROWSER_MEDIA_GALLERIES_GALLERY_WATCH_MANAGER_H_

#include <map>
#include <memory>
#include <string>

#include "base/callback_forward.h"
#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "chrome/browser/media_galleries/media_galleries_preferences.h"
#include "components/keyed_service/core/keyed_service_shutdown_notifier.h"
#include "components/storage_monitor/removable_storage_observer.h"

class GalleryWatchManagerObserver;

namespace base {
class SequencedTaskRunner;
}

namespace content {
class BrowserContext;
}

namespace extensions {
class Extension;
}

// The GalleryWatchManager is owned by MediaFileSystemRegistry, which is global.
// This class manages all watches on media galleries, regardless of profile.
// It tracks outstanding watch requests and creates one FilePathWatcher per
// watched directory. This class lives and is called on the UI thread.
class GalleryWatchManager
    : public MediaGalleriesPreferences::GalleryChangeObserver,
      public storage_monitor::RemovableStorageObserver {
 public:
  // On success, |error| is empty.
  typedef base::Callback<void(const std::string& /* error */)> ResultCallback;

  static const char kInvalidGalleryIDError[];
  static const char kNoPermissionError[];
  static const char kCouldNotWatchGalleryError[];

  GalleryWatchManager();
  ~GalleryWatchManager() override;

  // Add or remove observer of change events - this is the only way to
  // get the result of the file watches. There can only be one observer per
  // browser context.
  void AddObserver(content::BrowserContext* browser_context,
                   GalleryWatchManagerObserver* observer);
  void RemoveObserver(content::BrowserContext* browser_context);

  // Must be called when |browser_context| is shut down.
  void ShutdownBrowserContext(content::BrowserContext* browser_context);

  // Add a watch for |gallery_id|.
  void AddWatch(content::BrowserContext* browser_context,
                const extensions::Extension* extension,
                MediaGalleryPrefId gallery_id,
                const ResultCallback& callback);

  // Remove the watch for |gallery_id|. It is valid to call this method on
  // non-existent watches.
  void RemoveWatch(content::BrowserContext* browser_context,
                   const std::string& extension_id,
                   MediaGalleryPrefId gallery_id);

  // Remove all the watches for |extension_id|.
  void RemoveAllWatches(content::BrowserContext* browser_context,
                        const std::string& extension_id);

  // Return the set of galleries being watched for |extension_id|.
  MediaGalleryPrefIdSet GetWatchSet(content::BrowserContext* browser_context,
                                    const std::string& extension_id);

 private:
  class FileWatchManager;

  // Used to track the gallery watches connected to a specific path.
  struct WatchOwner {
    WatchOwner(content::BrowserContext* browser_context,
               const std::string& extension_id,
               MediaGalleryPrefId gallery_id);

    content::BrowserContext* browser_context;
    const std::string extension_id;
    MediaGalleryPrefId gallery_id;

    // Needed to support storage in STL set, as well as usage as map key.
    bool operator<(const WatchOwner& other) const;
  };

  struct NotificationInfo {
    NotificationInfo();
    NotificationInfo(const NotificationInfo& other);
    ~NotificationInfo();

    std::set<WatchOwner> owners;
    base::Time last_notify_time;
    bool delayed_notification_pending;
  };

  typedef std::map<WatchOwner, base::FilePath> WatchesMap;
  typedef std::map<base::FilePath, NotificationInfo> WatchedPaths;
  typedef std::map<content::BrowserContext*, GalleryWatchManagerObserver*>
      ObserverMap;
  typedef std::map<
      content::BrowserContext*,
      std::unique_ptr<KeyedServiceShutdownNotifier::Subscription>>
      BrowserContextSubscriptionMap;

  // Ensure there is a subscription to shutdown notifications for
  // |browser_context|.
  void EnsureBrowserContextSubscription(
      content::BrowserContext* browser_context);

  // Stop the FilePathWatcher for |path|. Updates |watched_paths_| but not
  // |registered_watches_|.
  void DeactivateFileWatch(const WatchOwner& owner, const base::FilePath& path);

  // Called by FilePathWatcher on the UI thread to respond to a request to
  // watch the path.
  void OnFileWatchActivated(const WatchOwner& owner,
                            const base::FilePath& path,
                            const ResultCallback& callback,
                            bool success);

  // Called by FilePathWatcher on the UI thread on a change event for |path|.
  void OnFilePathChanged(const base::FilePath& path, bool error);

  // MediaGalleriesPreferences::GalleryChangeObserver implementation.
  void OnPermissionRemoved(MediaGalleriesPreferences* pref,
                           const std::string& extension_id,
                           MediaGalleryPrefId pref_id) override;
  void OnGalleryRemoved(MediaGalleriesPreferences* pref,
                        MediaGalleryPrefId pref_id) override;

  // storage_monitor::RemovableStorageObserver implementation.
  void OnRemovableStorageDetached(
      const storage_monitor::StorageInfo& info) override;

  // True if the we are already observing the storage monitor.
  bool storage_monitor_observed_;

  // MediaGalleriesPreferences we are currently observing.
  std::set<MediaGalleriesPreferences*> observed_preferences_;

  // All registered watches, keyed by WatchOwner.
  WatchesMap watches_;

  // Reverse mapping of watched paths to the set of owning WatchOwners.
  WatchedPaths watched_paths_;

  // Things that want to hear about gallery changes.
  ObserverMap observers_;

  // Helper that does the watches on a sequenced task runner.
  std::unique_ptr<FileWatchManager> watch_manager_;

  // The background task runner that |watch_manager_| lives on.
  scoped_refptr<base::SequencedTaskRunner> watch_manager_task_runner_;

  // Removes watches when a browser context is shut down as watches contain raw
  // pointers.
  BrowserContextSubscriptionMap browser_context_subscription_map_;

  base::WeakPtrFactory<GalleryWatchManager> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(GalleryWatchManager);
};

#endif  // CHROME_BROWSER_MEDIA_GALLERIES_GALLERY_WATCH_MANAGER_H_
```

Row: project Chrome | target 0 | dataset_type train_val | file_name 14,575
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
unique_id: 14,575 | __index_level_0__: 11,438
vulnerable_line_numbers: null | CVE: null | CWE: null | commit_link: null | severity: null
code:

```cpp
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/history/core/browser/keyword_search_term.h"

namespace history {

KeywordSearchTermVisit::KeywordSearchTermVisit() : visits(0) {}

KeywordSearchTermVisit::~KeywordSearchTermVisit() {}

KeywordSearchTermRow::KeywordSearchTermRow() : keyword_id(0), url_id(0) {}

KeywordSearchTermRow::~KeywordSearchTermRow() {}

}  // namespace history
```

Row: project FFmpeg | target 0 | dataset_type train_val | file_name 1,797
commit_hash: 04b570817b2b38e35675b17328239746212f4c3f
repo_url: https://github.com/FFmpeg/FFmpeg
date: 2018-06-01 01:23:12+05:30
unique_id: 154,854
vulnerable_line_numbers: null
code:

```c
/*
 * DSP functions for Indeo Video Interactive codecs (Indeo4 and Indeo5)
 *
 * Copyright (c) 2009-2011 Maxim Poliakovski
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DSP functions (inverse transforms, motion compensation, wavelet recompositions)
 * for Indeo Video Interactive codecs.
 */

#include "avcodec.h"
#include "ivi.h"
#include "ivi_dsp.h"

void ff_ivi_recompose53(const IVIPlaneDesc *plane, uint8_t *dst,
                        const ptrdiff_t dst_pitch)
{
    int             x, y, indx;
    int32_t         p0, p1, p2, p3, tmp0, tmp1, tmp2;
    int32_t         b0_1, b0_2, b1_1, b1_2, b1_3, b2_1, b2_2, b2_3, b2_4,
                    b2_5, b2_6;
    int32_t         b3_1, b3_2, b3_3, b3_4, b3_5, b3_6, b3_7, b3_8, b3_9;
    ptrdiff_t       pitch, back_pitch;
    const short    *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr;
    const int       num_bands = 4;

    /* all bands should have the same pitch */
    pitch = plane->bands[0].pitch;

    /* pixels at the position "y-1" will be set to pixels at the "y" for the 1st iteration */
    back_pitch = 0;

    /* get pointers to the wavelet bands */
    b0_ptr = plane->bands[0].buf;
    b1_ptr = plane->bands[1].buf;
    b2_ptr = plane->bands[2].buf;
    b3_ptr = plane->bands[3].buf;

    for (y = 0; y < plane->height; y += 2) {

        if (y+2 >= plane->height)
            pitch= 0;
        /* load storage variables with values */
        if (num_bands > 0) {
            b0_1 = b0_ptr[0];
            b0_2 = b0_ptr[pitch];
        }

        if (num_bands > 1) {
            b1_1 = b1_ptr[back_pitch];
            b1_2 = b1_ptr[0];
            b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch];
        }

        if (num_bands > 2) {
            b2_2 = b2_ptr[0];     // b2[x,  y ]
            b2_3 = b2_2;          // b2[x+1,y ] = b2[x,y]
            b2_5 = b2_ptr[pitch]; // b2[x  ,y+1]
            b2_6 = b2_5;          // b2[x+1,y+1] = b2[x,y+1]
        }

        if (num_bands > 3) {
            b3_2 = b3_ptr[back_pitch]; // b3[x  ,y-1]
            b3_3 = b3_2;               // b3[x+1,y-1] = b3[x  ,y-1]
            b3_5 = b3_ptr[0];          // b3[x  ,y  ]
            b3_6 = b3_5;               // b3[x+1,y  ] = b3[x  ,y  ]
            b3_8 = b3_2 - b3_5*6 + b3_ptr[pitch];
            b3_9 = b3_8;
        }

        for (x = 0, indx = 0; x < plane->width; x+=2, indx++) {
            if (x+2 >= plane->width) {
                b0_ptr --;
                b1_ptr --;
                b2_ptr --;
                b3_ptr --;
            }

            /* some values calculated in the previous iterations can */
            /* be reused in the next ones, so do appropriate copying */
            b2_1 = b2_2; // b2[x-1,y  ] = b2[x,  y  ]
            b2_2 = b2_3; // b2[x  ,y  ] = b2[x+1,y  ]
            b2_4 = b2_5; // b2[x-1,y+1] = b2[x  ,y+1]
            b2_5 = b2_6; // b2[x  ,y+1] = b2[x+1,y+1]
            b3_1 = b3_2; // b3[x-1,y-1] = b3[x  ,y-1]
            b3_2 = b3_3; // b3[x  ,y-1] = b3[x+1,y-1]
            b3_4 = b3_5; // b3[x-1,y  ] = b3[x  ,y  ]
            b3_5 = b3_6; // b3[x  ,y  ] = b3[x+1,y  ]
            b3_7 = b3_8; // vert_HPF(x-1)
            b3_8 = b3_9; // vert_HPF(x  )

            p0 = p1 = p2 = p3 = 0;

            /* process the LL-band by applying LPF both vertically and horizontally */
            if (num_bands > 0) {
                tmp0 = b0_1;
                tmp2 = b0_2;
                b0_1 = b0_ptr[indx+1];
                b0_2 = b0_ptr[pitch+indx+1];
                tmp1 = tmp0 + b0_1;

                p0 =  tmp0 * 16;
                p1 =  tmp1 *  8;
                p2 = (tmp0 + tmp2) * 8;
                p3 = (tmp1 + tmp2 + b0_2) * 4;
            }

            /* process the HL-band by applying HPF vertically and LPF horizontally */
            if (num_bands > 1) {
                tmp0 = b1_2;
                tmp1 = b1_1;
                b1_2 = b1_ptr[indx+1];
                b1_1 = b1_ptr[back_pitch+indx+1];

                tmp2 = tmp1 - tmp0*6 + b1_3;
                b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch+indx+1];

                p0 += (tmp0 + tmp1) * 8;
                p1 += (tmp0 + tmp1 + b1_1 + b1_2) * 4;
                p2 +=  tmp2 * 4;
                p3 += (tmp2 + b1_3) * 2;
            }

            /* process the LH-band by applying LPF vertically and HPF horizontally */
            if (num_bands > 2) {
                b2_3 = b2_ptr[indx+1];
                b2_6 = b2_ptr[pitch+indx+1];

                tmp0 = b2_1 + b2_2;
                tmp1 = b2_1 - b2_2*6 + b2_3;

                p0 += tmp0 * 8;
                p1 += tmp1 * 4;
                p2 += (tmp0 + b2_4 + b2_5) * 4;
                p3 += (tmp1 + b2_4 - b2_5*6 + b2_6) * 2;
            }

            /* process the HH-band by applying HPF both vertically and horizontally */
            if (num_bands > 3) {
                b3_6 = b3_ptr[indx+1];            // b3[x+1,y  ]
                b3_3 = b3_ptr[back_pitch+indx+1]; // b3[x+1,y-1]

                tmp0 = b3_1 + b3_4;
                tmp1 = b3_2 + b3_5;
                tmp2 = b3_3 + b3_6;

                b3_9 = b3_3 - b3_6*6 + b3_ptr[pitch+indx+1];

                p0 += (tmp0 + tmp1) * 4;
                p1 += (tmp0 - tmp1*6 + tmp2) * 2;
                p2 += (b3_7 + b3_8) * 2;
                p3 +=  b3_7 - b3_8*6 + b3_9;
            }

            /* output four pixels */
            dst[x]             = av_clip_uint8((p0 >> 6) + 128);
            dst[x+1]           = av_clip_uint8((p1 >> 6) + 128);
            dst[dst_pitch+x]   = av_clip_uint8((p2 >> 6) + 128);
            dst[dst_pitch+x+1] = av_clip_uint8((p3 >> 6) + 128);
        }// for x

        dst += dst_pitch << 1;

        back_pitch = -pitch;

        b0_ptr += pitch + 1;
        b1_ptr += pitch + 1;
        b2_ptr += pitch + 1;
        b3_ptr += pitch + 1;
    }
}

void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst,
                           const ptrdiff_t dst_pitch)
{
    int           x, y, indx, b0, b1, b2, b3, p0, p1, p2, p3;
    const short  *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr;
    ptrdiff_t     pitch;

    /* all bands should have the same pitch */
    pitch = plane->bands[0].pitch;

    /* get pointers to the wavelet bands */
    b0_ptr = plane->bands[0].buf;
    b1_ptr = plane->bands[1].buf;
    b2_ptr = plane->bands[2].buf;
    b3_ptr = plane->bands[3].buf;

    for (y = 0; y < plane->height; y += 2) {
        for (x = 0, indx = 0; x < plane->width; x += 2, indx++) {
            /* load coefficients */
            b0 = b0_ptr[indx]; //should be: b0 = (num_bands > 0) ? b0_ptr[indx] : 0;
            b1 = b1_ptr[indx]; //should be: b1 = (num_bands > 1) ? b1_ptr[indx] : 0;
            b2 = b2_ptr[indx]; //should be: b2 = (num_bands > 2) ? b2_ptr[indx] : 0;
            b3 = b3_ptr[indx]; //should be: b3 = (num_bands > 3) ? b3_ptr[indx] : 0;

            /* haar wavelet recomposition */
            p0 = (b0 + b1 + b2 + b3 + 2) >> 2;
            p1 = (b0 + b1 - b2 - b3 + 2) >> 2;
            p2 = (b0 - b1 + b2 - b3 + 2) >> 2;
            p3 = (b0 - b1 - b2 + b3 + 2) >> 2;

            /* bias, convert and output four pixels */
            dst[x]                 = av_clip_uint8(p0 + 128);
            dst[x + 1]             = av_clip_uint8(p1 + 128);
            dst[dst_pitch + x]     = av_clip_uint8(p2 + 128);
            dst[dst_pitch + x + 1] = av_clip_uint8(p3 + 128);
        }// for x

        dst += dst_pitch << 1;

        b0_ptr += pitch;
        b1_ptr += pitch;
        b2_ptr += pitch;
        b3_ptr += pitch;
    }// for y
}

/** butterfly operation for the inverse Haar transform */
#define IVI_HAAR_BFLY(s1, s2, o1, o2, t) \
    t  = ((s1) - (s2)) >> 1;\
    o1 = ((s1) + (s2)) >> 1;\
    o2 = (t);\

/** inverse 8-point Haar transform */
#define INV_HAAR8(s1, s5, s3, s7, s2, s4, s6, s8,\
                  d1, d2, d3, d4, d5, d6, d7, d8,\
                  t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
    t1 = (s1) * 2; t5 = (s5) * 2;\
    IVI_HAAR_BFLY(t1, t5, t1, t5, t0); IVI_HAAR_BFLY(t1, s3, t1, t3, t0);\
    IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\
    IVI_HAAR_BFLY(t3, s4, t3, t4, t0); IVI_HAAR_BFLY(t5, s6, t5, t6, t0);\
    IVI_HAAR_BFLY(t7, s8, t7, t8, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);\
    d5 = COMPENSATE(t5);\
    d6 = COMPENSATE(t6);\
    d7 = COMPENSATE(t7);\
    d8 = COMPENSATE(t8); }

/** inverse 4-point Haar transform */
#define INV_HAAR4(s1, s3, s5, s7, d1, d2, d3, d4, t0, t1, t2, t3, t4) {\
    IVI_HAAR_BFLY(s1, s3, t0, t1, t4);\
    IVI_HAAR_BFLY(t0, s5, t2, t3, t4);\
    d1 = COMPENSATE(t2);\
    d2 = COMPENSATE(t3);\
    IVI_HAAR_BFLY(t1, s7, t2, t3, t4);\
    d3 = COMPENSATE(t2);\
    d4 = COMPENSATE(t3); }

void ff_ivi_inverse_haar_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                             const uint8_t *flags)
{
    int     i, shift, sp1, sp2, sp3, sp4;
    const int32_t *src;
    int32_t *dst;
    int     tmp[64];
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    /* apply the InvHaar8 to all columns */
#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            /* pre-scaling */
            shift = !(i & 4);
            sp1 = src[ 0] * (1 << shift);
            sp2 = src[ 8] * (1 << shift);
            sp3 = src[16] * (1 << shift);
            sp4 = src[24] * (1 << shift);
            INV_HAAR8(    sp1,     sp2,     sp3,     sp4,
                      src[32], src[40], src[48], src[56],
                      dst[ 0], dst[ 8], dst[16], dst[24],
                      dst[32], dst[40], dst[48], dst[56],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else
            dst[ 0] = dst[ 8] = dst[16] = dst[24] =
            dst[32] = dst[40] = dst[48] = dst[56] = 0;

        src++;
        dst++;
    }
#undef  COMPENSATE

    /* apply the InvHaar8 to all rows */
#define COMPENSATE(x) (x)
    src = tmp;
    for (i = 0; i < 8; i++) {
        if (   !src[0] && !src[1] && !src[2] && !src[3]
            && !src[4] && !src[5] && !src[6] && !src[7]) {
            memset(out, 0, 8 * sizeof(out[0]));
        } else {
            INV_HAAR8(src[0], src[1], src[2], src[3],
                      src[4], src[5], src[6], src[7],
                      out[0], out[1], out[2], out[3],
                      out[4], out[5], out[6], out[7],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        src += 8;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_row_haar8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    /* apply the InvHaar8 to all rows */
#define COMPENSATE(x) (x)
    for (i = 0; i < 8; i++) {
        if (   !in[0] && !in[1] && !in[2] && !in[3]
            && !in[4] && !in[5] && !in[6] && !in[7]) {
            memset(out, 0, 8 * sizeof(out[0]));
        } else {
            INV_HAAR8(in[0], in[1], in[2], in[3],
                      in[4], in[5], in[6], in[7],
                      out[0], out[1], out[2], out[3],
                      out[4], out[5], out[6], out[7],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        in  += 8;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_col_haar8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    /* apply the InvHaar8 to all columns */
#define COMPENSATE(x) (x)
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            INV_HAAR8(in[ 0], in[ 8], in[16], in[24],
                      in[32], in[40], in[48], in[56],
                      out[0 * pitch], out[1 * pitch],
                      out[2 * pitch], out[3 * pitch],
                      out[4 * pitch], out[5 * pitch],
                      out[6 * pitch], out[7 * pitch],
                      t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else
            out[0 * pitch] = out[1 * pitch] =
            out[2 * pitch] = out[3 * pitch] =
            out[4 * pitch] = out[5 * pitch] =
            out[6 * pitch] = out[7 * pitch] = 0;

        in++;
        out++;
    }
#undef  COMPENSATE
}

void ff_ivi_inverse_haar_4x4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                             const uint8_t *flags)
{
    int     i, shift, sp1, sp2;
    const int32_t *src;
    int32_t *dst;
    int     tmp[16];
    int     t0, t1, t2, t3, t4;

    /* apply the InvHaar4 to all columns */
#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            /* pre-scaling */
            shift = !(i & 2);
            sp1 = src[0] * (1 << shift);
            sp2 = src[4] * (1 << shift);
            INV_HAAR4(   sp1,    sp2, src[8], src[12],
                      dst[0], dst[4], dst[8], dst[12],
                      t0, t1, t2, t3, t4);
        } else
            dst[0] = dst[4] = dst[8] = dst[12] = 0;

        src++;
        dst++;
    }
#undef  COMPENSATE

    /* apply the InvHaar8 to all rows */
#define COMPENSATE(x) (x)
    src = tmp;
    for (i = 0; i < 4; i++) {
        if (!src[0] && !src[1] && !src[2] && !src[3]) {
            memset(out, 0, 4 * sizeof(out[0]));
        } else {
            INV_HAAR4(src[0], src[1], src[2], src[3],
                      out[0], out[1], out[2], out[3],
                      t0, t1, t2, t3, t4);
        }
        src += 4;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_row_haar4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4;

    /* apply the InvHaar4 to all rows */
#define COMPENSATE(x) (x)
    for (i = 0; i < 4; i++) {
        if (!in[0] && !in[1] && !in[2] && !in[3]) {
            memset(out, 0, 4 * sizeof(out[0]));
        } else {
            INV_HAAR4(in[0], in[1], in[2], in[3],
                      out[0], out[1], out[2], out[3],
                      t0, t1, t2, t3, t4);
        }
        in  += 4;
        out += pitch;
    }
#undef  COMPENSATE
}

void ff_ivi_col_haar4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                      const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4;

    /* apply the InvHaar8 to all columns */
#define COMPENSATE(x) (x)
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            INV_HAAR4(in[0], in[4], in[8], in[12],
                      out[0 * pitch], out[1 * pitch],
                      out[2 * pitch], out[3 * pitch],
                      t0, t1, t2, t3, t4);
        } else
            out[0 * pitch] = out[1 * pitch] =
            out[2 * pitch] = out[3 * pitch] = 0;

        in++;
        out++;
    }
#undef  COMPENSATE
}

void ff_ivi_dc_haar_2d(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                       int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 0) >> 3;

    for (y = 0; y < blk_size; out += pitch, y++) {
        for (x = 0; x < blk_size; x++)
            out[x] = dc_coeff;
    }
}

/** butterfly operation for the inverse slant transform */
#define IVI_SLANT_BFLY(s1, s2, o1, o2, t) \
    t  = (s1) - (s2);\
    o1 = (s1) + (s2);\
    o2 = (t);\

/** This is a reflection a,b = 1/2, 5/4 for the inverse slant transform */
#define IVI_IREFLECT(s1, s2, o1, o2, t) \
    t  = (((s1) + (s2)*2 + 2) >> 2) + (s1);\
    o2 = (((s1)*2 - (s2) + 2) >> 2) - (s2);\
    o1 = (t);\

/** This is a reflection a,b = 1/2, 7/8 for the inverse slant transform */
#define IVI_SLANT_PART4(s1, s2, o1, o2, t) \
    t  = (s2) + (((s1)*4  - (s2) + 4) >> 3);\
    o2 = (s1) + ((-(s1) - (s2)*4 + 4) >> 3);\
    o1 = (t);\

/** inverse slant8 transform */
#define IVI_INV_SLANT8(s1, s4, s8, s5, s2, s6, s3, s7,\
                       d1, d2, d3, d4, d5, d6, d7, d8,\
                       t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
    IVI_SLANT_PART4(s4, s5, t4, t5, t0);\
\
    IVI_SLANT_BFLY(s1, t5, t1, t5, t0); IVI_SLANT_BFLY(s2, s6, t2, t6, t0);\
    IVI_SLANT_BFLY(s7, s3, t7, t3, t0); IVI_SLANT_BFLY(t4, s8, t4, t8, t0);\
\
    IVI_SLANT_BFLY(t1, t2, t1, t2, t0); IVI_IREFLECT  (t4, t3, t4, t3, t0);\
    IVI_SLANT_BFLY(t5, t6, t5, t6, t0); IVI_IREFLECT  (t8, t7, t8, t7, t0);\
    IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\
    IVI_SLANT_BFLY(t5, t8, t5, t8, t0); IVI_SLANT_BFLY(t6, t7, t6, t7, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);\
    d5 = COMPENSATE(t5);\
    d6 = COMPENSATE(t6);\
    d7 = COMPENSATE(t7);\
    d8 = COMPENSATE(t8);}

/** inverse slant4 transform */
#define IVI_INV_SLANT4(s1, s4, s2, s3, d1, d2, d3, d4, t0, t1, t2, t3, t4) {\
    IVI_SLANT_BFLY(s1, s2, t1, t2, t0); IVI_IREFLECT  (s4, s3, t4, t3, t0);\
\
    IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);}

void ff_ivi_inverse_slant_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                              const uint8_t *flags)
{
    int     i;
    const int32_t *src;
    int32_t *dst;
    int     tmp[64];
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            IVI_INV_SLANT8(src[0], src[8], src[16], src[24],
                           src[32], src[40], src[48], src[56],
                           dst[0], dst[8], dst[16], dst[24],
                           dst[32], dst[40], dst[48], dst[56],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else
            dst[0] = dst[8] = dst[16] = dst[24] =
            dst[32] = dst[40] = dst[48] = dst[56] = 0;

        src++;
        dst++;
    }
#undef COMPENSATE

#define COMPENSATE(x) (((x) + 1)>>1)
    src = tmp;
    for (i = 0; i < 8; i++) {
        if (!src[0] && !src[1] && !src[2] && !src[3] &&
            !src[4] && !src[5] && !src[6] && !src[7]) {
            memset(out, 0, 8*sizeof(out[0]));
        } else {
            IVI_INV_SLANT8(src[0], src[1], src[2], src[3],
                           src[4], src[5], src[6], src[7],
                           out[0], out[1], out[2], out[3],
                           out[4], out[5], out[6], out[7],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        src += 8;
        out += pitch;
    }
#undef COMPENSATE
}

void ff_ivi_inverse_slant_4x4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                              const uint8_t *flags)
{
    int     i;
    const int32_t *src;
    int32_t *dst;
    int     tmp[16];
    int     t0, t1, t2, t3, t4;

#define COMPENSATE(x) (x)
    src = in;
    dst = tmp;
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            IVI_INV_SLANT4(src[0], src[4], src[8], src[12],
                           dst[0], dst[4], dst[8], dst[12],
                           t0, t1, t2, t3, t4);
        } else
            dst[0] = dst[4] = dst[8] = dst[12] = 0;

        src++;
        dst++;
    }
#undef COMPENSATE

#define COMPENSATE(x) (((x) + 1)>>1)
    src = tmp;
    for (i = 0; i < 4; i++) {
        if (!src[0] && !src[1] && !src[2] && !src[3]) {
            out[0] = out[1] = out[2] = out[3] = 0;
        } else {
            IVI_INV_SLANT4(src[0], src[1], src[2], src[3],
                           out[0], out[1], out[2], out[3],
                           t0, t1, t2, t3, t4);
        }
        src += 4;
        out += pitch;
    }
#undef COMPENSATE
}

void ff_ivi_dc_slant_2d(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                        int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 1) >> 1;

    for (y = 0; y < blk_size; out += pitch, y++) {
        for (x = 0; x < blk_size; x++)
            out[x] = dc_coeff;
    }
}

void ff_ivi_row_slant8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                       const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 8; i++) {
        if (!in[0] && !in[1] && !in[2] && !in[3] &&
            !in[4] && !in[5] && !in[6] && !in[7]) {
            memset(out, 0, 8*sizeof(out[0]));
        } else {
            IVI_INV_SLANT8( in[0],  in[1],  in[2],  in[3],
                            in[4],  in[5],  in[6],  in[7],
                           out[0], out[1], out[2], out[3],
                           out[4], out[5], out[6], out[7],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        }
        in += 8;
        out += pitch;
    }
#undef COMPENSATE
}

void ff_ivi_dc_row_slant(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                         int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 1) >> 1;

    for (x = 0; x < blk_size; x++)
        out[x] = dc_coeff;

    out += pitch;

    for (y = 1; y < blk_size; out += pitch, y++) {
        for (x = 0; x < blk_size; x++)
            out[x] = 0;
    }
}

void ff_ivi_col_slant8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                       const uint8_t *flags)
{
    int     i, row2, row4, row8;
    int     t0, t1, t2, t3, t4, t5, t6, t7, t8;

    row2 = pitch << 1;
    row4 = pitch << 2;
    row8 = pitch << 3;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 8; i++) {
        if (flags[i]) {
            IVI_INV_SLANT8(in[0], in[8], in[16], in[24],
                           in[32], in[40], in[48], in[56],
                           out[0], out[pitch], out[row2], out[row2 + pitch],
                           out[row4], out[row4 + pitch], out[row4 + row2],
                           out[row8 - pitch],
                           t0, t1, t2, t3, t4, t5, t6, t7, t8);
        } else {
            out[0] = out[pitch] = out[row2] = out[row2 + pitch] =
                out[row4] = out[row4 + pitch] = out[row4 + row2] =
                out[row8 - pitch] = 0;
        }

        in++;
        out++;
    }
#undef COMPENSATE
}

void ff_ivi_dc_col_slant(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                         int blk_size)
{
    int     x, y;
    int16_t dc_coeff;

    dc_coeff = (*in + 1) >> 1;

    for (y = 0; y < blk_size; out += pitch, y++) {
        out[0] = dc_coeff;

        for (x = 1; x < blk_size; x++)
            out[x] = 0;
    }
}

void ff_ivi_row_slant4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                       const uint8_t *flags)
{
    int     i;
    int     t0, t1, t2, t3, t4;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 4; i++) {
        if (!in[0] && !in[1] && !in[2] && !in[3]) {
            memset(out, 0, 4*sizeof(out[0]));
        } else {
            IVI_INV_SLANT4( in[0],  in[1],  in[2],  in[3],
                           out[0], out[1], out[2], out[3],
                           t0, t1, t2, t3, t4);
        }
        in  += 4;
        out += pitch;
    }
#undef COMPENSATE
}

void ff_ivi_col_slant4(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                       const uint8_t *flags)
{
    int     i, row2;
    int     t0, t1, t2, t3, t4;

    row2 = pitch << 1;

#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            IVI_INV_SLANT4(in[0], in[4], in[8], in[12],
                           out[0], out[pitch], out[row2], out[row2 + pitch],
                           t0, t1, t2, t3, t4);
        } else {
            out[0] = out[pitch] = out[row2] = out[row2 + pitch] = 0;
        }

        in++;
        out++;
    }
#undef COMPENSATE
}

void ff_ivi_put_pixels_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                           const uint8_t *flags)
{
    int     x, y;

    for (y = 0; y < 8; out += pitch, in += 8, y++)
        for (x = 0; x < 8; x++)
            out[x] = in[x];
}

void ff_ivi_put_dc_pixel_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch,
                             int blk_size)
{
    int     y;

    out[0] = in[0];
    memset(out + 1, 0, 7*sizeof(out[0]));
    out += pitch;

    for (y = 1; y < 8; out += pitch, y++)
        memset(out, 0, 8*sizeof(out[0]));
}

#define IVI_MC_TEMPLATE(size, suffix, OP) \
static void ivi_mc_ ## size ##x## size ## suffix(int16_t *buf, \
                                                 ptrdiff_t dpitch, \
                                                 const int16_t *ref_buf, \
                                                 ptrdiff_t pitch, int mc_type) \
{ \
    int     i, j; \
    const int16_t *wptr; \
\
    switch (mc_type) { \
    case 0: /* fullpel (no interpolation) */ \
        for (i = 0; i < size; i++, buf += dpitch, ref_buf += pitch) { \
            for (j = 0; j < size; j++) {\
                OP(buf[j], ref_buf[j]); \
            } \
        } \
        break; \
    case 1: /* horizontal halfpel interpolation */ \
        for (i = 0; i < size; i++, buf += dpitch, ref_buf += pitch) \
            for (j = 0; j < size; j++) \
                OP(buf[j], (ref_buf[j] + ref_buf[j+1]) >> 1); \
        break; \
    case 2: /* vertical halfpel interpolation */ \
        wptr = ref_buf + pitch; \
        for (i = 0; i < size; i++, buf += dpitch, wptr += pitch, ref_buf += pitch) \
            for (j = 0; j < size; j++) \
                OP(buf[j], (ref_buf[j] + wptr[j]) >> 1); \
        break; \
    case 3: /* vertical and horizontal halfpel interpolation */ \
        wptr = ref_buf + pitch; \
        for (i = 0; i < size; i++, buf += dpitch, wptr += pitch, ref_buf += pitch) \
            for (j = 0; j < size; j++) \
                OP(buf[j], (ref_buf[j] + ref_buf[j+1] + wptr[j] + wptr[j+1]) >> 2); \
        break; \
    } \
} \
\
void ff_ivi_mc_ ## size ##x## size ## suffix(int16_t *buf, const int16_t *ref_buf, \
                                             ptrdiff_t pitch, int mc_type) \
{ \
    ivi_mc_ ## size ##x## size ## suffix(buf, pitch, ref_buf, pitch, mc_type); \
} \

#define IVI_MC_AVG_TEMPLATE(size, suffix, OP) \
void ff_ivi_mc_avg_ ## size ##x## size ## suffix(int16_t *buf, \
                                                 const int16_t *ref_buf, \
                                                 const int16_t *ref_buf2, \
                                                 ptrdiff_t pitch, \
                                                 int mc_type, int mc_type2) \
{ \
    int16_t tmp[size * size]; \
    int i, j; \
\
    ivi_mc_ ## size ##x## size ## _no_delta(tmp, size, ref_buf, pitch, mc_type); \
    ivi_mc_ ## size ##x## size ## _delta(tmp, size, ref_buf2, pitch, mc_type2); \
    for (i = 0; i < size; i++, buf += pitch) { \
        for (j = 0; j < size; j++) {\
            OP(buf[j], tmp[i * size + j] >> 1); \
        } \
    } \
} \

#define OP_PUT(a, b)  (a) = (b)
#define OP_ADD(a, b)  (a) += (b)

IVI_MC_TEMPLATE(8, _no_delta, OP_PUT)
IVI_MC_TEMPLATE(8, _delta,    OP_ADD)
IVI_MC_TEMPLATE(4, _no_delta, OP_PUT)
IVI_MC_TEMPLATE(4, _delta,    OP_ADD)
IVI_MC_AVG_TEMPLATE(8, _no_delta, OP_PUT)
IVI_MC_AVG_TEMPLATE(8, _delta,    OP_ADD)
IVI_MC_AVG_TEMPLATE(4, _no_delta, OP_PUT)
IVI_MC_AVG_TEMPLATE(4, _delta,    OP_ADD)
```
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 70,909
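
The IVI_MC_TEMPLATE macro in the record above expands into four interpolation variants selected by mc_type: fullpel copy, horizontal halfpel, vertical halfpel, and 2-D halfpel. A minimal standalone sketch of that dispatch, using illustrative buffer sizes and sample values rather than FFmpeg's real types:

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch of the mc_type dispatch generated by IVI_MC_TEMPLATE:
 * 0 = fullpel copy, 1 = horizontal halfpel, 2 = vertical halfpel,
 * 3 = 2-D halfpel. The reference buffer must provide one extra row and
 * column for the halfpel taps. */
static void mc_block(int16_t *dst, const int16_t *ref, int pitch,
                     int size, int mc_type)
{
    for (int i = 0; i < size; i++, dst += pitch, ref += pitch) {
        for (int j = 0; j < size; j++) {
            switch (mc_type) {
            case 0: dst[j] = ref[j];                                     break;
            case 1: dst[j] = (ref[j] + ref[j + 1]) >> 1;                 break;
            case 2: dst[j] = (ref[j] + ref[j + pitch]) >> 1;             break;
            case 3: dst[j] = (ref[j] + ref[j + 1] +
                              ref[j + pitch] + ref[j + pitch + 1]) >> 2; break;
            }
        }
    }
}

int main(void)
{
    int16_t ref[3 * 3] = { 0, 4,  8,
                           4, 8, 12,
                           8, 12, 16 };
    int16_t dst[3 * 3] = { 0 };

    mc_block(dst, ref, 3, 2, 3);  /* 2x2 block, 2-D halfpel: prints 4 8 / 8 12 */
    printf("%d %d / %d %d\n", dst[0], dst[1], dst[3], dst[4]);
    return 0;
}

The shifts truncate rather than round, matching the >> 1 and >> 2 averaging in the macro above.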

file_name: 34,056
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 34,056
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
code:
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef NGPhysicalOffset_h
#define NGPhysicalOffset_h

#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/platform/layout_unit.h"
#include "third_party/blink/renderer/platform/text/text_direction.h"
#include "third_party/blink/renderer/platform/text/writing_mode.h"

namespace blink {

class LayoutPoint;
class LayoutSize;
struct NGLogicalOffset;
struct NGPhysicalSize;

// NGPhysicalOffset is the position of a rect (typically a fragment) relative
// to its parent rect in the physical coordinate system.
struct CORE_EXPORT NGPhysicalOffset {
  NGPhysicalOffset() = default;
  NGPhysicalOffset(LayoutUnit left, LayoutUnit top) : left(left), top(top) {}

  LayoutUnit left;
  LayoutUnit top;

  // Converts a physical offset to a logical offset. See:
  // https://drafts.csswg.org/css-writing-modes-3/#logical-to-physical
  // @param outer_size the size of the rect (typically a fragment).
  // @param inner_size the size of the inner rect (typically a child fragment).
  NGLogicalOffset ConvertToLogical(WritingMode,
                                   TextDirection,
                                   NGPhysicalSize outer_size,
                                   NGPhysicalSize inner_size) const;

  NGPhysicalOffset operator+(const NGPhysicalOffset& other) const;
  NGPhysicalOffset& operator+=(const NGPhysicalOffset& other);
  NGPhysicalOffset operator-(const NGPhysicalOffset& other) const;
  NGPhysicalOffset& operator-=(const NGPhysicalOffset& other);
  bool operator==(const NGPhysicalOffset& other) const;

  // Conversions from/to existing code. New code prefers type safety for
  // logical/physical distinctions.
  explicit NGPhysicalOffset(const LayoutPoint& point);
  explicit NGPhysicalOffset(const LayoutSize& size);

  // Conversions from/to existing code. New code prefers type safety for
  // logical/physical distinctions.
  LayoutPoint ToLayoutPoint() const;
  LayoutSize ToLayoutSize() const;

  String ToString() const;
};

CORE_EXPORT std::ostream& operator<<(std::ostream&, const NGPhysicalOffset&);

}  // namespace blink

#endif  // NGPhysicalOffset_h
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 30,919
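
ConvertToLogical is only declared in the header above. As a rough sketch of the logical-to-physical mapping it cites, restricted to the horizontal-tb writing mode and with plain ints standing in for LayoutUnit (both simplifications are mine, not Blink's implementation):

#include <stdio.h>

/* Simplified sketch of converting a physical (left, top) offset to a
 * logical (inline, block) offset in horizontal-tb writing mode. In RTL the
 * inline axis runs right-to-left, so the inline offset is measured from the
 * right edge of the outer box to the right edge of the inner box, which is
 * why both the outer and the inner size are needed. */
struct offset { int inline_offset, block_offset; };

static struct offset to_logical(int left, int top,
                                int outer_w, int inner_w, int is_rtl)
{
    struct offset o;
    o.inline_offset = is_rtl ? outer_w - left - inner_w : left;
    o.block_offset = top;
    return o;
}

int main(void)
{
    /* 100-wide container, 20-wide child placed at physical left=10 */
    struct offset ltr = to_logical(10, 5, 100, 20, 0);
    struct offset rtl = to_logical(10, 5, 100, 20, 1);
    printf("ltr inline=%d rtl inline=%d\n",
           ltr.inline_offset, rtl.inline_offset);  /* 10 and 70 */
    return 0;
}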

file_name: 24,124
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 189,119
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
code:
/* * NVIDIA Tegra DRM GEM helper functions * * Copyright (C) 2012 Sascha Hauer, Pengutronix * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved. * * Based on the GEM/CMA helpers * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/dma-buf.h> #include <linux/iommu.h> #include <drm/tegra_drm.h> #include "drm.h" #include "gem.h" static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo) { return container_of(bo, struct tegra_bo, base); } static void tegra_bo_put(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); drm_gem_object_unreference_unlocked(&obj->gem); } static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); *sgt = obj->sgt; return obj->paddr; } static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) { } static void *tegra_bo_mmap(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); if (obj->vaddr) return obj->vaddr; else if (obj->gem.import_attach) return dma_buf_vmap(obj->gem.import_attach->dmabuf); else return vmap(obj->pages, obj->num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); } static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); if (obj->vaddr) return; else if (obj->gem.import_attach) dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr); else vunmap(addr); } static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); if (obj->vaddr) return obj->vaddr + page * PAGE_SIZE; else if (obj->gem.import_attach) return dma_buf_kmap(obj->gem.import_attach->dmabuf, page); else return vmap(obj->pages + page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); } static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page, void *addr) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); if (obj->vaddr) return; else if (obj->gem.import_attach) dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr); else vunmap(addr); } static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); drm_gem_object_reference(&obj->gem); return bo; } static const struct host1x_bo_ops tegra_bo_ops = { .get = tegra_bo_get, .put = tegra_bo_put, .pin = tegra_bo_pin, .unpin = tegra_bo_unpin, .mmap = tegra_bo_mmap, .munmap = tegra_bo_munmap, .kmap = tegra_bo_kmap, .kunmap = tegra_bo_kunmap, }; static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) { int prot = IOMMU_READ | IOMMU_WRITE; ssize_t err; if (bo->mm) return -EBUSY; bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL); if (!bo->mm) return -ENOMEM; err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size, PAGE_SIZE, 0, 0); if (err < 0) { dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n", err); goto free; } bo->paddr = bo->mm->start; err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, bo->sgt->nents, prot); if (err < 0) { dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err); goto remove; } bo->size = err; return 0; remove: drm_mm_remove_node(bo->mm); free: kfree(bo->mm); return err; } static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) { if (!bo->mm) return 0; iommu_unmap(tegra->domain, bo->paddr, bo->size); drm_mm_remove_node(bo->mm); kfree(bo->mm); return 0; } static struct 
tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, size_t size) { struct tegra_bo *bo; int err; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) return ERR_PTR(-ENOMEM); host1x_bo_init(&bo->base, &tegra_bo_ops); size = round_up(size, PAGE_SIZE); err = drm_gem_object_init(drm, &bo->gem, size); if (err < 0) goto free; err = drm_gem_create_mmap_offset(&bo->gem); if (err < 0) goto release; return bo; release: drm_gem_object_release(&bo->gem); free: kfree(bo); return ERR_PTR(err); } static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) { if (bo->pages) { drm_gem_put_pages(&bo->gem, bo->pages, true, true); sg_free_table(bo->sgt); kfree(bo->sgt); } else if (bo->vaddr) { dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); } } static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) { struct scatterlist *s; unsigned int i; bo->pages = drm_gem_get_pages(&bo->gem); if (IS_ERR(bo->pages)) return PTR_ERR(bo->pages); bo->num_pages = bo->gem.size >> PAGE_SHIFT; bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); if (IS_ERR(bo->sgt)) goto put_pages; /* * Fake up the SG table so that dma_sync_sg_for_device() can be used * to flush the pages associated with it. * * TODO: Replace this by drm_clflash_sg() once it can be implemented * without relying on symbols that are not exported. */ for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i) sg_dma_address(s) = sg_phys(s); dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, DMA_TO_DEVICE); return 0; put_pages: drm_gem_put_pages(&bo->gem, bo->pages, false, false); return PTR_ERR(bo->sgt); } static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) { struct tegra_drm *tegra = drm->dev_private; int err; if (tegra->domain) { err = tegra_bo_get_pages(drm, bo); if (err < 0) return err; err = tegra_bo_iommu_map(tegra, bo); if (err < 0) { tegra_bo_free(drm, bo); return err; } } else { size_t size = bo->gem.size; bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr, GFP_KERNEL | __GFP_NOWARN); if (!bo->vaddr) { dev_err(drm->dev, "failed to allocate buffer of size %zu\n", size); return -ENOMEM; } } return 0; } struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size, unsigned long flags) { struct tegra_bo *bo; int err; bo = tegra_bo_alloc_object(drm, size); if (IS_ERR(bo)) return bo; err = tegra_bo_alloc(drm, bo); if (err < 0) goto release; if (flags & DRM_TEGRA_GEM_CREATE_TILED) bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) bo->flags |= TEGRA_BO_BOTTOM_UP; return bo; release: drm_gem_object_release(&bo->gem); kfree(bo); return ERR_PTR(err); } struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, struct drm_device *drm, size_t size, unsigned long flags, u32 *handle) { struct tegra_bo *bo; int err; bo = tegra_bo_create(drm, size, flags); if (IS_ERR(bo)) return bo; err = drm_gem_handle_create(file, &bo->gem, handle); if (err) { tegra_bo_free_object(&bo->gem); return ERR_PTR(err); } drm_gem_object_unreference_unlocked(&bo->gem); return bo; } static struct tegra_bo *tegra_bo_import(struct drm_device *drm, struct dma_buf *buf) { struct tegra_drm *tegra = drm->dev_private; struct dma_buf_attachment *attach; struct tegra_bo *bo; int err; bo = tegra_bo_alloc_object(drm, buf->size); if (IS_ERR(bo)) return bo; attach = dma_buf_attach(buf, drm->dev); if (IS_ERR(attach)) { err = PTR_ERR(attach); goto free; } get_dma_buf(buf); bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE); if (IS_ERR(bo->sgt)) { err = PTR_ERR(bo->sgt); goto detach; } if 
(tegra->domain) { err = tegra_bo_iommu_map(tegra, bo); if (err < 0) goto detach; } else { if (bo->sgt->nents > 1) { err = -EINVAL; goto detach; } bo->paddr = sg_dma_address(bo->sgt->sgl); } bo->gem.import_attach = attach; return bo; detach: if (!IS_ERR_OR_NULL(bo->sgt)) dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE); dma_buf_detach(buf, attach); dma_buf_put(buf); free: drm_gem_object_release(&bo->gem); kfree(bo); return ERR_PTR(err); } void tegra_bo_free_object(struct drm_gem_object *gem) { struct tegra_drm *tegra = gem->dev->dev_private; struct tegra_bo *bo = to_tegra_bo(gem); if (tegra->domain) tegra_bo_iommu_unmap(tegra, bo); if (gem->import_attach) { dma_buf_unmap_attachment(gem->import_attach, bo->sgt, DMA_TO_DEVICE); drm_prime_gem_destroy(gem, NULL); } else { tegra_bo_free(gem->dev, bo); } drm_gem_object_release(gem); kfree(bo); } int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args) { unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); struct tegra_drm *tegra = drm->dev_private; struct tegra_bo *bo; args->pitch = round_up(min_pitch, tegra->pitch_align); args->size = args->pitch * args->height; bo = tegra_bo_create_with_handle(file, drm, args->size, 0, &args->handle); if (IS_ERR(bo)) return PTR_ERR(bo); return 0; } int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, u32 handle, u64 *offset) { struct drm_gem_object *gem; struct tegra_bo *bo; gem = drm_gem_object_lookup(file, handle); if (!gem) { dev_err(drm->dev, "failed to lookup GEM object\n"); return -EINVAL; } bo = to_tegra_bo(gem); *offset = drm_vma_node_offset_addr(&bo->gem.vma_node); drm_gem_object_unreference_unlocked(gem); return 0; } static int tegra_bo_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *gem = vma->vm_private_data; struct tegra_bo *bo = to_tegra_bo(gem); struct page *page; pgoff_t offset; int err; if (!bo->pages) return VM_FAULT_SIGBUS; offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; page = bo->pages[offset]; err = vm_insert_page(vma, vmf->address, page); switch (err) { case -EAGAIN: case 0: case -ERESTARTSYS: case -EINTR: case -EBUSY: return VM_FAULT_NOPAGE; case -ENOMEM: return VM_FAULT_OOM; } return VM_FAULT_SIGBUS; } const struct vm_operations_struct tegra_bo_vm_ops = { .fault = tegra_bo_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, }; int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) { struct drm_gem_object *gem; struct tegra_bo *bo; int ret; ret = drm_gem_mmap(file, vma); if (ret) return ret; gem = vma->vm_private_data; bo = to_tegra_bo(gem); if (!bo->pages) { unsigned long vm_pgoff = vma->vm_pgoff; vma->vm_flags &= ~VM_PFNMAP; vma->vm_pgoff = 0; ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr, gem->size); if (ret) { drm_gem_vm_close(vma); return ret; } vma->vm_pgoff = vm_pgoff; } else { pgprot_t prot = vm_get_page_prot(vma->vm_flags); vma->vm_flags |= VM_MIXEDMAP; vma->vm_flags &= ~VM_PFNMAP; vma->vm_page_prot = pgprot_writecombine(prot); } return 0; } static struct sg_table * tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct drm_gem_object *gem = attach->dmabuf->priv; struct tegra_bo *bo = to_tegra_bo(gem); struct sg_table *sgt; sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return NULL; if (bo->pages) { struct scatterlist *sg; unsigned int i; if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) goto free; for_each_sg(sgt->sgl, sg, bo->num_pages, i) 
sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) goto free; } else { if (sg_alloc_table(sgt, 1, GFP_KERNEL)) goto free; sg_dma_address(sgt->sgl) = bo->paddr; sg_dma_len(sgt->sgl) = gem->size; } return sgt; free: sg_free_table(sgt); kfree(sgt); return NULL; } static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { struct drm_gem_object *gem = attach->dmabuf->priv; struct tegra_bo *bo = to_tegra_bo(gem); if (bo->pages) dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); sg_free_table(sgt); kfree(sgt); } static void tegra_gem_prime_release(struct dma_buf *buf) { drm_gem_dmabuf_release(buf); } static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf, unsigned long page) { return NULL; } static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf, unsigned long page, void *addr) { } static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page) { return NULL; } static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page, void *addr) { } static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) { return -EINVAL; } static void *tegra_gem_prime_vmap(struct dma_buf *buf) { struct drm_gem_object *gem = buf->priv; struct tegra_bo *bo = to_tegra_bo(gem); return bo->vaddr; } static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr) { } static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = { .map_dma_buf = tegra_gem_prime_map_dma_buf, .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf, .release = tegra_gem_prime_release, .kmap_atomic = tegra_gem_prime_kmap_atomic, .kunmap_atomic = tegra_gem_prime_kunmap_atomic, .kmap = tegra_gem_prime_kmap, .kunmap = tegra_gem_prime_kunmap, .mmap = tegra_gem_prime_mmap, .vmap = tegra_gem_prime_vmap, .vunmap = tegra_gem_prime_vunmap, }; struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, struct drm_gem_object *gem, int flags) { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); exp_info.ops = &tegra_gem_prime_dmabuf_ops; exp_info.size = gem->size; exp_info.flags = flags; exp_info.priv = gem; return drm_gem_dmabuf_export(drm, &exp_info); } struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, struct dma_buf *buf) { struct tegra_bo *bo; if (buf->ops == &tegra_gem_prime_dmabuf_ops) { struct drm_gem_object *gem = buf->priv; if (gem->dev == drm) { drm_gem_object_reference(gem); return gem; } } bo = tegra_bo_import(drm, buf); if (IS_ERR(bo)) return ERR_CAST(bo); return &bo->gem; }
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 97,466
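
tegra_bo_alloc_object and its callers in the record above return failures through ERR_PTR-encoded pointers that are later tested with IS_ERR. A userspace re-implementation of that kernel convention (the real macros live in include/linux/err.h; this sketch only mirrors their idea):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

/* Userspace re-implementation of the kernel's ERR_PTR convention: small
 * negative errno values are encoded in the top page of the address space,
 * which no valid object pointer can occupy, so one return value carries
 * either a pointer or an error code. */
#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *alloc_object(size_t size)
{
    void *p = malloc(size);

    if (!p)
        return ERR_PTR(-ENOMEM);
    return p;
}

int main(void)
{
    void *obj = alloc_object(64);

    if (IS_ERR(obj)) {
        printf("allocation failed: %ld\n", PTR_ERR(obj));
        return 1;
    }
    printf("allocation ok\n");
    free(obj);
    return 0;
}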

file_name: 41,912
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 41,912
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
code:
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stddef.h>

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

#include "base/stl_util.h"
#include "build/build_config.h"

// Include once to get the type definitions
#include "tools/ipc_fuzzer/message_lib/all_messages.h"

struct msginfo {
  const char* name;
  const char* file;
  int id;

  bool operator< (const msginfo& other) const { return id < other.id; }
};

// Redefine macros to generate table
#include "tools/ipc_fuzzer/message_lib/all_message_null_macros.h"
#undef IPC_MESSAGE_DECL
#define IPC_MESSAGE_DECL(name, ...) {#name, __FILE__, IPC_MESSAGE_ID()},

static msginfo msgtable[] = {
#include "tools/ipc_fuzzer/message_lib/all_messages.h"
};
#define MSGTABLE_SIZE (sizeof(msgtable)/sizeof(msgtable[0]))
static_assert(MSGTABLE_SIZE, "check your headers for an extra semicolon");

static bool check_msgtable() {
  bool result = true;
  int previous_class_id = 0;
  int highest_class_id = 0;
  const char* file_name = "NONE";
  const char* previous_file_name = "NONE";
  std::vector<int> exemptions;

  // Exclude test and other non-browser files from consideration. Do not
  // include message files used inside the actual chrome browser in this list.
  exemptions.push_back(TestMsgStart);
  exemptions.push_back(ShellMsgStart);
  exemptions.push_back(LayoutTestMsgStart);
  exemptions.push_back(CastCryptoMsgStart);   // Reserved for chromecast.
  exemptions.push_back(CastChannelMsgStart);  // Reserved for chromecast.
  exemptions.push_back(CastMediaMsgStart);    // Reserved for chromecast.
  exemptions.push_back(IPCTestMsgStart);
  exemptions.push_back(WorkerMsgStart);    // Now only used by tests.
  exemptions.push_back(ResourceMsgStart);  // Cleanup underway.
  exemptions.push_back(ChromeUtilityPrintingMsgStart);  // BUILDFLAGS, sigh.

#if !BUILDFLAG(ENABLE_NACL)
  exemptions.push_back(NaClMsgStart);
#endif  // !BUILDFLAG(ENABLE_NACL)

#if !BUILDFLAG(ENABLE_WEBRTC)
  exemptions.push_back(WebRtcLoggingMsgStart);
#endif

#if !defined(OS_ANDROID)
  exemptions.push_back(JavaBridgeMsgStart);
  exemptions.push_back(MediaPlayerMsgStart);
  exemptions.push_back(EncryptedMediaMsgStart);
  exemptions.push_back(GinJavaBridgeMsgStart);
  exemptions.push_back(AndroidWebViewMsgStart);
  exemptions.push_back(SyncCompositorMsgStart);
  exemptions.push_back(ExtensionWorkerMsgStart);
  exemptions.push_back(SurfaceViewManagerMsgStart);
#endif  // !defined(OS_ANDROID)

#if !defined(USE_OZONE)
  exemptions.push_back(OzoneGpuMsgStart);
#endif  // !defined(USE_OZONE)

  for (size_t i = 0; i < MSGTABLE_SIZE; ++i) {
    int class_id = IPC_MESSAGE_ID_CLASS(msgtable[i].id);
    file_name = msgtable[i].file;
    if (class_id >= LastIPCMsgStart) {
      std::cout << "Invalid LastIPCMsgStart setting\n";
      result = false;
    }
    if (class_id == previous_class_id &&
        strcmp(file_name, previous_file_name) != 0) {
      std::cerr << "enum used in multiple files: " << file_name
                << " vs " << previous_file_name << "\n";
      result = false;
    }
    while (class_id > previous_class_id + 1) {
      if (!base::ContainsValue(exemptions, previous_class_id + 1)) {
        std::cout << "Missing message file for enum "
                  << class_id - (previous_class_id + 1)
                  << " before enum used by " << file_name << "\n";
        result = false;
      }
      ++previous_class_id;
    }
    previous_class_id = class_id;
    previous_file_name = file_name;
    if (class_id > highest_class_id)
      highest_class_id = class_id;
  }

  while (LastIPCMsgStart > highest_class_id + 1) {
    if (!base::ContainsValue(exemptions, highest_class_id + 1)) {
      std::cout << "Missing message file for enum "
                << LastIPCMsgStart - (highest_class_id + 1)
                << " before enum LastIPCMsgStart\n";
      break;
    }
    ++highest_class_id;
  }

  if (!result)
    std::cout << "Please check tools/ipc_fuzzer/message_lib/all_messages.h\n";

  return result;
}

static void dump_msgtable(bool show_args, bool show_ids,
                          bool show_comma, const char *prefix) {
  bool first = true;
  for (size_t i = 0; i < MSGTABLE_SIZE; ++i) {
    if ((!prefix) || strstr(msgtable[i].name, prefix) == msgtable[i].name) {
      if (show_comma) {
        if (!first)
          std::cout << ",";
        first = false;
        std::cout << msgtable[i].id;
      } else {
        if (show_ids)
          std::cout << msgtable[i].id << " "
                    << IPC_MESSAGE_ID_CLASS(msgtable[i].id) << ","
                    << IPC_MESSAGE_ID_LINE(msgtable[i].id) << " ";
        std::cout << msgtable[i].name << "\n";
      }
    }
  }
  if (show_comma)
    std::cout << "\n";
}

int main(int argc, char **argv) {
  bool show_args = false;
  bool show_ids = false;
  bool skip_check = false;
  bool show_comma = false;
  const char* filter = NULL;

  while (--argc > 0) {
    ++argv;
    if (std::string("--args") == *argv) {
      show_args = true;
    } else if (std::string("--comma") == *argv) {
      show_comma = true;
    } else if (std::string("--filter") == *argv) {
      filter = *(++argv);
      --argc;
    } else if (std::string("--ids") == *argv) {
      show_ids = true;
    } else if (std::string("--no-check") == *argv) {
      skip_check = true;
    } else {
      std::cout << "usage: ipc_message_list [--args] [--ids] [--no-check] "
                   "[--filter prefix] [--comma]\n";
      return 1;
    }
  }

  std::sort(msgtable, msgtable + MSGTABLE_SIZE);

  if (!skip_check && check_msgtable() == false)
    return 1;

  dump_msgtable(show_args, show_ids, show_comma, filter);
  return 0;
}
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 38,775
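
check_msgtable in the record above walks the sorted message table and flags holes in the class-id numbering unless the missing id appears in the exemption list. The same gap scan reduced to plain integers (the id values here are invented for the demo):

#include <stdio.h>
#include <stdbool.h>

/* Report gaps in a sorted list of class ids, skipping exempt ids;
 * mirrors the gap scan in check_msgtable. */
static bool contains(const int *v, int n, int x)
{
    for (int i = 0; i < n; i++)
        if (v[i] == x)
            return true;
    return false;
}

int main(void)
{
    int class_ids[] = { 1, 2, 5, 6 };  /* sorted; 3 and 4 are missing */
    int exemptions[] = { 4 };
    int previous = 0;

    for (int i = 0; i < 4; i++) {
        while (class_ids[i] > previous + 1) {
            if (!contains(exemptions, 1, previous + 1))
                printf("missing class %d\n", previous + 1);
            ++previous;
        }
        previous = class_ids[i];
    }
    return 0;  /* prints "missing class 3" only; 4 is exempt */
}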

file_name: 26,514
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 191,509
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
code:
/* * Copyright (C) 2014 Traphandler * Copyright (C) 2014 Free Electrons * Copyright (C) 2014 Atmel * * Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com> * Author: Boris BREZILLON <boris.brezillon@free-electrons.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/clk.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include "atmel_hlcdc_dc.h" #define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8 static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = { { .name = "base", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x40, .id = 0, .type = ATMEL_HLCDC_BASE_LAYER, .cfgs_offset = 0x2c, .layout = { .xstride = { 2 }, .default_color = 3, .general_config = 4, }, }, }; static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = { .min_width = 0, .min_height = 0, .max_width = 1280, .max_height = 860, .max_spw = 0x3f, .max_vpw = 0x3f, .max_hpw = 0xff, .conflicting_output_formats = true, .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers), .layers = atmel_hlcdc_at91sam9n12_layers, }; static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = { { .name = "base", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x40, .id = 0, .type = ATMEL_HLCDC_BASE_LAYER, .cfgs_offset = 0x2c, .layout = { .xstride = { 2 }, .default_color = 3, .general_config = 4, .disc_pos = 5, .disc_size = 6, }, }, { .name = "overlay1", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x100, .id = 1, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x2c, .layout = { .pos = 2, .size = 3, .xstride = { 4 }, .pstride = { 5 }, .default_color = 6, .chroma_key = 7, .chroma_key_mask = 8, .general_config = 9, }, }, { .name = "high-end-overlay", .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats, .regs_offset = 0x280, .id = 2, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x4c, .layout = { .pos = 2, .size = 3, .memsize = 4, .xstride = { 5, 7 }, .pstride = { 6, 8 }, .default_color = 9, .chroma_key = 10, .chroma_key_mask = 11, .general_config = 12, .scaler_config = 13, .csc = 14, }, }, { .name = "cursor", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x340, .id = 3, .type = ATMEL_HLCDC_CURSOR_LAYER, .max_width = 128, .max_height = 128, .cfgs_offset = 0x2c, .layout = { .pos = 2, .size = 3, .xstride = { 4 }, .default_color = 6, .chroma_key = 7, .chroma_key_mask = 8, .general_config = 9, }, }, }; static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = { .min_width = 0, .min_height = 0, .max_width = 800, .max_height = 600, .max_spw = 0x3f, .max_vpw = 0x3f, .max_hpw = 0xff, .conflicting_output_formats = true, .nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers), .layers = atmel_hlcdc_at91sam9x5_layers, }; static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { { .name = "base", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x40, .id = 0, .type = ATMEL_HLCDC_BASE_LAYER, .cfgs_offset = 0x2c, .layout = { .xstride = { 2 }, 
.default_color = 3, .general_config = 4, .disc_pos = 5, .disc_size = 6, }, }, { .name = "overlay1", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x140, .id = 1, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x2c, .layout = { .pos = 2, .size = 3, .xstride = { 4 }, .pstride = { 5 }, .default_color = 6, .chroma_key = 7, .chroma_key_mask = 8, .general_config = 9, }, }, { .name = "overlay2", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x240, .id = 2, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x2c, .layout = { .pos = 2, .size = 3, .xstride = { 4 }, .pstride = { 5 }, .default_color = 6, .chroma_key = 7, .chroma_key_mask = 8, .general_config = 9, }, }, { .name = "high-end-overlay", .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats, .regs_offset = 0x340, .id = 3, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x4c, .layout = { .pos = 2, .size = 3, .memsize = 4, .xstride = { 5, 7 }, .pstride = { 6, 8 }, .default_color = 9, .chroma_key = 10, .chroma_key_mask = 11, .general_config = 12, .scaler_config = 13, .phicoeffs = { .x = 17, .y = 33, }, .csc = 14, }, }, { .name = "cursor", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x440, .id = 4, .type = ATMEL_HLCDC_CURSOR_LAYER, .max_width = 128, .max_height = 128, .cfgs_offset = 0x2c, .layout = { .pos = 2, .size = 3, .xstride = { 4 }, .pstride = { 5 }, .default_color = 6, .chroma_key = 7, .chroma_key_mask = 8, .general_config = 9, .scaler_config = 13, }, }, }; static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = { .min_width = 0, .min_height = 0, .max_width = 2048, .max_height = 2048, .max_spw = 0x3f, .max_vpw = 0x3f, .max_hpw = 0x1ff, .conflicting_output_formats = true, .nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d3_layers), .layers = atmel_hlcdc_sama5d3_layers, }; static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = { { .name = "base", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x40, .id = 0, .type = ATMEL_HLCDC_BASE_LAYER, .cfgs_offset = 0x2c, .layout = { .xstride = { 2 }, .default_color = 3, .general_config = 4, .disc_pos = 5, .disc_size = 6, }, }, { .name = "overlay1", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x140, .id = 1, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x2c, .layout = { .pos = 2, .size = 3, .xstride = { 4 }, .pstride = { 5 }, .default_color = 6, .chroma_key = 7, .chroma_key_mask = 8, .general_config = 9, }, }, { .name = "overlay2", .formats = &atmel_hlcdc_plane_rgb_formats, .regs_offset = 0x240, .id = 2, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x2c, .layout = { .pos = 2, .size = 3, .xstride = { 4 }, .pstride = { 5 }, .default_color = 6, .chroma_key = 7, .chroma_key_mask = 8, .general_config = 9, }, }, { .name = "high-end-overlay", .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats, .regs_offset = 0x340, .id = 3, .type = ATMEL_HLCDC_OVERLAY_LAYER, .cfgs_offset = 0x4c, .layout = { .pos = 2, .size = 3, .memsize = 4, .xstride = { 5, 7 }, .pstride = { 6, 8 }, .default_color = 9, .chroma_key = 10, .chroma_key_mask = 11, .general_config = 12, .scaler_config = 13, .phicoeffs = { .x = 17, .y = 33, }, .csc = 14, }, }, }; static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = { .min_width = 0, .min_height = 0, .max_width = 2048, .max_height = 2048, .max_spw = 0xff, .max_vpw = 0xff, .max_hpw = 0x3ff, .nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers), .layers = atmel_hlcdc_sama5d4_layers, }; static const struct of_device_id atmel_hlcdc_of_match[] = { { .compatible = "atmel,at91sam9n12-hlcdc", .data = 
&atmel_hlcdc_dc_at91sam9n12, }, { .compatible = "atmel,at91sam9x5-hlcdc", .data = &atmel_hlcdc_dc_at91sam9x5, }, { .compatible = "atmel,sama5d2-hlcdc", .data = &atmel_hlcdc_dc_sama5d4, }, { .compatible = "atmel,sama5d3-hlcdc", .data = &atmel_hlcdc_dc_sama5d3, }, { .compatible = "atmel,sama5d4-hlcdc", .data = &atmel_hlcdc_dc_sama5d4, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match); int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc, struct drm_display_mode *mode) { int vfront_porch = mode->vsync_start - mode->vdisplay; int vback_porch = mode->vtotal - mode->vsync_end; int vsync_len = mode->vsync_end - mode->vsync_start; int hfront_porch = mode->hsync_start - mode->hdisplay; int hback_porch = mode->htotal - mode->hsync_end; int hsync_len = mode->hsync_end - mode->hsync_start; if (hsync_len > dc->desc->max_spw + 1 || hsync_len < 1) return MODE_HSYNC; if (vsync_len > dc->desc->max_spw + 1 || vsync_len < 1) return MODE_VSYNC; if (hfront_porch > dc->desc->max_hpw + 1 || hfront_porch < 1 || hback_porch > dc->desc->max_hpw + 1 || hback_porch < 1 || mode->hdisplay < 1) return MODE_H_ILLEGAL; if (vfront_porch > dc->desc->max_vpw + 1 || vfront_porch < 1 || vback_porch > dc->desc->max_vpw || vback_porch < 0 || mode->vdisplay < 1) return MODE_V_ILLEGAL; return MODE_OK; } static void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer) { if (!layer) return; if (layer->desc->type == ATMEL_HLCDC_BASE_LAYER || layer->desc->type == ATMEL_HLCDC_OVERLAY_LAYER || layer->desc->type == ATMEL_HLCDC_CURSOR_LAYER) atmel_hlcdc_plane_irq(atmel_hlcdc_layer_to_plane(layer)); } static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data) { struct drm_device *dev = data; struct atmel_hlcdc_dc *dc = dev->dev_private; unsigned long status; unsigned int imr, isr; int i; regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_IMR, &imr); regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr); status = imr & isr; if (!status) return IRQ_NONE; if (status & ATMEL_HLCDC_SOF) atmel_hlcdc_crtc_irq(dc->crtc); for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) { if (ATMEL_HLCDC_LAYER_STATUS(i) & status) atmel_hlcdc_layer_irq(dc->layers[i]); } return IRQ_HANDLED; } static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { return drm_fb_cma_create(dev, file_priv, mode_cmd); } static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; if (dc->fbdev) drm_fbdev_cma_hotplug_event(dc->fbdev); } struct atmel_hlcdc_dc_commit { struct work_struct work; struct drm_device *dev; struct drm_atomic_state *state; }; static void atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit) { struct drm_device *dev = commit->dev; struct atmel_hlcdc_dc *dc = dev->dev_private; struct drm_atomic_state *old_state = commit->state; /* Apply the atomic update. */ drm_atomic_helper_commit_modeset_disables(dev, old_state); drm_atomic_helper_commit_planes(dev, old_state, 0); drm_atomic_helper_commit_modeset_enables(dev, old_state); drm_atomic_helper_wait_for_vblanks(dev, old_state); drm_atomic_helper_cleanup_planes(dev, old_state); drm_atomic_state_put(old_state); /* Complete the commit, wake up any waiter. 
*/ spin_lock(&dc->commit.wait.lock); dc->commit.pending = false; wake_up_all_locked(&dc->commit.wait); spin_unlock(&dc->commit.wait.lock); kfree(commit); } static void atmel_hlcdc_dc_atomic_work(struct work_struct *work) { struct atmel_hlcdc_dc_commit *commit = container_of(work, struct atmel_hlcdc_dc_commit, work); atmel_hlcdc_dc_atomic_complete(commit); } static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async) { struct atmel_hlcdc_dc *dc = dev->dev_private; struct atmel_hlcdc_dc_commit *commit; int ret; ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) return ret; /* Allocate the commit object. */ commit = kzalloc(sizeof(*commit), GFP_KERNEL); if (!commit) { ret = -ENOMEM; goto error; } INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work); commit->dev = dev; commit->state = state; spin_lock(&dc->commit.wait.lock); ret = wait_event_interruptible_locked(dc->commit.wait, !dc->commit.pending); if (ret == 0) dc->commit.pending = true; spin_unlock(&dc->commit.wait.lock); if (ret) { kfree(commit); goto error; } /* Swap the state, this is the point of no return. */ drm_atomic_helper_swap_state(state, true); drm_atomic_state_get(state); if (async) queue_work(dc->wq, &commit->work); else atmel_hlcdc_dc_atomic_complete(commit); return 0; error: drm_atomic_helper_cleanup_planes(dev, state); return ret; } static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = atmel_hlcdc_fb_create, .output_poll_changed = atmel_hlcdc_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = atmel_hlcdc_dc_atomic_commit, }; static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; int ret; drm_mode_config_init(dev); ret = atmel_hlcdc_create_outputs(dev); if (ret) { dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret); return ret; } ret = atmel_hlcdc_create_planes(dev); if (ret) { dev_err(dev->dev, "failed to create planes: %d\n", ret); return ret; } ret = atmel_hlcdc_crtc_create(dev); if (ret) { dev_err(dev->dev, "failed to create crtc\n"); return ret; } dev->mode_config.min_width = dc->desc->min_width; dev->mode_config.min_height = dc->desc->min_height; dev->mode_config.max_width = dc->desc->max_width; dev->mode_config.max_height = dc->desc->max_height; dev->mode_config.funcs = &mode_config_funcs; return 0; } static int atmel_hlcdc_dc_load(struct drm_device *dev) { struct platform_device *pdev = to_platform_device(dev->dev); const struct of_device_id *match; struct atmel_hlcdc_dc *dc; int ret; match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node); if (!match) { dev_err(&pdev->dev, "invalid compatible string\n"); return -ENODEV; } if (!match->data) { dev_err(&pdev->dev, "invalid hlcdc description\n"); return -EINVAL; } dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL); if (!dc) return -ENOMEM; dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0); if (!dc->wq) return -ENOMEM; init_waitqueue_head(&dc->commit.wait); dc->desc = match->data; dc->hlcdc = dev_get_drvdata(dev->dev->parent); dev->dev_private = dc; ret = clk_prepare_enable(dc->hlcdc->periph_clk); if (ret) { dev_err(dev->dev, "failed to enable periph_clk\n"); goto err_destroy_wq; } pm_runtime_enable(dev->dev); ret = drm_vblank_init(dev, 1); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { dev_err(dev->dev, "failed to initialize mode setting\n"); goto 
err_periph_clk_disable; } drm_mode_config_reset(dev); pm_runtime_get_sync(dev->dev); ret = drm_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); if (ret < 0) { dev_err(dev->dev, "failed to install IRQ handler\n"); goto err_periph_clk_disable; } platform_set_drvdata(pdev, dev); dc->fbdev = drm_fbdev_cma_init(dev, 24, dev->mode_config.num_connector); if (IS_ERR(dc->fbdev)) dc->fbdev = NULL; drm_kms_helper_poll_init(dev); return 0; err_periph_clk_disable: pm_runtime_disable(dev->dev); clk_disable_unprepare(dc->hlcdc->periph_clk); err_destroy_wq: destroy_workqueue(dc->wq); return ret; } static void atmel_hlcdc_dc_unload(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; if (dc->fbdev) drm_fbdev_cma_fini(dc->fbdev); flush_workqueue(dc->wq); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); pm_runtime_get_sync(dev->dev); drm_irq_uninstall(dev); pm_runtime_put_sync(dev->dev); dev->dev_private = NULL; pm_runtime_disable(dev->dev); clk_disable_unprepare(dc->hlcdc->periph_clk); destroy_workqueue(dc->wq); } static void atmel_hlcdc_dc_lastclose(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; drm_fbdev_cma_restore_mode(dc->fbdev); } static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; unsigned int cfg = 0; int i; /* Enable interrupts on activated layers */ for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) { if (dc->layers[i]) cfg |= ATMEL_HLCDC_LAYER_STATUS(i); } regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, cfg); return 0; } static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; unsigned int isr; regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, 0xffffffff); regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr); } DEFINE_DRM_GEM_CMA_FOPS(fops); static struct drm_driver atmel_hlcdc_dc_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .lastclose = atmel_hlcdc_dc_lastclose, .irq_handler = atmel_hlcdc_dc_irq_handler, .irq_preinstall = atmel_hlcdc_dc_irq_uninstall, .irq_postinstall = atmel_hlcdc_dc_irq_postinstall, .irq_uninstall = atmel_hlcdc_dc_irq_uninstall, .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = drm_gem_prime_import, .gem_prime_export = drm_gem_prime_export, .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, .gem_prime_vmap = drm_gem_cma_prime_vmap, .gem_prime_vunmap = drm_gem_cma_prime_vunmap, .gem_prime_mmap = drm_gem_cma_prime_mmap, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, .fops = &fops, .name = "atmel-hlcdc", .desc = "Atmel HLCD Controller DRM", .date = "20141504", .major = 1, .minor = 0, }; static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev) { struct drm_device *ddev; int ret; ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev); if (IS_ERR(ddev)) return PTR_ERR(ddev); ret = atmel_hlcdc_dc_load(ddev); if (ret) goto err_unref; ret = drm_dev_register(ddev, 0); if (ret) goto err_unload; return 0; err_unload: atmel_hlcdc_dc_unload(ddev); err_unref: drm_dev_unref(ddev); return ret; } static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) { struct drm_device *ddev = 
platform_get_drvdata(pdev); drm_dev_unregister(ddev); atmel_hlcdc_dc_unload(ddev); drm_dev_unref(ddev); return 0; } #ifdef CONFIG_PM_SLEEP static int atmel_hlcdc_dc_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct atmel_hlcdc_dc *dc = drm_dev->dev_private; struct regmap *regmap = dc->hlcdc->regmap; struct drm_atomic_state *state; state = drm_atomic_helper_suspend(drm_dev); if (IS_ERR(state)) return PTR_ERR(state); dc->suspend.state = state; regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr); regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr); clk_disable_unprepare(dc->hlcdc->periph_clk); return 0; } static int atmel_hlcdc_dc_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct atmel_hlcdc_dc *dc = drm_dev->dev_private; clk_prepare_enable(dc->hlcdc->periph_clk); regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr); return drm_atomic_helper_resume(drm_dev, dc->suspend.state); } #endif static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops, atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume); static const struct of_device_id atmel_hlcdc_dc_of_match[] = { { .compatible = "atmel,hlcdc-display-controller" }, { }, }; static struct platform_driver atmel_hlcdc_dc_platform_driver = { .probe = atmel_hlcdc_dc_drm_probe, .remove = atmel_hlcdc_dc_drm_remove, .driver = { .name = "atmel-hlcdc-display-controller", .pm = &atmel_hlcdc_dc_drm_pm_ops, .of_match_table = atmel_hlcdc_dc_of_match, }, }; module_platform_driver(atmel_hlcdc_dc_platform_driver); MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>"); MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); MODULE_DESCRIPTION("Atmel HLCDC Display Controller DRM Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:atmel-hlcdc-dc");
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 99,856
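
atmel_hlcdc_dc_mode_valid in the record above derives porch widths and sync lengths from the raw DRM mode timings before checking them against the per-SoC limits. The arithmetic in isolation (timings taken from a common 1024x768@60 mode, not from the driver):

#include <stdio.h>

/* Porch/sync arithmetic used by atmel_hlcdc_dc_mode_valid: DRM encodes a
 * scanline as hdisplay <= hsync_start <= hsync_end <= htotal, so the
 * differences give front porch, sync pulse and back porch. The vertical
 * timings decompose the same way. */
int main(void)
{
    int hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, htotal = 1344;

    int hfront_porch = hsync_start - hdisplay; /* 24  */
    int hsync_len = hsync_end - hsync_start;   /* 136 */
    int hback_porch = htotal - hsync_end;      /* 160 */

    printf("front=%d sync=%d back=%d\n", hfront_porch, hsync_len, hback_porch);
    return 0;
}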

file_name: 23,816
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b
unique_id: 23,816
project: Chrome
target: 0
repo_url: https://github.com/chromium/chromium
date: 2018-04-07 23:43:03+00:00
code:
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_BROWSER_APPCACHE_APPCACHE_UPDATE_JOB_H_
#define CONTENT_BROWSER_APPCACHE_APPCACHE_UPDATE_JOB_H_

#include <stddef.h>
#include <stdint.h>

#include <map>
#include <set>
#include <string>
#include <vector>

#include "base/containers/circular_deque.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "content/browser/appcache/appcache.h"
#include "content/browser/appcache/appcache_host.h"
#include "content/browser/appcache/appcache_response.h"
#include "content/browser/appcache/appcache_service_impl.h"
#include "content/browser/appcache/appcache_storage.h"
#include "content/common/appcache_interfaces.h"
#include "content/common/content_export.h"
#include "net/base/completion_callback.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/url_request.h"
#include "url/gurl.h"

namespace content {
FORWARD_DECLARE_TEST(AppCacheGroupTest, QueueUpdate);
class AppCacheGroupTest;

namespace appcache_update_job_unittest {
class AppCacheUpdateJobTest;
}

class HostNotifier;

// Application cache Update algorithm and state.
class CONTENT_EXPORT AppCacheUpdateJob
    : public AppCacheStorage::Delegate,
      public AppCacheHost::Observer,
      public AppCacheServiceImpl::Observer {
 public:
  // Used for uma stats only for now, so new values are append only.
  enum ResultType {
    UPDATE_OK,
    DB_ERROR,
    DISKCACHE_ERROR,
    QUOTA_ERROR,
    REDIRECT_ERROR,
    MANIFEST_ERROR,
    NETWORK_ERROR,
    SERVER_ERROR,
    CANCELLED_ERROR,
    SECURITY_ERROR,
    NUM_UPDATE_JOB_RESULT_TYPES
  };

  AppCacheUpdateJob(AppCacheServiceImpl* service, AppCacheGroup* group);
  ~AppCacheUpdateJob() override;

  // Triggers the update process or adds more info if this update is already
  // in progress.
  void StartUpdate(AppCacheHost* host, const GURL& new_master_resource);

 private:
  friend class content::AppCacheGroupTest;
  friend class content::appcache_update_job_unittest::AppCacheUpdateJobTest;

  class URLFetcher;
  class UpdateRequestBase;
  class UpdateURLLoaderRequest;
  class UpdateURLRequest;

  // Master entries have multiple hosts, for example, the same page is opened
  // in different tabs.
  using PendingHosts = std::vector<AppCacheHost*>;
  using PendingMasters = std::map<GURL, PendingHosts>;
  using PendingUrlFetches = std::map<GURL, URLFetcher*>;
  using LoadingResponses = std::map<int64_t, GURL>;

  static const int kRerunDelayMs = 1000;

  // TODO(michaeln): Rework the set of states vs update types vs stored states.
  // The NO_UPDATE state is really more of an update type. For all update types
  // storing the results is relevant.

  enum UpdateType {
    UNKNOWN_TYPE,
    UPGRADE_ATTEMPT,
    CACHE_ATTEMPT,
  };

  enum InternalUpdateState {
    FETCH_MANIFEST,
    NO_UPDATE,
    DOWNLOADING,

    // Every state after this comment indicates the update is terminating.
    REFETCH_MANIFEST,
    CACHE_FAILURE,
    CANCELLED,
    COMPLETED,
  };

  enum StoredState {
    UNSTORED,
    STORING,
    STORED,
  };

  struct UrlToFetch {
    UrlToFetch(const GURL& url, bool checked, AppCacheResponseInfo* info);
    UrlToFetch(const UrlToFetch& other);
    ~UrlToFetch();

    GURL url;
    bool storage_checked;
    scoped_refptr<AppCacheResponseInfo> existing_response_info;
  };

  AppCacheResponseWriter* CreateResponseWriter();

  // Methods for AppCacheStorage::Delegate.
  void OnResponseInfoLoaded(AppCacheResponseInfo* response_info,
                            int64_t response_id) override;
  void OnGroupAndNewestCacheStored(AppCacheGroup* group,
                                   AppCache* newest_cache,
                                   bool success,
                                   bool would_exceed_quota) override;
  void OnGroupMadeObsolete(AppCacheGroup* group,
                           bool success,
                           int response_code) override;

  // Methods for AppCacheHost::Observer.
  void OnCacheSelectionComplete(AppCacheHost* host) override {}  // N/A
  void OnDestructionImminent(AppCacheHost* host) override;

  // Methods for AppCacheServiceImpl::Observer.
  void OnServiceReinitialized(AppCacheStorageReference* old_storage) override;

  void HandleCacheFailure(const AppCacheErrorDetails& details,
                          ResultType result,
                          const GURL& failed_resource_url);

  void FetchManifest(bool is_first_fetch);
  void HandleManifestFetchCompleted(URLFetcher* fetcher, int net_error);
  void ContinueHandleManifestFetchCompleted(bool changed);

  void HandleUrlFetchCompleted(URLFetcher* fetcher, int net_error);
  void HandleMasterEntryFetchCompleted(URLFetcher* fetcher, int net_error);

  void HandleManifestRefetchCompleted(URLFetcher* fetcher, int net_error);
  void OnManifestInfoWriteComplete(int result);
  void OnManifestDataWriteComplete(int result);

  void StoreGroupAndCache();

  void NotifySingleHost(AppCacheHost* host, AppCacheEventID event_id);
  void NotifyAllAssociatedHosts(AppCacheEventID event_id);
  void NotifyAllProgress(const GURL& url);
  void NotifyAllFinalProgress();
  void NotifyAllError(const AppCacheErrorDetails& details);
  void LogConsoleMessageToAll(const std::string& message);
  void AddAllAssociatedHostsToNotifier(HostNotifier* notifier);

  // Checks if manifest is byte for byte identical with the manifest
  // in the newest application cache.
  void CheckIfManifestChanged();
  void OnManifestDataReadComplete(int result);

  // Creates the list of files that may need to be fetched and initiates
  // fetches. Section 6.9.4 steps 12-17
  void BuildUrlFileList(const AppCacheManifest& manifest);
  void AddUrlToFileList(const GURL& url, int type);
  void FetchUrls();
  void CancelAllUrlFetches();
  bool ShouldSkipUrlFetch(const AppCacheEntry& entry);

  // If entry already exists in the cache currently being updated, merge
  // the entry type information with the existing entry.
  // Returns true if entry exists in cache currently being updated.
  bool AlreadyFetchedEntry(const GURL& url, int entry_type);

  // TODO(jennb): Delete when update no longer fetches master entries directly.
  // Creates the list of master entries that need to be fetched and initiates
  // fetches.
  void AddMasterEntryToFetchList(AppCacheHost* host, const GURL& url,
                                 bool is_new);
  void FetchMasterEntries();
  void CancelAllMasterEntryFetches(const AppCacheErrorDetails& details);

  // Asynchronously loads the entry from the newest complete cache if the
  // HTTP caching semantics allow.
  // Returns false if immediately obvious that data cannot be loaded from
  // newest complete cache.
  bool MaybeLoadFromNewestCache(const GURL& url, AppCacheEntry& entry);
  void LoadFromNewestCacheFailed(const GURL& url,
                                 AppCacheResponseInfo* newest_response_info);

  // Does nothing if update process is still waiting for pending master
  // entries or URL fetches to complete downloading. Otherwise, completes
  // the update process.
  void MaybeCompleteUpdate();

  // Schedules a rerun of the entire update with the same parameters as
  // this update job after a short delay.
  void ScheduleUpdateRetry(int delay_ms);

  void Cancel();
  void ClearPendingMasterEntries();
  void DiscardInprogressCache();
  void DiscardDuplicateResponses();

  void LogHistogramStats(ResultType result, const GURL& failed_resource_url);
  void MadeProgress() { last_progress_time_ = base::Time::Now(); }

  // Deletes this object after letting the stack unwind.
  void DeleteSoon();

  bool IsTerminating() {
    return internal_state_ >= REFETCH_MANIFEST ||
           stored_state_ != UNSTORED;
  }

  AppCacheServiceImpl* service_;
  const GURL manifest_url_;  // here for easier access

  // Defined prior to refs to AppCaches and Groups because destruction
  // order matters, the disabled_storage_reference_ must outlive those
  // objects.
  scoped_refptr<AppCacheStorageReference> disabled_storage_reference_;

  scoped_refptr<AppCache> inprogress_cache_;

  AppCacheGroup* group_;

  UpdateType update_type_;
  InternalUpdateState internal_state_;
  base::Time last_progress_time_;
  bool doing_full_update_check_;

  PendingMasters pending_master_entries_;
  size_t master_entries_completed_;
  std::set<GURL> failed_master_entries_;

  // TODO(jennb): Delete when update no longer fetches master entries directly.
  // Helper containers to track which pending master entries have yet to be
  // fetched and which are currently being fetched. Master entries that
  // are listed in the manifest may be fetched as a regular URL instead of
  // as a separate master entry fetch to optimize against duplicate fetches.
  std::set<GURL> master_entries_to_fetch_;
  PendingUrlFetches master_entry_fetches_;

  // URLs of files to fetch along with their flags.
  AppCache::EntryMap url_file_list_;
  size_t url_fetches_completed_;

  // Helper container to track which urls have not been fetched yet. URLs are
  // removed when the fetch is initiated. Flag indicates whether an attempt
  // to load the URL from storage has already been tried and failed.
  base::circular_deque<UrlToFetch> urls_to_fetch_;

  // Helper container to track which urls are being loaded from response
  // storage.
  LoadingResponses loading_responses_;

  // Keep track of pending URL requests so we can cancel them if necessary.
  URLFetcher* manifest_fetcher_;
  PendingUrlFetches pending_url_fetches_;

  // Temporary storage of manifest response data for parsing and comparison.
  std::string manifest_data_;
  std::unique_ptr<net::HttpResponseInfo> manifest_response_info_;
  std::unique_ptr<AppCacheResponseWriter> manifest_response_writer_;
  scoped_refptr<net::IOBuffer> read_manifest_buffer_;
  std::string loaded_manifest_data_;
  std::unique_ptr<AppCacheResponseReader> manifest_response_reader_;
  bool manifest_has_valid_mime_type_;

  // New master entries added to the cache by this job, used to cleanup
  // in error conditions.
  std::vector<GURL> added_master_entries_;

  // Response ids stored by this update job, used to cleanup in
  // error conditions.
  std::vector<int64_t> stored_response_ids_;

  // In some cases we fetch the same resource multiple times, and then
  // have to delete the duplicates upon successful update. These ids
  // are also in the stored_response_ids_ collection so we only schedule
  // these for deletion on success.
  // TODO(michaeln): Rework when we no longer fetch master entries directly.
  std::vector<int64_t> duplicate_response_ids_;

  // Whether we've stored the resulting group/cache yet.
  StoredState stored_state_;

  AppCacheStorage* storage_;

  base::WeakPtrFactory<AppCacheUpdateJob> weak_factory_;

  FRIEND_TEST_ALL_PREFIXES(content::AppCacheGroupTest, QueueUpdate);

  DISALLOW_COPY_AND_ASSIGN(AppCacheUpdateJob);
};

}  // namespace content

#endif  // CONTENT_BROWSER_APPCACHE_APPCACHE_UPDATE_JOB_H_
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 20,679
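
IsTerminating in the header above folds two pieces of state into one predicate: the internal state has reached REFETCH_MANIFEST, or storing has begun. A standalone reduction of that ordering trick (the enum values mirror the header's declaration order):

#include <stdio.h>
#include <stdbool.h>

/* The ordering trick from AppCacheUpdateJob: every state declared at or
 * after REFETCH_MANIFEST means the update is terminating, so a single >=
 * comparison covers them all. */
enum internal_state { FETCH_MANIFEST, NO_UPDATE, DOWNLOADING,
                      REFETCH_MANIFEST, CACHE_FAILURE, CANCELLED, COMPLETED };
enum stored_state { UNSTORED, STORING, STORED };

static bool is_terminating(enum internal_state s, enum stored_state st)
{
    return s >= REFETCH_MANIFEST || st != UNSTORED;
}

int main(void)
{
    printf("%d %d %d\n",
           is_terminating(DOWNLOADING, UNSTORED), /* 0 */
           is_terminating(CANCELLED, UNSTORED),   /* 1 */
           is_terminating(DOWNLOADING, STORING)); /* 1 */
    return 0;
}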

file_name: 28,491
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 193,486
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
code:
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _RTW_DEBUG_C_ #include <rtw_debug.h> #include <usb_ops_linux.h> int proc_get_drv_version(char *page, char **start, off_t offset, int count, int *eof, void *data) { int len = 0; len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION); *eof = 1; return len; } int proc_get_write_reg(char *page, char **start, off_t offset, int count, int *eof, void *data) { *eof = 1; return 0; } int proc_set_write_reg(struct file *file, const char __user *buffer, unsigned long count, void *data) { struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); char tmp[32]; u32 addr, val, len; if (count < 3) { DBG_88E("argument size is less than 3\n"); return -EFAULT; } if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) { int num = sscanf(tmp, "%x %x %x", &addr, &val, &len); if (num != 3) { DBG_88E("invalid write_reg parameter!\n"); return count; } switch (len) { case 1: usb_write8(padapter, addr, (u8)val); break; case 2: usb_write16(padapter, addr, (u16)val); break; case 4: usb_write32(padapter, addr, val); break; default: DBG_88E("error write length =%d", len); break; } } return count; } static u32 proc_get_read_addr = 0xeeeeeeee; static u32 proc_get_read_len = 0x4; int proc_get_read_reg(char *page, char **start, off_t offset, int count, int *eof, void *data) { struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); int len = 0; if (proc_get_read_addr == 0xeeeeeeee) { *eof = 1; return len; } switch (proc_get_read_len) { case 1: len += snprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(padapter, proc_get_read_addr)); break; case 2: len += snprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16(padapter, proc_get_read_addr)); break; case 4: len += snprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32(padapter, proc_get_read_addr)); break; default: len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len); break; } *eof = 1; return len; } int proc_set_read_reg(struct file *file, const char __user *buffer, unsigned long count, void *data) { char tmp[16]; u32 addr, len; if (count < 2) { DBG_88E("argument size is less than 2\n"); return -EFAULT; } if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) { int num = sscanf(tmp, "%x %x", &addr, &len); if (num != 2) { DBG_88E("invalid read_reg parameter!\n"); return count; } proc_get_read_addr = addr; proc_get_read_len = len; } return count; } int proc_get_adapter_state(char *page, char **start, off_t offset, int count, int *eof, void *data) { struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); int len = 0; len += snprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n", padapter->bSurpriseRemoved, 
padapter->bDriverStopped); *eof = 1; return len; } int proc_get_best_channel(char *page, char **start, off_t offset, int count, int *eof, void *data) { struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; int len = 0; u32 i, best_channel_24G = 1, best_channel_5G = 36, index_24G = 0, index_5G = 0; for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) { if (pmlmeext->channel_set[i].ChannelNum == 1) index_24G = i; if (pmlmeext->channel_set[i].ChannelNum == 36) index_5G = i; } for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) { /* 2.4G */ if (pmlmeext->channel_set[i].ChannelNum == 6) { if (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_24G].rx_count) { index_24G = i; best_channel_24G = pmlmeext->channel_set[i].ChannelNum; } } /* 5G */ if (pmlmeext->channel_set[i].ChannelNum >= 36 && pmlmeext->channel_set[i].ChannelNum < 140) { /* Find primary channel */ if (((pmlmeext->channel_set[i].ChannelNum - 36) % 8 == 0) && (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) { index_5G = i; best_channel_5G = pmlmeext->channel_set[i].ChannelNum; } } if (pmlmeext->channel_set[i].ChannelNum >= 149 && pmlmeext->channel_set[i].ChannelNum < 165) { /* find primary channel */ if (((pmlmeext->channel_set[i].ChannelNum - 149) % 8 == 0) && (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) { index_5G = i; best_channel_5G = pmlmeext->channel_set[i].ChannelNum; } } /* debug */ len += snprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n", pmlmeext->channel_set[i].ChannelNum, pmlmeext->channel_set[i].rx_count); } len += snprintf(page + len, count - len, "best_channel_5G = %d\n", best_channel_5G); len += snprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G); *eof = 1; return len; }
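/*
 * A hedged sketch (not the driver's actual code) of a bounds-checked
 * variant of proc_set_write_reg() above: it clamps the copy length to the
 * user-supplied count and NUL-terminates the buffer before sscanf(), two
 * checks the handler above omits. example_proc_write() is a hypothetical
 * name used only for illustration.
 */
static long example_proc_write(const char __user *buffer, unsigned long count)
{
	char tmp[32];
	u32 addr, val, len;

	if (count < 3 || count > sizeof(tmp) - 1)
		return -EINVAL;
	if (!buffer || copy_from_user(tmp, buffer, count))
		return -EFAULT;
	tmp[count] = '\0';	/* sscanf() needs a terminated string */
	if (sscanf(tmp, "%x %x %x", &addr, &val, &len) != 3)
		return -EINVAL;
	/* ... dispatch on len exactly as proc_set_write_reg() does ... */
	return count;
}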
null
null
null
null
101,833
38,439
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
203,434
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef LOCKS_H #define LOCKS_H #include <limits.h> #include <pthread.h> #include <stdbool.h> #include "assume.h" #include "bug_on.h" #include "preempt.h" int nondet_int(void); #define __acquire(x) #define __acquires(x) #define __release(x) #define __releases(x) /* Only use one lock mechanism. Select which one. */ #ifdef PTHREAD_LOCK struct lock_impl { pthread_mutex_t mutex; }; static inline void lock_impl_lock(struct lock_impl *lock) { BUG_ON(pthread_mutex_lock(&lock->mutex)); } static inline void lock_impl_unlock(struct lock_impl *lock) { BUG_ON(pthread_mutex_unlock(&lock->mutex)); } static inline bool lock_impl_trylock(struct lock_impl *lock) { int err = pthread_mutex_trylock(&lock->mutex); if (!err) return true; else if (err == EBUSY) return false; BUG(); } static inline void lock_impl_init(struct lock_impl *lock) { pthread_mutex_init(&lock->mutex, NULL); } #define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER} #else /* !defined(PTHREAD_LOCK) */ /* Spinlock that assumes that it always gets the lock immediately. */ struct lock_impl { bool locked; }; static inline bool lock_impl_trylock(struct lock_impl *lock) { #ifdef RUN /* TODO: Should this be a test and set? */ return __sync_bool_compare_and_swap(&lock->locked, false, true); #else __CPROVER_atomic_begin(); bool old_locked = lock->locked; lock->locked = true; __CPROVER_atomic_end(); /* Minimal barrier to prevent accesses leaking out of lock. */ __CPROVER_fence("RRfence", "RWfence"); return !old_locked; #endif } static inline void lock_impl_lock(struct lock_impl *lock) { /* * CBMC doesn't support busy waiting, so just assume that the * lock is available. */ assume(lock_impl_trylock(lock)); /* * If the lock was already held by this thread then the assumption * is unsatisfiable (deadlock). */ } static inline void lock_impl_unlock(struct lock_impl *lock) { #ifdef RUN BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false)); #else /* Minimal barrier to prevent accesses leaking out of lock. */ __CPROVER_fence("RWfence", "WWfence"); __CPROVER_atomic_begin(); bool old_locked = lock->locked; lock->locked = false; __CPROVER_atomic_end(); BUG_ON(!old_locked); #endif } static inline void lock_impl_init(struct lock_impl *lock) { lock->locked = false; } #define LOCK_IMPL_INITIALIZER {.locked = false} #endif /* !defined(PTHREAD_LOCK) */ /* * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing * locks of different types. */ typedef struct { struct lock_impl internal_lock; } spinlock_t; #define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER} #define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED static inline void spin_lock_init(spinlock_t *lock) { lock_impl_init(&lock->internal_lock); } static inline void spin_lock(spinlock_t *lock) { /* * Spin locks also need to be removed in order to eliminate all * memory barriers. They are only used by the write side anyway. */ #ifndef NO_SYNC_SMP_MB preempt_disable(); lock_impl_lock(&lock->internal_lock); #endif } static inline void spin_unlock(spinlock_t *lock) { #ifndef NO_SYNC_SMP_MB lock_impl_unlock(&lock->internal_lock); preempt_enable(); #endif } /* Don't bother with interrupts */ #define spin_lock_irq(lock) spin_lock(lock) #define spin_unlock_irq(lock) spin_unlock(lock) #define spin_lock_irqsave(lock, flags) spin_lock(lock) #define spin_unlock_irqrestore(lock, flags) spin_unlock(lock) /* * This is supposed to return an int, but I think that a bool should work as * well. 
 */
static inline bool spin_trylock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	return lock_impl_trylock(&lock->internal_lock);
#else
	return true;
#endif
}

struct completion {
	/* Hopefully this won't overflow. */
	unsigned int count;
};

#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)

static inline void init_completion(struct completion *c)
{
	c->count = 0;
}

static inline void wait_for_completion(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);

	assume(prev_count);
}

static inline void complete(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);

	BUG_ON(prev_count == UINT_MAX);
}

/* This function probably isn't very useful for CBMC. */
static inline bool try_wait_for_completion(struct completion *c)
{
	BUG();
}

static inline bool completion_done(struct completion *c)
{
	return c->count;
}

/* TODO: Implement complete_all */
static inline void complete_all(struct completion *c)
{
	BUG();
}

#endif
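/*
 * A small usage sketch of the completion model above, assuming the CBMC
 * harness runs producer() and consumer() as two threads. complete()
 * increments the count; wait_for_completion() decrements it and assume()s
 * that the pre-decrement value was nonzero, so the consumer can only
 * proceed on executions where a complete() happened first.
 */
static struct completion work_done = COMPLETION_INITIALIZER(work_done);

static void producer(void)
{
	/* ... publish the data ... */
	complete(&work_done);
}

static void consumer(void)
{
	wait_for_completion(&work_done);
	/* ... safe to read the data here ... */
}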
null
null
null
null
111,781
9,307
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
174,302
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/arch/arm/mach-mmp/mmp2.c * * code name MMP2 * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk/mmp.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip/mmp.h> #include <linux/platform_device.h> #include <asm/hardware/cache-tauros2.h> #include <asm/mach/time.h> #include "addr-map.h" #include "regs-apbc.h" #include "cputype.h" #include "irqs.h" #include "mfp.h" #include "devices.h" #include "mmp2.h" #include "pm-mmp2.h" #include "common.h" #define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000) static struct mfp_addr_map mmp2_addr_map[] __initdata = { MFP_ADDR_X(GPIO0, GPIO58, 0x54), MFP_ADDR_X(GPIO59, GPIO73, 0x280), MFP_ADDR_X(GPIO74, GPIO101, 0x170), MFP_ADDR(GPIO102, 0x0), MFP_ADDR(GPIO103, 0x4), MFP_ADDR(GPIO104, 0x1fc), MFP_ADDR(GPIO105, 0x1f8), MFP_ADDR(GPIO106, 0x1f4), MFP_ADDR(GPIO107, 0x1f0), MFP_ADDR(GPIO108, 0x21c), MFP_ADDR(GPIO109, 0x218), MFP_ADDR(GPIO110, 0x214), MFP_ADDR(GPIO111, 0x200), MFP_ADDR(GPIO112, 0x244), MFP_ADDR(GPIO113, 0x25c), MFP_ADDR(GPIO114, 0x164), MFP_ADDR_X(GPIO115, GPIO122, 0x260), MFP_ADDR(GPIO123, 0x148), MFP_ADDR_X(GPIO124, GPIO141, 0xc), MFP_ADDR(GPIO142, 0x8), MFP_ADDR_X(GPIO143, GPIO151, 0x220), MFP_ADDR_X(GPIO152, GPIO153, 0x248), MFP_ADDR_X(GPIO154, GPIO155, 0x254), MFP_ADDR_X(GPIO156, GPIO159, 0x14c), MFP_ADDR(GPIO160, 0x250), MFP_ADDR(GPIO161, 0x210), MFP_ADDR(GPIO162, 0x20c), MFP_ADDR(GPIO163, 0x208), MFP_ADDR(GPIO164, 0x204), MFP_ADDR(GPIO165, 0x1ec), MFP_ADDR(GPIO166, 0x1e8), MFP_ADDR(GPIO167, 0x1e4), MFP_ADDR(GPIO168, 0x1e0), MFP_ADDR_X(TWSI1_SCL, TWSI1_SDA, 0x140), MFP_ADDR_X(TWSI4_SCL, TWSI4_SDA, 0x2bc), MFP_ADDR(PMIC_INT, 0x2c4), MFP_ADDR(CLK_REQ, 0x160), MFP_ADDR_END, }; void mmp2_clear_pmic_int(void) { void __iomem *mfpr_pmic; unsigned long data; mfpr_pmic = APB_VIRT_BASE + 0x1e000 + 0x2c4; data = __raw_readl(mfpr_pmic); __raw_writel(data | (1 << 6), mfpr_pmic); __raw_writel(data, mfpr_pmic); } void __init mmp2_init_irq(void) { mmp2_init_icu(); #ifdef CONFIG_PM icu_irq_chip.irq_set_wake = mmp2_set_wake; #endif } static int __init mmp2_init(void) { if (cpu_is_mmp2()) { #ifdef CONFIG_CACHE_TAUROS2 tauros2_init(0); #endif mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(mmp2_addr_map); mmp2_clk_init(APB_PHYS_BASE + 0x50000, AXI_PHYS_BASE + 0x82800, APB_PHYS_BASE + 0x15000); } return 0; } postcore_initcall(mmp2_init); #define APBC_TIMERS APBC_REG(0x024) void __init mmp2_timer_init(void) { unsigned long clk_rst; __raw_writel(APBC_APBCLK | APBC_RST, APBC_TIMERS); /* * enable bus/functional clock, enable 6.5MHz (divider 4), * release reset */ clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(1); __raw_writel(clk_rst, APBC_TIMERS); timer_init(IRQ_MMP2_TIMER1); } /* on-chip devices */ MMP2_DEVICE(uart1, "pxa2xx-uart", 0, UART1, 0xd4030000, 0x30, 4, 5); MMP2_DEVICE(uart2, "pxa2xx-uart", 1, UART2, 0xd4017000, 0x30, 20, 21); MMP2_DEVICE(uart3, "pxa2xx-uart", 2, UART3, 0xd4018000, 0x30, 22, 23); MMP2_DEVICE(uart4, "pxa2xx-uart", 3, UART4, 0xd4016000, 0x30, 18, 19); MMP2_DEVICE(twsi1, "pxa2xx-i2c", 0, TWSI1, 0xd4011000, 0x70); MMP2_DEVICE(twsi2, "pxa2xx-i2c", 1, TWSI2, 0xd4031000, 0x70); MMP2_DEVICE(twsi3, "pxa2xx-i2c", 2, TWSI3, 0xd4032000, 0x70); MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 
0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29); MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120); MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120); MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120); MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120); MMP2_DEVICE(asram, "asram", -1, NONE, 0xe0000000, 0x4000); /* 0xd1000000 ~ 0xd101ffff is reserved for secure processor */ MMP2_DEVICE(isram, "isram", -1, NONE, 0xd1020000, 0x18000); struct resource mmp2_resource_gpio[] = { { .start = 0xd4019000, .end = 0xd4019fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_MMP2_GPIO, .end = IRQ_MMP2_GPIO, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct platform_device mmp2_device_gpio = { .name = "mmp2-gpio", .id = -1, .num_resources = ARRAY_SIZE(mmp2_resource_gpio), .resource = mmp2_resource_gpio, };
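/*
 * A minimal sketch of the read/pulse/restore pattern that
 * mmp2_clear_pmic_int() uses on the PMIC MFPR register: write the value
 * back with the edge-clear bit set, then restore the original
 * configuration. The helper name and its generic form are assumptions for
 * illustration, not mach-mmp code.
 */
static inline void pulse_clear_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long data = __raw_readl(reg);

	__raw_writel(data | bit, reg);	/* assert the clear bit */
	__raw_writel(data, reg);	/* restore the original value */
}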
null
null
null
null
82,649
13,688
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
178,683
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ #ifndef __ASM_ARC_PTRACE_H #define __ASM_ARC_PTRACE_H #include <uapi/asm/ptrace.h> #ifndef __ASSEMBLY__ /* THE pt_regs: Defines how regs are saved during entry into kernel */ #ifdef CONFIG_ISA_ARCOMPACT struct pt_regs { /* Real registers */ unsigned long bta; /* bta_l1, bta_l2, erbta */ unsigned long lp_start, lp_end, lp_count; unsigned long status32; /* status32_l1, status32_l2, erstatus */ unsigned long ret; /* ilink1, ilink2 or eret */ unsigned long blink; unsigned long fp; unsigned long r26; /* gp */ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; unsigned long sp; /* User/Kernel depending on where we came from */ unsigned long orig_r0; /* * To distinguish bet excp, syscall, irq * For traps and exceptions, Exception Cause Register. * ECR: <00> <VV> <CC> <PP> * Last word used by Linux for extra state mgmt (syscall-restart) * For interrupts, use artificial ECR values to note current prio-level */ union { struct { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned long state:8, ecr_vec:8, ecr_cause:8, ecr_param:8; #else unsigned long ecr_param:8, ecr_cause:8, ecr_vec:8, state:8; #endif }; unsigned long event; }; unsigned long user_r25; }; #else struct pt_regs { unsigned long orig_r0; union { struct { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned long state:8, ecr_vec:8, ecr_cause:8, ecr_param:8; #else unsigned long ecr_param:8, ecr_cause:8, ecr_vec:8, state:8; #endif }; unsigned long event; }; unsigned long bta; /* bta_l1, bta_l2, erbta */ unsigned long user_r25; unsigned long r26; /* gp */ unsigned long fp; unsigned long sp; /* user/kernel sp depending on where we came from */ unsigned long r12, r30; /*------- Below list auto saved by h/w -----------*/ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; unsigned long blink; unsigned long lp_end, lp_start, lp_count; unsigned long ei, ldi, jli; unsigned long ret; unsigned long status32; }; #endif /* Callee saved registers - need to be saved only when you are scheduled out */ struct callee_regs { unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; }; #define instruction_pointer(regs) ((regs)->ret) #define profile_pc(regs) instruction_pointer(regs) /* return 1 if user mode or 0 if kernel mode */ #define user_mode(regs) (regs->status32 & STATUS_U_MASK) #define user_stack_pointer(regs)\ ({ unsigned int sp; \ if (user_mode(regs)) \ sp = (regs)->sp;\ else \ sp = -1; \ sp; \ }) /* return 1 if PC in delay slot */ #define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK) #define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param) #define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param) #define STATE_SCALL_RESTARTED 0x01 #define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED) #define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED) #define current_pt_regs() \ ({ \ /* open-coded current_thread_info() */ \ register unsigned long sp asm ("sp"); \ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \ (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \ }) static inline long regs_return_value(struct pt_regs *regs) { return (long)regs->r0; } #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PTRACE_H */
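/*
 * A worked example of the pointer arithmetic behind current_pt_regs()
 * above, assuming an 8 KiB THREAD_SIZE and a hypothetical stack pointer:
 * masking sp with ~(THREAD_SIZE - 1) yields the base of the kernel stack
 * page, and struct pt_regs sits at the very top of that page.
 */
#include <stdio.h>

#define EXAMPLE_THREAD_SIZE 8192UL

int main(void)
{
	unsigned long sp = 0x80f01a34UL;	/* hypothetical sp value */
	unsigned long pg_start = sp & ~(EXAMPLE_THREAD_SIZE - 1);

	/* prints 0x80f00000 and 0x80f02000 */
	printf("stack base %#lx, pt_regs ends at %#lx\n",
	       pg_start, pg_start + EXAMPLE_THREAD_SIZE);
	return 0;
}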
null
null
null
null
87,030
63,192
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
63,192
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/views/ime_driver/remote_text_input_client.h" RemoteTextInputClient::RemoteTextInputClient( ui::mojom::TextInputClientPtr remote_client, ui::TextInputType text_input_type, ui::TextInputMode text_input_mode, base::i18n::TextDirection text_direction, int text_input_flags, gfx::Rect caret_bounds) : remote_client_(std::move(remote_client)), text_input_type_(text_input_type), text_input_mode_(text_input_mode), text_direction_(text_direction), text_input_flags_(text_input_flags), caret_bounds_(caret_bounds) {} RemoteTextInputClient::~RemoteTextInputClient() {} void RemoteTextInputClient::SetTextInputType( ui::TextInputType text_input_type) { text_input_type_ = text_input_type; } void RemoteTextInputClient::SetCaretBounds(const gfx::Rect& caret_bounds) { caret_bounds_ = caret_bounds; } void RemoteTextInputClient::SetCompositionText( const ui::CompositionText& composition) { remote_client_->SetCompositionText(composition); } void RemoteTextInputClient::ConfirmCompositionText() { remote_client_->ConfirmCompositionText(); } void RemoteTextInputClient::ClearCompositionText() { remote_client_->ClearCompositionText(); } void RemoteTextInputClient::InsertText(const base::string16& text) { remote_client_->InsertText(text); } void RemoteTextInputClient::InsertChar(const ui::KeyEvent& event) { remote_client_->InsertChar(ui::Event::Clone(event)); } ui::TextInputType RemoteTextInputClient::GetTextInputType() const { return text_input_type_; } ui::TextInputMode RemoteTextInputClient::GetTextInputMode() const { return text_input_mode_; } base::i18n::TextDirection RemoteTextInputClient::GetTextDirection() const { return text_direction_; } int RemoteTextInputClient::GetTextInputFlags() const { return text_input_flags_; } bool RemoteTextInputClient::CanComposeInline() const { // If we return false here, ui::InputMethodChromeOS will try to create a // composition window. But here we are at IMEDriver, and composition // window shouldn't be created by IMEDriver. return true; } gfx::Rect RemoteTextInputClient::GetCaretBounds() const { return caret_bounds_; } bool RemoteTextInputClient::GetCompositionCharacterBounds( uint32_t index, gfx::Rect* rect) const { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } bool RemoteTextInputClient::HasCompositionText() const { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } bool RemoteTextInputClient::GetTextRange(gfx::Range* range) const { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } bool RemoteTextInputClient::GetCompositionTextRange(gfx::Range* range) const { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } bool RemoteTextInputClient::GetSelectionRange(gfx::Range* range) const { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } bool RemoteTextInputClient::SetSelectionRange(const gfx::Range& range) { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } bool RemoteTextInputClient::DeleteRange(const gfx::Range& range) { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } bool RemoteTextInputClient::GetTextFromRange(const gfx::Range& range, base::string16* text) const { // TODO(moshayedi): crbug.com/631527. 
NOTIMPLEMENTED_LOG_ONCE(); return false; } void RemoteTextInputClient::OnInputMethodChanged() { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); } bool RemoteTextInputClient::ChangeTextDirectionAndLayoutAlignment( base::i18n::TextDirection direction) { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } void RemoteTextInputClient::ExtendSelectionAndDelete(size_t before, size_t after) { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); } void RemoteTextInputClient::EnsureCaretNotInRect(const gfx::Rect& rect) { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); } bool RemoteTextInputClient::IsTextEditCommandEnabled( ui::TextEditCommand command) const { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return false; } void RemoteTextInputClient::SetTextEditCommandForNextKeyEvent( ui::TextEditCommand command) { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); } const std::string& RemoteTextInputClient::GetClientSourceInfo() const { // TODO(moshayedi): crbug.com/631527. NOTIMPLEMENTED_LOG_ONCE(); return base::EmptyString(); } ui::EventDispatchDetails RemoteTextInputClient::DispatchKeyEventPostIME( ui::KeyEvent* event) { remote_client_->DispatchKeyEventPostIME(ui::Event::Clone(*event), base::OnceCallback<void(bool)>()); return ui::EventDispatchDetails(); }
null
null
null
null
60,055
5,605
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
5,605
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chromeos/binder/util.h" #include "base/logging.h" #include "chromeos/binder/binder_driver_api.h" namespace binder { const char* CommandToString(uint32_t command) { switch (command) { case BR_ERROR: return "BR_ERROR"; case BR_OK: return "BR_OK"; case BR_TRANSACTION: return "BR_TRANSACTION"; case BR_REPLY: return "BR_REPLY"; case BR_ACQUIRE_RESULT: return "BR_ACQUIRE_RESULT"; case BR_DEAD_REPLY: return "BR_DEAD_REPLY"; case BR_TRANSACTION_COMPLETE: return "BR_TRANSACTION_COMPLETE"; case BR_INCREFS: return "BR_INCREFS"; case BR_ACQUIRE: return "BR_ACQUIRE"; case BR_RELEASE: return "BR_RELEASE"; case BR_DECREFS: return "BR_DECREFS"; case BR_ATTEMPT_ACQUIRE: return "BR_ATTEMPT_ACQUIRE"; case BR_NOOP: return "BR_NOOP"; case BR_SPAWN_LOOPER: return "BR_SPAWN_LOOPER"; case BR_FINISHED: return "BR_FINISHED"; case BR_DEAD_BINDER: return "BR_DEAD_BINDER"; case BR_CLEAR_DEATH_NOTIFICATION_DONE: return "BR_CLEAR_DEATH_NOTIFICATION_DONE"; case BR_FAILED_REPLY: return "BR_FAILED_REPLY"; case BC_TRANSACTION: return "BC_TRANSACTION"; case BC_REPLY: return "BC_REPLY"; case BC_ACQUIRE_RESULT: return "BC_ACQUIRE_RESULT"; case BC_FREE_BUFFER: return "BC_FREE_BUFFER"; case BC_INCREFS: return "BC_INCREFS"; case BC_ACQUIRE: return "BC_ACQUIRE"; case BC_RELEASE: return "BC_RELEASE"; case BC_DECREFS: return "BC_DECREFS"; case BC_INCREFS_DONE: return "BC_INCREFS_DONE"; case BC_ACQUIRE_DONE: return "BC_ACQUIRE_DONE"; case BC_ATTEMPT_ACQUIRE: return "BC_ATTEMPT_ACQUIRE"; case BC_REGISTER_LOOPER: return "BC_REGISTER_LOOPER"; case BC_ENTER_LOOPER: return "BC_ENTER_LOOPER"; case BC_EXIT_LOOPER: return "BC_EXIT_LOOPER"; case BC_REQUEST_DEATH_NOTIFICATION: return "BC_REQUEST_DEATH_NOTIFICATION"; case BC_CLEAR_DEATH_NOTIFICATION: return "BC_CLEAR_DEATH_NOTIFICATION"; case BC_DEAD_BINDER_DONE: return "BC_DEAD_BINDER_DONE"; } LOG(ERROR) << "Unknown command: " << command; return "UNKNOWN"; } } // namespace binder
null
null
null
null
2,468
31,967
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
31,967
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/css/properties/longhands/mask_source_type.h" #include "third_party/blink/renderer/core/css/properties/css_parsing_utils.h" #include "third_party/blink/renderer/core/style/computed_style.h" namespace blink { namespace CSSLonghand { const CSSValue* MaskSourceType::ParseSingleValue( CSSParserTokenRange& range, const CSSParserContext&, const CSSParserLocalContext&) const { return CSSPropertyParserHelpers::ConsumeCommaSeparatedList( CSSParsingUtils::ConsumeMaskSourceType, range); } static CSSValue* ValueForFillSourceType(EMaskSourceType type) { switch (type) { case EMaskSourceType::kAlpha: return CSSIdentifierValue::Create(CSSValueAlpha); case EMaskSourceType::kLuminance: return CSSIdentifierValue::Create(CSSValueLuminance); } NOTREACHED(); return nullptr; } const CSSValue* MaskSourceType::CSSValueFromComputedStyleInternal( const ComputedStyle& style, const SVGComputedStyle&, const LayoutObject*, Node*, bool allow_visited_style) const { CSSValueList* list = CSSValueList::CreateCommaSeparated(); for (const FillLayer* curr_layer = &style.MaskLayers(); curr_layer; curr_layer = curr_layer->Next()) list->Append(*ValueForFillSourceType(curr_layer->MaskSourceType())); return list; } } // namespace CSSLonghand } // namespace blink
null
null
null
null
28,830
19,487
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
19,487
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/sessions/content/content_platform_specific_tab_data.h" #include "base/memory/ptr_util.h" #include "content/public/browser/navigation_controller.h" #include "content/public/browser/web_contents.h" namespace sessions { ContentPlatformSpecificTabData::ContentPlatformSpecificTabData( content::WebContents* web_contents) : // TODO(ajwong): This does not correctly handle storage for isolated // apps. session_storage_namespace_(web_contents->GetController() .GetDefaultSessionStorageNamespace()) {} ContentPlatformSpecificTabData::ContentPlatformSpecificTabData() {} ContentPlatformSpecificTabData::~ContentPlatformSpecificTabData() {} } // namespace sessions
null
null
null
null
16,350
27,261
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
27,261
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/*********************************************************************** Copyright (c) 2006-2011, Skype Limited. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Internet Society, IETF or IETF Trust, nor the names of specific contributors, may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***********************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* conversion between prediction filter coefficients and LSFs */ /* order should be even */ /* a piecewise linear approximation maps LSF <-> cos(LSF) */ /* therefore the result is not accurate LSFs, but the two */ /* functions are accurate inverses of each other */ #include "SigProc_FIX.h" #include "tables.h" #define QA 16 /* helper function for NLSF2A(..) */ static OPUS_INLINE void silk_NLSF2A_find_poly( opus_int32 *out, /* O intermediate polynomial, QA [dd+1] */ const opus_int32 *cLSF, /* I vector of interleaved 2*cos(LSFs), QA [d] */ opus_int dd /* I polynomial order (= 1/2 * filter order) */ ) { opus_int k, n; opus_int32 ftmp; out[0] = silk_LSHIFT( 1, QA ); out[1] = -cLSF[0]; for( k = 1; k < dd; k++ ) { ftmp = cLSF[2*k]; /* QA*/ out[k+1] = silk_LSHIFT( out[k-1], 1 ) - (opus_int32)silk_RSHIFT_ROUND64( silk_SMULL( ftmp, out[k] ), QA ); for( n = k; n > 1; n-- ) { out[n] += out[n-2] - (opus_int32)silk_RSHIFT_ROUND64( silk_SMULL( ftmp, out[n-1] ), QA ); } out[1] -= ftmp; } } /* compute whitening filter coefficients from normalized line spectral frequencies */ void silk_NLSF2A( opus_int16 *a_Q12, /* O monic whitening filter coefficients in Q12, [ d ] */ const opus_int16 *NLSF, /* I normalized line spectral frequencies in Q15, [ d ] */ const opus_int d, /* I filter order (should be even) */ int arch /* I Run-time architecture */ ) { /* This ordering was found to maximize quality. It improves numerical accuracy of silk_NLSF2A_find_poly() compared to "standard" ordering. 
*/ static const unsigned char ordering16[16] = { 0, 15, 8, 7, 4, 11, 12, 3, 2, 13, 10, 5, 6, 9, 14, 1 }; static const unsigned char ordering10[10] = { 0, 9, 6, 3, 4, 5, 8, 1, 2, 7 }; const unsigned char *ordering; opus_int k, i, dd; opus_int32 cos_LSF_QA[ SILK_MAX_ORDER_LPC ]; opus_int32 P[ SILK_MAX_ORDER_LPC / 2 + 1 ], Q[ SILK_MAX_ORDER_LPC / 2 + 1 ]; opus_int32 Ptmp, Qtmp, f_int, f_frac, cos_val, delta; opus_int32 a32_QA1[ SILK_MAX_ORDER_LPC ]; silk_assert( LSF_COS_TAB_SZ_FIX == 128 ); silk_assert( d==10 || d==16 ); /* convert LSFs to 2*cos(LSF), using piecewise linear curve from table */ ordering = d == 16 ? ordering16 : ordering10; for( k = 0; k < d; k++ ) { silk_assert( NLSF[k] >= 0 ); /* f_int on a scale 0-127 (rounded down) */ f_int = silk_RSHIFT( NLSF[k], 15 - 7 ); /* f_frac, range: 0..255 */ f_frac = NLSF[k] - silk_LSHIFT( f_int, 15 - 7 ); silk_assert(f_int >= 0); silk_assert(f_int < LSF_COS_TAB_SZ_FIX ); /* Read start and end value from table */ cos_val = silk_LSFCosTab_FIX_Q12[ f_int ]; /* Q12 */ delta = silk_LSFCosTab_FIX_Q12[ f_int + 1 ] - cos_val; /* Q12, with a range of 0..200 */ /* Linear interpolation */ cos_LSF_QA[ordering[k]] = silk_RSHIFT_ROUND( silk_LSHIFT( cos_val, 8 ) + silk_MUL( delta, f_frac ), 20 - QA ); /* QA */ } dd = silk_RSHIFT( d, 1 ); /* generate even and odd polynomials using convolution */ silk_NLSF2A_find_poly( P, &cos_LSF_QA[ 0 ], dd ); silk_NLSF2A_find_poly( Q, &cos_LSF_QA[ 1 ], dd ); /* convert even and odd polynomials to opus_int32 Q12 filter coefs */ for( k = 0; k < dd; k++ ) { Ptmp = P[ k+1 ] + P[ k ]; Qtmp = Q[ k+1 ] - Q[ k ]; /* the Ptmp and Qtmp values at this stage need to fit in int32 */ a32_QA1[ k ] = -Qtmp - Ptmp; /* QA+1 */ a32_QA1[ d-k-1 ] = Qtmp - Ptmp; /* QA+1 */ } /* Convert int32 coefficients to Q12 int16 coefs */ silk_LPC_fit( a_Q12, a32_QA1, 12, QA + 1, d ); for( i = 0; silk_LPC_inverse_pred_gain( a_Q12, d, arch ) == 0 && i < MAX_LPC_STABILIZE_ITERATIONS; i++ ) { /* Prediction coefficients are (too close to) unstable; apply bandwidth expansion */ /* on the unscaled coefficients, convert to Q12 and measure again */ silk_bwexpander_32( a32_QA1, d, 65536 - silk_LSHIFT( 2, i ) ); for( k = 0; k < d; k++ ) { a_Q12[ k ] = (opus_int16)silk_RSHIFT_ROUND( a32_QA1[ k ], QA + 1 - 12 ); /* QA+1 -> Q12 */ } } }
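/*
 * A worked example of the table-lookup split above, for one hypothetical
 * Q15 NLSF value: silk_RSHIFT(NLSF[k], 15 - 7) keeps the top 7 bits as an
 * index into the 128-entry cosine table, and the remaining low 8 bits
 * become the 0..255 interpolation fraction.
 */
#include <stdio.h>

int main(void)
{
	int nlsf = 19000;			/* hypothetical Q15 input */
	int f_int = nlsf >> 8;			/* 74: table index, 0..127 */
	int f_frac = nlsf - (f_int << 8);	/* 56: fraction, 0..255 */

	printf("f_int=%d f_frac=%d\n", f_int, f_frac);
	return 0;
}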
null
null
null
null
24,124
34,100
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
199,095
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * ALSA PCM device for the * ALSA interface to ivtv PCM capture streams * * Copyright (C) 2009,2012 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
null
null
null
null
107,442
61,199
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
61,199
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/resource_coordinator/tab_manager_features.h" #include "base/metrics/field_trial_params.h" #include "chrome/common/chrome_features.h" namespace { constexpr char kTabLoadTimeoutInMsParameterName[] = "tabLoadTimeoutInMs"; } // namespace namespace features { // Enables using customized value for tab load timeout. This is used by both // staggered background tab opening and session restore in finch experiment to // see what timeout value is better. The default timeout is used when this // feature is disabled. const base::Feature kCustomizedTabLoadTimeout{ "CustomizedTabLoadTimeout", base::FEATURE_DISABLED_BY_DEFAULT}; // Enables proactive tab discarding. const base::Feature kProactiveTabDiscarding{"ProactiveTabDiscarding", base::FEATURE_DISABLED_BY_DEFAULT}; // Enables delaying the navigation of background tabs in order to improve // foreground tab's user experience. const base::Feature kStaggeredBackgroundTabOpening{ "StaggeredBackgroundTabOpening", base::FEATURE_DISABLED_BY_DEFAULT}; // This controls whether we are running experiment with staggered background // tab opening feature. For control group, this should be disabled. This depends // on |kStaggeredBackgroundTabOpening| above. const base::Feature kStaggeredBackgroundTabOpeningExperiment{ "StaggeredBackgroundTabOpeningExperiment", base::FEATURE_ENABLED_BY_DEFAULT}; } // namespace features namespace resource_coordinator { // Field-trial parameter names for proactive tab discarding. const char kProactiveTabDiscard_LowLoadedTabCountParam[] = "LowLoadedTabCount"; const char kProactiveTabDiscard_ModerateLoadedTabsPerGbRamParam[] = "ModerateLoadedTabsPerGbRam"; const char kProactiveTabDiscard_HighLoadedTabCountParam[] = "HighLoadedTabCount"; const char kProactiveTabDiscard_LowOccludedTimeoutParam[] = "LowOccludedTimeoutSeconds"; const char kProactiveTabDiscard_ModerateOccludedTimeoutParam[] = "ModerateOccludedTimeoutSeconds"; const char kProactiveTabDiscard_HighOccludedTimeoutParam[] = "HighOccludedTimeoutSeconds"; // Default values for ProactiveTabDiscardParams. // // 50% of people cap out at 4 tabs, so for them proactive discarding won't even // be invoked. See Tabs.MaxTabsInADay. // TODO(chrisha): This should eventually be informed by the number of tabs // typically used over a given time horizon (metric being developed). const uint32_t kProactiveTabDiscard_LowLoadedTabCountDefault = 4; // Testing in the lab shows that 2GB devices suffer beyond 6 tabs, and 4GB // devices suffer beyond about 12 tabs. As a very simple first step, we'll aim // at allowing 3 tabs per GB of RAM on a system before proactive discarding // kicks in. This is a system resource dependent max, which is combined with the // DefaultMaxLoadedTabCount to determine the max on a system. const uint32_t kProactiveTabDiscard_ModerateLoadedTabsPerGbRamDefault = 3; // 99.9% of people cap out with fewer than this number, so only 0.1% of the // population should ever encounter proactive discarding based on this cap. const uint32_t kProactiveTabDiscard_HighLoadedTabCountDefault = 100; // Current discarding uses 10 minutes as a minimum cap. This uses exponentially // increasing timeouts beyond that. 
const base::TimeDelta kProactiveTabDiscard_LowOccludedTimeoutDefault = base::TimeDelta::FromHours(6); const base::TimeDelta kProactiveTabDiscard_ModerateOccludedTimeoutDefault = base::TimeDelta::FromHours(1); const base::TimeDelta kProactiveTabDiscard_HighOccludedTimeoutDefault = base::TimeDelta::FromMinutes(10); void GetProactiveTabDiscardParams(ProactiveTabDiscardParams* params) { params->low_loaded_tab_count = base::GetFieldTrialParamByFeatureAsInt( features::kProactiveTabDiscarding, kProactiveTabDiscard_LowLoadedTabCountParam, kProactiveTabDiscard_LowLoadedTabCountDefault); params->moderate_loaded_tab_count_per_gb = base::GetFieldTrialParamByFeatureAsInt( features::kProactiveTabDiscarding, kProactiveTabDiscard_ModerateLoadedTabsPerGbRamParam, kProactiveTabDiscard_ModerateLoadedTabsPerGbRamDefault); params->high_loaded_tab_count = base::GetFieldTrialParamByFeatureAsInt( features::kProactiveTabDiscarding, kProactiveTabDiscard_HighLoadedTabCountParam, kProactiveTabDiscard_HighLoadedTabCountDefault); params->low_occluded_timeout = base::TimeDelta::FromSeconds(base::GetFieldTrialParamByFeatureAsInt( features::kProactiveTabDiscarding, kProactiveTabDiscard_LowOccludedTimeoutParam, kProactiveTabDiscard_LowOccludedTimeoutDefault.InSeconds())); params->moderate_occluded_timeout = base::TimeDelta::FromSeconds(base::GetFieldTrialParamByFeatureAsInt( features::kProactiveTabDiscarding, kProactiveTabDiscard_ModerateOccludedTimeoutParam, kProactiveTabDiscard_ModerateOccludedTimeoutDefault.InSeconds())); params->high_occluded_timeout = base::TimeDelta::FromSeconds(base::GetFieldTrialParamByFeatureAsInt( features::kProactiveTabDiscarding, kProactiveTabDiscard_HighOccludedTimeoutParam, kProactiveTabDiscard_HighOccludedTimeoutDefault.InSeconds())); } base::TimeDelta GetTabLoadTimeout(const base::TimeDelta& default_timeout) { int timeout_in_ms = base::GetFieldTrialParamByFeatureAsInt( features::kCustomizedTabLoadTimeout, kTabLoadTimeoutInMsParameterName, default_timeout.InMilliseconds()); if (timeout_in_ms <= 0) return default_timeout; return base::TimeDelta::FromMilliseconds(timeout_in_ms); } } // namespace resource_coordinator
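/*
 * A minimal C sketch of the fallback rule in GetTabLoadTimeout() above: a
 * non-positive field-trial parameter means "unset or invalid", so the
 * caller's default timeout wins. The function name is hypothetical, not
 * part of the Chromium API.
 */
static long tab_load_timeout_ms(long trial_param_ms, long default_ms)
{
	return trial_param_ms <= 0 ? default_ms : trial_param_ms;
}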
null
null
null
null
58,062
57,212
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
57,212
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_CHROMEOS_FIRST_RUN_STEP_H_ #define CHROME_BROWSER_CHROMEOS_FIRST_RUN_STEP_H_ #include <string> #include "base/macros.h" #include "base/time/time.h" namespace chromeos { class FirstRunActor; class FirstRunController; namespace first_run { class Step { public: Step(const std::string& name, FirstRunController* controller, FirstRunActor* actor); virtual ~Step(); // Step shows its content. void Show(); // Called before hiding step. void OnBeforeHide(); // Called after step has been hidden. void OnAfterHide(); const std::string& name() const { return name_; } protected: FirstRunController* first_run_controller() { return first_run_controller_; } FirstRunActor* actor() const { return actor_; } // Called from Show method. virtual void DoShow() = 0; // Called from OnBeforeHide. Step implementation could override this method to // react on corresponding event. virtual void DoOnBeforeHide() {} // Called from OnAfterHide. Step implementation could override this method to // react on event. virtual void DoOnAfterHide() {} private: // Records time spent on step to UMA. void RecordCompletion(); std::string name_; FirstRunController* first_run_controller_; FirstRunActor* actor_; base::Time show_time_; DISALLOW_COPY_AND_ASSIGN(Step); }; } // namespace first_run } // namespace chromeos #endif // CHROME_BROWSER_CHROMEOS_FIRST_RUN_STEP_H_
null
null
null
null
54,075
25,520
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
25,520
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef EXTENSIONS_BROWSER_EXTENSION_REGISTRY_OBSERVER_H_ #define EXTENSIONS_BROWSER_EXTENSION_REGISTRY_OBSERVER_H_ #include "extensions/browser/uninstall_reason.h" #include "extensions/common/extension.h" namespace content { class BrowserContext; } namespace extensions { class ExtensionRegistry; // Observer for ExtensionRegistry. Exists in a separate header file to reduce // the include file burden for typical clients of ExtensionRegistry. class ExtensionRegistryObserver { public: virtual ~ExtensionRegistryObserver() {} // Called after an extension is loaded. The extension will exclusively exist // in the enabled_extensions set of ExtensionRegistry. virtual void OnExtensionLoaded( content::BrowserContext* browser_context, const Extension* extension) {} // Called after an extension is loaded and all necessary browser state is // initialized to support the start of the extension's child process. virtual void OnExtensionReady(content::BrowserContext* browser_context, const Extension* extension) {} // Called after an extension is unloaded. The extension no longer exists in // the set |ExtensionRegistry::enabled_extensions()|, but it can still be a // member of one of the other sets, like disabled, blacklisted or terminated. virtual void OnExtensionUnloaded(content::BrowserContext* browser_context, const Extension* extension, UnloadedExtensionReason reason) {} // Called when |extension| is about to be installed. |is_update| is true if // the installation is the result of it updating, in which case |old_name| is // the name of the extension's previous version. // The ExtensionRegistry will not be tracking |extension| at the time this // event is fired, but will be immediately afterwards (note: not necessarily // enabled; it might be installed in the disabled or even blacklisted sets, // for example). // Note that it's much more common to care about extensions being loaded // (OnExtensionLoaded). // // TODO(tmdiep): We should stash the state of the previous extension version // somewhere and have observers retrieve it. |is_update|, and |old_name| can // be removed when this is done. virtual void OnExtensionWillBeInstalled( content::BrowserContext* browser_context, const Extension* extension, bool is_update, const std::string& old_name) {} // Called when the installation of |extension| is complete. At this point the // extension is tracked in one of the ExtensionRegistry sets, but is not // necessarily enabled. virtual void OnExtensionInstalled(content::BrowserContext* browser_context, const Extension* extension, bool is_update) {} // Called after an extension is uninstalled. The extension no longer exists in // any of the ExtensionRegistry sets (enabled, disabled, etc.). virtual void OnExtensionUninstalled(content::BrowserContext* browser_context, const Extension* extension, UninstallReason reason) {} // Notifies observers that the observed object is going away. virtual void OnShutdown(ExtensionRegistry* registry) {} }; } // namespace extensions #endif // EXTENSIONS_BROWSER_EXTENSION_REGISTRY_OBSERVER_H_
null
null
null
null
22,383
30,447
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
30,447
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_ENCRYPTED_MEDIA_CLIENT_H_ #define THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_ENCRYPTED_MEDIA_CLIENT_H_ #include "third_party/blink/public/platform/web_common.h" namespace blink { class WebEncryptedMediaRequest; class BLINK_PLATFORM_EXPORT WebEncryptedMediaClient { public: virtual ~WebEncryptedMediaClient(); virtual void RequestMediaKeySystemAccess(WebEncryptedMediaRequest) = 0; }; } // namespace blink #endif // THIRD_PARTY_BLINK_PUBLIC_PLATFORM_WEB_ENCRYPTED_MEDIA_CLIENT_H_
null
null
null
null
27,310
14,330
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,330
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/ntp_snippets/category_status.h" namespace ntp_snippets { bool IsCategoryStatusAvailable(CategoryStatus status) { // Note: This code is duplicated in SnippetsBridge.java. return status == CategoryStatus::AVAILABLE_LOADING || status == CategoryStatus::AVAILABLE; } bool IsCategoryStatusInitOrAvailable(CategoryStatus status) { // Note: This code is duplicated in SnippetsBridge.java. return status == CategoryStatus::INITIALIZING || IsCategoryStatusAvailable(status); } } // namespace ntp_snippets
null
null
null
null
11,193
55,047
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
55,047
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/renderer/safe_browsing/phishing_classifier.h" #include <string> #include "base/bind.h" #include "base/callback.h" #include "base/compiler_specific.h" #include "base/location.h" #include "base/logging.h" #include "base/metrics/histogram_macros.h" #include "base/single_thread_task_runner.h" #include "base/strings/string_util.h" #include "base/threading/thread_task_runner_handle.h" #include "chrome/common/url_constants.h" #include "chrome/renderer/safe_browsing/feature_extractor_clock.h" #include "chrome/renderer/safe_browsing/features.h" #include "chrome/renderer/safe_browsing/phishing_dom_feature_extractor.h" #include "chrome/renderer/safe_browsing/phishing_term_feature_extractor.h" #include "chrome/renderer/safe_browsing/phishing_url_feature_extractor.h" #include "chrome/renderer/safe_browsing/scorer.h" #include "components/safe_browsing/proto/csd.pb.h" #include "content/public/renderer/render_frame.h" #include "crypto/sha2.h" #include "third_party/blink/public/platform/web_url.h" #include "third_party/blink/public/platform/web_url_request.h" #include "third_party/blink/public/web/web_document.h" #include "third_party/blink/public/web/web_document_loader.h" #include "third_party/blink/public/web/web_local_frame.h" #include "third_party/blink/public/web/web_view.h" #include "url/gurl.h" namespace safe_browsing { const float PhishingClassifier::kInvalidScore = -1.0; const float PhishingClassifier::kPhishyThreshold = 0.5; namespace { // Used for UMA, do not reorder. enum SkipClassificationReason { CLASSIFICATION_PROCEED = 0, DEPRECATED_SKIP_HTTPS = 1, SKIP_NONE_GET = 2, SKIP_SCHEME_NOT_SUPPORTED = 3, SKIP_REASON_MAX }; void RecordReasonForSkippingClassificationToUMA( SkipClassificationReason reason) { UMA_HISTOGRAM_ENUMERATION("SBClientPhishing.SkipClassificationReason", reason, SKIP_REASON_MAX); } } // namespace PhishingClassifier::PhishingClassifier(content::RenderFrame* render_frame, FeatureExtractorClock* clock) : render_frame_(render_frame), scorer_(NULL), clock_(clock), weak_factory_(this) { Clear(); } PhishingClassifier::~PhishingClassifier() { // The RenderView should have called CancelPendingClassification() before // we are destroyed. CheckNoPendingClassification(); } void PhishingClassifier::set_phishing_scorer(const Scorer* scorer) { CheckNoPendingClassification(); scorer_ = scorer; if (scorer_) { url_extractor_.reset(new PhishingUrlFeatureExtractor); dom_extractor_.reset(new PhishingDOMFeatureExtractor(clock_.get())); term_extractor_.reset(new PhishingTermFeatureExtractor( &scorer_->page_terms(), &scorer_->page_words(), scorer_->max_words_per_term(), scorer_->murmurhash3_seed(), scorer_->max_shingles_per_page(), scorer_->shingle_size(), clock_.get())); } else { // We're disabling client-side phishing detection, so tear down all // of the relevant objects. url_extractor_.reset(); dom_extractor_.reset(); term_extractor_.reset(); } } bool PhishingClassifier::is_ready() const { return scorer_ != NULL; } void PhishingClassifier::BeginClassification( const base::string16* page_text, const DoneCallback& done_callback) { DCHECK(is_ready()); // The RenderView should have called CancelPendingClassification() before // starting a new classification, so DCHECK this. 
CheckNoPendingClassification(); // However, in an opt build, we will go ahead and clean up the pending // classification so that we can start in a known state. CancelPendingClassification(); page_text_ = page_text; done_callback_ = done_callback; // For consistency, we always want to invoke the DoneCallback // asynchronously, rather than directly from this method. To ensure that // this is the case, post a task to begin feature extraction on the next // iteration of the message loop. base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&PhishingClassifier::BeginFeatureExtraction, weak_factory_.GetWeakPtr())); } void PhishingClassifier::BeginFeatureExtraction() { blink::WebLocalFrame* frame = render_frame_->GetWebFrame(); // Check whether the URL is one that we should classify. // Currently, we only classify http/https URLs that are GET requests. GURL url(frame->GetDocument().Url()); if (!url.SchemeIsHTTPOrHTTPS()) { RecordReasonForSkippingClassificationToUMA(SKIP_SCHEME_NOT_SUPPORTED); RunFailureCallback(); return; } blink::WebDocumentLoader* document_loader = frame->GetDocumentLoader(); if (!document_loader || document_loader->GetRequest().HttpMethod().Ascii() != "GET") { if (document_loader) RecordReasonForSkippingClassificationToUMA(SKIP_NONE_GET); RunFailureCallback(); return; } RecordReasonForSkippingClassificationToUMA(CLASSIFICATION_PROCEED); features_.reset(new FeatureMap); if (!url_extractor_->ExtractFeatures(url, features_.get())) { RunFailureCallback(); return; } // DOM feature extraction can take awhile, so it runs asynchronously // in several chunks of work and invokes the callback when finished. dom_extractor_->ExtractFeatures( frame->GetDocument(), features_.get(), base::Bind(&PhishingClassifier::DOMExtractionFinished, base::Unretained(this))); } void PhishingClassifier::CancelPendingClassification() { // Note that cancelling the feature extractors is simply a no-op if they // were not running. DCHECK(is_ready()); dom_extractor_->CancelPendingExtraction(); term_extractor_->CancelPendingExtraction(); weak_factory_.InvalidateWeakPtrs(); Clear(); } void PhishingClassifier::DOMExtractionFinished(bool success) { shingle_hashes_.reset(new std::set<uint32_t>); if (success) { // Term feature extraction can take awhile, so it runs asynchronously // in several chunks of work and invokes the callback when finished. term_extractor_->ExtractFeatures( page_text_, features_.get(), shingle_hashes_.get(), base::Bind(&PhishingClassifier::TermExtractionFinished, base::Unretained(this))); } else { RunFailureCallback(); } } void PhishingClassifier::TermExtractionFinished(bool success) { if (success) { blink::WebLocalFrame* main_frame = render_frame_->GetWebFrame(); // Hash all of the features so that they match the model, then compute // the score. 
FeatureMap hashed_features; ClientPhishingRequest verdict; verdict.set_model_version(scorer_->model_version()); verdict.set_url(main_frame->GetDocument().Url().GetString().Utf8()); for (const auto& it : features_->features()) { DVLOG(2) << "Feature: " << it.first << " = " << it.second; bool result = hashed_features.AddRealFeature( crypto::SHA256HashString(it.first), it.second); DCHECK(result); ClientPhishingRequest::Feature* feature = verdict.add_feature_map(); feature->set_name(it.first); feature->set_value(it.second); } for (const auto& it : *shingle_hashes_) { verdict.add_shingle_hashes(it); } float score = static_cast<float>(scorer_->ComputeScore(hashed_features)); verdict.set_client_score(score); verdict.set_is_phishing(score >= kPhishyThreshold); RunCallback(verdict); } else { RunFailureCallback(); } } void PhishingClassifier::CheckNoPendingClassification() { DCHECK(done_callback_.is_null()); DCHECK(!page_text_); if (!done_callback_.is_null() || page_text_) { LOG(ERROR) << "Classification in progress, missing call to " << "CancelPendingClassification"; } } void PhishingClassifier::RunCallback(const ClientPhishingRequest& verdict) { done_callback_.Run(verdict); Clear(); } void PhishingClassifier::RunFailureCallback() { ClientPhishingRequest verdict; // In this case we're not guaranteed to have a valid URL. Just set it // to the empty string to make sure we have a valid protocol buffer. verdict.set_url(""); verdict.set_client_score(kInvalidScore); verdict.set_is_phishing(false); RunCallback(verdict); } void PhishingClassifier::Clear() { page_text_ = NULL; done_callback_.Reset(); features_.reset(NULL); shingle_hashes_.reset(NULL); } } // namespace safe_browsing
null
null
null
null
51,910
9,776
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,776
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chromecast/renderer/memory_pressure_observer_impl.h" #include "base/memory/memory_pressure_listener.h" namespace chromecast { MemoryPressureObserverImpl::MemoryPressureObserverImpl( mojom::MemoryPressureObserverPtr* proxy) : binding_(this, mojo::MakeRequest(proxy)) {} MemoryPressureObserverImpl::~MemoryPressureObserverImpl() = default; void MemoryPressureObserverImpl::MemoryPressureLevelChanged( int32_t pressure_level) { base::MemoryPressureListener::NotifyMemoryPressure( static_cast<base::MemoryPressureListener::MemoryPressureLevel>( pressure_level)); } } // namespace chromecast
null
null
null
null
6,639
57,328
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
57,328
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/arc/test/arc_data_removed_waiter.h" #include "base/run_loop.h" namespace arc { ArcDataRemovedWaiter::ArcDataRemovedWaiter() { DCHECK(ArcSessionManager::Get()); ArcSessionManager::Get()->AddObserver(this); } ArcDataRemovedWaiter::~ArcDataRemovedWaiter() { ArcSessionManager::Get()->RemoveObserver(this); } void ArcDataRemovedWaiter::Wait() { run_loop_ = std::make_unique<base::RunLoop>(); run_loop_->Run(); run_loop_.reset(); } void ArcDataRemovedWaiter::OnArcDataRemoved() { if (!run_loop_) return; run_loop_->Quit(); } } // namespace arc
null
null
null
null
54,191
815
null
train_val
31e986bc171719c9e6d40d0c2cb1501796a69e6c
259,770
php-src
0
https://github.com/php/php-src
2016-10-24 10:37:20+01:00
/* * "streamable kanji code filter and converter" * Copyright (c) 1998-2002 HappySize, Inc. All rights reserved. * * LICENSE NOTICES * * This file is part of "streamable kanji code filter and converter", * which is distributed under the terms of GNU Lesser General Public * License (version 2) as published by the Free Software Foundation. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with "streamable kanji code filter and converter"; * if not, write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * * The author of this file: * */ /* * The source code included in this files was separated from mbfilter_euc_cn.h * by Moriyoshi Koizumi <moriyoshi@php.net> on 4 Dec 2002. * */ #ifndef MBFL_MBFILTER_EUC_CN_H #define MBFL_MBFILTER_EUC_CN_H #include "mbfilter.h" extern const mbfl_encoding mbfl_encoding_euc_cn; extern const struct mbfl_identify_vtbl vtbl_identify_euccn; extern const struct mbfl_convert_vtbl vtbl_euccn_wchar; extern const struct mbfl_convert_vtbl vtbl_wchar_euccn; int mbfl_filt_conv_euccn_wchar(int c, mbfl_convert_filter *filter); int mbfl_filt_conv_wchar_euccn(int c, mbfl_convert_filter *filter); #endif /* MBFL_MBFILTER_EUC_CN_H */
null
null
null
null
119,691
762
null
train_val
c536b6be1a72aefd632d5530106a67c516cb9f4b
257,149
openssl
0
https://github.com/openssl/openssl
2016-09-22 23:12:38+01:00
/* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include <stdio.h> #include <string.h> #include <openssl/md4.h> #include <openssl/crypto.h> #ifdef CHARSET_EBCDIC # include <openssl/ebcdic.h> #endif unsigned char *MD4(const unsigned char *d, size_t n, unsigned char *md) { MD4_CTX c; static unsigned char m[MD4_DIGEST_LENGTH]; if (md == NULL) md = m; if (!MD4_Init(&c)) return NULL; #ifndef CHARSET_EBCDIC MD4_Update(&c, d, n); #else { char temp[1024]; unsigned long chunk; while (n > 0) { chunk = (n > sizeof(temp)) ? sizeof(temp) : n; ebcdic2ascii(temp, d, chunk); MD4_Update(&c, temp, chunk); n -= chunk; d += chunk; } } #endif MD4_Final(md, &c); OPENSSL_cleanse(&c, sizeof(c)); /* security consideration */ return (md); }
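/*
 * A minimal usage example of the one-shot MD4() helper defined above,
 * hashing a short ASCII string and printing the digest in hex. This is an
 * illustration of the API, not part of the library source.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/md4.h>

int main(void)
{
	const char *msg = "abc";
	unsigned char digest[MD4_DIGEST_LENGTH];
	int i;

	MD4((const unsigned char *)msg, strlen(msg), digest);
	for (i = 0; i < MD4_DIGEST_LENGTH; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}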
null
null
null
null
118,594
20,173
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
185,168
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/io-mapping.h> #include <linux/delay.h> #include <linux/kmod.h> #include <linux/etherdevice.h> #include <net/devlink.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> #include "mlx4.h" #include "fw.h" #include "icm.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); struct workqueue_struct *mlx4_wq; #ifdef CONFIG_MLX4_DEBUG int mlx4_debug_level = 0; module_param_named(debug_level, mlx4_debug_level, int, 0644); MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); #endif /* CONFIG_MLX4_DEBUG */ #ifdef CONFIG_PCI_MSI static int msi_x = 1; module_param(msi_x, int, 0444); MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #else /* CONFIG_PCI_MSI */ #define msi_x (0) #endif /* CONFIG_PCI_MSI */ static uint8_t num_vfs[3] = {0, 0, 0}; static int num_vfs_argc; module_param_array(num_vfs, byte , &num_vfs_argc, 0444); MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" "num_vfs=port1,port2,port1+2"); static uint8_t probe_vf[3] = {0, 0, 0}; static int probe_vfs_argc; module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" "probe_vf=port1,port2,port1+2"); int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; module_param_named(log_num_mgm_entry_size, mlx4_log_num_mgm_entry_size, int, 0444); MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" " of qp per mcg, for example:" " 10 gives 248.range: 7 <=" " log_num_mgm_entry_size <= 12." 
" To activate device managed" " flow steering when available, set to -1"); static bool enable_64b_cqe_eqe = true; module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); static bool enable_4k_uar; module_param(enable_4k_uar, bool, 0444); MODULE_PARM_DESC(enable_4k_uar, "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)"); #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ MLX4_FUNC_CAP_DMFS_A0_STATIC) #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; static struct mlx4_profile default_profile = { .num_qp = 1 << 18, .num_srq = 1 << 16, .rdmarc_per_qp = 1 << 4, .num_cq = 1 << 16, .num_mcg = 1 << 13, .num_mpt = 1 << 19, .num_mtt = 1 << 20, /* It is really num mtt segements */ }; static struct mlx4_profile low_mem_profile = { .num_qp = 1 << 17, .num_srq = 1 << 6, .rdmarc_per_qp = 1 << 4, .num_cq = 1 << 8, .num_mcg = 1 << 8, .num_mpt = 1 << 9, .num_mtt = 1 << 7, }; static int log_num_mac = 7; module_param_named(log_num_mac, log_num_mac, int, 0444); MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); static int log_num_vlan; module_param_named(log_num_vlan, log_num_vlan, int, 0444); MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); /* Log2 max number of VLANs per ETH port (0-7) */ #define MLX4_LOG_NUM_VLANS 7 #define MLX4_MIN_LOG_NUM_VLANS 0 #define MLX4_MIN_LOG_NUM_MAC 1 static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; static int arr_argc = 2; module_param_array(port_type_array, int, &arr_argc, 0444); MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " "1 for IB, 2 for Ethernet"); struct mlx4_port_config { struct list_head list; enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; struct pci_dev *pdev; }; static atomic_t pf_loading = ATOMIC_INIT(0); static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { /* The reserved_uars is calculated by system page size unit. 
* Therefore, adjustment is added when the uar page size is less * than the system page size */ dev->caps.reserved_uars = max_t(int, mlx4_get_num_reserved_uar(dev), dev_cap->reserved_uars / (1 << (PAGE_SHIFT - dev->uar_page_shift))); } int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { int i; if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { for (i = 0; i < dev->caps.num_ports - 1; i++) { if (port_type[i] != port_type[i + 1]) { mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); return -EINVAL; } } } for (i = 0; i < dev->caps.num_ports; i++) { if (!(port_type[i] & dev->caps.supported_type[i+1])) { mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", i + 1); return -EINVAL; } } return 0; } static void mlx4_set_port_mask(struct mlx4_dev *dev) { int i; for (i = 1; i <= dev->caps.num_ports; ++i) dev->caps.port_mask[i] = dev->caps.port_type[i]; } enum { MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, }; static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err = 0; struct mlx4_func func; if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { err = mlx4_QUERY_FUNC(dev, &func, 0); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); return err; } dev_cap->max_eqs = func.max_eq; dev_cap->reserved_eqs = func.rsvd_eqs; dev_cap->reserved_uars = func.rsvd_uars; err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; } return err; } static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) { struct mlx4_caps *dev_cap = &dev->caps; /* FW not supporting or cancelled by user */ if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) return; /* Must have 64B CQE_EQE enabled by FW to use bigger stride * When FW has NCSI it may decide not to report 64B CQE/EQEs */ if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; return; } if (cache_line_size() == 128 || cache_line_size() == 256) { mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); /* Changing the real data inside CQE size to 32B */ dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; if (mlx4_is_master(dev)) dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; } else { if (cache_line_size() != 32 && cache_line_size() != 64) mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; } } static int _mlx4_dev_port(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) { dev->caps.vl_cap[port] = port_cap->max_vl; dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; /* set gid and pkey table operating lengths by default * to non-sriov values */ dev->caps.gid_table_len[port] = port_cap->max_gids; dev->caps.pkey_table_len[port] = port_cap->max_pkeys; dev->caps.port_width_cap[port] = port_cap->max_port_width; dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; dev->caps.max_tc_eth = port_cap->max_tc_eth; dev->caps.def_mac[port] = port_cap->def_mac; dev->caps.supported_type[port] = port_cap->supported_port_types; dev->caps.suggested_type[port] = port_cap->suggested_type; dev->caps.default_sense[port] = port_cap->default_sense; dev->caps.trans_type[port] = port_cap->trans_type; 
dev->caps.vendor_oui[port] = port_cap->vendor_oui; dev->caps.wavelength[port] = port_cap->wavelength; dev->caps.trans_code[port] = port_cap->trans_code; return 0; } static int mlx4_dev_port(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) { int err = 0; err = mlx4_QUERY_PORT(dev, port, port_cap); if (err) mlx4_err(dev, "QUERY_PORT command failed.\n"); return err; } static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) { if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) return; if (mlx4_is_mfunc(dev)) { mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; return; } if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { mlx4_dbg(dev, "Keep FCS is not supported - Disabling Ignore FCS"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; return; } } #define MLX4_A0_STEERING_TABLE_SIZE 256 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; int i; err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; } mlx4_dev_cap_dump(dev, dev_cap); if (dev_cap->min_page_sz > PAGE_SIZE) { mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", dev_cap->min_page_sz, PAGE_SIZE); return -ENODEV; } if (dev_cap->num_ports > MLX4_MAX_PORTS) { mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", dev_cap->num_ports, MLX4_MAX_PORTS); return -ENODEV; } if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev_cap->uar_size, (unsigned long long) pci_resource_len(dev->persist->pdev, 2)); return -ENODEV; } dev->caps.num_ports = dev_cap->num_ports; dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? dev->caps.num_sys_eqs : MLX4_MAX_EQ_NUM; for (i = 1; i <= dev->caps.num_ports; ++i) { err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); if (err) { mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); return err; } } dev->caps.uar_page_size = PAGE_SIZE; dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; dev->caps.bf_reg_size = dev_cap->bf_reg_size; dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; dev->caps.max_sq_sg = dev_cap->max_sq_sg; dev->caps.max_rq_sg = dev_cap->max_rq_sg; dev->caps.max_wqes = dev_cap->max_qp_sz; dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; dev->caps.max_srq_wqes = dev_cap->max_srq_sz; dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; dev->caps.reserved_srqs = dev_cap->reserved_srqs; dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; /* * Subtract 1 from the limit because we need to allocate a * spare CQE so the HCA HW can tell the difference between an * empty CQ and a full CQ. */ dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs; dev->caps.reserved_mtts = dev_cap->reserved_mtts; dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? dev_cap->reserved_xrcds : 0; dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 
dev_cap->max_xrcds : 0; dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; dev->caps.flags2 = dev_cap->flags2; dev->caps.bmme_flags = dev_cap->bmme_flags; dev->caps.reserved_lkey = dev_cap->reserved_lkey; dev->caps.stat_rate_support = dev_cap->stat_rate_support; dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; /* Save uar page shift */ if (!mlx4_is_slave(dev)) { /* Virtual PCI function needs to determine UAR page size from * firmware. Only master PCI function can set the uar page size */ if (enable_4k_uar) dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; else dev->uar_page_shift = PAGE_SHIFT; mlx4_set_num_reserved_uars(dev, dev_cap); } if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { struct mlx4_init_hca_param hca_param; memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); /* Turn off PHV_EN flag in case phv_check_en is set. * phv_check_en is a HW check that parse the packet and verify * phv bit was reported correctly in the wqe. To allow QinQ * PHV_EN flag should be set and phv_check_en must be cleared * otherwise QinQ packets will be drop by the HW. */ if (err || hca_param.phv_check_en) dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; } /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; /* Don't do sense port on multifunction devices (for now at least) */ if (mlx4_is_mfunc(dev)) dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; if (mlx4_low_memory_profile()) { dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; } else { dev->caps.log_num_macs = log_num_mac; dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; } for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; if (dev->caps.supported_type[i]) { /* if only ETH is supported - assign ETH */ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; /* if only IB is supported, assign IB */ else if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_IB) dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; else { /* if IB and ETH are supported, we set the port * type according to user selection of port type; * if user selected none, take the FW hint */ if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) dev->caps.port_type[i] = dev->caps.suggested_type[i] ? MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; else dev->caps.port_type[i] = port_type_array[i - 1]; } } /* * Link sensing is allowed on the port if 3 conditions are true: * 1. Both protocols are supported on the port. * 2. Different types are supported on the port * 3. 
FW declared that it supports link sensing */ mlx4_priv(dev)->sense.sense_allowed[i] = ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); /* * If "default_sense" bit is set, we move the port to "AUTO" mode * and perform sense_port FW command to try and set the correct * port type from beginning */ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; mlx4_SENSE_PORT(dev, i, &sensed_port); if (sensed_port != MLX4_PORT_TYPE_NONE) dev->caps.port_type[i] = sensed_port; } else { dev->caps.possible_type[i] = dev->caps.port_type[i]; } if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_macs); } if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_vlans); } } if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) && (port_type_array[0] == MLX4_PORT_TYPE_IB) && (port_type_array[1] == MLX4_PORT_TYPE_ETH)) { mlx4_warn(dev, "Granular QoS per VF not supported with IB/Eth configuration\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP; } dev->caps.max_counters = dev_cap->max_counters; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = (1 << dev->caps.log_num_macs) * (1 << dev->caps.log_num_vlans) * dev->caps.num_ports; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; if (dev_cap->dmfs_high_rate_qpn_base > 0 && dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; else dev->caps.dmfs_high_rate_qpn_base = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; if (dev_cap->dmfs_high_rate_qpn_range > 0 && dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; } else { dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; dev->caps.dmfs_high_rate_qpn_base = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; } dev->caps.rl_caps = dev_cap->rl_caps; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = dev->caps.dmfs_high_rate_qpn_range; dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; dev->caps.sqp_demux = (mlx4_is_master(dev)) ? 
MLX4_MAX_NUM_SLAVES : 0; if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { if (dev_cap->flags & (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; } if (dev_cap->flags2 & (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; } } if ((dev->caps.flags & (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; if (!mlx4_is_slave(dev)) { mlx4_enable_cqe_eqe_stride(dev); dev->caps.alloc_res_qp_mask = (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | MLX4_RESERVE_A0_QP; if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { mlx4_warn(dev, "Old device ETS support detected\n"); mlx4_warn(dev, "Consider upgrading device FW.\n"); dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; } } else { dev->caps.alloc_res_qp_mask = 0; } mlx4_enable_ignore_fcs(dev); return 0; } static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width) { u32 lnkcap1, lnkcap2; int err1, err2; #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, &lnkcap1); err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, &lnkcap2); if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) *speed = PCIE_SPEED_8_0GT; else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) *speed = PCIE_SPEED_5_0GT; else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) *speed = PCIE_SPEED_2_5GT; } if (!err1) { *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; if (!lnkcap2) { /* pre-r3.0 */ if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) *speed = PCIE_SPEED_5_0GT; else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) *speed = PCIE_SPEED_2_5GT; } } if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { return err1 ? err1 : err2 ? err2 : -EINVAL; } return 0; } static void mlx4_check_pcie_caps(struct mlx4_dev *dev) { enum pcie_link_width width, width_cap; enum pci_bus_speed speed, speed_cap; int err; #define PCIE_SPEED_STR(speed) \ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ speed == PCIE_SPEED_2_5GT ? 
"2.5GT/s" : \ "Unknown") err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); if (err) { mlx4_warn(dev, "Unable to determine PCIe device BW capabilities\n"); return; } err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); if (err || speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { mlx4_warn(dev, "Unable to determine PCI device chain minimum BW\n"); return; } if (width != width_cap || speed != speed_cap) mlx4_warn(dev, "PCIe BW is different than device's capability\n"); mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", width, width_cap); return; } /*The function checks if there are live vf, return the num of them*/ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state; int i; int ret = 0; for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { s_state = &priv->mfunc.master.slave_state[i]; if (s_state->active && s_state->last_cmd != MLX4_COMM_CMD_RESET) { mlx4_warn(dev, "%s: slave: %d is still active\n", __func__, i); ret++; } } return ret; } int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) { u32 qk = MLX4_RESERVED_QKEY_BASE; if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || qpn < dev->phys_caps.base_proxy_sqpn) return -EINVAL; if (qpn >= dev->phys_caps.base_tunnel_sqpn) /* tunnel qp */ qk += qpn - dev->phys_caps.base_tunnel_sqpn; else qk += qpn - dev->phys_caps.base_proxy_sqpn; *qkey = qk; return 0; } EXPORT_SYMBOL(mlx4_get_parav_qkey); void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) { struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); if (!mlx4_is_master(dev)) return; priv->virt2phys_pkey[slave][port - 1][i] = val; } EXPORT_SYMBOL(mlx4_sync_pkey_table); void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) { struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); if (!mlx4_is_master(dev)) return; priv->slave_node_guids[slave] = guid; } EXPORT_SYMBOL(mlx4_put_slave_node_guid); __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); if (!mlx4_is_master(dev)) return 0; return priv->slave_node_guids[slave]; } EXPORT_SYMBOL(mlx4_get_slave_node_guid); int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_slave; if (!mlx4_is_master(dev)) return 0; s_slave = &priv->mfunc.master.slave_state[slave]; return !!s_slave->active; } EXPORT_SYMBOL(mlx4_is_slave_active); void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, struct _rule_hw *eth_header) { if (is_multicast_ether_addr(eth_header->eth.dst_mac) || is_broadcast_ether_addr(eth_header->eth.dst_mac)) { struct mlx4_net_trans_rule_hw_eth *eth = (struct mlx4_net_trans_rule_hw_eth *)eth_header; struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1); bool last_rule = next_rule->size == 0 && next_rule->id == 0 && next_rule->rsvd == 0; if (last_rule) ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC); } } EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio); static void slave_adjust_steering_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, struct mlx4_init_hca_param *hca_param) { dev->caps.steering_mode = hca_param->steering_mode; if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { dev->caps.num_qp_per_mgm = 
dev_cap->fs_max_num_qp_per_entry; dev->caps.fs_log_max_ucast_qp_range_size = dev_cap->fs_log_max_ucast_qp_range_size; } else dev->caps.num_qp_per_mgm = 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); mlx4_dbg(dev, "Steering mode is: %s\n", mlx4_steering_mode_str(dev->caps.steering_mode)); } static int mlx4_slave_cap(struct mlx4_dev *dev) { int err; u32 page_size; struct mlx4_dev_cap dev_cap; struct mlx4_func_cap func_cap; struct mlx4_init_hca_param hca_param; u8 i; memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); if (err) { mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); return err; } /* fail if the hca has an unknown global capability * at this time global_caps should be always zeroed */ if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); return -EINVAL; } dev->caps.hca_core_clock = hca_param.hca_core_clock; memset(&dev_cap, 0, sizeof(dev_cap)); dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; } err = mlx4_QUERY_FW(dev); if (err) mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); page_size = ~dev->caps.page_size_cap + 1; mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); if (page_size > PAGE_SIZE) { mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", page_size, PAGE_SIZE); return -ENODEV; } /* Set uar_page_shift for VF */ dev->uar_page_shift = hca_param.uar_page_sz + 12; /* Make sure the master uar page size is valid */ if (dev->uar_page_shift > PAGE_SHIFT) { mlx4_err(dev, "Invalid configuration: uar page size is larger than system page size\n"); return -ENODEV; } /* Set reserved_uars based on the uar_page_shift */ mlx4_set_num_reserved_uars(dev, &dev_cap); /* Although uar page size in FW differs from system page size, * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core) * still works with assumption that uar page size == system page size */ dev->caps.uar_page_size = PAGE_SIZE; memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); if (err) { mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", err); return err; } if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != PF_CONTEXT_BEHAVIOUR_MASK) { mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); return -EINVAL; } dev->caps.num_ports = func_cap.num_ports; dev->quotas.qp = func_cap.qp_quota; dev->quotas.srq = func_cap.srq_quota; dev->quotas.cq = func_cap.cq_quota; dev->quotas.mpt = func_cap.mpt_quota; dev->quotas.mtt = func_cap.mtt_quota; dev->caps.num_qps = 1 << hca_param.log_num_qps; dev->caps.num_srqs = 1 << hca_param.log_num_srqs; dev->caps.num_cqs = 1 << hca_param.log_num_cqs; dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; dev->caps.num_eqs = func_cap.max_eq; dev->caps.reserved_eqs = func_cap.reserved_eq; dev->caps.reserved_lkey = func_cap.reserved_lkey; dev->caps.num_pds = MLX4_NUM_PDS; dev->caps.num_mgms = 0; dev->caps.num_amgms = 0; if (dev->caps.num_ports > MLX4_MAX_PORTS) { mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", dev->caps.num_ports, MLX4_MAX_PORTS); return -ENODEV; } mlx4_replace_zero_macs(dev); dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, 
sizeof (u32), GFP_KERNEL); dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || !dev->caps.qp0_qkey) { err = -ENOMEM; goto err_mem; } for (i = 1; i <= dev->caps.num_ports; ++i) { err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); if (err) { mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", i, err); goto err_mem; } dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; dev->caps.port_mask[i] = dev->caps.port_type[i]; dev->caps.phys_port_id[i] = func_cap.phys_port_id; err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, &dev->caps.gid_table_len[i], &dev->caps.pkey_table_len[i]); if (err) goto err_mem; } if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > pci_resource_len(dev->persist->pdev, 2)) { mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev->caps.uar_page_size * dev->caps.num_uars, (unsigned long long) pci_resource_len(dev->persist->pdev, 2)); err = -ENOMEM; goto err_mem; } if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { dev->caps.eqe_size = 64; dev->caps.eqe_factor = 1; } else { dev->caps.eqe_size = 32; dev->caps.eqe_factor = 0; } if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { dev->caps.cqe_size = 64; dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { dev->caps.eqe_size = hca_param.eqe_size; dev->caps.eqe_factor = 0; } if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { dev->caps.cqe_size = hca_param.cqe_size; /* User still need to know when CQE > 32B */ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); slave_adjust_steering_mode(dev, &dev_cap, &hca_param); mlx4_dbg(dev, "RSS support for IP fragments is %s\n", hca_param.rss_ip_frags ? "on" : "off"); if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && dev->caps.bf_reg_size) dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; return 0; err_mem: kfree(dev->caps.qp0_qkey); kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); dev->caps.qp0_qkey = NULL; dev->caps.qp0_tunnel = NULL; dev->caps.qp0_proxy = NULL; dev->caps.qp1_tunnel = NULL; dev->caps.qp1_proxy = NULL; return err; } static void mlx4_request_modules(struct mlx4_dev *dev) { int port; int has_ib_port = false; int has_eth_port = false; #define EN_DRV_NAME "mlx4_en" #define IB_DRV_NAME "mlx4_ib" for (port = 1; port <= dev->caps.num_ports; port++) { if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) has_ib_port = true; else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) has_eth_port = true; } if (has_eth_port) request_module_nowait(EN_DRV_NAME); if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) request_module_nowait(IB_DRV_NAME); } /* * Change the port configuration of the device. 
* Every user of this function must hold the port mutex. */ int mlx4_change_port_types(struct mlx4_dev *dev, enum mlx4_port_type *port_types) { int err = 0; int change = 0; int port; for (port = 0; port < dev->caps.num_ports; port++) { /* Change the port type only if the new type is different * from the current, and not set to Auto */ if (port_types[port] != dev->caps.port_type[port + 1]) change = 1; } if (change) { mlx4_unregister_device(dev); for (port = 1; port <= dev->caps.num_ports; port++) { mlx4_CLOSE_PORT(dev, port); dev->caps.port_type[port] = port_types[port - 1]; err = mlx4_SET_PORT(dev, port, -1); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", port); goto out; } } mlx4_set_port_mask(dev); err = mlx4_register_device(dev); if (err) { mlx4_err(dev, "Failed to register device\n"); goto out; } mlx4_request_modules(dev); } out: return err; } static ssize_t show_port_type(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_attr); struct mlx4_dev *mdev = info->dev; char type[8]; sprintf(type, "%s", (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? "ib" : "eth"); if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) sprintf(buf, "auto (%s)\n", type); else sprintf(buf, "%s\n", type); return strlen(buf); } static int __set_port_type(struct mlx4_port_info *info, enum mlx4_port_type port_type) { struct mlx4_dev *mdev = info->dev; struct mlx4_priv *priv = mlx4_priv(mdev); enum mlx4_port_type types[MLX4_MAX_PORTS]; enum mlx4_port_type new_types[MLX4_MAX_PORTS]; int i; int err = 0; if ((port_type & mdev->caps.supported_type[info->port]) != port_type) { mlx4_err(mdev, "Requested port type for port %d is not supported on this HCA\n", info->port); err = -EINVAL; goto err_sup; } mlx4_stop_sense(mdev); mutex_lock(&priv->port_mutex); info->tmp_type = port_type; /* Possible type is always the one that was delivered */ mdev->caps.possible_type[info->port] = info->tmp_type; for (i = 0; i < mdev->caps.num_ports; i++) { types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : mdev->caps.possible_type[i+1]; if (types[i] == MLX4_PORT_TYPE_AUTO) types[i] = mdev->caps.port_type[i+1]; } if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { for (i = 1; i <= mdev->caps.num_ports; i++) { if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { mdev->caps.possible_type[i] = mdev->caps.port_type[i]; err = -EINVAL; } } } if (err) { mlx4_err(mdev, "Auto sensing is not supported on this HCA. 
Set only 'eth' or 'ib' for both ports (should be the same)\n"); goto out; } mlx4_do_sense_ports(mdev, new_types, types); err = mlx4_check_port_params(mdev, new_types); if (err) goto out; /* We are about to apply the changes after the configuration * was verified, no need to remember the temporary types * any more */ for (i = 0; i < mdev->caps.num_ports; i++) priv->port[i + 1].tmp_type = 0; err = mlx4_change_port_types(mdev, new_types); out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); err_sup: return err; } static ssize_t set_port_type(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_attr); struct mlx4_dev *mdev = info->dev; enum mlx4_port_type port_type; static DEFINE_MUTEX(set_port_type_mutex); int err; mutex_lock(&set_port_type_mutex); if (!strcmp(buf, "ib\n")) { port_type = MLX4_PORT_TYPE_IB; } else if (!strcmp(buf, "eth\n")) { port_type = MLX4_PORT_TYPE_ETH; } else if (!strcmp(buf, "auto\n")) { port_type = MLX4_PORT_TYPE_AUTO; } else { mlx4_err(mdev, "%s is not supported port type\n", buf); err = -EINVAL; goto err_out; } err = __set_port_type(info, port_type); err_out: mutex_unlock(&set_port_type_mutex); return err ? err : count; } enum ibta_mtu { IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3, IB_MTU_2048 = 4, IB_MTU_4096 = 5 }; static inline int int_to_ibta_mtu(int mtu) { switch (mtu) { case 256: return IB_MTU_256; case 512: return IB_MTU_512; case 1024: return IB_MTU_1024; case 2048: return IB_MTU_2048; case 4096: return IB_MTU_4096; default: return -1; } } static inline int ibta_mtu_to_int(enum ibta_mtu mtu) { switch (mtu) { case IB_MTU_256: return 256; case IB_MTU_512: return 512; case IB_MTU_1024: return 1024; case IB_MTU_2048: return 2048; case IB_MTU_4096: return 4096; default: return -1; } } static ssize_t show_port_ib_mtu(struct device *dev, struct device_attribute *attr, char *buf) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_mtu_attr); struct mlx4_dev *mdev = info->dev; if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); sprintf(buf, "%d\n", ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); return strlen(buf); } static ssize_t set_port_ib_mtu(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, port_mtu_attr); struct mlx4_dev *mdev = info->dev; struct mlx4_priv *priv = mlx4_priv(mdev); int err, port, mtu, ibta_mtu = -1; if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); return -EINVAL; } err = kstrtoint(buf, 0, &mtu); if (!err) ibta_mtu = int_to_ibta_mtu(mtu); if (err || ibta_mtu < 0) { mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); return -EINVAL; } mdev->caps.port_ib_mtu[info->port] = ibta_mtu; mlx4_stop_sense(mdev); mutex_lock(&priv->port_mutex); mlx4_unregister_device(mdev); for (port = 1; port <= mdev->caps.num_ports; port++) { mlx4_CLOSE_PORT(mdev, port); err = mlx4_SET_PORT(mdev, port, -1); if (err) { mlx4_err(mdev, "Failed to set port %d, aborting\n", port); goto err_set_port; } } err = mlx4_register_device(mdev); err_set_port: mutex_unlock(&priv->port_mutex); mlx4_start_sense(mdev); return err ? 
err : count;
}

/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;
	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
	bitmap_and(slaves_port_1_2,
		   slaves_port1.slaves, slaves_port2.slaves,
		   dev->persist->num_vfs + 1);

	/* only single port vfs are allowed */
	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev,
			  "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}

static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		ret = ret1;
	}
	return ret;
}

int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);

int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);

int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides boundary checks cross mapping makes
		 * no sense and therefore not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_port_map_set);

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev,
&priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); goto err_unmap_cmpt; } /* * Reserved MTT entries must be aligned up to a cacheline * boundary, since the FW will write to them, while the driver * writes to all other MTT entries. (The variable * dev->caps.mtt_entry_sz below is really the MTT segment * size, not the raw entry size) */ dev->caps.reserved_mtts = ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, init_hca->mtt_base, dev->caps.mtt_entry_sz, dev->caps.num_mtts, dev->caps.reserved_mtts, 1, 0); if (err) { mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); goto err_unmap_eq; } err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, init_hca->dmpt_base, dev_cap->dmpt_entry_sz, dev->caps.num_mpts, dev->caps.reserved_mrws, 1, 1); if (err) { mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); goto err_unmap_mtt; } err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, init_hca->qpc_base, dev_cap->qpc_entry_sz, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map QP context memory, aborting\n"); goto err_unmap_dmpt; } err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, init_hca->auxc_base, dev_cap->aux_entry_sz, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); goto err_unmap_qp; } err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, init_hca->altc_base, dev_cap->altc_entry_sz, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); goto err_unmap_auxc; } err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, init_hca->rdmarc_base, dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, dev->caps.num_qps, dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); goto err_unmap_altc; } err = mlx4_init_icm_table(dev, &priv->cq_table.table, init_hca->cqc_base, dev_cap->cqc_entry_sz, dev->caps.num_cqs, dev->caps.reserved_cqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); goto err_unmap_rdmarc; } err = mlx4_init_icm_table(dev, &priv->srq_table.table, init_hca->srqc_base, dev_cap->srq_entry_sz, dev->caps.num_srqs, dev->caps.reserved_srqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); goto err_unmap_cq; } /* * For flow steering device managed mode it is required to use * mlx4_init_icm_table. For B0 steering mode it's not strictly * required, but for simplicity just map the whole multicast * group table now. The table isn't very big and it's a lot * easier than trying to track ref counts. 
*/ err = mlx4_init_icm_table(dev, &priv->mcg_table.table, init_hca->mc_base, mlx4_get_mgm_entry_size(dev), dev->caps.num_mgms + dev->caps.num_amgms, dev->caps.num_mgms + dev->caps.num_amgms, 0, 0); if (err) { mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); goto err_unmap_srq; } return 0; err_unmap_srq: mlx4_cleanup_icm_table(dev, &priv->srq_table.table); err_unmap_cq: mlx4_cleanup_icm_table(dev, &priv->cq_table.table); err_unmap_rdmarc: mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); err_unmap_altc: mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); err_unmap_auxc: mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); err_unmap_qp: mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); err_unmap_dmpt: mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); err_unmap_mtt: mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); err_unmap_eq: mlx4_cleanup_icm_table(dev, &priv->eq_table.table); err_unmap_cmpt: mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); err_unmap_aux: mlx4_UNMAP_ICM_AUX(dev); err_free_aux: mlx4_free_icm(dev, priv->fw.aux_icm, 0); return err; } static void mlx4_free_icms(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); mlx4_cleanup_icm_table(dev, &priv->srq_table.table); mlx4_cleanup_icm_table(dev, &priv->cq_table.table); mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); mlx4_cleanup_icm_table(dev, &priv->eq_table.table); mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); mlx4_UNMAP_ICM_AUX(dev); mlx4_free_icm(dev, priv->fw.aux_icm, 0); } static void mlx4_slave_exit(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) mlx4_warn(dev, "Failed to close slave function\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); } static int map_bf_area(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); resource_size_t bf_start; resource_size_t bf_len; int err = 0; if (!dev->caps.bf_reg_size) return -ENXIO; bf_start = pci_resource_start(dev->persist->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); bf_len = pci_resource_len(dev->persist->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); if (!priv->bf_mapping) err = -ENOMEM; return err; } static void unmap_bf_area(struct mlx4_dev *dev) { if (mlx4_priv(dev)->bf_mapping) io_mapping_free(mlx4_priv(dev)->bf_mapping); } u64 mlx4_read_clock(struct mlx4_dev *dev) { u32 clockhi, clocklo, clockhi1; u64 cycles; int i; struct mlx4_priv *priv = mlx4_priv(dev); for (i = 0; i < 10; i++) { clockhi = swab32(readl(priv->clock_mapping)); clocklo = swab32(readl(priv->clock_mapping + 4)); clockhi1 = swab32(readl(priv->clock_mapping)); if (clockhi == clockhi1) break; } cycles = (u64) clockhi << 32 | (u64) clocklo; return 
cycles; } EXPORT_SYMBOL_GPL(mlx4_read_clock); static int map_internal_clock(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); priv->clock_mapping = ioremap(pci_resource_start(dev->persist->pdev, priv->fw.clock_bar) + priv->fw.clock_offset, MLX4_CLOCK_SIZE); if (!priv->clock_mapping) return -ENOMEM; return 0; } int mlx4_get_internal_clock_params(struct mlx4_dev *dev, struct mlx4_clock_params *params) { struct mlx4_priv *priv = mlx4_priv(dev); if (mlx4_is_slave(dev)) return -EOPNOTSUPP; if (!params) return -EINVAL; params->bar = priv->fw.clock_bar; params->offset = priv->fw.clock_offset; params->size = MLX4_CLOCK_SIZE; return 0; } EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); static void unmap_internal_clock(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); if (priv->clock_mapping) iounmap(priv->clock_mapping); } static void mlx4_close_hca(struct mlx4_dev *dev) { unmap_internal_clock(dev); unmap_bf_area(dev); if (mlx4_is_slave(dev)) mlx4_slave_exit(dev); else { mlx4_CLOSE_HCA(dev, 0); mlx4_free_icms(dev); } } static void mlx4_close_fw(struct mlx4_dev *dev) { if (!mlx4_is_slave(dev)) { mlx4_UNMAP_FA(dev); mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); } } static int mlx4_comm_check_offline(struct mlx4_dev *dev) { #define COMM_CHAN_OFFLINE_OFFSET 0x09 u32 comm_flags; u32 offline_bit; unsigned long end; struct mlx4_priv *priv = mlx4_priv(dev); end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; while (time_before(jiffies, end)) { comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS)); offline_bit = (comm_flags & (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); if (!offline_bit) return 0; /* If device removal has been requested, * do not continue retrying. */ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_NOWAIT) break; /* There are cases as part of AER/Reset flow that PF needs * around 100 msec to load. We therefore sleep for 100 msec * to allow other tasks to make use of that CPU during this * time interval. 
*/ msleep(100); } mlx4_err(dev, "Communication channel is offline.\n"); return -EIO; } static void mlx4_reset_vf_support(struct mlx4_dev *dev) { #define COMM_CHAN_RST_OFFSET 0x1e struct mlx4_priv *priv = mlx4_priv(dev); u32 comm_rst; u32 comm_caps; comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_CAPS)); comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); if (comm_rst) dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; } static int mlx4_init_slave(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); u64 dma = (u64) priv->mfunc.vhcr_dma; int ret_from_reset = 0; u32 slave_read; u32 cmd_channel_ver; if (atomic_read(&pf_loading)) { mlx4_warn(dev, "PF is not ready - Deferring probe\n"); return -EPROBE_DEFER; } mutex_lock(&priv->cmd.slave_cmd_mutex); priv->cmd.max_cmds = 1; if (mlx4_comm_check_offline(dev)) { mlx4_err(dev, "PF is not responsive, skipping initialization\n"); goto err_offline; } mlx4_reset_vf_support(dev); mlx4_warn(dev, "Sending reset\n"); ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); /* if we are in the middle of flr the slave will try * NUM_OF_RESET_RETRIES times before leaving.*/ if (ret_from_reset) { if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); return -EPROBE_DEFER; } else goto err; } /* check the driver version - the slave I/F revision * must match the master's */ slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); cmd_channel_ver = mlx4_comm_get_version(); if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != MLX4_COMM_GET_IF_REV(slave_read)) { mlx4_err(dev, "slave driver version is not supported by the master\n"); goto err; } mlx4_warn(dev, "Sending vhcr0\n"); if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) goto err; if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) goto err; if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) goto err; if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) goto err; mutex_unlock(&priv->cmd.slave_cmd_mutex); return 0; err: mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); err_offline: mutex_unlock(&priv->cmd.slave_cmd_mutex); return -EIO; } static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) { int i; for (i = 1; i <= dev->caps.num_ports; i++) { if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) dev->caps.gid_table_len[i] = mlx4_get_slave_num_gids(dev, 0, i); else dev->caps.gid_table_len[i] = 1; dev->caps.pkey_table_len[i] = dev->phys_caps.pkey_phys_table_len[i] - 1; } } static int choose_log_fs_mgm_entry_size(int qp_per_entry) { int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; i++) { if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) break; } return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? 
i : -1; } static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) { switch (dmfs_high_steer_mode) { case MLX4_STEERING_DMFS_A0_DEFAULT: return "default performance"; case MLX4_STEERING_DMFS_A0_DYNAMIC: return "dynamic hybrid mode"; case MLX4_STEERING_DMFS_A0_STATIC: return "performance optimized for limited rule configuration (static)"; case MLX4_STEERING_DMFS_A0_DISABLE: return "disabled performance optimized steering"; case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: return "performance optimized steering not supported"; default: return "Unrecognized mode"; } } #define MLX4_DMFS_A0_STEERING (1UL << 2) static void choose_steering_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { if (mlx4_log_num_mgm_entry_size <= 0) { if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) mlx4_err(dev, "DMFS high rate mode not supported\n"); else dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_STATIC; } } if (mlx4_log_num_mgm_entry_size <= 0 && dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && (!mlx4_is_mfunc(dev) || (dev_cap->fs_max_num_qp_per_entry >= (dev->persist->num_vfs + 1))) && choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= MLX4_MIN_MGM_LOG_ENTRY_SIZE) { dev->oper_log_mgm_entry_size = choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; dev->caps.fs_log_max_ucast_qp_range_size = dev_cap->fs_log_max_ucast_qp_range_size; } else { if (dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) dev->caps.steering_mode = MLX4_STEERING_MODE_B0; else { dev->caps.steering_mode = MLX4_STEERING_MODE_A0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); } dev->oper_log_mgm_entry_size = mlx4_log_num_mgm_entry_size > 0 ? mlx4_log_num_mgm_entry_size : MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); } mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", mlx4_steering_mode_str(dev->caps.steering_mode), dev->oper_log_mgm_entry_size, mlx4_log_num_mgm_entry_size); } static void choose_tunnel_offload_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; else dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? 
"vxlan" : "none"); } static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) { int i; struct mlx4_port_cap port_cap; if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) return -EINVAL; for (i = 1; i <= dev->caps.num_ports; i++) { if (mlx4_dev_port(dev, i, &port_cap)) { mlx4_err(dev, "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); } else if ((dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_DEFAULT) && (port_cap.dmfs_optimized_state == !!(dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_DISABLE))) { mlx4_err(dev, "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", dmfs_high_rate_steering_mode_str( dev->caps.dmfs_high_steer_mode), (port_cap.dmfs_optimized_state ? "enabled" : "disabled")); } } return 0; } static int mlx4_init_fw(struct mlx4_dev *dev) { struct mlx4_mod_stat_cfg mlx4_cfg; int err = 0; if (!mlx4_is_slave(dev)) { err = mlx4_QUERY_FW(dev); if (err) { if (err == -EACCES) mlx4_info(dev, "non-primary physical function, skipping\n"); else mlx4_err(dev, "QUERY_FW command failed, aborting\n"); return err; } err = mlx4_load_fw(dev); if (err) { mlx4_err(dev, "Failed to start FW, aborting\n"); return err; } mlx4_cfg.log_pg_sz_m = 1; mlx4_cfg.log_pg_sz = 0; err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); if (err) mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); } return err; } static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_adapter adapter; struct mlx4_dev_cap dev_cap; struct mlx4_profile profile; struct mlx4_init_hca_param init_hca; u64 icm_size; struct mlx4_config_dev_params params; int err; if (!mlx4_is_slave(dev)) { err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; } choose_steering_mode(dev, &dev_cap); choose_tunnel_offload_mode(dev, &dev_cap); if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; err = mlx4_get_phys_port_id(dev); if (err) mlx4_err(dev, "Fail to get physical port id\n"); if (mlx4_is_master(dev)) mlx4_parav_master_pf_caps(dev); if (mlx4_low_memory_profile()) { mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); profile = low_mem_profile; } else { profile = default_profile; } if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) profile.num_mcg = MLX4_FS_NUM_MCG; icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); if ((long long) icm_size < 0) { err = icm_size; return err; } dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; if (enable_4k_uar) { init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; } else { init_hca.log_uar_sz = ilog2(dev->caps.num_uars); init_hca.uar_page_sz = PAGE_SHIFT - 12; } init_hca.mw_enabled = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); if (err) return err; err = mlx4_INIT_HCA(dev, &init_hca); if (err) { mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { err = mlx4_query_func(dev, &dev_cap); if (err < 0) { mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); goto err_close; } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { dev->caps.num_eqs = dev_cap.max_eqs; dev->caps.reserved_eqs = dev_cap.reserved_eqs; dev->caps.reserved_uars = dev_cap.reserved_uars; } } /* * If TS is supported by FW * read HCA frequency by QUERY_HCA command */ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { memset(&init_hca, 0, sizeof(init_hca)); err = mlx4_QUERY_HCA(dev, &init_hca); if (err) { mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = init_hca.hca_core_clock; } /* In case we got HCA frequency 0 - disable timestamping * to avoid dividing by zero */ if (!dev->caps.hca_core_clock) { dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_err(dev, "HCA frequency is 0 - timestamping is not supported\n"); } else if (map_internal_clock(dev)) { /* * Map internal clock, * in case of failure disable timestamping */ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); } } if (dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { if (mlx4_validate_optimized_steering(dev)) mlx4_warn(dev, "Optimized steering validation failed\n"); if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_DISABLE) { dev->caps.dmfs_high_rate_qpn_base = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; } mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", dmfs_high_rate_steering_mode_str( dev->caps.dmfs_high_steer_mode)); } } else { err = mlx4_init_slave(dev); if (err) { if (err != -EPROBE_DEFER) mlx4_err(dev, "Failed to initialize slave\n"); return err; } err = mlx4_slave_cap(dev); if (err) { mlx4_err(dev, "Failed to obtain slave caps\n"); goto err_close; } } if (map_bf_area(dev)) mlx4_dbg(dev, "Failed to map blue flame area\n"); /*Only the master set the ports, all the rest got it from it.*/ if (!mlx4_is_slave(dev)) mlx4_set_port_mask(dev); err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); goto unmap_bf; } /* Query CONFIG_DEV parameters */ err = mlx4_config_dev_retrieval(dev, &params); if (err && err != -EOPNOTSUPP) { mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); } else if (!err) { dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; } priv->eq_table.inta_pin = adapter.inta_pin; memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); return 0; unmap_bf: unmap_internal_clock(dev); unmap_bf_area(dev); if (mlx4_is_slave(dev)) { kfree(dev->caps.qp0_qkey); kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); } err_close: if (mlx4_is_slave(dev)) mlx4_slave_exit(dev); else mlx4_CLOSE_HCA(dev, 0); err_free_icm: if (!mlx4_is_slave(dev)) mlx4_free_icms(dev); return err; } static int mlx4_init_counters_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int nent_pow2; if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) return -ENOENT; if (!dev->caps.max_counters) return -ENOSPC; nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); /* reserve last counter index for sink counter */ return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, nent_pow2 - 1, 0, nent_pow2 - dev->caps.max_counters + 1); } static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) return; if (!dev->caps.max_counters) return; mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); } static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int port; for (port = 0; port < dev->caps.num_ports; port++) if (priv->def_counter[port] != -1) mlx4_counter_free(dev, priv->def_counter[port]); } static int mlx4_allocate_default_counters(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int port, err = 0; u32 idx; for (port = 0; port < dev->caps.num_ports; port++) priv->def_counter[port] = -1; for (port = 0; port < dev->caps.num_ports; port++) { err = mlx4_counter_alloc(dev, &idx); if (!err || err == -ENOSPC) { priv->def_counter[port] = idx; } else if (err == -ENOENT) { err = 0; continue; } else if (mlx4_is_slave(dev) && err == -EINVAL) { priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", MLX4_SINK_COUNTER_INDEX(dev)); err = 0; } else { mlx4_err(dev, "%s: failed to 
allocate default counter port %d err %d\n", __func__, port + 1, err); mlx4_cleanup_default_counters(dev); return err; } mlx4_dbg(dev, "%s: default counter index %d for port %d\n", __func__, priv->def_counter[port], port + 1); } return err; } int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) { struct mlx4_priv *priv = mlx4_priv(dev); if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) return -ENOENT; *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); if (*idx == -1) { *idx = MLX4_SINK_COUNTER_INDEX(dev); return -ENOSPC; } return 0; } int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) { u64 out_param; int err; if (mlx4_is_mfunc(dev)) { err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) *idx = get_param_l(&out_param); return err; } return __mlx4_counter_alloc(dev, idx); } EXPORT_SYMBOL_GPL(mlx4_counter_alloc); static int __mlx4_clear_if_stat(struct mlx4_dev *dev, u8 counter_index) { struct mlx4_cmd_mailbox *if_stat_mailbox; int err; u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(if_stat_mailbox)) return PTR_ERR(if_stat_mailbox); err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, if_stat_mailbox); return err; } void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) return; if (idx == MLX4_SINK_COUNTER_INDEX(dev)) return; __mlx4_clear_if_stat(dev, idx); mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); return; } void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, idx); mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); return; } __mlx4_counter_free(dev, idx); } EXPORT_SYMBOL_GPL(mlx4_counter_free); int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) { struct mlx4_priv *priv = mlx4_priv(dev); return priv->def_counter[port - 1]; } EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) { struct mlx4_priv *priv = mlx4_priv(dev); priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; } EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) { struct mlx4_priv *priv = mlx4_priv(dev); return priv->mfunc.master.vf_admin[entry].vport[port].guid; } EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) { struct mlx4_priv *priv = mlx4_priv(dev); __be64 guid; /* hw GUID */ if (entry == 0) return; get_random_bytes((char *)&guid, sizeof(guid)); guid &= ~(cpu_to_be64(1ULL << 56)); guid |= cpu_to_be64(1ULL << 57); priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; } static int mlx4_setup_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; int port; __be32 ib_port_default_caps; err = mlx4_init_uar_table(dev); if (err) { mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); return err; } err = mlx4_uar_alloc(dev, &priv->driver_uar); if (err) { mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); goto err_uar_table_free; } priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if (!priv->kar) { mlx4_err(dev, 
"Couldn't map kernel access region, aborting\n"); err = -ENOMEM; goto err_uar_free; } err = mlx4_init_pd_table(dev); if (err) { mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); goto err_kar_unmap; } err = mlx4_init_xrcd_table(dev); if (err) { mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); goto err_pd_table_free; } err = mlx4_init_mr_table(dev); if (err) { mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); goto err_xrcd_table_free; } if (!mlx4_is_slave(dev)) { err = mlx4_init_mcg_table(dev); if (err) { mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); goto err_mr_table_free; } err = mlx4_config_mad_demux(dev); if (err) { mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); goto err_mcg_table_free; } } err = mlx4_init_eq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); goto err_mcg_table_free; } err = mlx4_cmd_use_events(dev); if (err) { mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); goto err_eq_table_free; } err = mlx4_NOP(dev); if (err) { if (dev->flags & MLX4_FLAG_MSI_X) { mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", priv->eq_table.eq[MLX4_EQ_ASYNC].irq); mlx4_warn(dev, "Trying again without MSI-X\n"); } else { mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", priv->eq_table.eq[MLX4_EQ_ASYNC].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } goto err_cmd_poll; } mlx4_dbg(dev, "NOP command IRQ test passed\n"); err = mlx4_init_cq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); goto err_cmd_poll; } err = mlx4_init_srq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); goto err_cq_table_free; } err = mlx4_init_qp_table(dev); if (err) { mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); goto err_srq_table_free; } if (!mlx4_is_slave(dev)) { err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { mlx4_err(dev, "Failed to initialize counters table, aborting\n"); goto err_qp_table_free; } } err = mlx4_allocate_default_counters(dev); if (err) { mlx4_err(dev, "Failed to allocate default counters, aborting\n"); goto err_counters_table_free; } if (!mlx4_is_slave(dev)) { for (port = 1; port <= dev->caps.num_ports; port++) { ib_port_default_caps = 0; err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); if (err) mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps; /* initialize per-slave default ib port capabilities */ if (mlx4_is_master(dev)) { int i; for (i = 0; i < dev->num_slaves; i++) { if (i == mlx4_master_func_num(dev)) continue; priv->mfunc.master.slave_state[i].ib_cap_mask[port] = ib_port_default_caps; } } if (mlx4_is_mfunc(dev)) dev->caps.port_ib_mtu[port] = IB_MTU_2048; else dev->caps.port_ib_mtu[port] = IB_MTU_4096; err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 
dev->caps.pkey_table_len[port] : -1); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", port); goto err_default_counters_free; } } } return 0; err_default_counters_free: mlx4_cleanup_default_counters(dev); err_counters_table_free: if (!mlx4_is_slave(dev)) mlx4_cleanup_counters_table(dev); err_qp_table_free: mlx4_cleanup_qp_table(dev); err_srq_table_free: mlx4_cleanup_srq_table(dev); err_cq_table_free: mlx4_cleanup_cq_table(dev); err_cmd_poll: mlx4_cmd_use_polling(dev); err_eq_table_free: mlx4_cleanup_eq_table(dev); err_mcg_table_free: if (!mlx4_is_slave(dev)) mlx4_cleanup_mcg_table(dev); err_mr_table_free: mlx4_cleanup_mr_table(dev); err_xrcd_table_free: mlx4_cleanup_xrcd_table(dev); err_pd_table_free: mlx4_cleanup_pd_table(dev); err_kar_unmap: iounmap(priv->kar); err_uar_free: mlx4_uar_free(dev, &priv->driver_uar); err_uar_table_free: mlx4_cleanup_uar_table(dev); return err; } static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) { int requested_cpu = 0; struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eq *eq; int off = 0; int i; if (eqn > dev->caps.num_comp_vectors) return -EINVAL; for (i = 1; i < port; i++) off += mlx4_get_eqs_per_port(dev, i); requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); /* Meaning EQs are shared, and this call comes from the second port */ if (requested_cpu < 0) return 0; eq = &priv->eq_table.eq[eqn]; if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL)) return -ENOMEM; cpumask_set_cpu(requested_cpu, eq->affinity_mask); return 0; } static void mlx4_enable_msi_x(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; int i; int port = 0; if (msi_x) { int nreq = dev->caps.num_ports * num_online_cpus() + 1; nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); if (nreq > MAX_MSIX) nreq = MAX_MSIX; entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) goto no_msi; for (i = 0; i < nreq; ++i) entries[i].entry = i; nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, nreq); if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { kfree(entries); goto no_msi; } /* 1 is reserved for events (asynchronous EQ) */ dev->caps.num_comp_vectors = nreq - 1; priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, dev->caps.num_ports); for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { if (i == MLX4_EQ_ASYNC) continue; priv->eq_table.eq[i].irq = entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, dev->caps.num_ports); /* We don't set affinity hint when there * aren't enough EQs */ } else { set_bit(port, priv->eq_table.eq[i].actv_ports.ports); if (mlx4_init_affinity_hint(dev, port + 1, i)) mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", i); } /* We divide the Eqs evenly between the two ports. * (dev->caps.num_comp_vectors / dev->caps.num_ports) * refers to the number of Eqs per port * (i.e. eqs_per_port). Theoretically, we would like to * write something like (i + 1) % eqs_per_port == 0. * However, since there's an asynchronous Eq, we have * to skip over it by comparing this condition to * !!((i + 1) > MLX4_EQ_ASYNC). */ if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && ((i + 1) % (dev->caps.num_comp_vectors / dev->caps.num_ports)) == !!((i + 1) > MLX4_EQ_ASYNC)) /* If dev->caps.num_comp_vectors < dev->caps.num_ports, * everything is shared anyway. 
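* (Illustrative numbers: with eight completion vectors on a two-port
* HCA, eqs_per_port is four, so EQs 1-4 land on port 1 and EQs 5-8 on
* port 2, while EQ 0 remains the asynchronous EQ.)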
*/ port++; } dev->flags |= MLX4_FLAG_MSI_X; kfree(entries); return; } no_msi: dev->caps.num_comp_vectors = 1; BUG_ON(MLX4_EQ_ASYNC >= 2); for (i = 0; i < 2; ++i) { priv->eq_table.eq[i].irq = dev->persist->pdev->irq; if (i != MLX4_EQ_ASYNC) { bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, dev->caps.num_ports); } } } static int mlx4_init_port_info(struct mlx4_dev *dev, int port) { struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; int err; err = devlink_port_register(devlink, &info->devlink_port, port); if (err) return err; info->dev = dev; info->port = port; if (!mlx4_is_slave(dev)) { mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); mlx4_init_roce_gid_table(dev, &info->gid_table); info->base_qpn = mlx4_get_base_qpn(dev, port); } sprintf(info->dev_name, "mlx4_port%d", port); info->port_attr.attr.name = info->dev_name; if (mlx4_is_mfunc(dev)) info->port_attr.attr.mode = S_IRUGO; else { info->port_attr.attr.mode = S_IRUGO | S_IWUSR; info->port_attr.store = set_port_type; } info->port_attr.show = show_port_type; sysfs_attr_init(&info->port_attr.attr); err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); if (err) { mlx4_err(dev, "Failed to create file for port %d\n", port); devlink_port_unregister(&info->devlink_port); info->port = -1; } sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); info->port_mtu_attr.attr.name = info->dev_mtu_name; if (mlx4_is_mfunc(dev)) info->port_mtu_attr.attr.mode = S_IRUGO; else { info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; info->port_mtu_attr.store = set_port_ib_mtu; } info->port_mtu_attr.show = show_port_ib_mtu; sysfs_attr_init(&info->port_mtu_attr.attr); err = device_create_file(&dev->persist->pdev->dev, &info->port_mtu_attr); if (err) { mlx4_err(dev, "Failed to create mtu file for port %d\n", port); device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); devlink_port_unregister(&info->devlink_port); info->port = -1; } return err; } static void mlx4_cleanup_port_info(struct mlx4_port_info *info) { if (info->port < 0) return; device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); device_remove_file(&info->dev->persist->pdev->dev, &info->port_mtu_attr); devlink_port_unregister(&info->devlink_port); #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(info->rmap); info->rmap = NULL; #endif } static int mlx4_init_steering(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int num_entries = dev->caps.num_ports; int i, j; priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); if (!priv->steer) return -ENOMEM; for (i = 0; i < num_entries; i++) for (j = 0; j < MLX4_NUM_STEERS; j++) { INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); } return 0; } static void mlx4_clear_steering(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_steer_index *entry, *tmp_entry; struct mlx4_promisc_qp *pqp, *tmp_pqp; int num_entries = dev->caps.num_ports; int i, j; for (i = 0; i < num_entries; i++) { for (j = 0; j < MLX4_NUM_STEERS; j++) { list_for_each_entry_safe(pqp, tmp_pqp, &priv->steer[i].promisc_qps[j], list) { list_del(&pqp->list); kfree(pqp); } list_for_each_entry_safe(entry, tmp_entry, &priv->steer[i].steer_entries[j], list) { list_del(&entry->list); list_for_each_entry_safe(pqp, tmp_pqp, &entry->duplicates, list) { list_del(&pqp->list); kfree(pqp); } kfree(entry); } } } kfree(priv->steer); } static int 
extended_func_num(struct pci_dev *pdev) { return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); } #define MLX4_OWNER_BASE 0x8069c #define MLX4_OWNER_SIZE 4 static int mlx4_get_ownership(struct mlx4_dev *dev) { void __iomem *owner; u32 ret; if (pci_channel_offline(dev->persist->pdev)) return -EIO; owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + MLX4_OWNER_BASE, MLX4_OWNER_SIZE); if (!owner) { mlx4_err(dev, "Failed to obtain ownership bit\n"); return -ENOMEM; } ret = readl(owner); iounmap(owner); return (int) !!ret; } static void mlx4_free_ownership(struct mlx4_dev *dev) { void __iomem *owner; if (pci_channel_offline(dev->persist->pdev)) return; owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + MLX4_OWNER_BASE, MLX4_OWNER_SIZE); if (!owner) { mlx4_err(dev, "Failed to obtain ownership bit\n"); return; } writel(0, owner); msleep(1000); iounmap(owner); } #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ !!((flags) & MLX4_FLAG_MASTER)) static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, u8 total_vfs, int existing_vfs, int reset_flow) { u64 dev_flags = dev->flags; int err = 0; int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), MLX4_MAX_NUM_VF); if (reset_flow) { dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL); if (!dev->dev_vfs) goto free_mem; return dev_flags; } atomic_inc(&pf_loading); if (dev->flags & MLX4_FLAG_SRIOV) { if (existing_vfs != total_vfs) { mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", existing_vfs, total_vfs); total_vfs = existing_vfs; } } dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); if (NULL == dev->dev_vfs) { mlx4_err(dev, "Failed to allocate memory for VFs\n"); goto disable_sriov; } if (!(dev->flags & MLX4_FLAG_SRIOV)) { if (total_vfs > fw_enabled_sriov_vfs) { mlx4_err(dev, "requested vfs (%d) > available vfs (%d). 
Continuing without SR_IOV\n", total_vfs, fw_enabled_sriov_vfs); err = -ENOMEM; goto disable_sriov; } mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); err = pci_enable_sriov(pdev, total_vfs); } if (err) { mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", err); goto disable_sriov; } else { mlx4_warn(dev, "Running in master mode\n"); dev_flags |= MLX4_FLAG_SRIOV | MLX4_FLAG_MASTER; dev_flags &= ~MLX4_FLAG_SLAVE; dev->persist->num_vfs = total_vfs; } return dev_flags; disable_sriov: atomic_dec(&pf_loading); free_mem: dev->persist->num_vfs = 0; kfree(dev->dev_vfs); dev->dev_vfs = NULL; return dev_flags & ~MLX4_FLAG_MASTER; } enum { MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, }; static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, int *nvfs) { int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; /* Checking for 64 VFs as a limitation of CX2 */ if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && requested_vfs >= 64) { mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", requested_vfs); return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; } return 0; } static int mlx4_pci_enable_device(struct mlx4_dev *dev) { struct pci_dev *pdev = dev->persist->pdev; int err = 0; mutex_lock(&dev->persist->pci_status_mutex); if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { err = pci_enable_device(pdev); if (!err) dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; } mutex_unlock(&dev->persist->pci_status_mutex); return err; } static void mlx4_pci_disable_device(struct mlx4_dev *dev) { struct pci_dev *pdev = dev->persist->pdev; mutex_lock(&dev->persist->pci_status_mutex); if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { pci_disable_device(pdev); dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; } mutex_unlock(&dev->persist->pci_status_mutex); } static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, int total_vfs, int *nvfs, struct mlx4_priv *priv, int reset_flow) { struct mlx4_dev *dev; unsigned sum = 0; int err; int port; int i; struct mlx4_dev_cap *dev_cap = NULL; int existing_vfs = 0; dev = &priv->dev; INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); mutex_init(&priv->port_mutex); mutex_init(&priv->bond_mutex); INIT_LIST_HEAD(&priv->pgdir_list); mutex_init(&priv->pgdir_mutex); spin_lock_init(&priv->cmd.context_lock); INIT_LIST_HEAD(&priv->bf_list); mutex_init(&priv->bf_mutex); dev->rev_id = pdev->revision; dev->numa_node = dev_to_node(&pdev->dev); /* Detect if this device is a virtual function */ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); dev->flags |= MLX4_FLAG_SLAVE; } else { /* We reset the device and enable SRIOV only for physical * devices. Try to claim ownership on the device; * if already taken, skip -- do not allow multiple PFs */ err = mlx4_get_ownership(dev); if (err) { if (err < 0) return err; else { mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); return -EINVAL; } } atomic_set(&priv->opreq_count, 0); INIT_WORK(&priv->opreq_task, mlx4_opreq_action); /* * Now reset the HCA before we touch the PCI capabilities or * attempt a firmware command, since a boot ROM may have left * the HCA in an undefined state. 
*/ err = mlx4_reset(dev); if (err) { mlx4_err(dev, "Failed to reset HCA, aborting\n"); goto err_sriov; } if (total_vfs) { dev->flags = MLX4_FLAG_MASTER; existing_vfs = pci_num_vf(pdev); if (existing_vfs) dev->flags |= MLX4_FLAG_SRIOV; dev->persist->num_vfs = total_vfs; } } /* on load remove any previous indication of internal error, * device is up. */ dev->persist->state = MLX4_DEVICE_STATE_UP; slave_start: err = mlx4_cmd_init(dev); if (err) { mlx4_err(dev, "Failed to init command interface, aborting\n"); goto err_sriov; } /* In slave functions, the communication channel must be initialized * before posting commands. Also, init num_slaves before calling * mlx4_init_hca */ if (mlx4_is_mfunc(dev)) { if (mlx4_is_master(dev)) { dev->num_slaves = MLX4_MAX_NUM_SLAVES; } else { dev->num_slaves = 0; err = mlx4_multi_func_init(dev); if (err) { mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); goto err_cmd; } } } err = mlx4_init_fw(dev); if (err) { mlx4_err(dev, "Failed to init fw, aborting.\n"); goto err_mfunc; } if (mlx4_is_master(dev)) { /* when we hit the goto slave_start below, dev_cap already initialized */ if (!dev_cap) { dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); if (!dev_cap) { err = -ENOMEM; goto err_fw; } err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); goto err_fw; } if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) goto err_fw; if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs, reset_flow); mlx4_close_fw(dev); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); dev->flags = dev_flags; if (!SRIOV_VALID_STATE(dev->flags)) { mlx4_err(dev, "Invalid SRIOV state\n"); goto err_sriov; } err = mlx4_reset(dev); if (err) { mlx4_err(dev, "Failed to reset HCA, aborting.\n"); goto err_sriov; } goto slave_start; } } else { /* Legacy mode FW requires SRIOV to be enabled before * doing QUERY_DEV_CAP, since max_eq's value is different if * SRIOV is enabled. */ memset(dev_cap, 0, sizeof(*dev_cap)); err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); goto err_fw; } if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) goto err_fw; } } err = mlx4_init_hca(dev); if (err) { if (err == -EACCES) { /* Not primary Physical function * Running in slave mode */ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); /* We're not a PF */ if (dev->flags & MLX4_FLAG_SRIOV) { if (!existing_vfs) pci_disable_sriov(pdev); if (mlx4_is_master(dev) && !reset_flow) atomic_dec(&pf_loading); dev->flags &= ~MLX4_FLAG_SRIOV; } if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); dev->flags |= MLX4_FLAG_SLAVE; dev->flags &= ~MLX4_FLAG_MASTER; goto slave_start; } else goto err_fw; } if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs, reset_flow); if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); dev->flags = dev_flags; err = mlx4_cmd_init(dev); if (err) { /* Only VHCR is cleaned up, so could still * send FW commands */ mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); goto err_close; } } else { dev->flags = dev_flags; } if (!SRIOV_VALID_STATE(dev->flags)) { mlx4_err(dev, "Invalid SRIOV state\n"); goto err_close; } } /* check if the device is functioning at its maximum possible speed. 
* No return code for this call, just warn the user in case of PCI * express device capabilities are under-satisfied by the bus. */ if (!mlx4_is_slave(dev)) mlx4_check_pcie_caps(dev); /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ if (mlx4_is_master(dev)) { if (dev->caps.num_ports < 2 && num_vfs_argc > 1) { err = -EINVAL; mlx4_err(dev, "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", dev->caps.num_ports); goto err_close; } memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); for (i = 0; i < sizeof(dev->persist->nvfs)/ sizeof(dev->persist->nvfs[0]); i++) { unsigned j; for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; dev->dev_vfs[sum].n_ports = i < 2 ? 1 : dev->caps.num_ports; } } /* In master functions, the communication channel * must be initialized after obtaining its address from fw */ err = mlx4_multi_func_init(dev); if (err) { mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); goto err_close; } } err = mlx4_alloc_eq_table(dev); if (err) goto err_master_mfunc; bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); mutex_init(&priv->msix_ctl.pool_lock); mlx4_enable_msi_x(dev); if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { err = -EOPNOTSUPP; mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; } if (!mlx4_is_slave(dev)) { err = mlx4_init_steering(dev); if (err) goto err_disable_msix; } mlx4_init_quotas(dev); err = mlx4_setup_hca(dev); if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && !mlx4_is_mfunc(dev)) { dev->flags &= ~MLX4_FLAG_MSI_X; dev->caps.num_comp_vectors = 1; pci_disable_msix(pdev); err = mlx4_setup_hca(dev); } if (err) goto err_steer; /* When PF resources are ready arm its comm channel to enable * getting commands */ if (mlx4_is_master(dev)) { err = mlx4_ARM_COMM_CHANNEL(dev); if (err) { mlx4_err(dev, " Failed to arm comm channel eq: %x\n", err); goto err_steer; } } for (port = 1; port <= dev->caps.num_ports; port++) { err = mlx4_init_port_info(dev, port); if (err) goto err_port; } priv->v2p.port1 = 1; priv->v2p.port2 = 2; err = mlx4_register_device(dev); if (err) goto err_port; mlx4_request_modules(dev); mlx4_sense_init(dev); mlx4_start_sense(dev); priv->removed = 0; if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) atomic_dec(&pf_loading); kfree(dev_cap); return 0; err_port: for (--port; port >= 1; --port) mlx4_cleanup_port_info(&priv->port[port]); mlx4_cleanup_default_counters(dev); if (!mlx4_is_slave(dev)) mlx4_cleanup_counters_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); mlx4_cleanup_uar_table(dev); err_steer: if (!mlx4_is_slave(dev)) mlx4_clear_steering(dev); err_disable_msix: if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); err_free_eq: mlx4_free_eq_table(dev); err_master_mfunc: if (mlx4_is_master(dev)) { mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); mlx4_multi_func_cleanup(dev); } if (mlx4_is_slave(dev)) { kfree(dev->caps.qp0_qkey); kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); } err_close: mlx4_close_hca(dev); err_fw: mlx4_close_fw(dev); err_mfunc: if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); err_cmd: 
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); err_sriov: if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { pci_disable_sriov(pdev); dev->flags &= ~MLX4_FLAG_SRIOV; } if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) atomic_dec(&pf_loading); kfree(priv->dev.dev_vfs); if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); kfree(dev_cap); return err; } static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, struct mlx4_priv *priv) { int err; int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; unsigned total_vfs = 0; unsigned int i; pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); err = mlx4_pci_enable_device(&priv->dev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs * per port, we must limit the number of VFs to 63 (since there are * 128 MACs) */ for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; if (nvfs[i] < 0) { dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); err = -EINVAL; goto err_disable_pdev; } } for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; i++) { prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); err = -EINVAL; goto err_disable_pdev; } } if (total_vfs > MLX4_MAX_NUM_VF) { dev_err(&pdev->dev, "Requested more VFs (%d) than allowed by hw (%d)\n", total_vfs, MLX4_MAX_NUM_VF); err = -EINVAL; goto err_disable_pdev; } for (i = 0; i < MLX4_MAX_PORTS; i++) { if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { dev_err(&pdev->dev, "Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n", nvfs[i] + nvfs[2], i + 1, MLX4_MAX_NUM_VF_P_PORT); err = -EINVAL; goto err_disable_pdev; } } /* Check for BARs. 
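* (BAR 0 must expose the DCS and BAR 2 the UAR region; the two
* resource-flag checks below enforce exactly this.)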
*/ if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", pci_dev_data, pci_resource_flags(pdev, 0)); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Missing UAR, aborting\n"); err = -ENODEV; goto err_disable_pdev; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); goto err_disable_pdev; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); goto err_release_regions; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); goto err_release_regions; } } /* Allow large DMA segments, up to the firmware limit of 1 GB */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); /* Detect if this device is a virtual function */ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { /* When acting as pf, we normally skip vfs unless explicitly * requested to probe them. */ if (total_vfs) { unsigned vfs_offset = 0; for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && vfs_offset + nvfs[i] < extended_func_num(pdev); vfs_offset += nvfs[i], i++) ; if (i == sizeof(nvfs)/sizeof(nvfs[0])) { err = -ENODEV; goto err_release_regions; } if ((extended_func_num(pdev) - vfs_offset) > prb_vf[i]) { dev_warn(&pdev->dev, "Skipping virtual function:%d\n", extended_func_num(pdev)); err = -ENODEV; goto err_release_regions; } } } err = mlx4_catas_init(&priv->dev); if (err) goto err_release_regions; err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); if (err) goto err_catas; return 0; err_catas: mlx4_catas_end(&priv->dev); err_release_regions: pci_release_regions(pdev); err_disable_pdev: mlx4_pci_disable_device(&priv->dev); pci_set_drvdata(pdev, NULL); return err; } static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, enum devlink_port_type port_type) { struct mlx4_port_info *info = container_of(devlink_port, struct mlx4_port_info, devlink_port); enum mlx4_port_type mlx4_port_type; switch (port_type) { case DEVLINK_PORT_TYPE_AUTO: mlx4_port_type = MLX4_PORT_TYPE_AUTO; break; case DEVLINK_PORT_TYPE_ETH: mlx4_port_type = MLX4_PORT_TYPE_ETH; break; case DEVLINK_PORT_TYPE_IB: mlx4_port_type = MLX4_PORT_TYPE_IB; break; default: return -EOPNOTSUPP; } return __set_port_type(info, mlx4_port_type); } static const struct devlink_ops mlx4_devlink_ops = { .port_type_set = mlx4_devlink_port_type_set, }; static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct devlink *devlink; struct mlx4_priv *priv; struct mlx4_dev *dev; int ret; printk_once(KERN_INFO "%s", mlx4_version); devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv)); if (!devlink) return -ENOMEM; priv = devlink_priv(devlink); dev = &priv->dev; dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); if (!dev->persist) { ret = -ENOMEM; goto err_devlink_free; } dev->persist->pdev = pdev; dev->persist->dev = dev; pci_set_drvdata(pdev, dev->persist); priv->pci_dev_data = id->driver_data; 
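/* Note: dev->persist intentionally outlives mlx4_unload_one()/mlx4_load_one() cycles (see mlx4_restart_one() below), so its state mutexes are initialized only once, here. */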
mutex_init(&dev->persist->device_state_mutex); mutex_init(&dev->persist->interface_state_mutex); mutex_init(&dev->persist->pci_status_mutex); ret = devlink_register(devlink, &pdev->dev); if (ret) goto err_persist_free; ret = __mlx4_init_one(pdev, id->driver_data, priv); if (ret) goto err_devlink_unregister; pci_save_state(pdev); return 0; err_devlink_unregister: devlink_unregister(devlink); err_persist_free: kfree(dev->persist); err_devlink_free: devlink_free(devlink); return ret; } static void mlx4_clean_dev(struct mlx4_dev *dev) { struct mlx4_dev_persistent *persist = dev->persist; struct mlx4_priv *priv = mlx4_priv(dev); unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); memset(priv, 0, sizeof(*priv)); priv->dev.persist = persist; priv->dev.flags = flags; } static void mlx4_unload_one(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); int pci_dev_data; int p, i; if (priv->removed) return; /* saving current ports type for further use */ for (i = 0; i < dev->caps.num_ports; i++) { dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; dev->persist->curr_port_poss_type[i] = dev->caps. possible_type[i + 1]; } pci_dev_data = priv->pci_dev_data; mlx4_stop_sense(dev); mlx4_unregister_device(dev); for (p = 1; p <= dev->caps.num_ports; p++) { mlx4_cleanup_port_info(&priv->port[p]); mlx4_CLOSE_PORT(dev, p); } if (mlx4_is_master(dev)) mlx4_free_resource_tracker(dev, RES_TR_FREE_SLAVES_ONLY); mlx4_cleanup_default_counters(dev); if (!mlx4_is_slave(dev)) mlx4_cleanup_counters_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); if (mlx4_is_master(dev)) mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); iounmap(priv->kar); mlx4_uar_free(dev, &priv->driver_uar); mlx4_cleanup_uar_table(dev); if (!mlx4_is_slave(dev)) mlx4_clear_steering(dev); mlx4_free_eq_table(dev); if (mlx4_is_master(dev)) mlx4_multi_func_cleanup(dev); mlx4_close_hca(dev); mlx4_close_fw(dev); if (mlx4_is_slave(dev)) mlx4_multi_func_cleanup(dev); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); kfree(dev->caps.qp0_qkey); kfree(dev->caps.qp0_tunnel); kfree(dev->caps.qp0_proxy); kfree(dev->caps.qp1_tunnel); kfree(dev->caps.qp1_proxy); kfree(dev->dev_vfs); mlx4_clean_dev(dev); priv->pci_dev_data = pci_dev_data; priv->removed = 1; } static void mlx4_remove_one(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; if (mlx4_is_slave(dev)) persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; mutex_lock(&persist->interface_state_mutex); persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; mutex_unlock(&persist->interface_state_mutex); /* Disabling SR-IOV is not allowed while there are active vf's */ if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { active_vfs = mlx4_how_many_lives_vf(dev); if (active_vfs) { pr_warn("Removing PF when there are active VF's !!\n"); pr_warn("Will not disable SR-IOV.\n"); } } /* device marked to be under deletion running now without the lock * letting other tasks to be 
terminated */ if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); else mlx4_info(dev, "%s: interface is down\n", __func__); mlx4_catas_end(dev); if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); } pci_release_regions(pdev); mlx4_pci_disable_device(dev); devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); pci_set_drvdata(pdev, NULL); } static int restore_current_port_types(struct mlx4_dev *dev, enum mlx4_port_type *types, enum mlx4_port_type *poss_types) { struct mlx4_priv *priv = mlx4_priv(dev); int err, i; mlx4_stop_sense(dev); mutex_lock(&priv->port_mutex); for (i = 0; i < dev->caps.num_ports; i++) dev->caps.possible_type[i + 1] = poss_types[i]; err = mlx4_change_port_types(dev, types); mlx4_start_sense(dev); mutex_unlock(&priv->port_mutex); return err; } int mlx4_restart_one(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; int pci_dev_data, err, total_vfs; pci_dev_data = priv->pci_dev_data; total_vfs = dev->persist->num_vfs; memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mlx4_unload_one(pdev); err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); if (err) { mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", __func__, pci_name(pdev), err); return err; } err = restore_current_port_types(dev, dev->persist->curr_port_type, dev->persist->curr_port_poss_type); if (err) mlx4_err(dev, "could not restore original port types (%d)\n", err); return err; } #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT } #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF } #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 } static const struct pci_device_id mlx4_pci_table[] = { /* MT25408 "Hermon" */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */ /* MT25458 ConnectX EN 10GBASE-T */ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN), MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */ /* MT26468 ConnectX EN 10GigE PCIe Gen2*/ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2), /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2), /* MT26478 ConnectX2 40GigE PCIe Gen2 */ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2), /* MT25400 Family [ConnectX-2] */ MLX_VF(0x1002), /* Virtual Function */ /* MT27500 Family [ConnectX-3] */ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3), MLX_VF(0x1004), /* Virtual Function */ MLX_GN(0x1005), /* MT27510 Family */ MLX_GN(0x1006), /* MT27511 Family */ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */ MLX_GN(0x1008), /* MT27521 Family */ MLX_GN(0x1009), /* MT27530 Family */ MLX_GN(0x100a), /* MT27531 Family */ MLX_GN(0x100b), /* MT27540 Family */ MLX_GN(0x100c), /* MT27541 Family */ MLX_GN(0x100d), /* MT27550 Family */ MLX_GN(0x100e), /* MT27551 Family */ MLX_GN(0x100f), /* MT27560 Family */ MLX_GN(0x1010), /* MT27561 Family */ /* * See the mellanox_check_broken_intx_masking() quirk when * adding devices */ { 0, } }; 
MODULE_DEVICE_TABLE(pci, mlx4_pci_table); static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); mlx4_enter_error_state(persist); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; mlx4_pci_disable_device(persist->dev); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; int err; mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); err = mlx4_pci_enable_device(dev); if (err) { mlx4_err(dev, "Can not re-enable device, err=%d\n", err); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); return PCI_ERS_RESULT_RECOVERED; } static void mlx4_pci_resume(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; int total_vfs; int err; mlx4_err(dev, "%s was called\n", __func__); total_vfs = dev->persist->num_vfs; memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mutex_lock(&persist->interface_state_mutex); if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, priv, 1); if (err) { mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", __func__, err); goto end; } err = restore_current_port_types(dev, dev->persist-> curr_port_type, dev->persist-> curr_port_poss_type); if (err) mlx4_err(dev, "could not restore original port types (%d)\n", err); } end: mutex_unlock(&persist->interface_state_mutex); } static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); } static const struct pci_error_handlers mlx4_err_handler = { .error_detected = mlx4_pci_err_detected, .slot_reset = mlx4_pci_slot_reset, .resume = mlx4_pci_resume, }; static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, .shutdown = mlx4_shutdown, .remove = mlx4_remove_one, .err_handler = &mlx4_err_handler, }; static int __init mlx4_verify_params(void) { if ((log_num_mac < 0) || (log_num_mac > 7)) { pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; } if (log_num_vlan != 0) pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", MLX4_LOG_NUM_VLANS); if (use_prio != 0) pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); return -1; } /* Check if module param for ports type has legal combination */ if (port_type_array[0] == false && port_type_array[1] == true) { pr_warn("Module parameter configuration ETH/IB is not supported. 
Switching to default configuration IB/IB\n"); port_type_array[0] = true; } if (mlx4_log_num_mgm_entry_size < -7 || (mlx4_log_num_mgm_entry_size > 0 && (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", mlx4_log_num_mgm_entry_size, MLX4_MIN_MGM_LOG_ENTRY_SIZE, MLX4_MAX_MGM_LOG_ENTRY_SIZE); return -1; } return 0; } static int __init mlx4_init(void) { int ret; if (mlx4_verify_params()) return -EINVAL; mlx4_wq = create_singlethread_workqueue("mlx4"); if (!mlx4_wq) return -ENOMEM; ret = pci_register_driver(&mlx4_driver); if (ret < 0) destroy_workqueue(mlx4_wq); return ret < 0 ? ret : 0; } static void __exit mlx4_cleanup(void) { pci_unregister_driver(&mlx4_driver); destroy_workqueue(mlx4_wq); } module_init(mlx4_init); module_exit(mlx4_cleanup);
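A note on the EQ distribution in mlx4_enable_msi_x() above: the round-robin condition is easy to misread, so here is a minimal user-space sketch of just that arithmetic. Everything in it is illustrative only; EQ_ASYNC stands in for MLX4_EQ_ASYNC (0) and the printf replaces the driver's real IRQ and port-bitmap bookkeeping.

#include <stdio.h>

#define EQ_ASYNC 0 /* mirrors MLX4_EQ_ASYNC in the driver */

int main(void)
{
    int num_comp_vectors = 8, num_ports = 2;
    int eqs_per_port = num_comp_vectors / num_ports;
    int port = 0;
    int i;

    for (i = 0; i < num_comp_vectors + 1; i++) {
        if (i == EQ_ASYNC)
            continue; /* vector 0 is reserved for async events */
        printf("EQ %d -> port %d\n", i, port + 1);
        /* Same condition as the driver: advance the port every
         * eqs_per_port EQs, offset by one to skip the async EQ. */
        if (num_comp_vectors > num_ports &&
            (i + 1) % eqs_per_port == !!((i + 1) > EQ_ASYNC))
            port++;
    }
    return 0;
}

With these sample numbers it prints EQs 1-4 on port 1 and EQs 5-8 on port 2, matching the in-code comment about dividing the Eqs evenly between the two ports.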
null
null
null
null
93,515
28,657
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
28,657
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
#include "rar.hpp" RawRead::RawRead() { RawRead::SrcFile=NULL; Reset(); } RawRead::RawRead(File *SrcFile) { RawRead::SrcFile=SrcFile; Reset(); } void RawRead::Reset() { Data.SoftReset(); ReadPos=0; DataSize=0; Crypt=NULL; } size_t RawRead::Read(size_t Size) { size_t ReadSize=0; #if !defined(RAR_NOCRYPT) if (Crypt!=NULL) { // Full size of buffer with already read data including data read // for encryption block alignment. size_t FullSize=Data.Size(); // Data read for alignment and not processed yet. size_t DataLeft=FullSize-DataSize; if (Size>DataLeft) // Need to read more than we already have. { size_t SizeToRead=Size-DataLeft; size_t AlignedReadSize=SizeToRead+((~SizeToRead+1) & CRYPT_BLOCK_MASK); Data.Add(AlignedReadSize); ReadSize=SrcFile->Read(&Data[FullSize],AlignedReadSize); Crypt->DecryptBlock(&Data[FullSize],AlignedReadSize); DataSize+=ReadSize==0 ? 0:Size; } else // Use buffered data, no real read. { ReadSize=Size; DataSize+=Size; } } else #endif if (Size!=0) { Data.Add(Size); ReadSize=SrcFile->Read(&Data[DataSize],Size); DataSize+=ReadSize; } return ReadSize; } void RawRead::Read(byte *SrcData,size_t Size) { if (Size!=0) { Data.Add(Size); memcpy(&Data[DataSize],SrcData,Size); DataSize+=Size; } } byte RawRead::Get1() { return ReadPos<DataSize ? Data[ReadPos++]:0; } ushort RawRead::Get2() { if (ReadPos+1<DataSize) { ushort Result=Data[ReadPos]+(Data[ReadPos+1]<<8); ReadPos+=2; return Result; } return 0; } uint RawRead::Get4() { if (ReadPos+3<DataSize) { uint Result=Data[ReadPos]+(Data[ReadPos+1]<<8)+(Data[ReadPos+2]<<16)+ (Data[ReadPos+3]<<24); ReadPos+=4; return Result; } return 0; } uint64 RawRead::Get8() { uint Low=Get4(),High=Get4(); return INT32TO64(High,Low); } uint64 RawRead::GetV() { uint64 Result=0; // Need to check Shift<64, because for shift greater than or equal to // the width of the promoted left operand, the behavior is undefined. for (uint Shift=0;ReadPos<DataSize && Shift<64;Shift+=7) { byte CurByte=Data[ReadPos++]; Result+=uint64(CurByte & 0x7f)<<Shift; if ((CurByte & 0x80)==0) return Result; // Decoded successfully. } return 0; // Out of buffer border. } // Return a number of bytes in current variable length integer. uint RawRead::GetVSize(size_t Pos) { for (size_t CurPos=Pos;CurPos<DataSize;CurPos++) if ((Data[CurPos] & 0x80)==0) return int(CurPos-Pos+1); return 0; // Buffer overflow. } size_t RawRead::GetB(void *Field,size_t Size) { byte *F=(byte *)Field; size_t CopySize=Min(DataSize-ReadPos,Size); if (CopySize>0) memcpy(F,&Data[ReadPos],CopySize); if (Size>CopySize) memset(F+CopySize,0,Size-CopySize); ReadPos+=CopySize; return CopySize; } void RawRead::GetW(wchar *Field,size_t Size) { if (ReadPos+2*Size-1<DataSize) { RawToWide(&Data[ReadPos],Field,Size); ReadPos+=sizeof(wchar)*Size; } else memset(Field,0,sizeof(wchar)*Size); } uint RawRead::GetCRC15(bool ProcessedOnly) // RAR 1.5 block CRC. { if (DataSize<=2) return 0; uint HeaderCRC=CRC32(0xffffffff,&Data[2],(ProcessedOnly ? ReadPos:DataSize)-2); return ~HeaderCRC & 0xffff; } uint RawRead::GetCRC50() // RAR 5.0 block CRC. { if (DataSize<=4) return 0xffffffff; return CRC32(0xffffffff,&Data[4],DataSize-4) ^ 0xffffffff; } // Read vint from arbitrary byte array. uint64 RawGetV(const byte *Data,uint &ReadPos,uint DataSize,bool &Overflow) { Overflow=false; uint64 Result=0; for (uint Shift=0;ReadPos<DataSize;Shift+=7) { byte CurByte=Data[ReadPos++]; Result+=uint64(CurByte & 0x7f)<<Shift; if ((CurByte & 0x80)==0) return Result; // Decoded successfully. } Overflow=true; return 0; // Out of buffer border. }
null
null
null
null
25,520
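The GetV and RawGetV routines in the record above decode RAR 5.0 variable-length integers: each byte carries seven data bits, and a set high bit means another byte follows. As a cross-check of that format, here is a minimal standalone C++ encoder sketch; it is not part of UnRAR, and the name EncodeVint is illustrative only.

// Standalone sketch (not UnRAR code): encodes a value in the same
// 7-bits-per-byte, high-bit-continuation format that RawRead::GetV and
// RawGetV above decode.
#include <cstdint>
#include <vector>

std::vector<uint8_t> EncodeVint(uint64_t Value)
{
  std::vector<uint8_t> Out;
  do
  {
    uint8_t CurByte = Value & 0x7f;   // Low 7 data bits.
    Value >>= 7;
    if (Value != 0)
      CurByte |= 0x80;                // Continuation flag: more bytes follow.
    Out.push_back(CurByte);
  } while (Value != 0);
  return Out;                         // At most 10 bytes for a 64-bit value.
}

Feeding EncodeVint's output into RawGetV round-trips any 64-bit value in at most ten bytes, which is why the decoder above caps its shift at 63.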
5,779
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
5,779
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chromeos/dbus/arc_oemcrypto_client.h"

#include <utility>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "dbus/bus.h"
#include "dbus/message.h"
#include "dbus/object_proxy.h"
#include "third_party/cros_system_api/dbus/service_constants.h"

namespace chromeos {
namespace {

class ArcOemCryptoClientImpl : public ArcOemCryptoClient {
 public:
  ArcOemCryptoClientImpl() : weak_ptr_factory_(this) {}
  ~ArcOemCryptoClientImpl() override = default;

  // ArcOemCryptoClient override:
  void BootstrapMojoConnection(base::ScopedFD fd,
                               VoidDBusMethodCallback callback) override {
    if (!service_available_) {
      DVLOG(1) << "ArcOemCrypto D-Bus service not available";
      std::move(callback).Run(false);
      return;
    }
    dbus::MethodCall method_call(arc_oemcrypto::kArcOemCryptoServiceInterface,
                                 arc_oemcrypto::kBootstrapMojoConnection);
    dbus::MessageWriter writer(&method_call);
    writer.AppendFileDescriptor(fd.get());

    proxy_->CallMethod(
        &method_call, dbus::ObjectProxy::TIMEOUT_USE_DEFAULT,
        base::BindOnce(&ArcOemCryptoClientImpl::OnVoidDBusMethod,
                       weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
  }

 protected:
  // DBusClient override.
  void Init(dbus::Bus* bus) override {
    proxy_ = bus->GetObjectProxy(
        arc_oemcrypto::kArcOemCryptoServiceName,
        dbus::ObjectPath(arc_oemcrypto::kArcOemCryptoServicePath));
    proxy_->WaitForServiceToBeAvailable(
        base::BindOnce(&ArcOemCryptoClientImpl::OnServiceAvailable,
                       weak_ptr_factory_.GetWeakPtr()));
  }

 private:
  // Runs the callback with the method call result.
  void OnVoidDBusMethod(VoidDBusMethodCallback callback,
                        dbus::Response* response) {
    std::move(callback).Run(response != nullptr);
  }

  void OnServiceAvailable(bool service_is_available) {
    service_available_ = service_is_available;
  }

  dbus::ObjectProxy* proxy_ = nullptr;
  bool service_available_ = false;
  base::WeakPtrFactory<ArcOemCryptoClientImpl> weak_ptr_factory_;

  DISALLOW_COPY_AND_ASSIGN(ArcOemCryptoClientImpl);
};

}  // namespace

ArcOemCryptoClient::ArcOemCryptoClient() = default;

ArcOemCryptoClient::~ArcOemCryptoClient() = default;

// static
ArcOemCryptoClient* ArcOemCryptoClient::Create() {
  return new ArcOemCryptoClientImpl();
}

}  // namespace chromeos
null
null
null
null
2,642
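The client in the record above hands base::BindOnce callbacks bound through a WeakPtrFactory into asynchronous D-Bus calls, so a reply arriving after the client is destroyed is dropped instead of touching freed memory. A standard-library analogue of that guard (std::weak_ptr rather than Chromium's base::WeakPtrFactory; the Client type here is hypothetical) looks like this:

// Sketch of the weak-handle callback guard, assuming C++17.
#include <functional>
#include <iostream>
#include <memory>

class Client : public std::enable_shared_from_this<Client> {
 public:
  std::function<void(bool)> MakeGuardedCallback() {
    std::weak_ptr<Client> weak = weak_from_this();
    return [weak](bool ok) {
      if (auto self = weak.lock())   // Object still alive?
        self->OnDone(ok);            // Safe to touch members.
      // Otherwise the call is silently dropped, like an invalidated WeakPtr.
    };
  }

 private:
  void OnDone(bool ok) { std::cout << "service available: " << ok << "\n"; }
};

int main() {
  auto client = std::make_shared<Client>();
  auto cb = client->MakeGuardedCallback();
  cb(true);         // Delivered: prints the message.
  client.reset();   // Client destroyed before a later completion.
  cb(false);        // Dropped safely instead of using a dangling pointer.
  return 0;
}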
3,174
null
train_val
1b0d3845b454eaaac0b2064c78926ca4d739a080
265,742
qemu
0
https://github.com/bonzini/qemu
2016-10-18 11:40:27+01:00
/*
 * QEMU ETRAX System Emulator
 *
 * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef HW_ETRAXFS_H
#define HW_ETRAXFS_H

#include "net/net.h"
#include "hw/cris/etraxfs_dma.h"

/* Instantiate an ETRAXFS Ethernet MAC. */
static inline DeviceState *
etraxfs_eth_init(NICInfo *nd, hwaddr base, int phyaddr,
                 void *dma_out, void *dma_in)
{
    DeviceState *dev;
    qemu_check_nic_model(nd, "fseth");

    dev = qdev_create(NULL, "etraxfs-eth");
    qdev_set_nic_properties(dev, nd);
    qdev_prop_set_uint32(dev, "phyaddr", phyaddr);
    qdev_prop_set_ptr(dev, "dma_out", dma_out);
    qdev_prop_set_ptr(dev, "dma_in", dma_in);
    qdev_init_nofail(dev);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    return dev;
}

static inline DeviceState *etraxfs_ser_create(hwaddr addr,
                                              qemu_irq irq,
                                              CharDriverState *chr)
{
    DeviceState *dev;
    SysBusDevice *s;

    dev = qdev_create(NULL, "etraxfs,serial");
    s = SYS_BUS_DEVICE(dev);
    qdev_prop_set_chr(dev, "chardev", chr);
    qdev_init_nofail(dev);
    sysbus_mmio_map(s, 0, addr);
    sysbus_connect_irq(s, 0, irq);
    return dev;
}

#endif
null
null
null
null
123,866
2,839
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
155,896
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <string.h> #include "libavutil/avstring.h" #include "ffmpeg.h" static int nb_hw_devices; static HWDevice **hw_devices; static HWDevice *hw_device_get_by_type(enum AVHWDeviceType type) { HWDevice *found = NULL; int i; for (i = 0; i < nb_hw_devices; i++) { if (hw_devices[i]->type == type) { if (found) return NULL; found = hw_devices[i]; } } return found; } HWDevice *hw_device_get_by_name(const char *name) { int i; for (i = 0; i < nb_hw_devices; i++) { if (!strcmp(hw_devices[i]->name, name)) return hw_devices[i]; } return NULL; } static HWDevice *hw_device_add(void) { int err; err = av_reallocp_array(&hw_devices, nb_hw_devices + 1, sizeof(*hw_devices)); if (err) { nb_hw_devices = 0; return NULL; } hw_devices[nb_hw_devices] = av_mallocz(sizeof(HWDevice)); if (!hw_devices[nb_hw_devices]) return NULL; return hw_devices[nb_hw_devices++]; } static char *hw_device_default_name(enum AVHWDeviceType type) { // Make an automatic name of the form "type%d". We arbitrarily // limit at 1000 anonymous devices of the same type - there is // probably something else very wrong if you get to this limit. const char *type_name = av_hwdevice_get_type_name(type); char *name; size_t index_pos; int index, index_limit = 1000; index_pos = strlen(type_name); name = av_malloc(index_pos + 4); if (!name) return NULL; for (index = 0; index < index_limit; index++) { snprintf(name, index_pos + 4, "%s%d", type_name, index); if (!hw_device_get_by_name(name)) break; } if (index >= index_limit) { av_freep(&name); return NULL; } return name; } int hw_device_init_from_string(const char *arg, HWDevice **dev_out) { // "type=name:device,key=value,key2=value2" // "type:device,key=value,key2=value2" // -> av_hwdevice_ctx_create() // "type=name@name" // "type@name" // -> av_hwdevice_ctx_create_derived() AVDictionary *options = NULL; char *type_name = NULL, *name = NULL, *device = NULL; enum AVHWDeviceType type; HWDevice *dev, *src; AVBufferRef *device_ref = NULL; int err; const char *errmsg, *p, *q; size_t k; k = strcspn(arg, ":=@"); p = arg + k; type_name = av_strndup(arg, k); if (!type_name) { err = AVERROR(ENOMEM); goto fail; } type = av_hwdevice_find_type_by_name(type_name); if (type == AV_HWDEVICE_TYPE_NONE) { errmsg = "unknown device type"; goto invalid; } if (*p == '=') { k = strcspn(p + 1, ":@"); name = av_strndup(p + 1, k); if (!name) { err = AVERROR(ENOMEM); goto fail; } if (hw_device_get_by_name(name)) { errmsg = "named device already exists"; goto invalid; } p += 1 + k; } else { name = hw_device_default_name(type); if (!name) { err = AVERROR(ENOMEM); goto fail; } } if (!*p) { // New device with no parameters. err = av_hwdevice_ctx_create(&device_ref, type, NULL, NULL, 0); if (err < 0) goto fail; } else if (*p == ':') { // New device with some parameters. 
++p; q = strchr(p, ','); if (q) { device = av_strndup(p, q - p); if (!device) { err = AVERROR(ENOMEM); goto fail; } err = av_dict_parse_string(&options, q + 1, "=", ",", 0); if (err < 0) { errmsg = "failed to parse options"; goto invalid; } } err = av_hwdevice_ctx_create(&device_ref, type, device ? device : p, options, 0); if (err < 0) goto fail; } else if (*p == '@') { // Derive from existing device. src = hw_device_get_by_name(p + 1); if (!src) { errmsg = "invalid source device name"; goto invalid; } err = av_hwdevice_ctx_create_derived(&device_ref, type, src->device_ref, 0); if (err < 0) goto fail; } else { errmsg = "parse error"; goto invalid; } dev = hw_device_add(); if (!dev) { err = AVERROR(ENOMEM); goto fail; } dev->name = name; dev->type = type; dev->device_ref = device_ref; if (dev_out) *dev_out = dev; name = NULL; err = 0; done: av_freep(&type_name); av_freep(&name); av_freep(&device); av_dict_free(&options); return err; invalid: av_log(NULL, AV_LOG_ERROR, "Invalid device specification \"%s\": %s\n", arg, errmsg); err = AVERROR(EINVAL); goto done; fail: av_log(NULL, AV_LOG_ERROR, "Device creation failed: %d.\n", err); av_buffer_unref(&device_ref); goto done; } static int hw_device_init_from_type(enum AVHWDeviceType type, const char *device, HWDevice **dev_out) { AVBufferRef *device_ref = NULL; HWDevice *dev; char *name; int err; name = hw_device_default_name(type); if (!name) { err = AVERROR(ENOMEM); goto fail; } err = av_hwdevice_ctx_create(&device_ref, type, device, NULL, 0); if (err < 0) { av_log(NULL, AV_LOG_ERROR, "Device creation failed: %d.\n", err); goto fail; } dev = hw_device_add(); if (!dev) { err = AVERROR(ENOMEM); goto fail; } dev->name = name; dev->type = type; dev->device_ref = device_ref; if (dev_out) *dev_out = dev; return 0; fail: av_freep(&name); av_buffer_unref(&device_ref); return err; } void hw_device_free_all(void) { int i; for (i = 0; i < nb_hw_devices; i++) { av_freep(&hw_devices[i]->name); av_buffer_unref(&hw_devices[i]->device_ref); av_freep(&hw_devices[i]); } av_freep(&hw_devices); nb_hw_devices = 0; } static HWDevice *hw_device_match_by_codec(const AVCodec *codec) { const AVCodecHWConfig *config; HWDevice *dev; int i; for (i = 0;; i++) { config = avcodec_get_hw_config(codec, i); if (!config) return NULL; if (!(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)) continue; dev = hw_device_get_by_type(config->device_type); if (dev) return dev; } } int hw_device_setup_for_decode(InputStream *ist) { const AVCodecHWConfig *config; enum AVHWDeviceType type; HWDevice *dev = NULL; int err, auto_device = 0; if (ist->hwaccel_device) { dev = hw_device_get_by_name(ist->hwaccel_device); if (!dev) { if (ist->hwaccel_id == HWACCEL_AUTO) { auto_device = 1; } else if (ist->hwaccel_id == HWACCEL_GENERIC) { type = ist->hwaccel_device_type; err = hw_device_init_from_type(type, ist->hwaccel_device, &dev); } else { // This will be dealt with by API-specific initialisation // (using hwaccel_device), so nothing further needed here. 
return 0; } } else { if (ist->hwaccel_id == HWACCEL_AUTO) { ist->hwaccel_device_type = dev->type; } else if (ist->hwaccel_device_type != dev->type) { av_log(ist->dec_ctx, AV_LOG_ERROR, "Invalid hwaccel device " "specified for decoder: device %s of type %s is not " "usable with hwaccel %s.\n", dev->name, av_hwdevice_get_type_name(dev->type), av_hwdevice_get_type_name(ist->hwaccel_device_type)); return AVERROR(EINVAL); } } } else { if (ist->hwaccel_id == HWACCEL_AUTO) { auto_device = 1; } else if (ist->hwaccel_id == HWACCEL_GENERIC) { type = ist->hwaccel_device_type; dev = hw_device_get_by_type(type); if (!dev) err = hw_device_init_from_type(type, NULL, &dev); } else { dev = hw_device_match_by_codec(ist->dec); if (!dev) { // No device for this codec, but not using generic hwaccel // and therefore may well not need one - ignore. return 0; } } } if (auto_device) { int i; if (!avcodec_get_hw_config(ist->dec, 0)) { // Decoder does not support any hardware devices. return 0; } for (i = 0; !dev; i++) { config = avcodec_get_hw_config(ist->dec, i); if (!config) break; type = config->device_type; dev = hw_device_get_by_type(type); if (dev) { av_log(ist->dec_ctx, AV_LOG_INFO, "Using auto " "hwaccel type %s with existing device %s.\n", av_hwdevice_get_type_name(type), dev->name); } } for (i = 0; !dev; i++) { config = avcodec_get_hw_config(ist->dec, i); if (!config) break; type = config->device_type; // Try to make a new device of this type. err = hw_device_init_from_type(type, ist->hwaccel_device, &dev); if (err < 0) { // Can't make a device of this type. continue; } if (ist->hwaccel_device) { av_log(ist->dec_ctx, AV_LOG_INFO, "Using auto " "hwaccel type %s with new device created " "from %s.\n", av_hwdevice_get_type_name(type), ist->hwaccel_device); } else { av_log(ist->dec_ctx, AV_LOG_INFO, "Using auto " "hwaccel type %s with new default device.\n", av_hwdevice_get_type_name(type)); } } if (dev) { ist->hwaccel_device_type = type; } else { av_log(ist->dec_ctx, AV_LOG_INFO, "Auto hwaccel " "disabled: no device found.\n"); ist->hwaccel_id = HWACCEL_NONE; return 0; } } if (!dev) { av_log(ist->dec_ctx, AV_LOG_ERROR, "No device available " "for decoder: device type %s needed for codec %s.\n", av_hwdevice_get_type_name(type), ist->dec->name); return err; } ist->dec_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref); if (!ist->dec_ctx->hw_device_ctx) return AVERROR(ENOMEM); return 0; } int hw_device_setup_for_encode(OutputStream *ost) { HWDevice *dev; dev = hw_device_match_by_codec(ost->enc); if (dev) { ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref); if (!ost->enc_ctx->hw_device_ctx) return AVERROR(ENOMEM); return 0; } else { // No device required, or no device available. return 0; } } static int hwaccel_retrieve_data(AVCodecContext *avctx, AVFrame *input) { InputStream *ist = avctx->opaque; AVFrame *output = NULL; enum AVPixelFormat output_format = ist->hwaccel_output_format; int err; if (input->format == output_format) { // Nothing to do. 
return 0; } output = av_frame_alloc(); if (!output) return AVERROR(ENOMEM); output->format = output_format; err = av_hwframe_transfer_data(output, input, 0); if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to transfer data to " "output frame: %d.\n", err); goto fail; } err = av_frame_copy_props(output, input); if (err < 0) { av_frame_unref(output); goto fail; } av_frame_unref(input); av_frame_move_ref(input, output); av_frame_free(&output); return 0; fail: av_frame_free(&output); return err; } int hwaccel_decode_init(AVCodecContext *avctx) { InputStream *ist = avctx->opaque; ist->hwaccel_retrieve_data = &hwaccel_retrieve_data; return 0; }
null
null
null
null
71,951
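hw_device_init_from_string in the record above documents its grammar as "type=name:device,key=value,key2=value2" and walks it with strcspn over the delimiter set ":=@". The following self-contained C++ sketch mirrors just that tokenizing step; it is illustrative only, uses none of FFmpeg's APIs, and the sample spec string is hypothetical.

// Sketch: splitting a device spec the way the FFmpeg parser above does.
#include <cstring>
#include <iostream>
#include <string>

int main()
{
    const char *arg = "cuda=gpu0:0,primary_ctx=1";   // hypothetical spec
    size_t k = strcspn(arg, ":=@");                  // end of the type token
    std::string type(arg, k);
    const char *p = arg + k;

    std::string name;
    if (*p == '=') {                                 // optional "=name" part
        k = strcspn(p + 1, ":@");
        name.assign(p + 1, k);
        p += 1 + k;
    }
    // What remains is ":device,options", "@source", or empty.
    std::cout << "type=" << type << " name=" << name
              << " rest=" << (*p ? p : "(none)") << "\n";
    return 0;
}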
44,287
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
209,282
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 *  BSD LICENSE
 *
 *  Copyright(c) 2015 Broadcom Corporation. All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Broadcom Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _CLOCK_BCM_NSP_H
#define _CLOCK_BCM_NSP_H

/* GENPLL clock channel ID */
#define BCM_NSP_GENPLL                  0
#define BCM_NSP_GENPLL_PHY_CLK          1
#define BCM_NSP_GENPLL_ENET_SW_CLK      2
#define BCM_NSP_GENPLL_USB_PHY_REF_CLK  3
#define BCM_NSP_GENPLL_IPROCFAST_CLK    4
#define BCM_NSP_GENPLL_SATA1_CLK        5
#define BCM_NSP_GENPLL_SATA2_CLK        6

/* LCPLL0 clock channel ID */
#define BCM_NSP_LCPLL0                  0
#define BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK 1
#define BCM_NSP_LCPLL0_SDIO_CLK         2
#define BCM_NSP_LCPLL0_DDR_PHY_CLK      3

#endif /* _CLOCK_BCM_NSP_H */
null
null
null
null
117,629
39,318
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
204,313
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * algif_hash: User-space interface for hash algorithms * * This file provides the user-space API for hash algorithms. * * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/hash.h> #include <crypto/if_alg.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> #include <net/sock.h> struct hash_ctx { struct af_alg_sgl sgl; u8 *result; struct af_alg_completion completion; unsigned int len; bool more; struct ahash_request req; }; struct algif_hash_tfm { struct crypto_ahash *hash; bool has_key; }; static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; if (ctx->result) return 0; ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)); ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL); if (!ctx->result) return -ENOMEM; memset(ctx->result, 0, ds); return 0; } static void hash_free_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; if (!ctx->result) return; ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)); sock_kzfree_s(sk, ctx->result, ds); ctx->result = NULL; } static int hash_sendmsg(struct socket *sock, struct msghdr *msg, size_t ignored) { int limit = ALG_MAX_PAGES * PAGE_SIZE; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; long copied = 0; int err; if (limit > sk->sk_sndbuf) limit = sk->sk_sndbuf; lock_sock(sk); if (!ctx->more) { if ((msg->msg_flags & MSG_MORE)) hash_free_result(sk, ctx); err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), &ctx->completion); if (err) goto unlock; } ctx->more = 0; while (msg_data_left(msg)) { int len = msg_data_left(msg); if (len > limit) len = limit; len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len); if (len < 0) { err = copied ? 
0 : len; goto unlock; } ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req), &ctx->completion); af_alg_free_sg(&ctx->sgl); if (err) goto unlock; copied += len; iov_iter_advance(&msg->msg_iter, len); } err = 0; ctx->more = msg->msg_flags & MSG_MORE; if (!ctx->more) { err = hash_alloc_result(sk, ctx); if (err) goto unlock; ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), &ctx->completion); } unlock: release_sock(sk); return err ?: copied; } static ssize_t hash_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; int err; if (flags & MSG_SENDPAGE_NOTLAST) flags |= MSG_MORE; lock_sock(sk); sg_init_table(ctx->sgl.sg, 1); sg_set_page(ctx->sgl.sg, page, size, offset); if (!(flags & MSG_MORE)) { err = hash_alloc_result(sk, ctx); if (err) goto unlock; } else if (!ctx->more) hash_free_result(sk, ctx); ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size); if (!(flags & MSG_MORE)) { if (ctx->more) err = crypto_ahash_finup(&ctx->req); else err = crypto_ahash_digest(&ctx->req); } else { if (!ctx->more) { err = crypto_ahash_init(&ctx->req); err = af_alg_wait_for_completion(err, &ctx->completion); if (err) goto unlock; } err = crypto_ahash_update(&ctx->req); } err = af_alg_wait_for_completion(err, &ctx->completion); if (err) goto unlock; ctx->more = flags & MSG_MORE; unlock: release_sock(sk); return err ?: size; } static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)); bool result; int err; if (len > ds) len = ds; else if (len < ds) msg->msg_flags |= MSG_TRUNC; lock_sock(sk); result = ctx->result; err = hash_alloc_result(sk, ctx); if (err) goto unlock; ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); if (!result && !ctx->more) { err = af_alg_wait_for_completion( crypto_ahash_init(&ctx->req), &ctx->completion); if (err) goto unlock; } if (!result || ctx->more) { ctx->more = 0; err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), &ctx->completion); if (err) goto unlock; } err = memcpy_to_msg(msg, ctx->result, len); unlock: hash_free_result(sk, ctx); release_sock(sk); return err ?: len; } static int hash_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; struct ahash_request *req = &ctx->req; char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1]; struct sock *sk2; struct alg_sock *ask2; struct hash_ctx *ctx2; bool more; int err; lock_sock(sk); more = ctx->more; err = more ? 
crypto_ahash_export(req, state) : 0; release_sock(sk); if (err) return err; err = af_alg_accept(ask->parent, newsock, kern); if (err) return err; sk2 = newsock->sk; ask2 = alg_sk(sk2); ctx2 = ask2->private; ctx2->more = more; if (!more) return err; err = crypto_ahash_import(&ctx2->req, state); if (err) { sock_orphan(sk2); sock_put(sk2); } return err; } static struct proto_ops algif_hash_ops = { .family = PF_ALG, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .setsockopt = sock_no_setsockopt, .poll = sock_no_poll, .release = af_alg_release, .sendmsg = hash_sendmsg, .sendpage = hash_sendpage, .recvmsg = hash_recvmsg, .accept = hash_accept, }; static int hash_check_key(struct socket *sock) { int err = 0; struct sock *psk; struct alg_sock *pask; struct algif_hash_tfm *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); lock_sock(sk); if (ask->refcnt) goto unlock_child; psk = ask->parent; pask = alg_sk(ask->parent); tfm = pask->private; err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); if (!tfm->has_key) goto unlock; if (!pask->refcnt++) sock_hold(psk); ask->refcnt = 1; sock_put(psk); err = 0; unlock: release_sock(psk); unlock_child: release_sock(sk); return err; } static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg, size_t size) { int err; err = hash_check_key(sock); if (err) return err; return hash_sendmsg(sock, msg, size); } static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page, int offset, size_t size, int flags) { int err; err = hash_check_key(sock); if (err) return err; return hash_sendpage(sock, page, offset, size, flags); } static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { int err; err = hash_check_key(sock); if (err) return err; return hash_recvmsg(sock, msg, ignored, flags); } static int hash_accept_nokey(struct socket *sock, struct socket *newsock, int flags, bool kern) { int err; err = hash_check_key(sock); if (err) return err; return hash_accept(sock, newsock, flags, kern); } static struct proto_ops algif_hash_ops_nokey = { .family = PF_ALG, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .setsockopt = sock_no_setsockopt, .poll = sock_no_poll, .release = af_alg_release, .sendmsg = hash_sendmsg_nokey, .sendpage = hash_sendpage_nokey, .recvmsg = hash_recvmsg_nokey, .accept = hash_accept_nokey, }; static void *hash_bind(const char *name, u32 type, u32 mask) { struct algif_hash_tfm *tfm; struct crypto_ahash *hash; tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); if (!tfm) return ERR_PTR(-ENOMEM); hash = crypto_alloc_ahash(name, type, mask); if (IS_ERR(hash)) { kfree(tfm); return ERR_CAST(hash); } tfm->hash = hash; return tfm; } static void hash_release(void *private) { struct algif_hash_tfm *tfm = private; crypto_free_ahash(tfm->hash); kfree(tfm); } static int hash_setkey(void *private, const u8 *key, unsigned int keylen) { struct algif_hash_tfm *tfm = private; int err; err = crypto_ahash_setkey(tfm->hash, key, keylen); tfm->has_key = !err; return err; } static void hash_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; 
hash_free_result(sk, ctx); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); } static int hash_accept_parent_nokey(void *private, struct sock *sk) { struct hash_ctx *ctx; struct alg_sock *ask = alg_sk(sk); struct algif_hash_tfm *tfm = private; struct crypto_ahash *hash = tfm->hash; unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->result = NULL; ctx->len = len; ctx->more = 0; af_alg_init_completion(&ctx->completion); ask->private = ctx; ahash_request_set_tfm(&ctx->req, hash); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); sk->sk_destruct = hash_sock_destruct; return 0; } static int hash_accept_parent(void *private, struct sock *sk) { struct algif_hash_tfm *tfm = private; if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) return -ENOKEY; return hash_accept_parent_nokey(private, sk); } static const struct af_alg_type algif_type_hash = { .bind = hash_bind, .release = hash_release, .setkey = hash_setkey, .accept = hash_accept_parent, .accept_nokey = hash_accept_parent_nokey, .ops = &algif_hash_ops, .ops_nokey = &algif_hash_ops_nokey, .name = "hash", .owner = THIS_MODULE }; static int __init algif_hash_init(void) { return af_alg_register_type(&algif_type_hash); } static void __exit algif_hash_exit(void) { int err = af_alg_unregister_type(&algif_type_hash); BUG_ON(err); } module_init(algif_hash_init); module_exit(algif_hash_exit); MODULE_LICENSE("GPL");
null
null
null
null
112,660
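algif_hash in the record above is the kernel half of a socket protocol: userspace binds an AF_ALG socket with salg_type "hash", accepts an operation socket (hash_accept_parent), writes data (hash_sendmsg), and reads the digest back (hash_recvmsg). A minimal Linux-only usage sketch in C++, with error handling trimmed for brevity:

// Sketch: driving algif_hash from userspace via AF_ALG (Linux only).
#include <cstdio>
#include <cstring>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

int main()
{
    sockaddr_alg sa {};
    sa.salg_family = AF_ALG;
    std::strcpy(reinterpret_cast<char *>(sa.salg_type), "hash");
    std::strcpy(reinterpret_cast<char *>(sa.salg_name), "sha256");

    int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);        // transform socket
    bind(tfmfd, reinterpret_cast<sockaddr *>(&sa), sizeof(sa));
    int opfd = accept(tfmfd, nullptr, nullptr);           // operation socket

    const char msg[] = "abc";
    write(opfd, msg, 3);                                  // hash_sendmsg path
    unsigned char digest[32];
    read(opfd, digest, sizeof(digest));                   // hash_recvmsg path

    for (unsigned char b : digest)
        std::printf("%02x", b);
    std::printf("\n");
    close(opfd);
    close(tfmfd);
    return 0;
}

Because the write is issued without MSG_MORE, the kernel finalizes the hash, and the subsequent read returns the SHA-256 digest of "abc".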
9,400
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
9,400
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/reporting/reporting_uploader.h" #include <memory> #include <string> #include <utility> #include "base/bind.h" #include "base/callback.h" #include "base/run_loop.h" #include "net/cookies/cookie_store.h" #include "net/cookies/cookie_store_test_callbacks.h" #include "net/http/http_status_code.h" #include "net/test/embedded_test_server/embedded_test_server.h" #include "net/test/embedded_test_server/http_request.h" #include "net/test/embedded_test_server/http_response.h" #include "net/url_request/url_request_test_util.h" #include "testing/gtest/include/gtest/gtest.h" namespace net { namespace { class ReportingUploaderTest : public ::testing::Test { protected: ReportingUploaderTest() : server_(test_server::EmbeddedTestServer::TYPE_HTTPS), uploader_(ReportingUploader::Create(&context_)) {} TestURLRequestContext context_; test_server::EmbeddedTestServer server_; std::unique_ptr<ReportingUploader> uploader_; }; const char kUploadBody[] = "{}"; void CheckUpload(const test_server::HttpRequest& request) { EXPECT_EQ("POST", request.method_string); auto it = request.headers.find("Content-Type"); EXPECT_TRUE(it != request.headers.end()); EXPECT_EQ(ReportingUploader::kUploadContentType, it->second); EXPECT_TRUE(request.has_content); EXPECT_EQ(kUploadBody, request.content); } std::unique_ptr<test_server::HttpResponse> ReturnResponse( HttpStatusCode code, const test_server::HttpRequest& request) { auto response = std::make_unique<test_server::BasicHttpResponse>(); response->set_code(code); response->set_content(""); response->set_content_type("text/plain"); return std::move(response); } std::unique_ptr<test_server::HttpResponse> ReturnInvalidResponse( const test_server::HttpRequest& request) { return std::make_unique<test_server::RawHttpResponse>( "", "Not a valid HTTP response."); } class TestUploadCallback { public: TestUploadCallback() : called_(false), waiting_(false) {} ReportingUploader::UploadCallback callback() { return base::BindOnce(&TestUploadCallback::OnUploadComplete, base::Unretained(this)); } void WaitForCall() { if (called_) return; base::RunLoop run_loop; waiting_ = true; closure_ = run_loop.QuitClosure(); run_loop.Run(); } ReportingUploader::Outcome outcome() const { return outcome_; } private: void OnUploadComplete(ReportingUploader::Outcome outcome) { EXPECT_FALSE(called_); called_ = true; outcome_ = outcome; if (waiting_) { waiting_ = false; closure_.Run(); } } bool called_; ReportingUploader::Outcome outcome_; bool waiting_; base::Closure closure_; }; TEST_F(ReportingUploaderTest, Upload) { server_.RegisterRequestMonitor(base::BindRepeating(&CheckUpload)); server_.RegisterRequestHandler(base::BindRepeating(&ReturnResponse, HTTP_OK)); ASSERT_TRUE(server_.Start()); TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); } TEST_F(ReportingUploaderTest, Success) { server_.RegisterRequestHandler(base::BindRepeating(&ReturnResponse, HTTP_OK)); ASSERT_TRUE(server_.Start()); TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); EXPECT_EQ(ReportingUploader::Outcome::SUCCESS, callback.outcome()); } TEST_F(ReportingUploaderTest, NetworkError1) { ASSERT_TRUE(server_.Start()); GURL url = server_.GetURL("/"); ASSERT_TRUE(server_.ShutdownAndWaitUntilComplete()); 
TestUploadCallback callback; uploader_->StartUpload(url, kUploadBody, 0, callback.callback()); callback.WaitForCall(); EXPECT_EQ(ReportingUploader::Outcome::FAILURE, callback.outcome()); } TEST_F(ReportingUploaderTest, NetworkError2) { server_.RegisterRequestHandler(base::BindRepeating(&ReturnInvalidResponse)); ASSERT_TRUE(server_.Start()); TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); EXPECT_EQ(ReportingUploader::Outcome::FAILURE, callback.outcome()); } TEST_F(ReportingUploaderTest, ServerError) { server_.RegisterRequestHandler( base::BindRepeating(&ReturnResponse, HTTP_INTERNAL_SERVER_ERROR)); ASSERT_TRUE(server_.Start()); TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); EXPECT_EQ(ReportingUploader::Outcome::FAILURE, callback.outcome()); } TEST_F(ReportingUploaderTest, RemoveEndpoint) { server_.RegisterRequestHandler( base::BindRepeating(&ReturnResponse, HTTP_GONE)); ASSERT_TRUE(server_.Start()); TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); EXPECT_EQ(ReportingUploader::Outcome::REMOVE_ENDPOINT, callback.outcome()); } const char kRedirectPath[] = "/redirect"; std::unique_ptr<test_server::HttpResponse> ReturnRedirect( const std::string& location, const test_server::HttpRequest& request) { if (request.relative_url != "/") return std::unique_ptr<test_server::HttpResponse>(); auto response = std::make_unique<test_server::BasicHttpResponse>(); response->set_code(HTTP_FOUND); response->AddCustomHeader("Location", location); response->set_content( "Thank you, Mario! But our Princess is in another castle."); response->set_content_type("text/plain"); return std::move(response); } std::unique_ptr<test_server::HttpResponse> CheckRedirect( bool* redirect_followed_out, const test_server::HttpRequest& request) { if (request.relative_url != kRedirectPath) return std::unique_ptr<test_server::HttpResponse>(); *redirect_followed_out = true; return ReturnResponse(HTTP_OK, request); } TEST_F(ReportingUploaderTest, FollowHttpsRedirect) { bool followed = false; server_.RegisterRequestHandler( base::BindRepeating(&ReturnRedirect, kRedirectPath)); server_.RegisterRequestHandler( base::BindRepeating(&CheckRedirect, &followed)); ASSERT_TRUE(server_.Start()); TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); EXPECT_TRUE(followed); EXPECT_EQ(ReportingUploader::Outcome::SUCCESS, callback.outcome()); } TEST_F(ReportingUploaderTest, DontFollowHttpRedirect) { bool followed = false; test_server::EmbeddedTestServer http_server_; http_server_.RegisterRequestHandler( base::BindRepeating(&CheckRedirect, &followed)); ASSERT_TRUE(http_server_.Start()); const GURL target = http_server_.GetURL(kRedirectPath); server_.RegisterRequestHandler( base::BindRepeating(&ReturnRedirect, target.spec())); ASSERT_TRUE(server_.Start()); TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); EXPECT_FALSE(followed); EXPECT_EQ(ReportingUploader::Outcome::FAILURE, callback.outcome()); } void CheckNoCookie(const test_server::HttpRequest& request) { auto it = request.headers.find("Cookie"); EXPECT_TRUE(it == request.headers.end()); } TEST_F(ReportingUploaderTest, DontSendCookies) { 
server_.RegisterRequestMonitor(base::BindRepeating(&CheckNoCookie)); server_.RegisterRequestHandler(base::BindRepeating(&ReturnResponse, HTTP_OK)); ASSERT_TRUE(server_.Start()); ResultSavingCookieCallback<bool> cookie_callback; context_.cookie_store()->SetCookieWithOptionsAsync( server_.GetURL("/"), "foo=bar", CookieOptions(), base::BindRepeating(&ResultSavingCookieCallback<bool>::Run, base::Unretained(&cookie_callback))); cookie_callback.WaitUntilDone(); ASSERT_TRUE(cookie_callback.result()); TestUploadCallback upload_callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, upload_callback.callback()); upload_callback.WaitForCall(); } std::unique_ptr<test_server::HttpResponse> SendCookie( const test_server::HttpRequest& request) { auto response = std::make_unique<test_server::BasicHttpResponse>(); response->set_code(HTTP_OK); response->AddCustomHeader("Set-Cookie", "foo=bar"); response->set_content(""); response->set_content_type("text/plain"); return std::move(response); } TEST_F(ReportingUploaderTest, DontSaveCookies) { server_.RegisterRequestHandler(base::BindRepeating(&SendCookie)); ASSERT_TRUE(server_.Start()); TestUploadCallback upload_callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, upload_callback.callback()); upload_callback.WaitForCall(); GetCookieListCallback cookie_callback; context_.cookie_store()->GetCookieListWithOptionsAsync( server_.GetURL("/"), CookieOptions(), base::BindRepeating(&GetCookieListCallback::Run, base::Unretained(&cookie_callback))); cookie_callback.WaitUntilDone(); EXPECT_TRUE(cookie_callback.cookies().empty()); } std::unique_ptr<test_server::HttpResponse> ReturnCacheableResponse( int* request_count_out, const test_server::HttpRequest& request) { ++*request_count_out; auto response = std::make_unique<test_server::BasicHttpResponse>(); response->set_code(HTTP_OK); response->AddCustomHeader("Cache-Control", "max-age=86400"); response->set_content(""); response->set_content_type("text/plain"); return std::move(response); } // TODO(juliatuttle): This passes even if the uploader doesn't set // LOAD_DISABLE_CACHE. Maybe that's okay -- Chromium might not cache POST // responses ever -- but this test should either not exist or be sure that it is // testing actual functionality, not a default. TEST_F(ReportingUploaderTest, DontCacheResponse) { int request_count = 0; server_.RegisterRequestHandler( base::BindRepeating(&ReturnCacheableResponse, &request_count)); ASSERT_TRUE(server_.Start()); { TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); } EXPECT_EQ(1, request_count); { TestUploadCallback callback; uploader_->StartUpload(server_.GetURL("/"), kUploadBody, 0, callback.callback()); callback.WaitForCall(); } EXPECT_EQ(2, request_count); } } // namespace } // namespace net
null
null
null
null
6,263
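TestUploadCallback in the record above blocks the test with a base::RunLoop until the uploader's completion callback fires. A generic equivalent of that synchronization (plain std::condition_variable and std::thread rather than Chromium's message loop; TestCallback is a made-up name) is sketched below:

// Sketch: wait-for-async-callback helper, standard library only.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class TestCallback {
 public:
  void OnComplete(int outcome) {
    std::lock_guard<std::mutex> lock(mu_);
    outcome_ = outcome;
    called_ = true;
    cv_.notify_one();
  }
  int WaitForCall() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return called_; });   // Block until callback runs.
    return outcome_;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool called_ = false;
  int outcome_ = 0;
};

int main() {
  TestCallback cb;
  std::thread worker([&cb] { cb.OnComplete(200); });  // Simulated async upload.
  std::cout << "outcome: " << cb.WaitForCall() << "\n";
  worker.join();
  return 0;
}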
14,941
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
14,941
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_UKM_UKM_SERVICE_H_
#define COMPONENTS_UKM_UKM_SERVICE_H_

#include <stddef.h>

#include <memory>
#include <vector>

#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/sequence_checker.h"
#include "build/build_config.h"
#include "components/metrics/delegating_provider.h"
#include "components/metrics/metrics_provider.h"
#include "components/metrics/metrics_rotation_scheduler.h"
#include "components/ukm/ukm_recorder_impl.h"
#include "components/ukm/ukm_reporting_service.h"

class PrefRegistrySimple;
class PrefService;

namespace metrics {
class MetricsServiceClient;
class UkmBrowserTest;
class UkmEGTestHelper;
}

namespace ukm {

namespace debug {
class UkmDebugDataExtractor;
}

// The URL-Keyed Metrics (UKM) service is responsible for gathering and
// uploading reports that contain fine grained performance metrics including
// URLs for top-level navigations.
class UkmService : public UkmRecorderImpl {
 public:
  // Constructs a UkmService.
  // Calling code is responsible for ensuring that the lifetime of
  // |pref_service| is longer than the lifetime of UkmService.
  UkmService(PrefService* pref_service,
             metrics::MetricsServiceClient* client,
             bool restrict_to_whitelist_entries);
  ~UkmService() override;

  // Initializes the UKM service.
  void Initialize();

  // Enables/disables transmission of accumulated logs. Logs that have already
  // been created will remain persisted to disk.
  void EnableReporting();
  void DisableReporting();

#if defined(OS_ANDROID) || defined(OS_IOS)
  void OnAppEnterBackground();
  void OnAppEnterForeground();
#endif

  // Records any collected data into logs, and writes to disk.
  void Flush();

  // Deletes any unsent local data.
  void Purge();

  // Resets the client id stored in prefs.
  void ResetClientId();

  // Registers the specified |provider| to provide additional metrics into the
  // UKM log. Should be called during MetricsService initialization only.
  void RegisterMetricsProvider(
      std::unique_ptr<metrics::MetricsProvider> provider);

  // Registers the names of all of the preferences used by UkmService in
  // the provided PrefRegistry.
  static void RegisterPrefs(PrefRegistrySimple* registry);

  int32_t report_count() const { return report_count_; }

 private:
  friend ::metrics::UkmBrowserTest;
  friend ::metrics::UkmEGTestHelper;
  friend ::ukm::debug::UkmDebugDataExtractor;
  FRIEND_TEST_ALL_PREFIXES(UkmServiceTest, AddEntryWithEmptyMetrics);
  FRIEND_TEST_ALL_PREFIXES(UkmServiceTest, EntryBuilderAndSerialization);
  FRIEND_TEST_ALL_PREFIXES(UkmServiceTest,
                           LogsUploadedOnlyWhenHavingSourcesOrEntries);
  FRIEND_TEST_ALL_PREFIXES(UkmServiceTest, MetricsProviderTest);
  FRIEND_TEST_ALL_PREFIXES(UkmServiceTest, PersistAndPurge);
  FRIEND_TEST_ALL_PREFIXES(UkmServiceTest, WhitelistEntryTest);

  // Starts metrics client initialization.
  void StartInitTask();

  // Called when initialization tasks are complete, to notify the scheduler
  // that it can begin calling RotateLog.
  void FinishedInitTask();

  // Periodically called by scheduler_ to advance processing of logs.
  void RotateLog();

  // Constructs a new Report from available data and stores it in
  // persisted_logs_.
  void BuildAndStoreLog();

  // Starts an upload of the next log from persisted_logs_.
  void StartScheduledUpload();

  // Called by log_uploader_ when an upload is completed.
  void OnLogUploadComplete(int response_code);

  // ukm::UkmRecorderImpl:
  bool ShouldRestrictToWhitelistedEntries() const override;

  // A weak pointer to the PrefService used to read and write preferences.
  PrefService* pref_service_;

  // If true, only whitelisted Entries should be recorded.
  bool restrict_to_whitelist_entries_;

  // The UKM client id stored in prefs.
  uint64_t client_id_;

  // The UKM session id stored in prefs.
  int32_t session_id_;

  // The number of reports generated this session.
  int32_t report_count_;

  // Used to interact with the embedder. Weak pointer; must outlive |this|
  // instance.
  metrics::MetricsServiceClient* const client_;

  // Registered metrics providers.
  metrics::DelegatingProvider metrics_providers_;

  // Log reporting service.
  ukm::UkmReportingService reporting_service_;

  // The scheduler for determining when uploads should happen.
  std::unique_ptr<metrics::MetricsRotationScheduler> scheduler_;

  SEQUENCE_CHECKER(sequence_checker_);

  bool initialize_started_;
  bool initialize_complete_;

  // Weak pointers factory used to post task on different threads. All weak
  // pointers managed by this factory have the same lifetime as UkmService.
  base::WeakPtrFactory<UkmService> self_ptr_factory_;

  DISALLOW_COPY_AND_ASSIGN(UkmService);
};

}  // namespace ukm

#endif  // COMPONENTS_UKM_UKM_SERVICE_H_
null
null
null
null
11,804
46,086
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
46,086
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef ASH_WM_WINDOW_POSITIONER_H_
#define ASH_WM_WINDOW_POSITIONER_H_

#include "ash/ash_export.h"
#include "base/macros.h"
#include "ui/base/ui_base_types.h"

namespace aura {
class Window;
}

namespace gfx {
class Rect;
}

namespace ash {

// A collection of utilities that assist with placing new windows.
class ASH_EXPORT WindowPositioner {
 public:
  // Computes and returns the bounds and show state for new window
  // based on the parameter passed AND existing windows. |is_saved_bounds|
  // indicates the |bounds_in_out| is the saved bounds.
  static void GetBoundsAndShowStateForNewWindow(
      bool is_saved_bounds,
      ui::WindowShowState show_state_in,
      gfx::Rect* bounds_in_out,
      ui::WindowShowState* show_state_out);

  // Check if after removal or hide of the given |removed_window| an
  // automated desktop location management can be performed and
  // rearrange accordingly.
  static void RearrangeVisibleWindowOnHideOrRemove(
      const aura::Window* removed_window);

  // Turn the automatic positioning logic temporarily off. Returns the previous
  // state.
  static bool DisableAutoPositioning(bool ignore);

  // Check if after insertion or showing of the given |added_window|
  // an automated desktop location management can be performed and
  // rearrange accordingly.
  static void RearrangeVisibleWindowOnShow(aura::Window* added_window);

  WindowPositioner();
  ~WindowPositioner();

 protected:
  friend class WindowPositionerTest;

  static constexpr int kWindowOffset = 32;

  DISALLOW_COPY_AND_ASSIGN(WindowPositioner);
};

}  // namespace ash

#endif  // ASH_WM_WINDOW_POSITIONER_H_
null
null
null
null
42,949
38,101
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
203,096
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _PERF_UI_BROWSER_HISTS_H_
#define _PERF_UI_BROWSER_HISTS_H_ 1

#include "ui/browser.h"

struct hist_browser {
        struct ui_browser   b;
        struct hists        *hists;
        struct hist_entry   *he_selection;
        struct map_symbol   *selection;
        struct hist_browser_timer *hbt;
        struct pstack       *pstack;
        struct perf_env     *env;
        int                 print_seq;
        bool                show_dso;
        bool                show_headers;
        float               min_pcnt;
        u64                 nr_non_filtered_entries;
        u64                 nr_hierarchy_entries;
        u64                 nr_callchain_rows;
        bool                c2c_filter;

        /* Get title string. */
        int                 (*title)(struct hist_browser *browser,
                                     char *bf, size_t size);
};

struct hist_browser *hist_browser__new(struct hists *hists);
void hist_browser__delete(struct hist_browser *browser);
int hist_browser__run(struct hist_browser *browser, const char *help);

void hist_browser__init(struct hist_browser *browser,
                        struct hists *hists);
#endif /* _PERF_UI_BROWSER_HISTS_H_ */
null
null
null
null
111,443
58,901
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
58,901
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <stddef.h> #include <utility> #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "build/build_config.h" #include "chrome/browser/extensions/extension_apitest.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/browser_commands.h" #include "chrome/browser/ui/tabs/tab_strip_model.h" #include "chrome/common/url_constants.h" #include "chrome/test/base/ui_test_utils.h" #include "content/public/browser/notification_types.h" #include "content/public/browser/render_frame_host.h" #include "content/public/browser/render_process_host.h" #include "content/public/browser/site_instance.h" #include "content/public/browser/web_contents.h" #include "content/public/test/browser_test_utils.h" #include "extensions/browser/extension_host.h" #include "extensions/browser/extension_registry.h" #include "extensions/browser/process_map.h" #include "extensions/common/switches.h" #include "net/base/escape.h" #include "net/dns/mock_host_resolver.h" #include "net/test/embedded_test_server/embedded_test_server.h" #include "net/test/embedded_test_server/http_request.h" #include "net/test/embedded_test_server/http_response.h" using content::ExecuteScript; using content::ExecuteScriptAndExtractString; using content::NavigationController; using content::WebContents; namespace extensions { namespace { std::string WrapForJavascriptAndExtract(const char* javascript_expression) { return std::string("window.domAutomationController.send(") + javascript_expression + ")"; } std::unique_ptr<net::test_server::HttpResponse> HandleExpectAndSetCookieRequest( const net::EmbeddedTestServer* test_server, const net::test_server::HttpRequest& request) { if (!base::StartsWith(request.relative_url, "/expect-and-set-cookie?", base::CompareCase::SENSITIVE)) return std::unique_ptr<net::test_server::HttpResponse>(); std::unique_ptr<net::test_server::BasicHttpResponse> http_response( new net::test_server::BasicHttpResponse); http_response->set_code(net::HTTP_OK); std::string request_cookies; auto it = request.headers.find("Cookie"); if (it != request.headers.end()) request_cookies = it->second; size_t query_string_pos = request.relative_url.find('?'); std::string query_string = request.relative_url.substr(query_string_pos + 1); url::Component query(0, query_string.length()), key_pos, value_pos; bool expectations_satisfied = true; std::vector<std::string> cookies_to_set; while (url::ExtractQueryKeyValue(query_string.c_str(), &query, &key_pos, &value_pos)) { std::string escaped_key(query_string.substr(key_pos.begin, key_pos.len)); std::string escaped_value( query_string.substr(value_pos.begin, value_pos.len)); std::string key = net::UnescapeURLComponent( escaped_key, net::UnescapeRule::NORMAL | net::UnescapeRule::SPACES | net::UnescapeRule::PATH_SEPARATORS | net::UnescapeRule::URL_SPECIAL_CHARS_EXCEPT_PATH_SEPARATORS); std::string value = net::UnescapeURLComponent( escaped_value, net::UnescapeRule::NORMAL | net::UnescapeRule::SPACES | net::UnescapeRule::PATH_SEPARATORS | net::UnescapeRule::URL_SPECIAL_CHARS_EXCEPT_PATH_SEPARATORS); if (key == "expect") { if (request_cookies.find(value) == std::string::npos) expectations_satisfied = false; } else if (key == "set") { cookies_to_set.push_back(value); } else { return nullptr; } } if (expectations_satisfied) { for (size_t i = 0; i < cookies_to_set.size(); i++) 
http_response->AddCustomHeader("Set-Cookie", cookies_to_set[i]); } return std::move(http_response); } class IsolatedAppTest : public ExtensionBrowserTest { public: void SetUpOnMainThread() override { ExtensionBrowserTest::SetUpOnMainThread(); host_resolver()->AddRule("*", "127.0.0.1"); } // Returns whether the given tab's current URL has the given cookie. bool WARN_UNUSED_RESULT HasCookie(WebContents* contents, const std::string& cookie) { int value_size; std::string actual_cookie; ui_test_utils::GetCookies(contents->GetURL(), contents, &value_size, &actual_cookie); return actual_cookie.find(cookie) != std::string::npos; } const Extension* GetInstalledApp(WebContents* contents) { content::BrowserContext* browser_context = contents->GetBrowserContext(); ExtensionRegistry* registry = ExtensionRegistry::Get(browser_context); std::set<std::string> extension_ids = ProcessMap::Get(browser_context) ->GetExtensionsInProcess( contents->GetMainFrame()->GetProcess()->GetID()); for (std::set<std::string>::iterator iter = extension_ids.begin(); iter != extension_ids.end(); ++iter) { const Extension* installed_app = registry->enabled_extensions().GetByID(*iter); if (installed_app && installed_app->is_app()) return installed_app; } return NULL; } private: void SetUpCommandLine(base::CommandLine* command_line) override { ExtensionBrowserTest::SetUpCommandLine(command_line); command_line->AppendSwitch(switches::kEnableExperimentalExtensionApis); } }; IN_PROC_BROWSER_TEST_F(IsolatedAppTest, CrossProcessClientRedirect) { ASSERT_TRUE(embedded_test_server()->Start()); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app1"))); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app2"))); GURL base_url = embedded_test_server()->GetURL("/extensions/isolated_apps/"); GURL::Replacements replace_host; replace_host.SetHostStr("localhost"); base_url = base_url.ReplaceComponents(replace_host); ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app1/main.html")); // Redirect to app2. GURL redirect_url(embedded_test_server()->GetURL( "/extensions/isolated_apps/app2/redirect.html")); ui_test_utils::NavigateToURL(browser(), redirect_url); // Go back twice. // If bug fixed, we cannot go back anymore. // If not fixed, we will redirect back to app2 and can go back again. EXPECT_TRUE(chrome::CanGoBack(browser())); chrome::GoBack(browser(), WindowOpenDisposition::CURRENT_TAB); EXPECT_TRUE(chrome::CanGoBack(browser())); chrome::GoBack(browser(), WindowOpenDisposition::CURRENT_TAB); EXPECT_FALSE(chrome::CanGoBack(browser())); // We also need to test script-initialized navigation (document.location.href) // happened after page finishes loading. This one will also triggered the // willPerformClientRedirect hook in RenderViewImpl but should not replace // the previous history entry. ui_test_utils::NavigateToURLWithDisposition( browser(), base_url.Resolve("non_app/main.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); WebContents* tab0 = browser()->tab_strip_model()->GetWebContentsAt(1); // Using JavaScript to navigate to app2 page, // after the non_app page has finished loading. 
content::WindowedNotificationObserver observer1( content::NOTIFICATION_LOAD_STOP, content::Source<NavigationController>( &browser()->tab_strip_model()->GetActiveWebContents()-> GetController())); std::string script = base::StringPrintf( "document.location.href=\"%s\";", base_url.Resolve("app2/main.html").spec().c_str()); EXPECT_TRUE(ExecuteScript(tab0, script)); observer1.Wait(); // This kind of navigation should not replace previous navigation entry. EXPECT_TRUE(chrome::CanGoBack(browser())); chrome::GoBack(browser(), WindowOpenDisposition::CURRENT_TAB); EXPECT_FALSE(chrome::CanGoBack(browser())); } // Tests that cookies set within an isolated app are not visible to normal // pages or other apps. // // TODO(ajwong): Also test what happens if an app spans multiple sites in its // extent. These origins should also be isolated, but still have origin-based // separation as you would expect. IN_PROC_BROWSER_TEST_F(IsolatedAppTest, CookieIsolation) { ASSERT_TRUE(embedded_test_server()->Start()); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app1"))); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app2"))); // The app under test acts on URLs whose host is "localhost", // so the URLs we navigate to must have host "localhost". GURL base_url = embedded_test_server()->GetURL("/extensions/isolated_apps/"); GURL::Replacements replace_host; replace_host.SetHostStr("localhost"); base_url = base_url.ReplaceComponents(replace_host); ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app1/main.html")); ui_test_utils::NavigateToURLWithDisposition( browser(), base_url.Resolve("app2/main.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); ui_test_utils::NavigateToURLWithDisposition( browser(), base_url.Resolve("non_app/main.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); ASSERT_EQ(3, browser()->tab_strip_model()->count()); // Ensure first two tabs have installed apps. WebContents* tab0 = browser()->tab_strip_model()->GetWebContentsAt(0); WebContents* tab1 = browser()->tab_strip_model()->GetWebContentsAt(1); WebContents* tab2 = browser()->tab_strip_model()->GetWebContentsAt(2); ASSERT_TRUE(GetInstalledApp(tab0)); ASSERT_TRUE(GetInstalledApp(tab1)); ASSERT_TRUE(!GetInstalledApp(tab2)); // Check that tabs see cannot each other's localStorage even though they are // in the same origin. ASSERT_TRUE(ExecuteScript( tab0, "window.localStorage.setItem('testdata', 'ls_app1');")); ASSERT_TRUE(ExecuteScript( tab1, "window.localStorage.setItem('testdata', 'ls_app2');")); ASSERT_TRUE(ExecuteScript( tab2, "window.localStorage.setItem('testdata', 'ls_normal');")); const std::string& kRetrieveLocalStorage = WrapForJavascriptAndExtract( "window.localStorage.getItem('testdata') || 'badval'"); std::string result; ASSERT_TRUE(ExecuteScriptAndExtractString( tab0, kRetrieveLocalStorage.c_str(), &result)); EXPECT_EQ("ls_app1", result); ASSERT_TRUE(ExecuteScriptAndExtractString( tab1, kRetrieveLocalStorage.c_str(), &result)); EXPECT_EQ("ls_app2", result); ASSERT_TRUE(ExecuteScriptAndExtractString( tab2, kRetrieveLocalStorage.c_str(), &result)); EXPECT_EQ("ls_normal", result); // Check that each tab sees its own cookie. EXPECT_TRUE(HasCookie(tab0, "app1=3")); EXPECT_TRUE(HasCookie(tab1, "app2=4")); EXPECT_TRUE(HasCookie(tab2, "normalPage=5")); // Check that app1 tab cannot see the other cookies. 
EXPECT_FALSE(HasCookie(tab0, "app2")); EXPECT_FALSE(HasCookie(tab0, "normalPage")); // Check that app2 tab cannot see the other cookies. EXPECT_FALSE(HasCookie(tab1, "app1")); EXPECT_FALSE(HasCookie(tab1, "normalPage")); // Check that normal tab cannot see the other cookies. EXPECT_FALSE(HasCookie(tab2, "app1")); EXPECT_FALSE(HasCookie(tab2, "app2")); // Check that the non_app iframe cookie is associated with app1 and not the // normal tab. (For now, iframes are always rendered in their parent // process, even if they aren't in the app manifest.) EXPECT_TRUE(HasCookie(tab0, "nonAppFrame=6")); EXPECT_FALSE(HasCookie(tab2, "nonAppFrame")); // Check that isolation persists even if the tab crashes and is reloaded. chrome::SelectNumberedTab(browser(), 0); content::CrashTab(tab0); content::WindowedNotificationObserver observer( content::NOTIFICATION_LOAD_STOP, content::Source<NavigationController>( &browser()->tab_strip_model()->GetActiveWebContents()-> GetController())); chrome::Reload(browser(), WindowOpenDisposition::CURRENT_TAB); observer.Wait(); EXPECT_TRUE(HasCookie(tab0, "app1=3")); EXPECT_FALSE(HasCookie(tab0, "app2")); EXPECT_FALSE(HasCookie(tab0, "normalPage")); } // This test is disabled due to being flaky. http://crbug.com/145588 // Ensure that cookies are not isolated if the isolated apps are not installed. IN_PROC_BROWSER_TEST_F(IsolatedAppTest, DISABLED_NoCookieIsolationWithoutApp) { ASSERT_TRUE(embedded_test_server()->Start()); // The app under test acts on URLs whose host is "localhost", // so the URLs we navigate to must have host "localhost". GURL base_url = embedded_test_server()->GetURL("/extensions/isolated_apps/"); GURL::Replacements replace_host; replace_host.SetHostStr("localhost"); base_url = base_url.ReplaceComponents(replace_host); ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app1/main.html")); ui_test_utils::NavigateToURLWithDisposition( browser(), base_url.Resolve("app2/main.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); ui_test_utils::NavigateToURLWithDisposition( browser(), base_url.Resolve("non_app/main.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); ASSERT_EQ(3, browser()->tab_strip_model()->count()); // Check that tabs see each other's cookies. EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(0), "app2=4")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(0), "normalPage=5")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(0), "nonAppFrame=6")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(1), "app1=3")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(1), "normalPage=5")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(1), "nonAppFrame=6")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(2), "app1=3")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(2), "app2=4")); EXPECT_TRUE(HasCookie(browser()->tab_strip_model()->GetWebContentsAt(2), "nonAppFrame=6")); // Check that all tabs share the same localStorage if they have the same // origin. 
WebContents* app1_wc = browser()->tab_strip_model()->GetWebContentsAt(0); WebContents* app2_wc = browser()->tab_strip_model()->GetWebContentsAt(1); WebContents* non_app_wc = browser()->tab_strip_model()->GetWebContentsAt(2); ASSERT_TRUE(ExecuteScript( app1_wc, "window.localStorage.setItem('testdata', 'ls_app1');")); ASSERT_TRUE(ExecuteScript( app2_wc, "window.localStorage.setItem('testdata', 'ls_app2');")); ASSERT_TRUE(ExecuteScript( non_app_wc, "window.localStorage.setItem('testdata', 'ls_normal');")); const std::string& kRetrieveLocalStorage = WrapForJavascriptAndExtract("window.localStorage.getItem('testdata')"); std::string result; ASSERT_TRUE(ExecuteScriptAndExtractString( app1_wc, kRetrieveLocalStorage.c_str(), &result)); EXPECT_EQ("ls_normal", result); ASSERT_TRUE(ExecuteScriptAndExtractString( app2_wc, kRetrieveLocalStorage.c_str(), &result)); EXPECT_EQ("ls_normal", result); ASSERT_TRUE(ExecuteScriptAndExtractString( non_app_wc, kRetrieveLocalStorage.c_str(), &result)); EXPECT_EQ("ls_normal", result); } // http://crbug.com/174926 #if (defined(OS_WIN) && !defined(NDEBUG)) || defined(OS_MACOSX) #define MAYBE_SubresourceCookieIsolation DISABLED_SubresourceCookieIsolation #else #define MAYBE_SubresourceCookieIsolation SubresourceCookieIsolation #endif // (defined(OS_WIN) && !defined(NDEBUG)) || defined(OS_MACOSX) // Tests that subresource and media requests use the app's cookie store. // See http://crbug.com/141172. IN_PROC_BROWSER_TEST_F(IsolatedAppTest, MAYBE_SubresourceCookieIsolation) { embedded_test_server()->RegisterRequestHandler( base::Bind(&HandleExpectAndSetCookieRequest, embedded_test_server())); ASSERT_TRUE(embedded_test_server()->Start()); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app1"))); // The app under test acts on URLs whose host is "localhost", // so the URLs we navigate to must have host "localhost". GURL root_url = embedded_test_server()->GetURL("/"); GURL base_url = embedded_test_server()->GetURL("/extensions/isolated_apps/"); GURL::Replacements replace_host; replace_host.SetHostStr("localhost"); root_url = root_url.ReplaceComponents(replace_host); base_url = base_url.ReplaceComponents(replace_host); // First set cookies inside and outside the app. ui_test_utils::NavigateToURL( browser(), root_url.Resolve("expect-and-set-cookie?set=nonApp%3d1")); WebContents* tab0 = browser()->tab_strip_model()->GetWebContentsAt(0); ASSERT_FALSE(GetInstalledApp(tab0)); ui_test_utils::NavigateToURLWithDisposition( browser(), base_url.Resolve("app1/main.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); WebContents* tab1 = browser()->tab_strip_model()->GetWebContentsAt(1); ASSERT_TRUE(GetInstalledApp(tab1)); // Check that each tab sees its own cookie. EXPECT_TRUE(HasCookie(tab0, "nonApp=1")); EXPECT_FALSE(HasCookie(tab0, "app1=3")); EXPECT_FALSE(HasCookie(tab1, "nonApp=1")); EXPECT_TRUE(HasCookie(tab1, "app1=3")); // Now visit an app page that loads subresources located outside the app. // For both images and video tags, it loads two URLs: // - One will set nonApp{Media,Image}=1 cookies if nonApp=1 is set. // - One will set app1{Media,Image}=1 cookies if app1=3 is set. // We expect only the app's cookies to be present. // We must wait for the onload event, to allow the subresources to finish. 
content::WindowedNotificationObserver observer( content::NOTIFICATION_LOAD_COMPLETED_MAIN_FRAME, content::Source<WebContents>( browser()->tab_strip_model()->GetActiveWebContents())); ui_test_utils::NavigateToURL( browser(), base_url.Resolve("app1/app_subresources.html")); observer.Wait(); EXPECT_FALSE(HasCookie(tab1, "nonAppMedia=1")); EXPECT_TRUE(HasCookie(tab1, "app1Media=1")); EXPECT_FALSE(HasCookie(tab1, "nonAppImage=1")); EXPECT_TRUE(HasCookie(tab1, "app1Image=1")); // Also create a non-app tab to ensure no new cookies were set in that jar. ui_test_utils::NavigateToURLWithDisposition( browser(), root_url, WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); WebContents* tab2 = browser()->tab_strip_model()->GetWebContentsAt(2); EXPECT_FALSE(HasCookie(tab2, "nonAppMedia=1")); EXPECT_FALSE(HasCookie(tab2, "app1Media=1")); EXPECT_FALSE(HasCookie(tab2, "nonAppImage=1")); EXPECT_FALSE(HasCookie(tab2, "app1Image=1")); } // Test is flaky on Windows. // http://crbug.com/247667 #if defined(OS_WIN) #define MAYBE_IsolatedAppProcessModel DISABLED_IsolatedAppProcessModel #else #define MAYBE_IsolatedAppProcessModel IsolatedAppProcessModel #endif // defined(OS_WIN) // Tests that isolated apps processes do not render top-level non-app pages. // This is true even in the case of the OAuth workaround for hosted apps, // where non-app popups may be kept in the hosted app process. IN_PROC_BROWSER_TEST_F(IsolatedAppTest, MAYBE_IsolatedAppProcessModel) { ASSERT_TRUE(embedded_test_server()->Start()); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app1"))); // The app under test acts on URLs whose host is "localhost", // so the URLs we navigate to must have host "localhost". GURL base_url = embedded_test_server()->GetURL("/extensions/isolated_apps/"); GURL::Replacements replace_host; replace_host.SetHostStr("localhost"); base_url = base_url.ReplaceComponents(replace_host); // Create three tabs in the isolated app in different ways. ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app1/main.html")); ui_test_utils::NavigateToURLWithDisposition( browser(), base_url.Resolve("app1/main.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); // For the third tab, use window.open to keep it in process with an opener. OpenWindow(browser()->tab_strip_model()->GetWebContentsAt(0), base_url.Resolve("app1/main.html"), true, true, nullptr); // In a fourth tab, use window.open to a non-app URL. It should open in a // separate process, even though this would trigger the OAuth workaround // for hosted apps (from http://crbug.com/59285). OpenWindow(browser()->tab_strip_model()->GetWebContentsAt(0), base_url.Resolve("non_app/main.html"), false, true, nullptr); // We should now have four tabs, the first and third sharing a process. // The second one is an independent instance in a separate process. 
ASSERT_EQ(4, browser()->tab_strip_model()->count()); int process_id_0 = browser() ->tab_strip_model() ->GetWebContentsAt(0) ->GetMainFrame() ->GetProcess() ->GetID(); int process_id_1 = browser() ->tab_strip_model() ->GetWebContentsAt(1) ->GetMainFrame() ->GetProcess() ->GetID(); EXPECT_NE(process_id_0, process_id_1); EXPECT_EQ(process_id_0, browser() ->tab_strip_model() ->GetWebContentsAt(2) ->GetMainFrame() ->GetProcess() ->GetID()); EXPECT_NE(process_id_0, browser() ->tab_strip_model() ->GetWebContentsAt(3) ->GetMainFrame() ->GetProcess() ->GetID()); // Navigating the second tab out of the app should cause a process swap. const GURL& non_app_url(base_url.Resolve("non_app/main.html")); NavigateInRenderer(browser()->tab_strip_model()->GetWebContentsAt(1), non_app_url); EXPECT_NE(process_id_1, browser() ->tab_strip_model() ->GetWebContentsAt(1) ->GetMainFrame() ->GetProcess() ->GetID()); } // This test no longer passes, since we don't properly isolate sessionStorage // for isolated apps. This was broken as part of the changes for storage // partition support for webview tags. // TODO(nasko): If isolated apps is no longer developed, this test should be // removed. http://crbug.com/159932 IN_PROC_BROWSER_TEST_F(IsolatedAppTest, DISABLED_SessionStorage) { ASSERT_TRUE(embedded_test_server()->Start()); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app1"))); ASSERT_TRUE(LoadExtension(test_data_dir_.AppendASCII("isolated_apps/app2"))); // The app under test acts on URLs whose host is "localhost", // so the URLs we navigate to must have host "localhost". GURL base_url = embedded_test_server()->GetURL("/extensions/isolated_apps/"); GURL::Replacements replace_host; replace_host.SetHostStr("localhost"); base_url = base_url.ReplaceComponents(replace_host); // Enter some state into sessionStorage three times on the same origin, but // for three URLs that correspond to app1, app2, and a non-isolated site. ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app1/main.html")); ASSERT_TRUE(ExecuteScript( browser()->tab_strip_model()->GetWebContentsAt(0), "window.sessionStorage.setItem('testdata', 'ss_app1');")); ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app2/main.html")); ASSERT_TRUE(ExecuteScript( browser()->tab_strip_model()->GetWebContentsAt(0), "window.sessionStorage.setItem('testdata', 'ss_app2');")); ui_test_utils::NavigateToURL( browser(), base_url.Resolve("non_app/main.html")); ASSERT_TRUE(ExecuteScript( browser()->tab_strip_model()->GetWebContentsAt(0), "window.sessionStorage.setItem('testdata', 'ss_normal');")); // Now, ensure that the sessionStorage is correctly partitioned, and persists // when we navigate around all over the dang place. 
const std::string& kRetrieveSessionStorage = WrapForJavascriptAndExtract( "window.sessionStorage.getItem('testdata') || 'badval'"); std::string result; ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app1/main.html")); ASSERT_TRUE(ExecuteScriptAndExtractString( browser()->tab_strip_model()->GetWebContentsAt(0), kRetrieveSessionStorage.c_str(), &result)); EXPECT_EQ("ss_app1", result); ui_test_utils::NavigateToURL(browser(), base_url.Resolve("app2/main.html")); ASSERT_TRUE(ExecuteScriptAndExtractString( browser()->tab_strip_model()->GetWebContentsAt(0), kRetrieveSessionStorage.c_str(), &result)); EXPECT_EQ("ss_app2", result); ui_test_utils::NavigateToURL( browser(), base_url.Resolve("non_app/main.html")); ASSERT_TRUE(ExecuteScriptAndExtractString( browser()->tab_strip_model()->GetWebContentsAt(0), kRetrieveSessionStorage.c_str(), &result)); EXPECT_EQ("ss_normal", result); } } // namespace } // namespace extensions
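// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the tests above build
// their extraction scripts with WrapForJavascriptAndExtract() and read the
// result back through ExecuteScriptAndExtractString(). The real helper is
// defined earlier in this source file, outside this excerpt, so the body
// below is an assumption: ExecuteScriptAndExtractString() waits for the
// renderer to reply via window.domAutomationController.send(), so a wrapper
// of this shape just routes the expression's value through that call.
#include <string>

std::string WrapForJavascriptAndExtractSketch(const char* expression) {
  // The "|| 'badval'" guards used by the tests exist so that send() never
  // delivers null, which would fail the string extraction.
  return std::string("window.domAutomationController.send(") + expression +
         ")";
}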
null
null
null
null
55,764
1,891
null
train_val
04b570817b2b38e35675b17328239746212f4c3f
154,948
FFmpeg
0
https://github.com/FFmpeg/FFmpeg
2018-06-01 01:23:12+05:30
/* * Wing Commander/Xan Video Decoder * Copyright (C) 2003 The FFmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Xan video decoder for Wing Commander III computer game * by Mario Brito (mbrito@student.dei.uc.pt) * and Mike Melanson (melanson@pcisys.net) * * The xan_wc3 decoder outputs PAL8 data. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "libavutil/intreadwrite.h" #include "libavutil/mem.h" #define BITSTREAM_READER_LE #include "avcodec.h" #include "bytestream.h" #include "get_bits.h" #include "internal.h" #define RUNTIME_GAMMA 0 #define VGA__TAG MKTAG('V', 'G', 'A', ' ') #define PALT_TAG MKTAG('P', 'A', 'L', 'T') #define SHOT_TAG MKTAG('S', 'H', 'O', 'T') #define PALETTE_COUNT 256 #define PALETTE_SIZE (PALETTE_COUNT * 3) #define PALETTES_MAX 256 typedef struct XanContext { AVCodecContext *avctx; AVFrame *last_frame; const uint8_t *buf; int size; /* scratch space */ uint8_t *buffer1; int buffer1_size; uint8_t *buffer2; int buffer2_size; unsigned *palettes; int palettes_count; int cur_palette; int frame_size; } XanContext; static av_cold int xan_decode_end(AVCodecContext *avctx) { XanContext *s = avctx->priv_data; av_frame_free(&s->last_frame); av_freep(&s->buffer1); av_freep(&s->buffer2); av_freep(&s->palettes); return 0; } static av_cold int xan_decode_init(AVCodecContext *avctx) { XanContext *s = avctx->priv_data; s->avctx = avctx; s->frame_size = 0; avctx->pix_fmt = AV_PIX_FMT_PAL8; s->buffer1_size = avctx->width * avctx->height; s->buffer1 = av_malloc(s->buffer1_size); if (!s->buffer1) return AVERROR(ENOMEM); s->buffer2_size = avctx->width * avctx->height; s->buffer2 = av_malloc(s->buffer2_size + 130); if (!s->buffer2) { av_freep(&s->buffer1); return AVERROR(ENOMEM); } s->last_frame = av_frame_alloc(); if (!s->last_frame) { xan_decode_end(avctx); return AVERROR(ENOMEM); } return 0; } static int xan_huffman_decode(uint8_t *dest, int dest_len, const uint8_t *src, int src_len) { uint8_t byte = *src++; uint8_t ival = byte + 0x16; const uint8_t * ptr = src + byte*2; int ptr_len = src_len - 1 - byte*2; uint8_t val = ival; uint8_t *dest_end = dest + dest_len; uint8_t *dest_start = dest; int ret; GetBitContext gb; if ((ret = init_get_bits8(&gb, ptr, ptr_len)) < 0) return ret; while (val != 0x16) { unsigned idx; if (get_bits_left(&gb) < 1) return AVERROR_INVALIDDATA; idx = val - 0x17 + get_bits1(&gb) * byte; if (idx >= 2 * byte) return AVERROR_INVALIDDATA; val = src[idx]; if (val < 0x16) { if (dest >= dest_end) return dest_len; *dest++ = val; val = ival; } } return dest - dest_start; } /** * unpack simple compression * * @param dest destination buffer of dest_len, must be padded with at least 130 bytes */ static void xan_unpack(uint8_t *dest, int dest_len, const uint8_t *src, int src_len) { uint8_t opcode; int size; uint8_t *dest_org = dest; uint8_t 
*dest_end = dest + dest_len; GetByteContext ctx; bytestream2_init(&ctx, src, src_len); while (dest < dest_end && bytestream2_get_bytes_left(&ctx)) { opcode = bytestream2_get_byte(&ctx); if (opcode < 0xe0) { int size2, back; if ((opcode & 0x80) == 0) { size = opcode & 3; back = ((opcode & 0x60) << 3) + bytestream2_get_byte(&ctx) + 1; size2 = ((opcode & 0x1c) >> 2) + 3; } else if ((opcode & 0x40) == 0) { size = bytestream2_peek_byte(&ctx) >> 6; back = (bytestream2_get_be16(&ctx) & 0x3fff) + 1; size2 = (opcode & 0x3f) + 4; } else { size = opcode & 3; back = ((opcode & 0x10) << 12) + bytestream2_get_be16(&ctx) + 1; size2 = ((opcode & 0x0c) << 6) + bytestream2_get_byte(&ctx) + 5; } if (dest_end - dest < size + size2 || dest + size - dest_org < back || bytestream2_get_bytes_left(&ctx) < size) return; bytestream2_get_buffer(&ctx, dest, size); dest += size; av_memcpy_backptr(dest, back, size2); dest += size2; } else { int finish = opcode >= 0xfc; size = finish ? opcode & 3 : ((opcode & 0x1f) << 2) + 4; if (dest_end - dest < size || bytestream2_get_bytes_left(&ctx) < size) return; bytestream2_get_buffer(&ctx, dest, size); dest += size; if (finish) return; } } } static inline void xan_wc3_output_pixel_run(XanContext *s, AVFrame *frame, const uint8_t *pixel_buffer, int x, int y, int pixel_count) { int stride; int line_inc; int index; int current_x; int width = s->avctx->width; uint8_t *palette_plane; palette_plane = frame->data[0]; stride = frame->linesize[0]; line_inc = stride - width; index = y * stride + x; current_x = x; while (pixel_count && index < s->frame_size) { int count = FFMIN(pixel_count, width - current_x); memcpy(palette_plane + index, pixel_buffer, count); pixel_count -= count; index += count; pixel_buffer += count; current_x += count; if (current_x >= width) { index += line_inc; current_x = 0; } } } static inline void xan_wc3_copy_pixel_run(XanContext *s, AVFrame *frame, int x, int y, int pixel_count, int motion_x, int motion_y) { int stride; int line_inc; int curframe_index, prevframe_index; int curframe_x, prevframe_x; int width = s->avctx->width; uint8_t *palette_plane, *prev_palette_plane; if (y + motion_y < 0 || y + motion_y >= s->avctx->height || x + motion_x < 0 || x + motion_x >= s->avctx->width) return; palette_plane = frame->data[0]; prev_palette_plane = s->last_frame->data[0]; if (!prev_palette_plane) prev_palette_plane = palette_plane; stride = frame->linesize[0]; line_inc = stride - width; curframe_index = y * stride + x; curframe_x = x; prevframe_index = (y + motion_y) * stride + x + motion_x; prevframe_x = x + motion_x; if (prev_palette_plane == palette_plane && FFABS(motion_x + width*motion_y) < pixel_count) { avpriv_request_sample(s->avctx, "Overlapping copy"); return ; } while (pixel_count && curframe_index < s->frame_size && prevframe_index < s->frame_size) { int count = FFMIN3(pixel_count, width - curframe_x, width - prevframe_x); memcpy(palette_plane + curframe_index, prev_palette_plane + prevframe_index, count); pixel_count -= count; curframe_index += count; prevframe_index += count; curframe_x += count; prevframe_x += count; if (curframe_x >= width) { curframe_index += line_inc; curframe_x = 0; } if (prevframe_x >= width) { prevframe_index += line_inc; prevframe_x = 0; } } } static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame) { int width = s->avctx->width; int height = s->avctx->height; int total_pixels = width * height; uint8_t opcode; uint8_t flag = 0; int size = 0; int motion_x, motion_y; int x, y, ret; uint8_t *opcode_buffer = s->buffer1; uint8_t 
*opcode_buffer_end = s->buffer1 + s->buffer1_size; int opcode_buffer_size = s->buffer1_size; const uint8_t *imagedata_buffer = s->buffer2; /* pointers to segments inside the compressed chunk */ const uint8_t *huffman_segment; GetByteContext size_segment; GetByteContext vector_segment; const uint8_t *imagedata_segment; int huffman_offset, size_offset, vector_offset, imagedata_offset, imagedata_size; if (s->size < 8) return AVERROR_INVALIDDATA; huffman_offset = AV_RL16(&s->buf[0]); size_offset = AV_RL16(&s->buf[2]); vector_offset = AV_RL16(&s->buf[4]); imagedata_offset = AV_RL16(&s->buf[6]); if (huffman_offset >= s->size || size_offset >= s->size || vector_offset >= s->size || imagedata_offset >= s->size) return AVERROR_INVALIDDATA; huffman_segment = s->buf + huffman_offset; bytestream2_init(&size_segment, s->buf + size_offset, s->size - size_offset); bytestream2_init(&vector_segment, s->buf + vector_offset, s->size - vector_offset); imagedata_segment = s->buf + imagedata_offset; if ((ret = xan_huffman_decode(opcode_buffer, opcode_buffer_size, huffman_segment, s->size - huffman_offset)) < 0) return AVERROR_INVALIDDATA; opcode_buffer_end = opcode_buffer + ret; if (imagedata_segment[0] == 2) { xan_unpack(s->buffer2, s->buffer2_size, &imagedata_segment[1], s->size - imagedata_offset - 1); imagedata_size = s->buffer2_size; } else { imagedata_size = s->size - imagedata_offset - 1; imagedata_buffer = &imagedata_segment[1]; } /* use the decoded data segments to build the frame */ x = y = 0; while (total_pixels && opcode_buffer < opcode_buffer_end) { opcode = *opcode_buffer++; size = 0; switch (opcode) { case 0: flag ^= 1; continue; case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: size = opcode; break; case 12: case 13: case 14: case 15: case 16: case 17: case 18: size += (opcode - 10); break; case 9: case 19: if (bytestream2_get_bytes_left(&size_segment) < 1) { av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n"); return AVERROR_INVALIDDATA; } size = bytestream2_get_byte(&size_segment); break; case 10: case 20: if (bytestream2_get_bytes_left(&size_segment) < 2) { av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n"); return AVERROR_INVALIDDATA; } size = bytestream2_get_be16(&size_segment); break; case 11: case 21: if (bytestream2_get_bytes_left(&size_segment) < 3) { av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n"); return AVERROR_INVALIDDATA; } size = bytestream2_get_be24(&size_segment); break; } if (size > total_pixels) break; if (opcode < 12) { flag ^= 1; if (flag) { /* run of (size) pixels is unchanged from last frame */ xan_wc3_copy_pixel_run(s, frame, x, y, size, 0, 0); } else { /* output a run of pixels from imagedata_buffer */ if (imagedata_size < size) break; xan_wc3_output_pixel_run(s, frame, imagedata_buffer, x, y, size); imagedata_buffer += size; imagedata_size -= size; } } else { uint8_t vector; if (bytestream2_get_bytes_left(&vector_segment) <= 0) { av_log(s->avctx, AV_LOG_ERROR, "vector_segment overread\n"); return AVERROR_INVALIDDATA; } /* run-based motion compensation from last frame */ vector = bytestream2_get_byte(&vector_segment); motion_x = sign_extend(vector >> 4, 4); motion_y = sign_extend(vector & 0xF, 4); /* copy a run of pixels from the previous frame */ xan_wc3_copy_pixel_run(s, frame, x, y, size, motion_x, motion_y); flag = 0; } /* coordinate accounting */ total_pixels -= size; y += (x + size) / width; x = (x + size) % width; } return 0; } #if RUNTIME_GAMMA static inline unsigned mul(unsigned a, unsigned b) { return (a * b) >> 16; } static 
inline unsigned pow4(unsigned a) { unsigned square = mul(a, a); return mul(square, square); } static inline unsigned pow5(unsigned a) { return mul(pow4(a), a); } static uint8_t gamma_corr(uint8_t in) { unsigned lo, hi = 0xff40, target; int i = 15; in = (in << 2) | (in >> 6); /* equivalent float code: if (in >= 252) return 253; return round(pow(in / 256.0, 0.8) * 256); */ lo = target = in << 8; do { unsigned mid = (lo + hi) >> 1; unsigned pow = pow5(mid); if (pow > target) hi = mid; else lo = mid; } while (--i); return (pow4((lo + hi) >> 1) + 0x80) >> 8; } #else /** * This is a gamma correction that xan3 applies to all palette entries. * * There is a peculiarity, namely that the values are clamped to 253 - * it seems likely that this table was calculated by a buggy fixed-point * implementation, the one above under RUNTIME_GAMMA behaves like this for * example. * The exponent value of 0.8 can be explained by this as well, since 0.8 = 4/5 * and thus pow(x, 0.8) is still easy to calculate. * Also, the input values are first rotated to the left by 2. */ static const uint8_t gamma_lookup[256] = { 0x00, 0x09, 0x10, 0x16, 0x1C, 0x21, 0x27, 0x2C, 0x31, 0x35, 0x3A, 0x3F, 0x43, 0x48, 0x4C, 0x50, 0x54, 0x59, 0x5D, 0x61, 0x65, 0x69, 0x6D, 0x71, 0x75, 0x79, 0x7D, 0x80, 0x84, 0x88, 0x8C, 0x8F, 0x93, 0x97, 0x9A, 0x9E, 0xA2, 0xA5, 0xA9, 0xAC, 0xB0, 0xB3, 0xB7, 0xBA, 0xBE, 0xC1, 0xC5, 0xC8, 0xCB, 0xCF, 0xD2, 0xD5, 0xD9, 0xDC, 0xDF, 0xE3, 0xE6, 0xE9, 0xED, 0xF0, 0xF3, 0xF6, 0xFA, 0xFD, 0x03, 0x0B, 0x12, 0x18, 0x1D, 0x23, 0x28, 0x2D, 0x32, 0x36, 0x3B, 0x40, 0x44, 0x49, 0x4D, 0x51, 0x56, 0x5A, 0x5E, 0x62, 0x66, 0x6A, 0x6E, 0x72, 0x76, 0x7A, 0x7D, 0x81, 0x85, 0x89, 0x8D, 0x90, 0x94, 0x98, 0x9B, 0x9F, 0xA2, 0xA6, 0xAA, 0xAD, 0xB1, 0xB4, 0xB8, 0xBB, 0xBF, 0xC2, 0xC5, 0xC9, 0xCC, 0xD0, 0xD3, 0xD6, 0xDA, 0xDD, 0xE0, 0xE4, 0xE7, 0xEA, 0xED, 0xF1, 0xF4, 0xF7, 0xFA, 0xFD, 0x05, 0x0D, 0x13, 0x19, 0x1F, 0x24, 0x29, 0x2E, 0x33, 0x38, 0x3C, 0x41, 0x45, 0x4A, 0x4E, 0x52, 0x57, 0x5B, 0x5F, 0x63, 0x67, 0x6B, 0x6F, 0x73, 0x77, 0x7B, 0x7E, 0x82, 0x86, 0x8A, 0x8D, 0x91, 0x95, 0x99, 0x9C, 0xA0, 0xA3, 0xA7, 0xAA, 0xAE, 0xB2, 0xB5, 0xB9, 0xBC, 0xBF, 0xC3, 0xC6, 0xCA, 0xCD, 0xD0, 0xD4, 0xD7, 0xDA, 0xDE, 0xE1, 0xE4, 0xE8, 0xEB, 0xEE, 0xF1, 0xF5, 0xF8, 0xFB, 0xFD, 0x07, 0x0E, 0x15, 0x1A, 0x20, 0x25, 0x2A, 0x2F, 0x34, 0x39, 0x3D, 0x42, 0x46, 0x4B, 0x4F, 0x53, 0x58, 0x5C, 0x60, 0x64, 0x68, 0x6C, 0x70, 0x74, 0x78, 0x7C, 0x7F, 0x83, 0x87, 0x8B, 0x8E, 0x92, 0x96, 0x99, 0x9D, 0xA1, 0xA4, 0xA8, 0xAB, 0xAF, 0xB2, 0xB6, 0xB9, 0xBD, 0xC0, 0xC4, 0xC7, 0xCB, 0xCE, 0xD1, 0xD5, 0xD8, 0xDB, 0xDF, 0xE2, 0xE5, 0xE9, 0xEC, 0xEF, 0xF2, 0xF6, 0xF9, 0xFC, 0xFD }; #endif static int xan_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *frame = data; const uint8_t *buf = avpkt->data; int ret, buf_size = avpkt->size; XanContext *s = avctx->priv_data; GetByteContext ctx; int tag = 0; bytestream2_init(&ctx, buf, buf_size); while (bytestream2_get_bytes_left(&ctx) > 8 && tag != VGA__TAG) { unsigned *tmpptr; uint32_t new_pal; int size; int i; tag = bytestream2_get_le32(&ctx); size = bytestream2_get_be32(&ctx); if (size < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid tag size %d\n", size); return AVERROR_INVALIDDATA; } size = FFMIN(size, bytestream2_get_bytes_left(&ctx)); switch (tag) { case PALT_TAG: if (size < PALETTE_SIZE) return AVERROR_INVALIDDATA; if (s->palettes_count >= PALETTES_MAX) return AVERROR_INVALIDDATA; tmpptr = av_realloc_array(s->palettes, s->palettes_count + 1, AVPALETTE_SIZE); if (!tmpptr) return 
AVERROR(ENOMEM); s->palettes = tmpptr; tmpptr += s->palettes_count * AVPALETTE_COUNT; for (i = 0; i < PALETTE_COUNT; i++) { #if RUNTIME_GAMMA int r = gamma_corr(bytestream2_get_byteu(&ctx)); int g = gamma_corr(bytestream2_get_byteu(&ctx)); int b = gamma_corr(bytestream2_get_byteu(&ctx)); #else int r = gamma_lookup[bytestream2_get_byteu(&ctx)]; int g = gamma_lookup[bytestream2_get_byteu(&ctx)]; int b = gamma_lookup[bytestream2_get_byteu(&ctx)]; #endif *tmpptr++ = (0xFFU << 24) | (r << 16) | (g << 8) | b; } s->palettes_count++; break; case SHOT_TAG: if (size < 4) return AVERROR_INVALIDDATA; new_pal = bytestream2_get_le32(&ctx); if (new_pal < s->palettes_count) { s->cur_palette = new_pal; } else av_log(avctx, AV_LOG_ERROR, "Invalid palette selected\n"); break; case VGA__TAG: break; default: bytestream2_skip(&ctx, size); break; } } buf_size = bytestream2_get_bytes_left(&ctx); if (s->palettes_count <= 0) { av_log(s->avctx, AV_LOG_ERROR, "No palette found\n"); return AVERROR_INVALIDDATA; } if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; if (!s->frame_size) s->frame_size = frame->linesize[0] * s->avctx->height; memcpy(frame->data[1], s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE); s->buf = ctx.buffer; s->size = buf_size; if (xan_wc3_decode_frame(s, frame) < 0) return AVERROR_INVALIDDATA; av_frame_unref(s->last_frame); if ((ret = av_frame_ref(s->last_frame, frame)) < 0) return ret; *got_frame = 1; /* always report that the buffer was completely consumed */ return buf_size; } AVCodec ff_xan_wc3_decoder = { .name = "xan_wc3", .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_XAN_WC3, .priv_data_size = sizeof(XanContext), .init = xan_decode_init, .close = xan_decode_end, .decode = xan_decode_frame, .capabilities = AV_CODEC_CAP_DR1, };
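// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the gamma_lookup comment
// above gives the table's derivation: rotate the input left by two bits,
// apply pow(x / 256.0, 0.8) * 256 (0.8 = 4/5, which is cheap in fixed
// point), and clamp to 253. This standalone check re-derives entries from
// that formula; occasional off-by-one deviations are expected, since the
// comment says the table came from a buggy fixed-point implementation.
#include <algorithm>
#include <cmath>
#include <cstdio>

static unsigned expected_gamma(unsigned in) {
    unsigned rotated = ((in << 2) | (in >> 6)) & 0xFF; /* rotate left by 2 */
    unsigned v =
        (unsigned)std::lround(std::pow(rotated / 256.0, 0.8) * 256.0);
    return std::min(v, 253u); /* the table never exceeds 253 */
}

int main() {
    /* Spot-check a few inputs against gamma_lookup[]: index 1 is 0x09 (9),
       index 64 is 0x03 (3), index 128 is 0x05 (5), index 255 is 0xFD (253). */
    for (unsigned in : {1u, 64u, 128u, 255u})
        std::printf("in=%3u -> expected %3u\n", in, expected_gamma(in));
    return 0;
}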
null
null
null
null
71,003
7,118
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
172,113
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright (C) 2009-2010 PetaLogix * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation * * Provide default implementations of the DMA mapping callbacks for * directly mapped busses. */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <linux/dma-debug.h> #include <linux/export.h> #include <linux/bug.h> #define NOT_COHERENT_CACHE static void *dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { #ifdef NOT_COHERENT_CACHE return consistent_alloc(flag, size, dma_handle); #else void *ret; struct page *page; int node = dev_to_node(dev); /* ignore region specifiers */ flag &= ~(__GFP_HIGHMEM); page = alloc_pages_node(node, flag, get_order(size)); if (page == NULL) return NULL; ret = page_address(page); memset(ret, 0, size); *dma_handle = virt_to_phys(ret); return ret; #endif } static void dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { #ifdef NOT_COHERENT_CACHE consistent_free(size, vaddr); #else free_pages((unsigned long)vaddr, get_order(size)); #endif } static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, unsigned long attrs) { struct scatterlist *sg; int i; /* FIXME this part of code is untested */ for_each_sg(sgl, sg, nents, i) { sg->dma_address = sg_phys(sg); if (attrs & DMA_ATTR_SKIP_CPU_SYNC) continue; __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, sg->length, direction); } return nents; } static int dma_direct_dma_supported(struct device *dev, u64 mask) { return 1; } static inline dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, unsigned long attrs) { if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) __dma_sync(page_to_phys(page) + offset, size, direction); return page_to_phys(page) + offset; } static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, unsigned long attrs) { /* No cache cleanup is necessary here beyond the sync below: * __dma_sync() takes a physical address, and dma_address is * already a physical address */ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) __dma_sync(dma_address, size, direction); } static inline void dma_direct_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { /* * It's pointless to flush the cache as the memory segment * is given to the CPU */ if (direction == DMA_FROM_DEVICE) __dma_sync(dma_handle, size, direction); } static inline void dma_direct_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { /* * It's pointless to invalidate the cache if the device isn't * supposed to write to the relevant region */ if (direction == DMA_TO_DEVICE) __dma_sync(dma_handle, size, direction); } static inline void dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { struct scatterlist *sg; int i; /* FIXME this part of code is untested */ if (direction == DMA_FROM_DEVICE) for_each_sg(sgl, sg, nents, i) __dma_sync(sg->dma_address, sg->length, direction); } static inline void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction) { struct scatterlist *sg; int i; /* FIXME this part of code is untested */ if (direction
== DMA_TO_DEVICE) for_each_sg(sgl, sg, nents, i) __dma_sync(sg->dma_address, sg->length, direction); } static int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t handle, size_t size, unsigned long attrs) { #ifdef CONFIG_MMU unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; unsigned long pfn; if (off >= count || user_count > (count - off)) return -ENXIO; #ifdef NOT_COHERENT_CACHE vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pfn = consistent_virt_to_pfn(cpu_addr); #else pfn = virt_to_pfn(cpu_addr); #endif return remap_pfn_range(vma, vma->vm_start, pfn + off, vma->vm_end - vma->vm_start, vma->vm_page_prot); #else return -ENXIO; #endif } const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, .map_sg = dma_direct_map_sg, .dma_supported = dma_direct_dma_supported, .map_page = dma_direct_map_page, .unmap_page = dma_direct_unmap_page, .sync_single_for_cpu = dma_direct_sync_single_for_cpu, .sync_single_for_device = dma_direct_sync_single_for_device, .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, .sync_sg_for_device = dma_direct_sync_sg_for_device, }; EXPORT_SYMBOL(dma_direct_ops); /* Number of entries preallocated for DMA-API debugging */ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) static int __init dma_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_init);
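// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): dma_direct_mmap_coherent()
// above rejects a mapping when "off >= count || user_count > (count - off)",
// i.e. the user's window of pages must lie entirely inside the coherent
// buffer. This userspace model reproduces that arithmetic; PAGE_SHIFT = 12
// (4 KiB pages) is assumed here only for the worked numbers.
#include <cassert>
#include <cstddef>

constexpr std::size_t kPageShift = 12; /* assumption: 4 KiB pages */

bool mmap_window_ok(std::size_t buffer_bytes, std::size_t vma_bytes,
                    std::size_t off_pages) {
    /* count      = PAGE_ALIGN(size) >> PAGE_SHIFT
       user_count = (vm_end - vm_start) >> PAGE_SHIFT */
    std::size_t count = (buffer_bytes + (std::size_t{1} << kPageShift) - 1)
                        >> kPageShift;
    std::size_t user_count = vma_bytes >> kPageShift;
    /* Same test the driver performs; failing it returns -ENXIO. */
    return off_pages < count && user_count <= count - off_pages;
}

int main() {
    std::size_t page = std::size_t{1} << kPageShift;
    assert(mmap_window_ok(4 * page, 2 * page, 2));  /* pages 2-3 of 0-3: ok */
    assert(!mmap_window_ok(4 * page, 2 * page, 3)); /* pages 3-4: off the end */
    assert(!mmap_window_ok(4 * page, 1 * page, 4)); /* offset past the end */
    return 0;
}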
null
null
null
null
80,460
5,607
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
170,602
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/sound/soc/pxa/pxa2xx-i2s.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _PXA2XX_I2S_H #define _PXA2XX_I2S_H /* I2S clock */ #define PXA2XX_I2S_SYSCLK 0 #endif
null
null
null
null
78,949
55,889
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
55,889
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_RENDERER_CONTEXT_MENU_SPELLING_MENU_OBSERVER_H_ #define CHROME_BROWSER_RENDERER_CONTEXT_MENU_SPELLING_MENU_OBSERVER_H_ #include <stddef.h> #include <stdint.h> #include <memory> #include <vector> #include "base/compiler_specific.h" #include "base/macros.h" #include "base/strings/string16.h" #include "base/timer/timer.h" #include "components/prefs/pref_member.h" #include "components/renderer_context_menu/render_view_context_menu_observer.h" #include "components/spellcheck/browser/spelling_service_client.h" class RenderViewContextMenuProxy; struct SpellCheckResult; // An observer that listens to events from the RenderViewContextMenu class and // shows suggestions from the Spelling ("do you mean") service in a context menu // while we show it. This class implements two interfaces: // * RenderViewContextMenuObserver // This interface is used for adding a menu item and updating it while the // menu is showing. // * net::URLFetcherDelegate // This interface is used for sending a JSON-RPC request to the Spelling // service and retrieving its response. // These interfaces allow this class to make a JSON-RPC call to the Spelling // service in the background and update the context menu while it is showing. The // following snippet describes how to add this class to the observer list of the // RenderViewContextMenu class. // // void RenderViewContextMenu::InitMenu() { // spelling_menu_observer_.reset(new SpellingMenuObserver(this)); // if (spelling_menu_observer_.get()) // observers_.AddObserver(spelling_menu_observer.get()); // } // class SpellingMenuObserver : public RenderViewContextMenuObserver { public: explicit SpellingMenuObserver(RenderViewContextMenuProxy* proxy); ~SpellingMenuObserver() override; // RenderViewContextMenuObserver implementation. void InitMenu(const content::ContextMenuParams& params) override; bool IsCommandIdSupported(int command_id) override; bool IsCommandIdChecked(int command_id) override; bool IsCommandIdEnabled(int command_id) override; void ExecuteCommand(int command_id) override; // A callback function called when the Spelling service finishes checking a // misspelled word. void OnTextCheckComplete( SpellingServiceClient::ServiceType type, bool success, const base::string16& text, const std::vector<SpellCheckResult>& results); private: // The callback function for base::RepeatingTimer. This function updates the // "loading..." animation in the context-menu item. void OnAnimationTimerExpired(); // The interface to add a context-menu item and update it. This class uses // this interface to avoid accessing context-menu items directly. RenderViewContextMenuProxy* proxy_; // Suggested words from the local spellchecker. If the spelling service // returns a word in this list, we hide the context-menu item to prevent // showing the same word twice. std::vector<base::string16> suggestions_; // The string used for animation until we receive a response from the Spelling // service. The current animation just adds periods at the end of this string: // 'Loading' -> 'Loading.' -> 'Loading..' -> 'Loading...' (-> 'Loading') base::string16 loading_message_; size_t loading_frame_; // A flag representing whether a JSON-RPC call to the Spelling service // finished successfully and its response had a suggestion not included in the // ones provided by the local spellchecker.
When this flag is true, we enable // the context-menu item so users can choose it. bool succeeded_; // The misspelled word. When we choose the "Add to dictionary" item, we add // this word to the custom-word dictionary. base::string16 misspelled_word_; // The string representing the result of this call. This string is a // suggestion when this call finishes successfully. Otherwise it is error // text. Until we receive a response from the Spelling service, this string // stores the input string. (Since the Spelling service sends only misspelled // words, we replace these misspelled words in the input text with the // suggested words to create suggestion text.) base::string16 result_; // The URLFetcher object used for sending a JSON-RPC request. std::unique_ptr<SpellingServiceClient> client_; // A timer used for the loading animation. base::RepeatingTimer animation_timer_; // Flag indicating whether the online spelling correction service is enabled. When // this variable is true and we right-click a misspelled word, we send a // JSON-RPC request to the service and retrieve suggestions. BooleanPrefMember integrate_spelling_service_; // Flag indicating whether automatic spelling correction is enabled. BooleanPrefMember autocorrect_spelling_; DISALLOW_COPY_AND_ASSIGN(SpellingMenuObserver); }; #endif // CHROME_BROWSER_RENDERER_CONTEXT_MENU_SPELLING_MENU_OBSERVER_H_
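// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the header above only
// describes the loading animation ('Loading' -> 'Loading.' -> 'Loading..' ->
// 'Loading...'); OnAnimationTimerExpired() itself lives in the .cc file.
// This is one plausible frame function matching that description - the
// modulo-4 cycle is inferred from the comment, not taken from the actual
// Chromium implementation.
#include <cstddef>
#include <cstdio>
#include <string>

std::string AnimationFrame(const std::string& loading_message,
                           std::size_t loading_frame) {
  // loading_frame is assumed to advance by one per animation_timer_ tick.
  return loading_message + std::string(loading_frame % 4, '.');
}

int main() {
  for (std::size_t frame = 0; frame < 6; ++frame)
    std::printf("%s\n", AnimationFrame("Loading", frame).c_str());
  return 0;
}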
null
null
null
null
52,752
4,474
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
169,469
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ #include <linux/time.h> #include "reiserfs.h" #include "acl.h" #include "xattr.h" #include <linux/uaccess.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/quotaops.h> /* * We pack the tails of files on file close, not at the time they are written. * This implies an unnecessary copy of the tail and an unnecessary indirect item * insertion/balancing, for files that are written in one write. * It avoids unnecessary tail packings (balances) for files that are written in * multiple writes and are small enough to have tails. * * file_release is called by the VFS layer when the file is closed. If * this is the last open file descriptor, and the file is * small enough to have a tail, and the tail is currently in an * unformatted node, the tail is converted back into a direct item. * * We use reiserfs_truncate_file to pack the tail, since it already has * all the conditions coded. */ static int reiserfs_file_release(struct inode *inode, struct file *filp) { struct reiserfs_transaction_handle th; int err; int jbegin_failure = 0; BUG_ON(!S_ISREG(inode->i_mode)); if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1)) return 0; mutex_lock(&REISERFS_I(inode)->tailpack); if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) { mutex_unlock(&REISERFS_I(inode)->tailpack); return 0; } /* fast out for when nothing needs to be done */ if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) || !tail_has_to_be_packed(inode)) && REISERFS_I(inode)->i_prealloc_count <= 0) { mutex_unlock(&REISERFS_I(inode)->tailpack); return 0; } reiserfs_write_lock(inode->i_sb); /* * freeing preallocation only involves relogging blocks that * are already in the current transaction. preallocation gets * freed at the end of each transaction, so it is impossible for * us to log any additional blocks (including quota blocks) */ err = journal_begin(&th, inode->i_sb, 1); if (err) { /* * uh oh, we can't allow the inode to go away while there * are still preallocation blocks pending. Try to join the * aborted transaction */ jbegin_failure = err; err = journal_join_abort(&th, inode->i_sb); if (err) { /* * hmpf, our choices here aren't good. We can pin * the inode which will disallow unmount from ever * happening, we can do nothing, which will corrupt * random memory on unmount, or we can forcibly * remove the file from the preallocation list, which * will leak blocks on disk. Let's pin the inode * and let the admin know what is going on.
*/ igrab(inode); reiserfs_warning(inode->i_sb, "clm-9001", "pinning inode %lu because the " "preallocation can't be freed", inode->i_ino); goto out; } } reiserfs_update_inode_transaction(inode); #ifdef REISERFS_PREALLOCATE reiserfs_discard_prealloc(&th, inode); #endif err = journal_end(&th); /* copy back the error code from journal_begin */ if (!err) err = jbegin_failure; if (!err && (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) && tail_has_to_be_packed(inode)) { /* * if regular file is released by last holder and it has been * appended (we append by unformatted node only) or its direct * item(s) had to be converted, then it may have to be * indirect2direct converted */ err = reiserfs_truncate_file(inode, 0); } out: reiserfs_write_unlock(inode->i_sb); mutex_unlock(&REISERFS_I(inode)->tailpack); return err; } static int reiserfs_file_open(struct inode *inode, struct file *file) { int err = dquot_file_open(inode, file); /* somebody might be tailpacking on final close; wait for it */ if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) { mutex_lock(&REISERFS_I(inode)->tailpack); atomic_inc(&REISERFS_I(inode)->openers); mutex_unlock(&REISERFS_I(inode)->tailpack); } return err; } void reiserfs_vfs_truncate_file(struct inode *inode) { mutex_lock(&REISERFS_I(inode)->tailpack); reiserfs_truncate_file(inode, 1); mutex_unlock(&REISERFS_I(inode)->tailpack); } /* Sync a reiserfs file. */ /* * FIXME: sync_mapping_buffers() never has anything to sync. Can * be removed... */ static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; int err; int barrier_done; err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (err) return err; inode_lock(inode); BUG_ON(!S_ISREG(inode->i_mode)); err = sync_mapping_buffers(inode->i_mapping); reiserfs_write_lock(inode->i_sb); barrier_done = reiserfs_commit_for_inode(inode); reiserfs_write_unlock(inode->i_sb); if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb)) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); inode_unlock(inode); if (barrier_done < 0) return barrier_done; return (err < 0) ? -EIO : 0; } /* taken fs/buffer.c:__block_commit_write */ int reiserfs_commit_page(struct inode *inode, struct page *page, unsigned from, unsigned to) { unsigned block_start, block_end; int partial = 0; unsigned blocksize; struct buffer_head *bh, *head; unsigned long i_size_index = inode->i_size >> PAGE_SHIFT; int new; int logit = reiserfs_file_data_log(inode); struct super_block *s = inode->i_sb; int bh_per_page = PAGE_SIZE / s->s_blocksize; struct reiserfs_transaction_handle th; int ret = 0; th.t_trans_id = 0; blocksize = i_blocksize(inode); if (logit) { reiserfs_write_lock(s); ret = journal_begin(&th, s, bh_per_page + 1); if (ret) goto drop_write_lock; reiserfs_update_inode_transaction(inode); } for (bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; block_start = block_end, bh = bh->b_this_page) { new = buffer_new(bh); clear_buffer_new(bh); block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (!buffer_uptodate(bh)) partial = 1; } else { set_buffer_uptodate(bh); if (logit) { reiserfs_prepare_for_journal(s, bh, 1); journal_mark_dirty(&th, bh); } else if (!buffer_dirty(bh)) { mark_buffer_dirty(bh); /* * do data=ordered on any page past the end * of file and any buffer marked BH_New. 
*/ if (reiserfs_data_ordered(inode->i_sb) && (new || page->index >= i_size_index)) { reiserfs_add_ordered_list(inode, bh); } } } } if (logit) { ret = journal_end(&th); drop_write_lock: reiserfs_write_unlock(s); } /* * If this is a partial write which happened to make all buffers * uptodate then we can optimize away a bogus readpage() for * the next read(). Here we 'discover' whether the page went * uptodate as a result of this (potentially partial) write. */ if (!partial) SetPageUptodate(page); return ret; } const struct file_operations reiserfs_file_operations = { .unlocked_ioctl = reiserfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = reiserfs_compat_ioctl, #endif .mmap = generic_file_mmap, .open = reiserfs_file_open, .release = reiserfs_file_release, .fsync = reiserfs_sync_file, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .llseek = generic_file_llseek, }; const struct inode_operations reiserfs_file_inode_operations = { .setattr = reiserfs_setattr, .listxattr = reiserfs_listxattr, .permission = reiserfs_permission, .get_acl = reiserfs_get_acl, .set_acl = reiserfs_set_acl, };
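// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): reiserfs_file_open() and
// reiserfs_file_release() above implement a "last closer does the work"
// pattern - release first tries atomic_add_unless(&openers, -1, 1) so that
// non-final closes never take the tailpack mutex, and open uses
// atomic_inc_not_zero() so a racing open waits out an in-progress final
// close. This is a userspace model of that pattern; std::atomic and
// std::mutex stand in for the kernel's atomic_t and mutex, and pack_tail()
// is a placeholder for the close-time work (reiserfs_truncate_file() etc.).
#include <atomic>
#include <mutex>

std::atomic<int> openers{0};
std::mutex tailpack;

void pack_tail() { /* placeholder for the real close-time work */ }

void file_open() {
    // Model of atomic_inc_not_zero(): increment unless the count is 0.
    int cur = openers.load();
    while (cur != 0 && !openers.compare_exchange_weak(cur, cur + 1)) {
    }
    if (cur == 0) { // a final close is in flight; wait for it to finish
        std::lock_guard<std::mutex> lock(tailpack);
        openers.fetch_add(1);
    }
}

void file_release() {
    // Model of atomic_add_unless(&openers, -1, 1): decrement unless the
    // count is exactly 1, so only a potential last closer takes the mutex.
    int cur = openers.load();
    while (cur != 1 && !openers.compare_exchange_weak(cur, cur - 1)) {
    }
    if (cur != 1)
        return; // not the last close; fast path, no lock taken
    std::lock_guard<std::mutex> lock(tailpack);
    if (openers.fetch_sub(1) != 1) // atomic_dec_and_test()
        return; // someone re-opened while we waited for the mutex
    pack_tail(); // only the true last closer packs the tail
}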
null
null
null
null
77,816
9,004
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
173,999
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * PTP 1588 clock using the IXP46X * * Copyright (C) 2010 OMICRON electronics GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _IXP46X_TS_H_ #define _IXP46X_TS_H_ #define DEFAULT_ADDEND 0xF0000029 #define TICKS_NS_SHIFT 4 struct ixp46x_channel_ctl { u32 ch_control; /* 0x40 Time Synchronization Channel Control */ u32 ch_event; /* 0x44 Time Synchronization Channel Event */ u32 tx_snap_lo; /* 0x48 Transmit Snapshot Low Register */ u32 tx_snap_hi; /* 0x4C Transmit Snapshot High Register */ u32 rx_snap_lo; /* 0x50 Receive Snapshot Low Register */ u32 rx_snap_hi; /* 0x54 Receive Snapshot High Register */ u32 src_uuid_lo; /* 0x58 Source UUID0 Low Register */ u32 src_uuid_hi; /* 0x5C Sequence Identifier/Source UUID0 High */ }; struct ixp46x_ts_regs { u32 control; /* 0x00 Time Sync Control Register */ u32 event; /* 0x04 Time Sync Event Register */ u32 addend; /* 0x08 Time Sync Addend Register */ u32 accum; /* 0x0C Time Sync Accumulator Register */ u32 test; /* 0x10 Time Sync Test Register */ u32 unused; /* 0x14 */ u32 rsystime_lo; /* 0x18 RawSystemTime_Low Register */ u32 rsystime_hi; /* 0x1C RawSystemTime_High Register */ u32 systime_lo; /* 0x20 SystemTime_Low Register */ u32 systime_hi; /* 0x24 SystemTime_High Register */ u32 trgt_lo; /* 0x28 TargetTime_Low Register */ u32 trgt_hi; /* 0x2C TargetTime_High Register */ u32 asms_lo; /* 0x30 Auxiliary Slave Mode Snapshot Low */ u32 asms_hi; /* 0x34 Auxiliary Slave Mode Snapshot High */ u32 amms_lo; /* 0x38 Auxiliary Master Mode Snapshot Low */ u32 amms_hi; /* 0x3C Auxiliary Master Mode Snapshot High */ struct ixp46x_channel_ctl channel[3]; }; /* 0x00 Time Sync Control Register Bits */ #define TSCR_AMM (1<<3) #define TSCR_ASM (1<<2) #define TSCR_TTM (1<<1) #define TSCR_RST (1<<0) /* 0x04 Time Sync Event Register Bits */ #define TSER_SNM (1<<3) #define TSER_SNS (1<<2) #define TTIPEND (1<<1) /* 0x40 Time Synchronization Channel Control Register Bits */ #define MASTER_MODE (1<<0) #define TIMESTAMP_ALL (1<<1) /* 0x44 Time Synchronization Channel Event Register Bits */ #define TX_SNAPSHOT_LOCKED (1<<0) #define RX_SNAPSHOT_LOCKED (1<<1) /* The ptp_ixp46x module will set this variable */ extern int ixp46x_phc_index; #endif
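// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the comments in the
// header above pin every register to a fixed offset (control at 0x00 up to
// the per-channel block at 0x40), which only holds if the compiler lays the
// u32 fields out contiguously. The compile-time checks below confirm the
// commented offsets; uint32_t stands in for the kernel's u32, and the
// structs are redeclared so the check is self-contained.
#include <cstddef>
#include <cstdint>

struct ixp46x_channel_ctl {
    std::uint32_t ch_control, ch_event;
    std::uint32_t tx_snap_lo, tx_snap_hi;
    std::uint32_t rx_snap_lo, rx_snap_hi;
    std::uint32_t src_uuid_lo, src_uuid_hi;
};

struct ixp46x_ts_regs {
    std::uint32_t control, event, addend, accum, test, unused;
    std::uint32_t rsystime_lo, rsystime_hi;
    std::uint32_t systime_lo, systime_hi;
    std::uint32_t trgt_lo, trgt_hi;
    std::uint32_t asms_lo, asms_hi;
    std::uint32_t amms_lo, amms_hi;
    ixp46x_channel_ctl channel[3];
};

// Offsets taken from the register-map comments in the header above.
static_assert(offsetof(ixp46x_ts_regs, addend) == 0x08, "addend");
static_assert(offsetof(ixp46x_ts_regs, systime_lo) == 0x20, "systime_lo");
static_assert(offsetof(ixp46x_ts_regs, amms_hi) == 0x3C, "amms_hi");
static_assert(offsetof(ixp46x_ts_regs, channel) == 0x40, "channel[0]");
// 0x5C in the device map is 0x40 (channel base) + 0x1C within the channel.
static_assert(offsetof(ixp46x_channel_ctl, src_uuid_hi) == 0x1C,
              "src_uuid_hi");

int main() { return 0; }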
null
null
null
null
82,346
51,486
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
51,486
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_EVENTS_BLINK_DID_OVERSCROLL_PARAMS_H_ #define UI_EVENTS_BLINK_DID_OVERSCROLL_PARAMS_H_ #include "cc/input/overscroll_behavior.h" #include "ui/gfx/geometry/point_f.h" #include "ui/gfx/geometry/vector2d_f.h" namespace ui { struct DidOverscrollParams { DidOverscrollParams(); ~DidOverscrollParams(); gfx::Vector2dF accumulated_overscroll; gfx::Vector2dF latest_overscroll_delta; gfx::Vector2dF current_fling_velocity; gfx::PointF causal_event_viewport_point; cc::OverscrollBehavior overscroll_behavior; }; } // namespace ui #endif // UI_EVENTS_BLINK_DID_OVERSCROLL_PARAMS_H_
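// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the header above is pure
// data, but its field names imply the bookkeeping a producer performs per
// overscroll event. The update rule below (accumulated += latest) is an
// inference from the names, not taken from the actual Chromium code.
#include <cstdio>

struct Vector2dF {  // minimal stand-in for gfx::Vector2dF
    float x = 0, y = 0;
    void Add(float dx, float dy) { x += dx; y += dy; }
};

int main() {
    Vector2dF accumulated_overscroll;
    Vector2dF latest_overscroll_delta;
    // Two hypothetical overscroll events of 3px and 2px on the y axis.
    for (float dy : {3.0f, 2.0f}) {
        latest_overscroll_delta = {0, dy};
        accumulated_overscroll.Add(0, dy);  // inferred update rule
    }
    std::printf("accumulated y overscroll: %.1f\n", accumulated_overscroll.y);
    return 0;
}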
null
null
null
null
48,349
2,619
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
167,614
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The User Datagram Protocol (UDP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Alan Cox, <alan@lxorguk.ukuu.org.uk> * Hirokazu Takahashi, <taka@valinux.co.jp> * * Fixes: * Alan Cox : verify_area() calls * Alan Cox : stopped close while in use off icmp * messages. Not a fix but a botch that * for udp at least is 'valid'. * Alan Cox : Fixed icmp handling properly * Alan Cox : Correct error for oversized datagrams * Alan Cox : Tidied select() semantics. * Alan Cox : udp_err() fixed properly, also now * select and read wake correctly on errors * Alan Cox : udp_send verify_area moved to avoid mem leak * Alan Cox : UDP can count its memory * Alan Cox : send to an unknown connection causes * an ECONNREFUSED off the icmp, but * does NOT close. * Alan Cox : Switched to new sk_buff handlers. No more backlog! * Alan Cox : Using generic datagram code. Even smaller and the PEEK * bug no longer crashes it. * Fred Van Kempen : Net2e support for sk->broadcast. * Alan Cox : Uses skb_free_datagram * Alan Cox : Added get/set sockopt support. * Alan Cox : Broadcasting without option set returns EACCES. * Alan Cox : No wakeup calls. Instead we now use the callbacks. * Alan Cox : Use ip_tos and ip_ttl * Alan Cox : SNMP Mibs * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. * Matt Dillon : UDP length checks. * Alan Cox : Smarter af_inet used properly. * Alan Cox : Use new kernel side addressing. * Alan Cox : Incorrect return on truncated datagram receive. * Arnt Gulbrandsen : New udp_send and stuff * Alan Cox : Cache last socket * Alan Cox : Route cache * Jon Peatfield : Minor efficiency fix to sendto(). * Mike Shaver : RFC1122 checks. * Alan Cox : Nonblocking error fix. * Willy Konynenberg : Transparent proxying support. * Mike McLagan : Routing by source * David S. Miller : New socket lookup architecture. * Last socket cache retained as it * does have a high hit rate. * Olaf Kirch : Don't linearise iovec on sendmsg. * Andi Kleen : Some cleanups, cache destination entry * for connect. * Vitaly E. Lavrov : Transparent proxy revived after year coma. * Melvin Smith : Check msg_name not msg_namelen in sendto(), * return ENOTCONN for unconnected sockets (POSIX) * Janos Farkas : don't deliver multi/broadcasts to a different * bound-to-device socket * Hirokazu Takahashi : HW checksumming for outgoing UDP * datagrams. * Hirokazu Takahashi : sendfile() on UDP works now. * Arnaldo C. Melo : convert /proc/net/udp to seq_file * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support * James Chapman : Add L2TP encapsulation type. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #define pr_fmt(fmt) "UDP: " fmt #include <linux/uaccess.h> #include <asm/ioctls.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/igmp.h> #include <linux/inetdevice.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <net/tcp_states.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/inet_hashtables.h> #include <net/route.h> #include <net/checksum.h> #include <net/xfrm.h> #include <trace/events/udp.h> #include <linux/static_key.h> #include <trace/events/skb.h> #include <net/busy_poll.h> #include "udp_impl.h" #include <net/sock_reuseport.h> #include <net/addrconf.h> struct udp_table udp_table __read_mostly; EXPORT_SYMBOL(udp_table); long sysctl_udp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_udp_mem); int sysctl_udp_rmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_rmem_min); int sysctl_udp_wmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_wmem_min); atomic_long_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) /* IPCB reference means this can not be used from early demux */ static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) if (!net->ipv4.sysctl_udp_l3mdev_accept && skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) return true; #endif return false; } static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, struct sock *sk, unsigned int log) { struct sock *sk2; kuid_t uid = sock_i_uid(sk); sk_for_each(sk2, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && inet_rcv_saddr_equal(sk, sk2, true)) { if (sk2->sk_reuseport && sk->sk_reuseport && !rcu_access_pointer(sk->sk_reuseport_cb) && uid_eq(uid, sock_i_uid(sk2))) { if (!bitmap) return 0; } else { if (!bitmap) return 1; __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); } } } return 0; } /* * Note: we still hold spinlock of primary hash chain, so no other writer * can insert/delete a socket with local_port == num */ static int udp_lib_lport_inuse2(struct net *net, __u16 num, struct udp_hslot *hslot2, struct sock *sk) { struct sock *sk2; kuid_t uid = sock_i_uid(sk); int res = 0; spin_lock(&hslot2->lock); udp_portaddr_for_each_entry(sk2, &hslot2->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && inet_rcv_saddr_equal(sk, sk2, true)) { if (sk2->sk_reuseport && sk->sk_reuseport && !rcu_access_pointer(sk->sk_reuseport_cb) && uid_eq(uid, sock_i_uid(sk2))) { res = 0; } else { res = 1; } break; } } spin_unlock(&hslot2->lock); return res; } static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot) { struct net *net = sock_net(sk); kuid_t uid = sock_i_uid(sk); struct sock *sk2; sk_for_each(sk2, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && 
sk2->sk_family == sk->sk_family && ipv6_only_sock(sk2) == ipv6_only_sock(sk) && (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) && (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && inet_rcv_saddr_equal(sk, sk2, false)) { return reuseport_add_sock(sk, sk2); } } /* Initial allocation may have already happened via setsockopt */ if (!rcu_access_pointer(sk->sk_reuseport_cb)) return reuseport_alloc(sk); return 0; } /** * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 * * @sk: socket struct in question * @snum: port number to look up * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, * with NULL address */ int udp_lib_get_port(struct sock *sk, unsigned short snum, unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; struct udp_table *udptable = sk->sk_prot->h.udp_table; int error = 1; struct net *net = sock_net(sk); if (!snum) { int low, high, remaining; unsigned int rand; unsigned short first, last; DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rand = prandom_u32(); first = reciprocal_scale(rand, remaining) + low; /* * force rand to be an odd multiple of UDP_HTABLE_SIZE */ rand = (rand | 1) * (udptable->mask + 1); last = first + udptable->mask + 1; do { hslot = udp_hashslot(udptable, net, first); bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, udptable->log); snum = first; /* * Iterate on all possible values of snum for this hash. * Using steps of an odd multiple of UDP_HTABLE_SIZE * give us randomization and full range coverage. */ do { if (low <= snum && snum <= high && !test_bit(snum >> udptable->log, bitmap) && !inet_is_local_reserved_port(net, snum)) goto found; snum += rand; } while (snum != first); spin_unlock_bh(&hslot->lock); cond_resched(); } while (++first != last); goto fail; } else { hslot = udp_hashslot(udptable, net, snum); spin_lock_bh(&hslot->lock); if (hslot->count > 10) { int exist; unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; slot2 &= udptable->mask; hash2_nulladdr &= udptable->mask; hslot2 = udp_hashslot2(udptable, slot2); if (hslot->count < hslot2->count) goto scan_primary_hash; exist = udp_lib_lport_inuse2(net, snum, hslot2, sk); if (!exist && (hash2_nulladdr != slot2)) { hslot2 = udp_hashslot2(udptable, hash2_nulladdr); exist = udp_lib_lport_inuse2(net, snum, hslot2, sk); } if (exist) goto fail_unlock; else goto found; } scan_primary_hash: if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0)) goto fail_unlock; } found: inet_sk(sk)->inet_num = snum; udp_sk(sk)->udp_port_hash = snum; udp_sk(sk)->udp_portaddr_hash ^= snum; if (sk_unhashed(sk)) { if (sk->sk_reuseport && udp_reuseport_add_sock(sk, hslot)) { inet_sk(sk)->inet_num = 0; udp_sk(sk)->udp_port_hash = 0; udp_sk(sk)->udp_portaddr_hash ^= snum; goto fail_unlock; } sk_add_node_rcu(sk, &hslot->head); hslot->count++; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock(&hslot2->lock); if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && sk->sk_family == AF_INET6) hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); else hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); hslot2->count++; spin_unlock(&hslot2->lock); } sock_set_flag(sk, SOCK_RCU_FREE); error = 0; fail_unlock: spin_unlock_bh(&hslot->lock); fail: return error; } EXPORT_SYMBOL(udp_lib_get_port); static u32 
udp4_portaddr_hash(const struct net *net, __be32 saddr, unsigned int port) { return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; } int udp_v4_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); unsigned int hash2_partial = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; return udp_lib_get_port(sk, snum, hash2_nulladdr); } static int compute_score(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned short hnum, int dif, bool exact_dif) { int score; struct inet_sock *inet; if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || ipv6_only_sock(sk)) return -1; score = (sk->sk_family == PF_INET) ? 2 : 1; inet = inet_sk(sk); if (inet->inet_rcv_saddr) { if (inet->inet_rcv_saddr != daddr) return -1; score += 4; } if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if || exact_dif) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) score++; return score; } static u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport) { static u32 udp_ehash_secret __read_mostly; net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret)); return __inet_ehashfn(laddr, lport, faddr, fport, udp_ehash_secret + net_hash_mix(net)); } /* called with rcu_read_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif, bool exact_dif, struct udp_hslot *hslot2, struct sk_buff *skb) { struct sock *sk, *result; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; result = NULL; badness = 0; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); if (result) return result; matches = 1; } badness = score; result = sk; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } } return result; } /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. -DaveM */ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, struct udp_table *udptable, struct sk_buff *skb) { struct sock *sk, *result; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; bool exact_dif = udp_lib_exact_dif_match(net, skb); int score, badness, matches = 0, reuseport = 0; u32 hash = 0; if (hslot->count > 10) { hash2 = udp4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, exact_dif, hslot2, skb); if (!result) { unsigned int old_slot2 = slot2; hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); slot2 = hash2 & udptable->mask; /* avoid searching the same slot again. 
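			 * (the INADDR_ANY hash can land in the very same
			 * slot as the destination-address hash)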
*/ if (unlikely(slot2 == old_slot2)) return result; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, exact_dif, hslot2, skb); } return result; } begin: result = NULL; badness = 0; sk_for_each_rcu(sk, &hslot->head) { score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); if (result) return result; matches = 1; } result = sk; badness = score; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } } return result; } EXPORT_SYMBOL_GPL(__udp4_lib_lookup); static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { const struct iphdr *iph = ip_hdr(skb); return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, iph->daddr, dport, inet_iif(skb), udptable, skb); } struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport) { return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table); } EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb); /* Must be called under rcu_read_lock(). * Does increment socket refcount. */ #if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \ IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \ IS_ENABLED(CONFIG_NF_SOCKET_IPV4) struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { struct sock *sk; sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table, NULL); if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) sk = NULL; return sk; } EXPORT_SYMBOL_GPL(udp4_lib_lookup); #endif static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif, unsigned short hnum) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || (inet->inet_daddr && inet->inet_daddr != rmt_addr) || (inet->inet_dport != rmt_port && inet->inet_dport) || (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || ipv6_only_sock(sk) || (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) return false; if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) return false; return true; } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. * Header points to the ip header of the error packet. We move * on past this. Then (as it used to claim before adjustment) * header points to the first 8 bytes of the udp header. We need * to find the appropriate port. 
*/ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) { struct inet_sock *inet; const struct iphdr *iph = (const struct iphdr *)skb->data; struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; int harderr; int err; struct net *net = dev_net(skb->dev); sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, udptable, NULL); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; /* No socket for error */ } err = 0; harderr = 0; inet = inet_sk(sk); switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: goto out; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ ipv4_sk_update_pmtu(skb, sk, info); if (inet->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; case ICMP_REDIRECT: ipv4_sk_redirect(skb, sk); goto out; } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if (!inet->recverr) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: return; } void udp_err(struct sk_buff *skb, u32 info) { __udp4_lib_err(skb, info, &udp_table); } /* * Throw away all pending data and cancel the corking. Socket is locked. */ void udp_flush_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); if (up->pending) { up->len = 0; up->pending = 0; ip_flush_pending_frames(sk); } } EXPORT_SYMBOL(udp_flush_pending_frames); /** * udp4_hwcsum - handle outgoing HW checksumming * @skb: sk_buff containing the filled-in UDP header * (checksum field must be zeroed out) * @src: source IP address * @dst: destination IP address */ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) { struct udphdr *uh = udp_hdr(skb); int offset = skb_transport_offset(skb); int len = skb->len - offset; int hlen = len; __wsum csum = 0; if (!skb_has_frag_list(skb)) { /* * Only one fragment on the socket. */ skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); } else { struct sk_buff *frags; /* * HW-checksum won't work as there are two or more * fragments on the socket so that all csums of sk_buffs * should be together */ skb_walk_frags(skb, frags) { csum = csum_add(csum, frags->csum); hlen -= frags->len; } csum = skb_checksum(skb, offset, hlen, csum); skb->ip_summed = CHECKSUM_NONE; uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } } EXPORT_SYMBOL_GPL(udp4_hwcsum); /* Function to set UDP checksum for an IPv4 UDP packet. This is intended * for the simple case like when setting the checksum for a UDP tunnel. 
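 * It distinguishes the checksum-disabled, GSO, local-checksum-offload and
 * plain hardware-offload cases below.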
*/ void udp_set_csum(bool nocheck, struct sk_buff *skb, __be32 saddr, __be32 daddr, int len) { struct udphdr *uh = udp_hdr(skb); if (nocheck) { uh->check = 0; } else if (skb_is_gso(skb)) { uh->check = ~udp_v4_check(len, saddr, daddr, 0); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { uh->check = 0; uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb)); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } else { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~udp_v4_check(len, saddr, daddr, 0); } } EXPORT_SYMBOL(udp_set_csum); static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) { struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct udphdr *uh; int err = 0; int is_udplite = IS_UDPLITE(sk); int offset = skb_transport_offset(skb); int len = skb->len - offset; __wsum csum = 0; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = inet->inet_sport; uh->dest = fl4->fl4_dport; uh->len = htons(len); uh->check = 0; if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); else if (sk->sk_no_check_tx) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp4_hwcsum(skb, fl4->saddr, fl4->daddr); goto send; } else csum = udp_csum(skb); /* add protocol-dependent pseudo-header */ uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len, sk->sk_protocol, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { UDP_INC_STATS(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP_INC_STATS(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } /* * Push out all pending data as one UDP datagram. Socket is locked. */ int udp_push_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct flowi4 *fl4 = &inet->cork.fl.u.ip4; struct sk_buff *skb; int err = 0; skb = ip_finish_skb(sk, fl4); if (!skb) goto out; err = udp_send_skb(skb, fl4); out: up->len = 0; up->pending = 0; return err; } EXPORT_SYMBOL(udp_push_pending_frames); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); struct flowi4 fl4_stack; struct flowi4 *fl4; int ulen = len; struct ipcm_cookie ipc; struct rtable *rt = NULL; int free = 0; int connected = 0; __be32 daddr, faddr, saddr; __be16 dport; u8 tos; int err, is_udplite = IS_UDPLITE(sk); int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); struct sk_buff *skb; struct ip_options_data opt_copy; if (len > 0xFFFF) return -EMSGSIZE; /* * Check the flags. */ if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ return -EOPNOTSUPP; ipc.opt = NULL; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; fl4 = &inet->cork.fl.u.ip4; if (up->pending) { /* * There are pending frames. * The socket lock must be held while it's corked. */ lock_sock(sk); if (likely(up->pending)) { if (unlikely(up->pending != AF_INET)) { release_sock(sk); return -EINVAL; } goto do_append_data; } release_sock(sk); } ulen += sizeof(struct udphdr); /* * Get and verify the address. 
*/ if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) { if (usin->sin_family != AF_UNSPEC) return -EAFNOSUPPORT; } daddr = usin->sin_addr.s_addr; dport = usin->sin_port; if (dport == 0) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = inet->inet_daddr; dport = inet->inet_dport; /* Open fast path for connected socket. Route will not be used, if at least one option is set. */ connected = 1; } ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; ipc.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6); if (unlikely(err)) { kfree(ipc.opt); return err; } if (ipc.opt) free = 1; connected = 0; } if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } saddr = ipc.addr; ipc.addr = faddr = daddr; sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; faddr = ipc.opt->opt.faddr; connected = 0; } tos = get_rttos(&ipc, inet); if (sock_flag(sk, SOCK_LOCALROUTE) || (msg->msg_flags & MSG_DONTROUTE) || (ipc.opt && ipc.opt->opt.is_strictroute)) { tos |= RTO_ONLINK; connected = 0; } if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; connected = 0; } else if (!ipc.oif) ipc.oif = inet->uc_index; if (connected) rt = (struct rtable *)sk_dst_check(sk, 0); if (!rt) { struct net *net = sock_net(sk); __u8 flow_flags = inet_sk_flowi_flags(sk); fl4 = &fl4_stack; flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, flow_flags, faddr, saddr, dport, inet->inet_sport, sk->sk_uid); security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; if (err == -ENETUNREACH) IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); goto out; } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) goto out; if (connected) sk_dst_set(sk, dst_clone(&rt->dst)); } if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: saddr = fl4->saddr; if (!ipc.addr) daddr = ipc.addr = fl4->daddr; /* Lockless fast path for the non-corking case. */ if (!corkreq) { skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, sizeof(struct udphdr), &ipc, &rt, msg->msg_flags); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_send_skb(skb, fl4); goto out; } lock_sock(sk); if (unlikely(up->pending)) { /* The socket is already corked while preparing it. */ /* ... which is an evident application bug. --ANK */ release_sock(sk); net_dbg_ratelimited("cork app bug 2\n"); err = -EINVAL; goto out; } /* * Now cork the socket to pend data. */ fl4 = &inet->cork.fl.u.ip4; fl4->daddr = daddr; fl4->saddr = saddr; fl4->fl4_dport = dport; fl4->fl4_sport = inet->inet_sport; up->pending = AF_INET; do_append_data: up->len += ulen; err = ip_append_data(sk, fl4, getfrag, msg, ulen, sizeof(struct udphdr), &ipc, &rt, corkreq ? 
msg->msg_flags|MSG_MORE : msg->msg_flags); if (err) udp_flush_pending_frames(sk); else if (!corkreq) err = udp_push_pending_frames(sk); else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) up->pending = 0; release_sock(sk); out: ip_rt_put(rt); if (free) kfree(ipc.opt); if (!err) return len; /* * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting * ENOBUFS might not be good (it's not tunable per se), but otherwise * we don't have a good statistic (IpOutDiscards but it can be too many * things). We could add another new stat but at least for now that * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { UDP_INC_STATS(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); } return err; do_confirm: if (msg->msg_flags & MSG_PROBE) dst_confirm_neigh(&rt->dst, &fl4->daddr); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } EXPORT_SYMBOL(udp_sendmsg); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); int ret; if (flags & MSG_SENDPAGE_NOTLAST) flags |= MSG_MORE; if (!up->pending) { struct msghdr msg = { .msg_flags = flags|MSG_MORE }; /* Call udp_sendmsg to specify destination address which * sendpage interface can't pass. * This will succeed only when the socket is connected. */ ret = udp_sendmsg(sk, &msg, 0); if (ret < 0) return ret; } lock_sock(sk); if (unlikely(!up->pending)) { release_sock(sk); net_dbg_ratelimited("udp cork app bug 3\n"); return -EINVAL; } ret = ip_append_page(sk, &inet->cork.fl.u.ip4, page, offset, size, flags); if (ret == -EOPNOTSUPP) { release_sock(sk); return sock_no_sendpage(sk->sk_socket, page, offset, size, flags); } if (ret < 0) { udp_flush_pending_frames(sk); goto out; } up->len += size; if (!(up->corkflag || (flags&MSG_MORE))) ret = udp_push_pending_frames(sk); if (!ret) ret = size; out: release_sock(sk); return ret; } /* fully reclaim rmem/fwd memory allocated for skb */ static void udp_rmem_release(struct sock *sk, int size, int partial) { struct udp_sock *up = udp_sk(sk); int amt; if (likely(partial)) { up->forward_deficit += size; size = up->forward_deficit; if (size < (sk->sk_rcvbuf >> 2) && !skb_queue_empty(&sk->sk_receive_queue)) return; } else { size += up->forward_deficit; } up->forward_deficit = 0; sk->sk_forward_alloc += size; amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1); sk->sk_forward_alloc -= amt; if (amt) __sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT); atomic_sub(size, &sk->sk_rmem_alloc); } /* Note: called with sk_receive_queue.lock held. * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch * This avoids a cache line miss while receive_queue lock is held. * Look at __udp_enqueue_schedule_skb() to find where this copy is done. */ void udp_skb_destructor(struct sock *sk, struct sk_buff *skb) { udp_rmem_release(sk, skb->dev_scratch, 1); } EXPORT_SYMBOL(udp_skb_destructor); /* Idea of busylocks is to let producers grab an extra spinlock * to relieve pressure on the receive_queue spinlock shared by consumer. * Under flood, this means that only one producer can be in line * trying to acquire the receive_queue spinlock. 
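 * The remaining producers then spin on the busylock instead of on the
 * receive_queue lock itself, which keeps the consumer's lock hold times short.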
 * These busylocks can be allocated in a per-cpu manner, instead of a
 * per-socket one (which would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under memory pressure, it might help to hand udp_recvmsg()
	 * linear skbs:
	 * - Reduced memory overhead, and thus increased receive queue capacity
	 * - Fewer cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	/* Copy skb->truesize into skb->dev_scratch to avoid a cache line miss
	 * in udp_skb_destructor()
	 */
	skb->dev_scratch = size;

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to set up a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

void udp_destruct_sock(struct sock *sk)
{
	/* completely reclaim the forward-allocated memory */
	unsigned int total = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}
	consume_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of the found skb, or -1 if none is found.
*/ static int first_packet_length(struct sock *sk) { struct sk_buff_head *rcvq = &sk->sk_receive_queue; struct sk_buff *skb; int total = 0; int res; spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL && udp_lib_checksum_complete(skb)) { __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, IS_UDPLITE(sk)); __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); atomic_inc(&sk->sk_drops); __skb_unlink(skb, rcvq); total += skb->truesize; kfree_skb(skb); } res = skb ? skb->len : -1; if (total) udp_rmem_release(sk, total, 1); spin_unlock_bh(&rcvq->lock); return res; } /* * IOCTL requests applicable to the UDP protocol */ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { int amount = max_t(int, 0, first_packet_length(sk)); return put_user(amount, (int __user *)arg); } default: return -ENOIOCTLCMD; } return 0; } EXPORT_SYMBOL(udp_ioctl); /* * This should be easy, if there is something there we * return it, otherwise we block. */ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct sk_buff *skb; unsigned int ulen, copied; int peeked, peeking, off; int err; int is_udplite = IS_UDPLITE(sk); bool checksum_valid = false; if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len, addr_len); try_again: peeking = off = sk_peek_offset(sk, flags); skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err); if (!skb) return err; ulen = skb->len; copied = len; if (copied > ulen - off) copied = ulen - off; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || peeking || (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { checksum_valid = !udp_lib_checksum_complete(skb); if (!checksum_valid) goto csum_copy_err; } if (checksum_valid || skb_csum_unnecessary(skb)) err = skb_copy_datagram_msg(skb, off, msg, copied); else { err = skb_copy_and_csum_datagram_msg(skb, off, msg); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } kfree_skb(skb); return err; } if (!peeked) UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off); err = copied; if (flags & MSG_TRUNC) err = ulen; skb_consume_udp(sk, skb, peeking ? -err : err); return err; csum_copy_err: if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) { UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } kfree_skb(skb); /* starting over for a new packet, but check if we need to yield */ cond_resched(); msg->msg_flags &= ~MSG_TRUNC; goto try_again; } int __udp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); /* * 1003.1g - break association. 
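	 *	(a connect() with an AF_UNSPEC address ends up here and
	 *	dissolves the association)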
*/ sk->sk_state = TCP_CLOSE; inet->inet_daddr = 0; inet->inet_dport = 0; sock_rps_reset_rxhash(sk); sk->sk_bound_dev_if = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { sk->sk_prot->unhash(sk); inet->inet_sport = 0; } sk_dst_reset(sk); return 0; } EXPORT_SYMBOL(__udp_disconnect); int udp_disconnect(struct sock *sk, int flags) { lock_sock(sk); __udp_disconnect(sk, flags); release_sock(sk); return 0; } EXPORT_SYMBOL(udp_disconnect); void udp_lib_unhash(struct sock *sk) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2; hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock_bh(&hslot->lock); if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); if (sk_del_node_init_rcu(sk)) { hslot->count--; inet_sk(sk)->inet_num = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_lock(&hslot2->lock); hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); } spin_unlock_bh(&hslot->lock); } } EXPORT_SYMBOL(udp_lib_unhash); /* * inet_rcv_saddr was changed, we must rehash secondary hash */ void udp_lib_rehash(struct sock *sk, u16 newhash) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2, *nhslot2; hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); nhslot2 = udp_hashslot2(udptable, newhash); udp_sk(sk)->udp_portaddr_hash = newhash; if (hslot2 != nhslot2 || rcu_access_pointer(sk->sk_reuseport_cb)) { hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); /* we must lock primary chain too */ spin_lock_bh(&hslot->lock); if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); if (hslot2 != nhslot2) { spin_lock(&hslot2->lock); hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); spin_lock(&nhslot2->lock); hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &nhslot2->head); nhslot2->count++; spin_unlock(&nhslot2->lock); } spin_unlock_bh(&hslot->lock); } } } EXPORT_SYMBOL(udp_lib_rehash); static void udp_v4_rehash(struct sock *sk) { u16 new_hash = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); udp_lib_rehash(sk, new_hash); } int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; if (inet_sk(sk)->inet_daddr) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); sk_incoming_cpu_update(sk); } else { sk_mark_napi_id_once(sk, skb); } rc = __udp_enqueue_schedule_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); trace_udp_fail_queue_rcv_skb(rc, sk); return -1; } return 0; } static struct static_key udp_encap_needed __read_mostly; void udp_encap_enable(void) { if (!static_key_enabled(&udp_encap_needed)) static_key_slow_inc(&udp_encap_needed); } EXPORT_SYMBOL(udp_encap_enable); /* returns: * -1: error * 0: success * >0: "udp encap" protocol resubmission * * Note that in the success and error cases, the skb is assumed to * have either been requeued or freed. 
*/ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int is_udplite = IS_UDPLITE(sk); /* * Charge it to the socket, dropping if the queue is full. */ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; nf_reset(skb); if (static_key_false(&udp_encap_needed) && up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* * This is an encapsulation socket so pass the skb to * the socket's udp_encap_rcv() hook. Otherwise, just * fall through and pass this up the UDP socket. * up->encap_rcv() returns the following value: * =0 if skb was successfully passed to the encap * handler or was discarded by it. * >0 if skb should be passed on to UDP. * <0 if skb should be resubmitted as proto -N */ /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); if (encap_rcv) { int ret; /* Verify checksum before giving to encap */ if (udp_lib_checksum_complete(skb)) goto csum_error; ret = encap_rcv(sk, skb); if (ret <= 0) { __UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); return -ret; } } /* FALLTHROUGH -- it's a UDP Packet */ } /* * UDP-Lite specific tests, ignored on UDP sockets */ if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are * disabled for the following two types of errors: these depend * on the application settings, not on the functioning of the * protocol stack as such. * * RFC 3828 here recommends (sec 3.3): "There should also be a * way ... to ... at least let the receiving application block * delivery of packets with coverage values less than a value * provided by the application." */ if (up->pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } /* The next case involves violating the min. coverage requested * by the receiver. This is subtle: if receiver wants x and x is * greater than the buffersize/MTU then receiver will complain * that it wants x while sender emits packets of smaller size y. * Therefore the above ...()->partial_cov statement is essential. */ if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n", UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } if (rcu_access_pointer(sk->sk_filter) && udp_lib_checksum_complete(skb)) goto csum_error; if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) goto drop; udp_csum_pull_header(skb); ipv4_pktinfo_prepare(sk, skb); return __udp_queue_rcv_skb(sk, skb); csum_error: __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } /* For TCP sockets, sk_rx_dst is protected by socket lock * For UDP, we use xchg() to guard against concurrent changes. */ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old; dst_hold(dst); old = xchg(&sk->sk_rx_dst, dst); dst_release(old); } /* * Multicasts and broadcasts go to each listener. * * Note: called only from the BH handler context. 
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also look up *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

/* Initialize UDP checksum. If it exits with a zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen.
		 */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop the packet silently if the checksum is wrong. */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet on a port we don't want
	 * to listen on.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too-long list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long, so only check
 * if the first socket is an exact match and if not move on.
*/ static struct sock *__udp4_lib_demux_lookup(struct net *net, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { unsigned short hnum = ntohs(loc_port); unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); unsigned int slot2 = hash2 & udp_table.mask; struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr); const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); struct sock *sk; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { if (INET_MATCH(sk, net, acookie, rmt_addr, loc_addr, ports, dif)) return sk; /* Only check first socket in chain */ break; } return NULL; } void udp_v4_early_demux(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); const struct iphdr *iph; const struct udphdr *uh; struct sock *sk = NULL; struct dst_entry *dst; int dif = skb->dev->ifindex; int ours; /* validate the packet */ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) return; iph = ip_hdr(skb); uh = udp_hdr(skb); if (skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST) { struct in_device *in_dev = __in_dev_get_rcu(skb->dev); if (!in_dev) return; /* we are supposed to accept bcast packets */ if (skb->pkt_type == PACKET_MULTICAST) { ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, iph->protocol); if (!ours) return; } sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); } else if (skb->pkt_type == PACKET_HOST) { sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); } if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)) return; skb->sk = sk; skb->destructor = sock_efree; dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); if (dst) { /* DST_NOCACHE can not be used without taking a reference */ if (dst->flags & DST_NOCACHE) { if (likely(atomic_inc_not_zero(&dst->__refcnt))) skb_dst_set(skb, dst); } else { skb_dst_set_noref(skb, dst); } } } int udp_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); } void udp_destroy_sock(struct sock *sk) { struct udp_sock *up = udp_sk(sk); bool slow = lock_sock_fast(sk); udp_flush_pending_frames(sk); unlock_sock_fast(sk, slow); if (static_key_false(&udp_encap_needed) && up->encap_type) { void (*encap_destroy)(struct sock *sk); encap_destroy = ACCESS_ONCE(up->encap_destroy); if (encap_destroy) encap_destroy(sk); } } /* * Socket option code for UDP */ int udp_lib_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)) { struct udp_sock *up = udp_sk(sk); int val, valbool; int err = 0; int is_udplite = IS_UDPLITE(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; valbool = val ? 1 : 0; switch (optname) { case UDP_CORK: if (val != 0) { up->corkflag = 1; } else { up->corkflag = 0; lock_sock(sk); push_pending_frames(sk); release_sock(sk); } break; case UDP_ENCAP: switch (val) { case 0: case UDP_ENCAP_ESPINUDP: case UDP_ENCAP_ESPINUDP_NON_IKE: up->encap_rcv = xfrm4_udp_encap_rcv; /* FALLTHROUGH */ case UDP_ENCAP_L2TPINUDP: up->encap_type = val; udp_encap_enable(); break; default: err = -ENOPROTOOPT; break; } break; case UDP_NO_CHECK6_TX: up->no_check6_tx = valbool; break; case UDP_NO_CHECK6_RX: up->no_check6_rx = valbool; break; /* * UDP-Lite's partial checksum coverage (RFC 3828). */ /* The sender sets actual checksum coverage length via this option. 
 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, it could get a return
 *	from select() indicating that data is available, but then block when
 *	reading it.  Add special-case code to work around these arguably
 *	broken applications.
*/ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; sock_rps_record_flow(sk); /* Check for false positives due to checksum errors */ if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) mask &= ~(POLLIN | POLLRDNORM); return mask; } EXPORT_SYMBOL(udp_poll); int udp_abort(struct sock *sk, int err) { lock_sock(sk); sk->sk_err = err; sk->sk_error_report(sk); __udp_disconnect(sk, 0); release_sock(sk); return 0; } EXPORT_SYMBOL_GPL(udp_abort); struct proto udp_prot = { .name = "UDP", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udp_init_sock, .destroy = udp_destroy_sock, .setsockopt = udp_setsockopt, .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .sendpage = udp_sendpage, .release_cb = ip4_datagram_release_cb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, .sysctl_wmem = &sysctl_udp_wmem_min, .sysctl_rmem = &sysctl_udp_rmem_min, .obj_size = sizeof(struct udp_sock), .h.udp_table = &udp_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, #endif .diag_destroy = udp_abort, }; EXPORT_SYMBOL(udp_prot); /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); for (state->bucket = start; state->bucket <= state->udp_table->mask; ++state->bucket) { struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; if (hlist_empty(&hslot->head)) continue; spin_lock_bh(&hslot->lock); sk_for_each(sk, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == state->family) goto found; } spin_unlock_bh(&hslot->lock); } sk = NULL; found: return sk; } static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); do { sk = sk_next(sk); } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); if (!sk) { if (state->bucket <= state->udp_table->mask) spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); return udp_get_first(seq, state->bucket + 1); } return sk; } static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = udp_get_first(seq, 0); if (sk) while (pos && (sk = udp_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } static void *udp_seq_start(struct seq_file *seq, loff_t *pos) { struct udp_iter_state *state = seq->private; state->bucket = MAX_UDP_PORTS; return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = udp_get_idx(seq, 0); else sk = udp_get_next(seq, v); ++*pos; return sk; } static void udp_seq_stop(struct seq_file *seq, void *v) { struct udp_iter_state *state = seq->private; if (state->bucket <= state->udp_table->mask) spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); } int udp_seq_open(struct inode *inode, struct file *file) { struct udp_seq_afinfo *afinfo = PDE_DATA(inode); struct udp_iter_state *s; int err; err = seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct udp_iter_state)); if (err < 0) return err; s = ((struct seq_file *)file->private_data)->private; s->family = afinfo->family; s->udp_table = afinfo->udp_table; return err; } EXPORT_SYMBOL(udp_seq_open); /* ------------------------------------------------------------------------ */ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) { struct proc_dir_entry *p; int rc = 0; afinfo->seq_ops.start = udp_seq_start; afinfo->seq_ops.next = udp_seq_next; afinfo->seq_ops.stop = udp_seq_stop; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) rc = -ENOMEM; return rc; } EXPORT_SYMBOL(udp_proc_register); void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } EXPORT_SYMBOL(udp_proc_unregister); /* ------------------------------------------------------------------------ */ static void udp4_format_sock(struct sock *sp, struct seq_file *f, int bucket) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } int udp4_seq_show(struct seq_file *seq, void *v) { seq_setwidth(seq, 127); if (v == SEQ_START_TOKEN) seq_puts(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { struct udp_iter_state *state = seq->private; udp4_format_sock(v, seq, state->bucket); } seq_pad(seq, '\n'); return 0; } static const struct file_operations udp_afinfo_seq_fops = { .owner = THIS_MODULE, .open = udp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; /* ------------------------------------------------------------------------ */ static struct udp_seq_afinfo udp4_seq_afinfo = { .name = "udp", .family = AF_INET, .udp_table = &udp_table, .seq_fops = &udp_afinfo_seq_fops, .seq_ops = { .show = udp4_seq_show, }, }; static int __net_init udp4_proc_init_net(struct net *net) { return udp_proc_register(net, &udp4_seq_afinfo); } static void __net_exit udp4_proc_exit_net(struct net *net) { udp_proc_unregister(net, &udp4_seq_afinfo); } static struct pernet_operations udp4_net_ops = { .init = udp4_proc_init_net, .exit = udp4_proc_exit_net, }; int __init udp4_proc_init(void) { return register_pernet_subsys(&udp4_net_ops); } void udp4_proc_exit(void) { unregister_pernet_subsys(&udp4_net_ops); } #endif /* CONFIG_PROC_FS */ static __initdata unsigned long uhash_entries; static int __init set_uhash_entries(char *str) { 
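	/* "uhash_entries=" boot parameter; 0 keeps the auto-sized table */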
ssize_t ret; if (!str) return 0; ret = kstrtoul(str, 0, &uhash_entries); if (ret) return 0; if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) uhash_entries = UDP_HTABLE_SIZE_MIN; return 1; } __setup("uhash_entries=", set_uhash_entries); void __init udp_table_init(struct udp_table *table, const char *name) { unsigned int i; table->hash = alloc_large_system_hash(name, 2 * sizeof(struct udp_hslot), uhash_entries, 21, /* one slot per 2 MB */ 0, &table->log, &table->mask, UDP_HTABLE_SIZE_MIN, 64 * 1024); table->hash2 = table->hash + (table->mask + 1); for (i = 0; i <= table->mask; i++) { INIT_HLIST_HEAD(&table->hash[i].head); table->hash[i].count = 0; spin_lock_init(&table->hash[i].lock); } for (i = 0; i <= table->mask; i++) { INIT_HLIST_HEAD(&table->hash2[i].head); table->hash2[i].count = 0; spin_lock_init(&table->hash2[i].lock); } } u32 udp_flow_hashrnd(void) { static u32 hashrnd __read_mostly; net_get_random_once(&hashrnd, sizeof(hashrnd)); return hashrnd; } EXPORT_SYMBOL(udp_flow_hashrnd); void __init udp_init(void) { unsigned long limit; unsigned int i; udp_table_init(&udp_table, "UDP"); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_udp_mem[0] = limit / 4 * 3; sysctl_udp_mem[1] = limit; sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; sysctl_udp_rmem_min = SK_MEM_QUANTUM; sysctl_udp_wmem_min = SK_MEM_QUANTUM; /* 16 spinlocks per cpu */ udp_busylocks_log = ilog2(nr_cpu_ids) + 4; udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log, GFP_KERNEL); if (!udp_busylocks) panic("UDP: failed to alloc udp_busylocks\n"); for (i = 0; i < (1U << udp_busylocks_log); i++) spin_lock_init(udp_busylocks + i); }
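As a usage note on the UDPLITE_SEND_CSCOV handling in udp_lib_setsockopt() above: user space drives it with an ordinary setsockopt() call. Below is a minimal, self-contained sketch of a UDP-Lite sender requesting 20 bytes of checksum coverage (counted from the first byte of the 8-byte UDP-Lite header, per RFC 3828). It assumes a Linux host with UDP-Lite support; SOL_UDPLITE and UDPLITE_SEND_CSCOV are defined by hand in case the libc headers do not provide them, and the destination 192.0.2.1:9000 is a placeholder.

/* Minimal sketch: UDP-Lite sender with partial checksum coverage. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE	136
#endif
#ifndef SOL_UDPLITE
#define SOL_UDPLITE	136	/* option level == the protocol number */
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV 10	/* from include/uapi/linux/udp.h */
#endif

int main(void)
{
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port   = htons(9000) };
	int cscov = 20;		/* header plus the first 12 payload bytes */
	char buf[512];
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/* Values in (0, 8) are bumped to 8 by the kernel code above;
	 * 0 requests full coverage again. */
	if (setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		perror("UDPLITE_SEND_CSCOV");

	memset(buf, 0xab, sizeof(buf));
	if (sendto(fd, buf, sizeof(buf), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}

A receiver would analogously set UDPLITE_RECV_CSCOV (value 11) to state the minimum coverage it accepts; as the setsockopt path above shows, any value between 1 and 7 is silently raised to 8.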
null
null
null
null
75,962
260
null
train_val
b09a65ece69306a70044ac99ca6928eda58d7c79
270,567
tcpdump
0
https://github.com/the-tcpdump-group/tcpdump
2017-09-14 11:59:38-07:00
/* * Copyright (c) 1998-2007 The TCPDUMP project * Copyright (c) 2009 Florian Forster * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code * distributions retain the above copyright notice and this paragraph * in its entirety, and (2) distributions including binary code include * the above copyright notice and this paragraph in its entirety in * the documentation or other materials provided with the distribution. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND * WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT * LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * Original code by Hannes Gredler <hannes@gredler.at> * IPv6 additions by Florian Forster <octo at verplant.org> */ /* \summary: Optimized Link State Routing Protocol (OLSR) printer */ /* specification: RFC 3626 */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" /* * RFC 3626 common header * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Packet Length | Packet Sequence Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Message Type | Vtime | Message Size | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Originator Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Time To Live | Hop Count | Message Sequence Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * : MESSAGE : * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Message Type | Vtime | Message Size | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Originator Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Time To Live | Hop Count | Message Sequence Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * : MESSAGE : * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * : : */ struct olsr_common { uint8_t packet_len[2]; uint8_t packet_seq[2]; }; #define OLSR_HELLO_MSG 1 /* rfc3626 */ #define OLSR_TC_MSG 2 /* rfc3626 */ #define OLSR_MID_MSG 3 /* rfc3626 */ #define OLSR_HNA_MSG 4 /* rfc3626 */ #define OLSR_POWERINFO_MSG 128 #define OLSR_NAMESERVICE_MSG 130 #define OLSR_HELLO_LQ_MSG 201 /* LQ extensions olsr.org */ #define OLSR_TC_LQ_MSG 202 /* LQ extensions olsr.org */ static const struct tok olsr_msg_values[] = { { OLSR_HELLO_MSG, "Hello" }, { OLSR_TC_MSG, "TC" }, { OLSR_MID_MSG, "MID" }, { OLSR_HNA_MSG, "HNA" }, { OLSR_POWERINFO_MSG, "Powerinfo" }, { OLSR_NAMESERVICE_MSG, "Nameservice" }, { OLSR_HELLO_LQ_MSG, "Hello-LQ" }, { OLSR_TC_LQ_MSG, "TC-LQ" }, { 0, NULL} }; struct olsr_msg4 { uint8_t msg_type; uint8_t vtime; uint8_t msg_len[2]; uint8_t originator[4]; uint8_t ttl; uint8_t hopcount; uint8_t msg_seq[2]; }; struct olsr_msg6 { uint8_t msg_type; uint8_t vtime; uint8_t msg_len[2]; uint8_t originator[16]; uint8_t ttl; uint8_t hopcount; uint8_t msg_seq[2]; }; struct olsr_hello { uint8_t res[2]; uint8_t htime; uint8_t will; }; struct olsr_hello_link { uint8_t link_code; uint8_t res; uint8_t len[2]; }; struct olsr_tc { uint8_t ans_seq[2]; uint8_t res[2]; }; struct olsr_hna4 { uint8_t network[4]; uint8_t mask[4]; }; struct olsr_hna6 { uint8_t network[16]; uint8_t mask[16]; }; /** gateway HNA flags */ enum 
gateway_hna_flags { GW_HNA_FLAG_LINKSPEED = 1 << 0, GW_HNA_FLAG_IPV4 = 1 << 1, GW_HNA_FLAG_IPV4_NAT = 1 << 2, GW_HNA_FLAG_IPV6 = 1 << 3, GW_HNA_FLAG_IPV6PREFIX = 1 << 4 }; /** gateway HNA field byte offsets in the netmask field of the HNA */ enum gateway_hna_fields { GW_HNA_PAD = 0, GW_HNA_FLAGS = 1, GW_HNA_UPLINK = 2, GW_HNA_DOWNLINK = 3, GW_HNA_V6PREFIXLEN = 4, GW_HNA_V6PREFIX = 5 }; #define OLSR_EXTRACT_LINK_TYPE(link_code) (link_code & 0x3) #define OLSR_EXTRACT_NEIGHBOR_TYPE(link_code) (link_code >> 2) static const struct tok olsr_link_type_values[] = { { 0, "Unspecified" }, { 1, "Asymmetric" }, { 2, "Symmetric" }, { 3, "Lost" }, { 0, NULL} }; static const struct tok olsr_neighbor_type_values[] = { { 0, "Not-Neighbor" }, { 1, "Symmetric" }, { 2, "Symmetric-MPR" }, { 0, NULL} }; struct olsr_lq_neighbor4 { uint8_t neighbor[4]; uint8_t link_quality; uint8_t neighbor_link_quality; uint8_t res[2]; }; struct olsr_lq_neighbor6 { uint8_t neighbor[16]; uint8_t link_quality; uint8_t neighbor_link_quality; uint8_t res[2]; }; #define MAX_SMARTGW_SPEED 320000000 /** * Convert an encoded 1 byte transport value (5 bits mantissa, 3 bits exponent) * to an uplink/downlink speed value * * @param value the encoded 1 byte transport value * @return the uplink/downlink speed value (in kbit/s) */ static uint32_t deserialize_gw_speed(uint8_t value) { uint32_t speed; uint32_t exp; if (!value) { return 0; } if (value == UINT8_MAX) { /* maximum value: also return maximum value */ return MAX_SMARTGW_SPEED; } speed = (value >> 3) + 1; exp = value & 7; while (exp-- > 0) { speed *= 10; } return speed; } /* * macro to convert the 8-bit mantissa/exponent to a double float * taken from olsr.org. */ #define VTIME_SCALE_FACTOR 0.0625 #define ME_TO_DOUBLE(me) \ (double)(VTIME_SCALE_FACTOR*(1+(double)(me>>4)/16)*(double)(1<<(me&0x0F))) /* * print a neighbor list with LQ extensions. */ static int olsr_print_lq_neighbor4(netdissect_options *ndo, const u_char *msg_data, u_int hello_len) { const struct olsr_lq_neighbor4 *lq_neighbor; while (hello_len >= sizeof(struct olsr_lq_neighbor4)) { lq_neighbor = (const struct olsr_lq_neighbor4 *)msg_data; if (!ND_TTEST(*lq_neighbor)) return (-1); ND_PRINT((ndo, "\n\t neighbor %s, link-quality %.2f%%" ", neighbor-link-quality %.2f%%", ipaddr_string(ndo, lq_neighbor->neighbor), ((double)lq_neighbor->link_quality/2.55), ((double)lq_neighbor->neighbor_link_quality/2.55))); msg_data += sizeof(struct olsr_lq_neighbor4); hello_len -= sizeof(struct olsr_lq_neighbor4); } return (0); } static int olsr_print_lq_neighbor6(netdissect_options *ndo, const u_char *msg_data, u_int hello_len) { const struct olsr_lq_neighbor6 *lq_neighbor; while (hello_len >= sizeof(struct olsr_lq_neighbor6)) { lq_neighbor = (const struct olsr_lq_neighbor6 *)msg_data; if (!ND_TTEST(*lq_neighbor)) return (-1); ND_PRINT((ndo, "\n\t neighbor %s, link-quality %.2f%%" ", neighbor-link-quality %.2f%%", ip6addr_string(ndo, lq_neighbor->neighbor), ((double)lq_neighbor->link_quality/2.55), ((double)lq_neighbor->neighbor_link_quality/2.55))); msg_data += sizeof(struct olsr_lq_neighbor6); hello_len -= sizeof(struct olsr_lq_neighbor6); } return (0); } /* * print a neighbor list. 
*/ static int olsr_print_neighbor(netdissect_options *ndo, const u_char *msg_data, u_int hello_len) { int neighbor; ND_PRINT((ndo, "\n\t neighbor\n\t\t")); neighbor = 1; while (hello_len >= sizeof(struct in_addr)) { if (!ND_TTEST2(*msg_data, sizeof(struct in_addr))) return (-1); /* print 4 neighbors per line */ ND_PRINT((ndo, "%s%s", ipaddr_string(ndo, msg_data), neighbor % 4 == 0 ? "\n\t\t" : " ")); msg_data += sizeof(struct in_addr); hello_len -= sizeof(struct in_addr); } return (0); } void olsr_print(netdissect_options *ndo, const u_char *pptr, u_int length, int is_ipv6) { union { const struct olsr_common *common; const struct olsr_msg4 *msg4; const struct olsr_msg6 *msg6; const struct olsr_hello *hello; const struct olsr_hello_link *hello_link; const struct olsr_tc *tc; const struct olsr_hna4 *hna; } ptr; u_int msg_type, msg_len, msg_tlen, hello_len; uint16_t name_entry_type, name_entry_len; u_int name_entry_padding; uint8_t link_type, neighbor_type; const u_char *tptr, *msg_data; tptr = pptr; if (length < sizeof(struct olsr_common)) { goto trunc; } ND_TCHECK2(*tptr, sizeof(struct olsr_common)); ptr.common = (const struct olsr_common *)tptr; length = min(length, EXTRACT_16BITS(ptr.common->packet_len)); ND_PRINT((ndo, "OLSRv%i, seq 0x%04x, length %u", (is_ipv6 == 0) ? 4 : 6, EXTRACT_16BITS(ptr.common->packet_seq), length)); tptr += sizeof(struct olsr_common); /* * In non-verbose mode, just print version. */ if (ndo->ndo_vflag < 1) { return; } while (tptr < (pptr+length)) { union { const struct olsr_msg4 *v4; const struct olsr_msg6 *v6; } msgptr; int msg_len_valid = 0; if (is_ipv6) { ND_TCHECK2(*tptr, sizeof(struct olsr_msg6)); msgptr.v6 = (const struct olsr_msg6 *) tptr; msg_type = msgptr.v6->msg_type; msg_len = EXTRACT_16BITS(msgptr.v6->msg_len); if ((msg_len >= sizeof (struct olsr_msg6)) && (msg_len <= length)) msg_len_valid = 1; /* infinite loop check */ if (msg_type == 0 || msg_len == 0) { return; } ND_PRINT((ndo, "\n\t%s Message (%#04x), originator %s, ttl %u, hop %u" "\n\t vtime %.3fs, msg-seq 0x%04x, length %u%s", tok2str(olsr_msg_values, "Unknown", msg_type), msg_type, ip6addr_string(ndo, msgptr.v6->originator), msgptr.v6->ttl, msgptr.v6->hopcount, ME_TO_DOUBLE(msgptr.v6->vtime), EXTRACT_16BITS(msgptr.v6->msg_seq), msg_len, (msg_len_valid == 0) ? " (invalid)" : "")); if (!msg_len_valid) { return; } msg_tlen = msg_len - sizeof(struct olsr_msg6); msg_data = tptr + sizeof(struct olsr_msg6); } else /* (!is_ipv6) */ { ND_TCHECK2(*tptr, sizeof(struct olsr_msg4)); msgptr.v4 = (const struct olsr_msg4 *) tptr; msg_type = msgptr.v4->msg_type; msg_len = EXTRACT_16BITS(msgptr.v4->msg_len); if ((msg_len >= sizeof (struct olsr_msg4)) && (msg_len <= length)) msg_len_valid = 1; /* infinite loop check */ if (msg_type == 0 || msg_len == 0) { return; } ND_PRINT((ndo, "\n\t%s Message (%#04x), originator %s, ttl %u, hop %u" "\n\t vtime %.3fs, msg-seq 0x%04x, length %u%s", tok2str(olsr_msg_values, "Unknown", msg_type), msg_type, ipaddr_string(ndo, msgptr.v4->originator), msgptr.v4->ttl, msgptr.v4->hopcount, ME_TO_DOUBLE(msgptr.v4->vtime), EXTRACT_16BITS(msgptr.v4->msg_seq), msg_len, (msg_len_valid == 0) ? 
" (invalid)" : "")); if (!msg_len_valid) { return; } msg_tlen = msg_len - sizeof(struct olsr_msg4); msg_data = tptr + sizeof(struct olsr_msg4); } switch (msg_type) { case OLSR_HELLO_MSG: case OLSR_HELLO_LQ_MSG: if (msg_tlen < sizeof(struct olsr_hello)) goto trunc; ND_TCHECK2(*msg_data, sizeof(struct olsr_hello)); ptr.hello = (const struct olsr_hello *)msg_data; ND_PRINT((ndo, "\n\t hello-time %.3fs, MPR willingness %u", ME_TO_DOUBLE(ptr.hello->htime), ptr.hello->will)); msg_data += sizeof(struct olsr_hello); msg_tlen -= sizeof(struct olsr_hello); while (msg_tlen >= sizeof(struct olsr_hello_link)) { int hello_len_valid = 0; /* * link-type. */ ND_TCHECK2(*msg_data, sizeof(struct olsr_hello_link)); ptr.hello_link = (const struct olsr_hello_link *)msg_data; hello_len = EXTRACT_16BITS(ptr.hello_link->len); link_type = OLSR_EXTRACT_LINK_TYPE(ptr.hello_link->link_code); neighbor_type = OLSR_EXTRACT_NEIGHBOR_TYPE(ptr.hello_link->link_code); if ((hello_len <= msg_tlen) && (hello_len >= sizeof(struct olsr_hello_link))) hello_len_valid = 1; ND_PRINT((ndo, "\n\t link-type %s, neighbor-type %s, len %u%s", tok2str(olsr_link_type_values, "Unknown", link_type), tok2str(olsr_neighbor_type_values, "Unknown", neighbor_type), hello_len, (hello_len_valid == 0) ? " (invalid)" : "")); if (hello_len_valid == 0) break; msg_data += sizeof(struct olsr_hello_link); msg_tlen -= sizeof(struct olsr_hello_link); hello_len -= sizeof(struct olsr_hello_link); ND_TCHECK2(*msg_data, hello_len); if (msg_type == OLSR_HELLO_MSG) { if (olsr_print_neighbor(ndo, msg_data, hello_len) == -1) goto trunc; } else { if (is_ipv6) { if (olsr_print_lq_neighbor6(ndo, msg_data, hello_len) == -1) goto trunc; } else { if (olsr_print_lq_neighbor4(ndo, msg_data, hello_len) == -1) goto trunc; } } msg_data += hello_len; msg_tlen -= hello_len; } break; case OLSR_TC_MSG: case OLSR_TC_LQ_MSG: if (msg_tlen < sizeof(struct olsr_tc)) goto trunc; ND_TCHECK2(*msg_data, sizeof(struct olsr_tc)); ptr.tc = (const struct olsr_tc *)msg_data; ND_PRINT((ndo, "\n\t advertised neighbor seq 0x%04x", EXTRACT_16BITS(ptr.tc->ans_seq))); msg_data += sizeof(struct olsr_tc); msg_tlen -= sizeof(struct olsr_tc); if (msg_type == OLSR_TC_MSG) { if (olsr_print_neighbor(ndo, msg_data, msg_tlen) == -1) goto trunc; } else { if (is_ipv6) { if (olsr_print_lq_neighbor6(ndo, msg_data, msg_tlen) == -1) goto trunc; } else { if (olsr_print_lq_neighbor4(ndo, msg_data, msg_tlen) == -1) goto trunc; } } break; case OLSR_MID_MSG: { size_t addr_size = sizeof(struct in_addr); if (is_ipv6) addr_size = sizeof(struct in6_addr); while (msg_tlen >= addr_size) { ND_TCHECK2(*msg_data, addr_size); ND_PRINT((ndo, "\n\t interface address %s", is_ipv6 ? 
ip6addr_string(ndo, msg_data) : ipaddr_string(ndo, msg_data))); msg_data += addr_size; msg_tlen -= addr_size; } break; } case OLSR_HNA_MSG: if (is_ipv6) { int i = 0; ND_PRINT((ndo, "\n\t Advertised networks (total %u)", (unsigned int) (msg_tlen / sizeof(struct olsr_hna6)))); while (msg_tlen >= sizeof(struct olsr_hna6)) { const struct olsr_hna6 *hna6; ND_TCHECK2(*msg_data, sizeof(struct olsr_hna6)); hna6 = (const struct olsr_hna6 *)msg_data; ND_PRINT((ndo, "\n\t #%i: %s/%u", i, ip6addr_string(ndo, hna6->network), mask62plen (hna6->mask))); msg_data += sizeof(struct olsr_hna6); msg_tlen -= sizeof(struct olsr_hna6); } } else { int col = 0; ND_PRINT((ndo, "\n\t Advertised networks (total %u)", (unsigned int) (msg_tlen / sizeof(struct olsr_hna4)))); while (msg_tlen >= sizeof(struct olsr_hna4)) { ND_TCHECK2(*msg_data, sizeof(struct olsr_hna4)); ptr.hna = (const struct olsr_hna4 *)msg_data; /* print 4 prefixes per line */ if (!ptr.hna->network[0] && !ptr.hna->network[1] && !ptr.hna->network[2] && !ptr.hna->network[3] && !ptr.hna->mask[GW_HNA_PAD] && ptr.hna->mask[GW_HNA_FLAGS]) { /* smart gateway */ ND_PRINT((ndo, "%sSmart-Gateway:%s%s%s%s%s %u/%u", col == 0 ? "\n\t " : ", ", /* indent */ /* sgw */ /* LINKSPEED */ (ptr.hna->mask[GW_HNA_FLAGS] & GW_HNA_FLAG_LINKSPEED) ? " LINKSPEED" : "", /* IPV4 */ (ptr.hna->mask[GW_HNA_FLAGS] & GW_HNA_FLAG_IPV4) ? " IPV4" : "", /* IPV4-NAT */ (ptr.hna->mask[GW_HNA_FLAGS] & GW_HNA_FLAG_IPV4_NAT) ? " IPV4-NAT" : "", /* IPV6 */ (ptr.hna->mask[GW_HNA_FLAGS] & GW_HNA_FLAG_IPV6) ? " IPV6" : "", /* IPv6PREFIX */ (ptr.hna->mask[GW_HNA_FLAGS] & GW_HNA_FLAG_IPV6PREFIX) ? " IPv6-PREFIX" : "", /* uplink */ (ptr.hna->mask[GW_HNA_FLAGS] & GW_HNA_FLAG_LINKSPEED) ? deserialize_gw_speed(ptr.hna->mask[GW_HNA_UPLINK]) : 0, /* downlink */ (ptr.hna->mask[GW_HNA_FLAGS] & GW_HNA_FLAG_LINKSPEED) ? deserialize_gw_speed(ptr.hna->mask[GW_HNA_DOWNLINK]) : 0 )); } else { /* normal route */ ND_PRINT((ndo, "%s%s/%u", col == 0 ? "\n\t " : ", ", ipaddr_string(ndo, ptr.hna->network), mask2plen(EXTRACT_32BITS(ptr.hna->mask)))); } msg_data += sizeof(struct olsr_hna4); msg_tlen -= sizeof(struct olsr_hna4); col = (col + 1) % 4; } } break; case OLSR_NAMESERVICE_MSG: { u_int name_entries; u_int addr_size; int name_entries_valid; u_int i; if (msg_tlen < 4) goto trunc; ND_TCHECK2(*msg_data, 4); name_entries = EXTRACT_16BITS(msg_data+2); addr_size = 4; if (is_ipv6) addr_size = 16; name_entries_valid = 0; if ((name_entries > 0) && ((name_entries * (4 + addr_size)) <= msg_tlen)) name_entries_valid = 1; ND_PRINT((ndo, "\n\t Version %u, Entries %u%s", EXTRACT_16BITS(msg_data), name_entries, (name_entries_valid == 0) ? " (invalid)" : "")); if (name_entries_valid == 0) break; msg_data += 4; msg_tlen -= 4; for (i = 0; i < name_entries; i++) { int name_entry_len_valid = 0; if (msg_tlen < 4) break; ND_TCHECK2(*msg_data, 4); name_entry_type = EXTRACT_16BITS(msg_data); name_entry_len = EXTRACT_16BITS(msg_data+2); msg_data += 4; msg_tlen -= 4; if ((name_entry_len > 0) && ((addr_size + name_entry_len) <= msg_tlen)) name_entry_len_valid = 1; ND_PRINT((ndo, "\n\t #%u: type %#06x, length %u%s", (unsigned int) i, name_entry_type, name_entry_len, (name_entry_len_valid == 0) ? 
" (invalid)" : "")); if (name_entry_len_valid == 0) break; /* 32-bit alignment */ name_entry_padding = 0; if (name_entry_len%4 != 0) name_entry_padding = 4-(name_entry_len%4); if (msg_tlen < addr_size + name_entry_len + name_entry_padding) goto trunc; ND_TCHECK2(*msg_data, addr_size + name_entry_len + name_entry_padding); if (is_ipv6) ND_PRINT((ndo, ", address %s, name \"", ip6addr_string(ndo, msg_data))); else ND_PRINT((ndo, ", address %s, name \"", ipaddr_string(ndo, msg_data))); (void)fn_printn(ndo, msg_data + addr_size, name_entry_len, NULL); ND_PRINT((ndo, "\"")); msg_data += addr_size + name_entry_len + name_entry_padding; msg_tlen -= addr_size + name_entry_len + name_entry_padding; } /* for (i = 0; i < name_entries; i++) */ break; } /* case OLSR_NAMESERVICE_MSG */ /* * FIXME those are the defined messages that lack a decoder * you are welcome to contribute code ;-) */ case OLSR_POWERINFO_MSG: default: print_unknown_data(ndo, msg_data, "\n\t ", msg_tlen); break; } /* switch (msg_type) */ tptr += msg_len; } /* while (tptr < (pptr+length)) */ return; trunc: ND_PRINT((ndo, "[|olsr]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 4 * End: */
null
null
null
null
124,175
52,991
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
52,991
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_BASE_DJB2_H_
#define MEDIA_BASE_DJB2_H_

#include <stddef.h>
#include <stdint.h>

#include "media/base/media_export.h"

// DJB2 is a hash algorithm with excellent distribution and speed
// on many different sets.
// It has marginally more collisions than FNV1, but makes up for it in
// performance.
// The return value is suitable for table lookups.
// For small fixed sizes (ie a pixel), it has low overhead and inlines well.
// For large data sets, it optimizes into assembly/simd and is appropriate
// for realtime applications.
// See Also:
//   http://www.cse.yorku.ca/~oz/hash.html

static const uint32_t kDJB2HashSeed = 5381u;

// These functions perform DJB2 hash. The simplest call is DJB2Hash() to
// generate the DJB2 hash of the given data:
//   uint32_t hash = DJB2Hash(data1, length1, kDJB2HashSeed);
//
// You can also compute the DJB2 hash of data incrementally by making multiple
// calls to DJB2Hash():
//   uint32_t hash_value = kDJB2HashSeed;  // Initial seed for DJB2.
//   for (size_t i = 0; i < copy_lines; ++i) {
//     hash_value = DJB2Hash(source, bytes_per_line, hash_value);
//     source += source_stride;
//   }

// For the given buffer of data, compute the DJB2 hash of
// the data. You can call this any number of times during the computation.
MEDIA_EXPORT uint32_t DJB2Hash(const void* buf, size_t len, uint32_t seed);

#endif  // MEDIA_BASE_DJB2_H_
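The header documents a seeded, chainable hash but the loop itself lives in the .cc file. Here is a minimal sketch of the classic DJB2 recurrence the header points to (hash = hash * 33 + byte, per the cse.yorku.ca page); treat it as an assumption about the actual media/base implementation, which is not shown here:

#include <stddef.h>
#include <stdint.h>

/* Classic DJB2: seed with 5381, then fold each byte in with
 * hash = hash * 33 + byte. Chaining works because the returned
 * hash is a valid seed for the next buffer, matching the
 * incremental usage documented in the header. */
static uint32_t djb2_hash(const void* buf, size_t len, uint32_t seed)
{
    const uint8_t* p = (const uint8_t*)buf;
    uint32_t hash = seed;
    while (len--)
        hash = hash * 33 + *p++;  /* i.e. ((hash << 5) + hash) + *p++ */
    return hash;
}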
null
null
null
null
49,854
938
null
train_val
1b0d3845b454eaaac0b2064c78926ca4d739a080
263,506
qemu
0
https://github.com/bonzini/qemu
2016-10-18 11:40:27+01:00
#include "io.h" int main(void) { long long rd, rs, rt; long long dspi, dspo; long long result; rs = 0x10FF01FF; rt = 0x10010001; dspi = 0x00002000; result = 0x21000201; __asm ("wrdsp %3\n" "addwc %0, %1, %2\n\t" : "=r"(rd) : "r"(rs), "r"(rt), "r"(dspi) ); if (rd != result) { printf("1 addwc wrong\n"); return -1; } rs = 0xFFFF1111; rt = 0x00020001; dspi = 0x00; result = 0x00011112; __asm ("wrdsp %3\n" "addwc %0, %1, %2\n\t" : "=r"(rd) : "r"(rs), "r"(rt), "r"(dspi) ); if (rd != result) { printf("2 addwc wrong\n"); return -1; } rs = 0x8FFF1111; rt = 0x80020001; dspi = 0x00; result = 0x10011112; __asm ("wrdsp %4\n" "addwc %0, %2, %3\n\t" "rddsp %1\n\t" : "=r"(rd), "=r"(dspo) : "r"(rs), "r"(rt), "r"(dspi) ); if ((rd != result) || (((dspo >> 20) & 0x01) != 1)) { printf("3 addwc wrong\n"); return -1; } return 0; }
null
null
null
null
121,630
11,291
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
176,286
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_2LEVEL_H
#define __UM_PGTABLE_2LEVEL_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE	1024
#define USER_PTRS_PER_PGD	((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD	1024
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
	       pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
	       pgd_val(e))

static inline int pgd_newpage(pgd_t pgd)	{ return 0; }
static inline void pgd_mkuptodate(pgd_t pgd)	{ }

#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

#define pte_pfn(x) phys_to_pfn(pte_val(x))
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))

#endif
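With PGDIR_SHIFT = 22 and 4 KiB pages, a 32-bit virtual address splits into a 10-bit PGD index, a 10-bit PTE index, and a 12-bit page offset; 1024 * 1024 * 4096 covers the full 4 GiB. A small sketch of that decomposition — the constants mirror the header, the function name is illustrative only:

#include <stdint.h>

#define PAGE_SHIFT   12
#define PGDIR_SHIFT  22
#define PTRS_PER_PTE 1024

/* Illustrative decode of a 32-bit virtual address under the
 * two-level layout above: top 10 bits select the PGD slot, the
 * next 10 the PTE slot, the low 12 the byte within the page. */
static void split_vaddr(uint32_t vaddr,
                        uint32_t *pgd_idx, uint32_t *pte_idx,
                        uint32_t *offset)
{
    *pgd_idx = vaddr >> PGDIR_SHIFT;                       /* 0..1023 */
    *pte_idx = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); /* 0..1023 */
    *offset  = vaddr & ((1u << PAGE_SHIFT) - 1);           /* 0..4095 */
}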
null
null
null
null
84,633
6,254
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
171,249
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef __USBAUDIO_QUIRKS_H
#define __USBAUDIO_QUIRKS_H

struct audioformat;
struct snd_usb_endpoint;
struct snd_usb_substream;

int snd_usb_create_quirk(struct snd_usb_audio *chip,
			 struct usb_interface *iface,
			 struct usb_driver *driver,
			 const struct snd_usb_audio_quirk *quirk);

int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip,
				  int iface,
				  int altno);

int snd_usb_apply_boot_quirk(struct usb_device *dev,
			     struct usb_interface *intf,
			     const struct snd_usb_audio_quirk *quirk,
			     unsigned int usb_id);

void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
			      struct audioformat *fmt);

bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip);

int snd_usb_is_big_endian_format(struct snd_usb_audio *chip,
				 struct audioformat *fp);

void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep);

void snd_usb_set_interface_quirk(struct usb_device *dev);
void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
			   __u8 request, __u8 requesttype, __u16 value,
			   __u16 index, void *data, __u16 size);

int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
			      struct audioformat *fmt);

u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
					struct audioformat *fp,
					unsigned int sample_bytes);

#endif /* __USBAUDIO_QUIRKS_H */
null
null
null
null
79,596
21,676
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
186,671
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "reg.h" #include "def.h" #include "phy.h" #include "rf.h" #include "dm.h" static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw); void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); switch (bandwidth) { case HT_CHANNEL_WIDTH_20: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff) | 0x0400); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; case HT_CHANNEL_WIDTH_20_40: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff)); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; default: pr_err("unknown bandwidth: %#X\n", bandwidth); break; } } void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 tx_agc[2] = {0, 0}, tmpval; bool turbo_scanoff = false; u8 idx1, idx2; u8 *ptr; if (rtlefuse->eeprom_regulatory != 0) turbo_scanoff = true; if (mac->act_scanning) { tx_agc[RF90_PATH_A] = 0x3f3f3f3f; tx_agc[RF90_PATH_B] = 0x3f3f3f3f; if (turbo_scanoff) { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); } } } else { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); } if (rtlefuse->eeprom_regulatory == 0) { tmpval = (rtlphy->mcs_offset[0][6]) + (rtlphy->mcs_offset[0][7] << 8); tx_agc[RF90_PATH_A] += tmpval; tmpval = (rtlphy->mcs_offset[0][14]) + (rtlphy->mcs_offset[0][15] << 24); tx_agc[RF90_PATH_B] += tmpval; } } for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { ptr = (u8 *) (&(tx_agc[idx1])); for (idx2 = 0; idx2 < 4; idx2++) { if (*ptr > RF6052_MAX_TX_PWR) *ptr = RF6052_MAX_TX_PWR; ptr++; } } tmpval = tx_agc[RF90_PATH_A] & 0xff; rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_A_CCK1_MCS32); tmpval = tx_agc[RF90_PATH_A] >> 8; tmpval = tmpval & 0xff00ffff; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = 
tx_agc[RF90_PATH_B] >> 24; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK1_55_MCS32); } static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel, u32 *ofdmbase, u32 *mcsbase) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 powerBase0, powerBase1; u8 legacy_pwrdiff, ht20_pwrdiff; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = ppowerlevel[i]; legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; powerBase0 = powerlevel[i] + legacy_pwrdiff; powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0; *(ofdmbase + i) = powerBase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [OFDM power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(ofdmbase + i)); } for (i = 0; i < 2; i++) { if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; powerlevel[i] += ht20_pwrdiff; } powerBase1 = powerlevel[i]; powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; *(mcsbase + i) = powerBase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(mcsbase + i)); } } static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, u32 *powerBase0, u32 *powerBase1, u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup = 0, pwr_diff_limit[4]; u32 writeVal, customer_limit, rf; for (rf = 0; rf < 2; rf++) { switch (rtlefuse->eeprom_regulatory) { case 0: chnlgroup = 0; writeVal = rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance, writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; case 1: if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Realtek regulatory, 40MHz, writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); } else { if (rtlphy->pwrgroup_cnt == 1) chnlgroup = 0; if (rtlphy->pwrgroup_cnt >= 3) { if (channel <= 3) chnlgroup = 0; else if (channel >= 4 && channel <= 9) chnlgroup = 1; else if (channel > 9) chnlgroup = 2; if (rtlphy->pwrgroup_cnt == 4) chnlgroup++; } writeVal = rtlphy->mcs_offset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); } break; case 2: writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory, writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; case 3: chnlgroup = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 40MHz rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', rtlefuse->pwrgroup_ht40[rf][channel - 1]); } else { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 20MHz rf(%c) = 0x%x\n", rf == 0 ? 
'A' : 'B', rtlefuse->pwrgroup_ht20[rf][channel - 1]); } for (i = 0; i < 4; i++) { pwr_diff_limit[i] = (u8) ((rtlphy->mcs_offset [chnlgroup][index + (rf ? 8 : 0)] & (0x7f << (i * 8))) >> (i * 8)); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (pwr_diff_limit[i] > rtlefuse-> pwrgroup_ht40[rf][channel - 1]) pwr_diff_limit[i] = rtlefuse->pwrgroup_ht40[rf] [channel - 1]; } else { if (pwr_diff_limit[i] > rtlefuse-> pwrgroup_ht20[rf][channel - 1]) pwr_diff_limit[i] = rtlefuse->pwrgroup_ht20[rf] [channel - 1]; } } customer_limit = (pwr_diff_limit[3] << 24) | (pwr_diff_limit[2] << 16) | (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', customer_limit); writeVal = customer_limit + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer, writeVal rf(%c)= 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; default: chnlgroup = 0; writeVal = rtlphy->mcs_offset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance, writeVal rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; } if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) writeVal = writeVal - 0x06060606; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT2) writeVal = writeVal - 0x0c0c0c0c; *(p_outwriteval + rf) = writeVal; } } static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, u8 index, u32 *pValue) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u16 regoffset_a[6] = { RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 }; u16 regoffset_b[6] = { RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 }; u8 i, rf, pwr_val[4]; u32 writeVal; u16 regoffset; for (rf = 0; rf < 2; rf++) { writeVal = pValue[rf]; for (i = 0; i < 4; i++) { pwr_val[i] = (u8) ((writeVal & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; else regoffset = regoffset_b[index]; rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Set 0x%x = %08x\n", regoffset, writeVal); if (((get_rf_type(rtlphy) == RF_2T2R) && (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_B_MCS15_MCS12)) || ((get_rf_type(rtlphy) != RF_2T2R) && (regoffset == RTXAGC_A_MCS07_MCS04 || regoffset == RTXAGC_B_MCS07_MCS04))) { writeVal = pwr_val[3]; if (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_A_MCS07_MCS04) regoffset = 0xc90; if (regoffset == RTXAGC_B_MCS15_MCS12 || regoffset == RTXAGC_B_MCS07_MCS04) regoffset = 0xc98; for (i = 0; i < 3; i++) { writeVal = (writeVal > 6) ? 
(writeVal - 6) : 0; rtl_write_byte(rtlpriv, (u32) (regoffset + i), (u8) writeVal); } } } } void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel) { u32 writeVal[2], powerBase0[2], powerBase1[2]; u8 index; rtl92c_phy_get_power_base(hw, ppowerlevel, channel, &powerBase0[0], &powerBase1[0]); for (index = 0; index < 6; index++) { _rtl92c_get_txpower_writeval_by_regulatory(hw, channel, index, &powerBase0[0], &powerBase1[0], &writeVal[0]); _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]); } } bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); if (rtlphy->rf_type == RF_1T1R) rtlphy->num_total_rfpath = 1; else rtlphy->num_total_rfpath = 2; return _rtl92ce_phy_rf6052_config_parafile(hw); } static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u32 u4_regvalue = 0; u8 rfpath; bool rtstatus = true; struct bb_reg_def *pphyreg; for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { pphyreg = &rtlphy->phyreg_def[rfpath]; switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV); break; case RF90_PATH_B: case RF90_PATH_D: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); break; } rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); udelay(1); rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); udelay(1); rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDREAALENGTH, 0x0); udelay(1); rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); udelay(1); switch (rfpath) { case RF90_PATH_A: rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, (enum radio_path)rfpath); break; case RF90_PATH_B: rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, (enum radio_path)rfpath); break; case RF90_PATH_C: break; case RF90_PATH_D: break; } switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, u4_regvalue); break; case RF90_PATH_B: case RF90_PATH_D: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, u4_regvalue); break; } if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio[%d] Fail!!\n", rfpath); return false; } } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n"); return rtstatus; }
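Both the CCK and OFDM paths in this file reuse one idiom: a 32-bit TXAGC register image packs four per-rate power indexes, which are unpacked bytewise, clamped to RF6052_MAX_TX_PWR, and repacked before the register write. A standalone sketch of that saturate-per-byte step; the 0x7f extraction mask comes from _rtl92c_write_ofdm_power_reg() above, while the exact value of RF6052_MAX_TX_PWR lives in the driver's rf.h and is an assumption here:

#include <stdint.h>

#define RF6052_MAX_TX_PWR 0x3F  /* assumed ceiling; defined in rf.h */

/* Clamp each of the four power bytes packed in a 32-bit TXAGC value,
 * mirroring the unpack/clamp/repack loop in the OFDM write path. */
static uint32_t clamp_txagc(uint32_t val)
{
    uint8_t b[4];
    int i;

    for (i = 0; i < 4; i++) {
        b[i] = (uint8_t)((val >> (i * 8)) & 0x7f);
        if (b[i] > RF6052_MAX_TX_PWR)
            b[i] = RF6052_MAX_TX_PWR;
    }
    return ((uint32_t)b[3] << 24) | ((uint32_t)b[2] << 16) |
           ((uint32_t)b[1] << 8) | b[0];
}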
null
null
null
null
95,018
37,928
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
37,928
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
/* * Copyright (C) 2012 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_IMAGE_DECODING_STORE_H_ #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_GRAPHICS_IMAGE_DECODING_STORE_H_ #include <memory> #include <utility> #include "SkSize.h" #include "SkTypes.h" #include "base/memory/ptr_util.h" #include "third_party/blink/renderer/platform/graphics/image_frame_generator.h" #include "third_party/blink/renderer/platform/graphics/skia/sk_size_hash.h" #include "third_party/blink/renderer/platform/image-decoders/image_decoder.h" #include "third_party/blink/renderer/platform/platform_export.h" #include "third_party/blink/renderer/platform/wtf/doubly_linked_list.h" #include "third_party/blink/renderer/platform/wtf/hash_set.h" #include "third_party/blink/renderer/platform/wtf/threading_primitives.h" #include "third_party/blink/renderer/platform/wtf/vector.h" namespace blink { // Decoder cache entry is identified by: // 1. Pointer to ImageFrameGenerator. // 2. Size of the image. // 3. ImageDecoder::AlphaOption struct DecoderCacheKey { const blink::ImageFrameGenerator* gen_; SkISize size_; blink::ImageDecoder::AlphaOption alpha_option_; DecoderCacheKey() : gen_(nullptr), size_(SkISize::Make(0, 0)), alpha_option_(static_cast<blink::ImageDecoder::AlphaOption>(0)) {} }; static inline bool operator==(const DecoderCacheKey& a, const DecoderCacheKey& b) { return a.gen_ == b.gen_ && a.size_ == b.size_ && a.alpha_option_ == b.alpha_option_; } static inline bool operator!=(const DecoderCacheKey& a, const DecoderCacheKey& b) { return !(a == b); } // Base class for all cache entries. 
class CacheEntry : public DoublyLinkedListNode<CacheEntry> { USING_FAST_MALLOC(CacheEntry); WTF_MAKE_NONCOPYABLE(CacheEntry); friend class WTF::DoublyLinkedListNode<CacheEntry>; public: enum CacheType { kTypeDecoder, }; CacheEntry(const ImageFrameGenerator* generator, int use_count) : generator_(generator), use_count_(use_count), prev_(nullptr), next_(nullptr) {} virtual ~CacheEntry() { DCHECK(!use_count_); } const ImageFrameGenerator* Generator() const { return generator_; } int UseCount() const { return use_count_; } void IncrementUseCount() { ++use_count_; } void DecrementUseCount() { --use_count_; DCHECK_GE(use_count_, 0); } // FIXME: getSafeSize() returns the size in bytes truncated to a 32-bit // integer. Find a way to get the size in 64-bits. virtual size_t MemoryUsageInBytes() const = 0; virtual CacheType GetType() const = 0; protected: const ImageFrameGenerator* generator_; int use_count_; private: CacheEntry* prev_; CacheEntry* next_; }; class DecoderCacheEntry final : public CacheEntry { public: static std::unique_ptr<DecoderCacheEntry> Create( const ImageFrameGenerator* generator, std::unique_ptr<ImageDecoder> decoder) { return base::WrapUnique( new DecoderCacheEntry(generator, 0, std::move(decoder))); } size_t MemoryUsageInBytes() const override { return size_.width() * size_.height() * 4; } CacheType GetType() const override { return kTypeDecoder; } static DecoderCacheKey MakeCacheKey(const ImageFrameGenerator* generator, const SkISize& size, ImageDecoder::AlphaOption alpha_option) { DecoderCacheKey key; key.gen_ = generator; key.size_ = size; key.alpha_option_ = alpha_option; return key; } static DecoderCacheKey MakeCacheKey(const ImageFrameGenerator* generator, const ImageDecoder* decoder) { return MakeCacheKey(generator, SkISize::Make(decoder->DecodedSize().Width(), decoder->DecodedSize().Height()), decoder->GetAlphaOption()); } DecoderCacheKey CacheKey() const { return MakeCacheKey(generator_, size_, alpha_option_); } ImageDecoder* CachedDecoder() const { return cached_decoder_.get(); } private: DecoderCacheEntry(const ImageFrameGenerator* generator, int count, std::unique_ptr<ImageDecoder> decoder) : CacheEntry(generator, count), cached_decoder_(std::move(decoder)), size_(SkISize::Make(cached_decoder_->DecodedSize().Width(), cached_decoder_->DecodedSize().Height())), alpha_option_(cached_decoder_->GetAlphaOption()) {} std::unique_ptr<ImageDecoder> cached_decoder_; SkISize size_; ImageDecoder::AlphaOption alpha_option_; }; } // namespace blink namespace WTF { template <> struct DefaultHash<blink::DecoderCacheKey> { STATIC_ONLY(DefaultHash); struct Hash { STATIC_ONLY(Hash); static unsigned GetHash(const blink::DecoderCacheKey& p) { return HashInts( HashInts(DefaultHash<blink::ImageFrameGenerator*>::Hash::GetHash( const_cast<blink::ImageFrameGenerator*>(p.gen_)), DefaultHash<SkISize>::Hash::GetHash(p.size_)), DefaultHash<uint8_t>::Hash::GetHash( static_cast<uint8_t>(p.alpha_option_))); } static bool Equal(const blink::DecoderCacheKey& a, const blink::DecoderCacheKey& b) { return a.gen_ == b.gen_ && a.size_ == b.size_ && a.alpha_option_ == b.alpha_option_; } static const bool safe_to_compare_to_empty_or_deleted = true; }; }; template <> struct HashTraits<blink::DecoderCacheKey> : GenericHashTraits<blink::DecoderCacheKey> { STATIC_ONLY(HashTraits); static const bool kEmptyValueIsZero = true; static blink::DecoderCacheKey EmptyValue() { return blink::DecoderCacheEntry::MakeCacheKey( nullptr, SkISize::Make(0, 0), static_cast<blink::ImageDecoder::AlphaOption>(0)); } static void 
ConstructDeletedValue(blink::DecoderCacheKey& slot, bool) { slot = blink::DecoderCacheEntry::MakeCacheKey( nullptr, SkISize::Make(-1, -1), static_cast<blink::ImageDecoder::AlphaOption>(0)); } static bool IsDeletedValue(const blink::DecoderCacheKey& value) { return value.size_ == SkISize::Make(-1, -1); } }; } // namespace WTF namespace blink { // FUNCTION // // ImageDecodingStore is a class used to manage cached decoder objects. // // EXTERNAL OBJECTS // // ImageDecoder // A decoder object. It is used to decode raw data into bitmap images. // // ImageFrameGenerator // This is a direct user of this cache. Responsible for generating bitmap // images using an ImageDecoder. It contains encoded image data and is used // to represent one image file. It is used to index image and decoder // objects in the cache. // // THREAD SAFETY // // All public methods can be used on any thread. class PLATFORM_EXPORT ImageDecodingStore final { USING_FAST_MALLOC(ImageDecodingStore); WTF_MAKE_NONCOPYABLE(ImageDecodingStore); public: static std::unique_ptr<ImageDecodingStore> Create() { return base::WrapUnique(new ImageDecodingStore); } ~ImageDecodingStore(); static ImageDecodingStore& Instance(); // Accesses a cached decoder object. A decoder is indexed by origin // (ImageFrameGenerator) and scaled size. Returns true if the cached object // is found. bool LockDecoder(const ImageFrameGenerator*, const SkISize& scaled_size, ImageDecoder::AlphaOption, ImageDecoder**); void UnlockDecoder(const ImageFrameGenerator*, const ImageDecoder*); void InsertDecoder(const ImageFrameGenerator*, std::unique_ptr<ImageDecoder>); void RemoveDecoder(const ImageFrameGenerator*, const ImageDecoder*); // Remove all cache entries indexed by ImageFrameGenerator. void RemoveCacheIndexedByGenerator(const ImageFrameGenerator*); void Clear(); void SetCacheLimitInBytes(size_t); size_t MemoryUsageInBytes(); int CacheEntries(); int DecoderCacheEntries(); private: ImageDecodingStore(); void Prune(); // These helper methods are called while m_mutex is locked. template <class T, class U, class V> void InsertCacheInternal(std::unique_ptr<T> cache_entry, U* cache_map, V* identifier_map); // Helper method to remove a cache entry. Ownership is transferred to // deletionList. Use of Vector<> is handy when removing multiple entries. template <class T, class U, class V> void RemoveFromCacheInternal( const T* cache_entry, U* cache_map, V* identifier_map, Vector<std::unique_ptr<CacheEntry>>* deletion_list); // Helper method to remove a cache entry. Uses the templated version base on // the type of cache entry. void RemoveFromCacheInternal( const CacheEntry*, Vector<std::unique_ptr<CacheEntry>>* deletion_list); // Helper method to remove all cache entries associated with an // ImageFrameGenerator. Ownership of the cache entries is transferred to // |deletionList|. template <class U, class V> void RemoveCacheIndexedByGeneratorInternal( U* cache_map, V* identifier_map, const ImageFrameGenerator*, Vector<std::unique_ptr<CacheEntry>>* deletion_list); // Helper method to remove cache entry pointers from the LRU list. void RemoveFromCacheListInternal( const Vector<std::unique_ptr<CacheEntry>>& deletion_list); // A doubly linked list that maintains usage history of cache entries. // This is used for eviction of old entries. // Head of this list is the least recently used cache entry. // Tail of this list is the most recently used cache entry. DoublyLinkedList<CacheEntry> ordered_cache_list_; // A lookup table for all decoder cache objects. 
Owns all decoder cache // objects. typedef HashMap<DecoderCacheKey, std::unique_ptr<DecoderCacheEntry>> DecoderCacheMap; DecoderCacheMap decoder_cache_map_; // A lookup table to map ImageFrameGenerator to all associated // decoder cache keys. typedef HashSet<DecoderCacheKey> DecoderCacheKeySet; typedef HashMap<const ImageFrameGenerator*, DecoderCacheKeySet> DecoderCacheKeyMap; DecoderCacheKeyMap decoder_cache_key_map_; size_t heap_limit_in_bytes_; size_t heap_memory_usage_in_bytes_; // Protect concurrent access to these members: // m_orderedCacheList // m_decoderCacheMap and all CacheEntrys stored in it // m_decoderCacheKeyMap // m_heapLimitInBytes // m_heapMemoryUsageInBytes // This mutex also protects calls to underlying skBitmap's // lockPixels()/unlockPixels() as they are not threadsafe. Mutex mutex_; }; } // namespace blink #endif
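ImageDecodingStore pairs an ownership map (the HashMap holding unique_ptrs) with a usage-ordered doubly linked list whose head is the least recently used entry; hits move entries toward the tail, and Prune() evicts from the head. A language-agnostic sketch of that LRU pairing in C with illustrative names — the real class additionally tracks a byte budget, per-generator key sets, and a lock count that makes entries ineligible for eviction:

#include <stddef.h>

/* Minimal LRU skeleton: each entry lives in a lookup table (which
 * owns it) and in this usage-ordered list. Head = least recently
 * used, tail = most recently used. */
struct entry {
    int key;
    struct entry *prev, *next;   /* LRU list links */
};

struct lru {
    struct entry *head, *tail;
};

static void lru_unlink(struct lru *l, struct entry *e)
{
    if (e->prev) e->prev->next = e->next; else l->head = e->next;
    if (e->next) e->next->prev = e->prev; else l->tail = e->prev;
    e->prev = e->next = NULL;
}

static void lru_push_tail(struct lru *l, struct entry *e)
{
    e->prev = l->tail;
    e->next = NULL;
    if (l->tail) l->tail->next = e; else l->head = e;
    l->tail = e;
}

/* On each cache hit, bump the entry to the tail ("most recent");
 * eviction then simply pops from the head, skipping entries that
 * are still locked (use_count > 0) in the real store. */
static void lru_touch(struct lru *l, struct entry *e)
{
    lru_unlink(l, e);
    lru_push_tail(l, e);
}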
null
null
null
null
34,791
32,449
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
32,449
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/core/testing/sim/sim_web_frame_client.h"

#include "third_party/blink/public/web/web_console_message.h"
#include "third_party/blink/renderer/core/testing/sim/sim_test.h"

namespace blink {

SimWebFrameClient::SimWebFrameClient(SimTest& test) : test_(&test) {}

void SimWebFrameClient::DidAddMessageToConsole(const WebConsoleMessage& message,
                                               const WebString& source_name,
                                               unsigned source_line,
                                               const WebString& stack_trace) {
  test_->AddConsoleMessage(message.text);
}

}  // namespace blink
null
null
null
null
29,312
72,244
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
72,244
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef SERVICES_NETWORK_PUBLIC_CPP_MUTABLE_NETWORK_TRAFFIC_ANNOTATION_TAG_MOJOM_TRAITS_H_
#define SERVICES_NETWORK_PUBLIC_CPP_MUTABLE_NETWORK_TRAFFIC_ANNOTATION_TAG_MOJOM_TRAITS_H_

#include "net/traffic_annotation/network_traffic_annotation.h"
#include "services/network/public/mojom/mutable_network_traffic_annotation_tag.mojom.h"

namespace mojo {

template <>
struct StructTraits<network::mojom::MutableNetworkTrafficAnnotationTagDataView,
                    net::MutableNetworkTrafficAnnotationTag> {
  static int32_t unique_id_hash_code(
      const net::MutableNetworkTrafficAnnotationTag& traffic_annotation) {
    return traffic_annotation.unique_id_hash_code;
  }
  static bool Read(
      network::mojom::MutableNetworkTrafficAnnotationTagDataView data,
      net::MutableNetworkTrafficAnnotationTag* out) {
    out->unique_id_hash_code = data.unique_id_hash_code();
    return true;
  }
};

}  // namespace mojo

#endif  // SERVICES_NETWORK_PUBLIC_CPP_MUTABLE_NETWORK_TRAFFIC_ANNOTATION_TAG_MOJOM_TRAITS_H_
null
null
null
null
69,107
3,263
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
3,263
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "storage/browser/fileapi/file_system_options.h"

namespace storage {

FileSystemOptions::FileSystemOptions(
    ProfileMode profile_mode,
    const std::vector<std::string>& additional_allowed_schemes,
    leveldb::Env* env_override)
    : profile_mode_(profile_mode),
      additional_allowed_schemes_(additional_allowed_schemes),
      env_override_(env_override) {
}

FileSystemOptions::FileSystemOptions(const FileSystemOptions& other) = default;

FileSystemOptions::~FileSystemOptions() = default;

}  // namespace storage
null
null
null
null
126
58,435
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
58,435
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_IMPORTER_PROFILE_WRITER_H_ #define CHROME_BROWSER_IMPORTER_PROFILE_WRITER_H_ #include <vector> #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/strings/string16.h" #include "base/time/time.h" #include "build/build_config.h" #include "components/favicon_base/favicon_usage_data.h" #include "components/history/core/browser/history_types.h" #include "components/search_engines/template_url_service.h" #include "url/gurl.h" struct ImportedBookmarkEntry; class Profile; namespace autofill { struct PasswordForm; class AutofillEntry; } #if defined(OS_WIN) struct IE7PasswordInfo; #endif // ProfileWriter encapsulates profile for writing entries into it. // This object must be invoked on UI thread. class ProfileWriter : public base::RefCountedThreadSafe<ProfileWriter> { public: explicit ProfileWriter(Profile* profile); // These functions return true if the corresponding model has been loaded. // If the models haven't been loaded, the importer waits to run until they've // completed. virtual bool BookmarkModelIsLoaded() const; virtual bool TemplateURLServiceIsLoaded() const; // Helper methods for adding data to local stores. virtual void AddPasswordForm(const autofill::PasswordForm& form); #if defined(OS_WIN) virtual void AddIE7PasswordInfo(const IE7PasswordInfo& info); #endif virtual void AddHistoryPage(const history::URLRows& page, history::VisitSource visit_source); virtual void AddHomepage(const GURL& homepage); // Adds the |bookmarks| to the bookmark model. // // (a) If the bookmarks bar is empty: // (i) If |bookmarks| includes at least one bookmark that was originally // located in a toolbar, all such bookmarks are imported directly to // the toolbar; any other bookmarks are imported to a subfolder in // the toolbar. // (i) If |bookmarks| includes no bookmarks that were originally located // in a toolbar, all bookmarks are imported directly to the toolbar. // (b) If the bookmarks bar is not empty, all bookmarks are imported to a // subfolder in the toolbar. // // In either case, if a subfolder is created, the name will be the value of // |top_level_folder_name|, unless a folder with this name already exists. // If a folder with this name already exists, then the name is uniquified. // For example, if |first_folder_name| is 'Imported from IE' and a folder with // the name 'Imported from IE' already exists in the bookmarks toolbar, then // we will instead create a subfolder named 'Imported from IE (1)'. virtual void AddBookmarks( const std::vector<ImportedBookmarkEntry>& bookmarks, const base::string16& top_level_folder_name); virtual void AddFavicons(const favicon_base::FaviconUsageDataList& favicons); // Adds the TemplateURLs in |template_urls| to the local store. // Some TemplateURLs in |template_urls| may conflict (same keyword or same // host name in the URL) with existing TemplateURLs in the local store, in // which case the existing ones take precedence and the duplicates in // |template_urls| are deleted. If |unique_on_host_and_path| is true, a // TemplateURL is only added if there is not an existing TemplateURL that has // a replaceable search url with the same host+path combination. virtual void AddKeywords( TemplateURLService::OwnedTemplateURLVector template_urls, bool unique_on_host_and_path); // Adds the imported autofill entries to the autofill database. 
virtual void AddAutofillFormDataEntries( const std::vector<autofill::AutofillEntry>& autofill_entries); protected: friend class base::RefCountedThreadSafe<ProfileWriter>; virtual ~ProfileWriter(); private: Profile* const profile_; DISALLOW_COPY_AND_ASSIGN(ProfileWriter); }; #endif // CHROME_BROWSER_IMPORTER_PROFILE_WRITER_H_
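The AddBookmarks() contract above spells out a concrete naming rule: the import folder keeps top_level_folder_name unless a folder with that name already exists, in which case a " (N)" suffix is appended until the name is unique ('Imported from IE' becomes 'Imported from IE (1)'). A sketch of just that uniquification step; name_exists() is a hypothetical stand-in for the BookmarkModel lookup the real code performs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical predicate: is a folder with this name already present
 * under the bookmarks bar? The real check queries the BookmarkModel. */
extern bool name_exists(const char *name);

/* Append " (1)", " (2)", ... until the candidate is free, mirroring
 * the behavior documented on AddBookmarks(). */
static void uniquify(const char *base, char *out, size_t out_len)
{
    int n = 0;
    snprintf(out, out_len, "%s", base);
    while (name_exists(out))
        snprintf(out, out_len, "%s (%d)", base, ++n);
}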
null
null
null
null
55,298
35,205
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
200,200
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * vs6624.c ST VS6624 CMOS image sensor driver * * Copyright (c) 2011 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> #include <media/v4l2-image-sizes.h> #include "vs6624_regs.h" #define MAX_FRAME_RATE 30 struct vs6624 { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; struct v4l2_fract frame_rate; struct v4l2_mbus_framefmt fmt; unsigned ce_pin; }; static const struct vs6624_format { u32 mbus_code; enum v4l2_colorspace colorspace; } vs6624_formats[] = { { .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, }, { .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, }, { .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE, .colorspace = V4L2_COLORSPACE_SRGB, }, }; static struct v4l2_mbus_framefmt vs6624_default_fmt = { .width = VGA_WIDTH, .height = VGA_HEIGHT, .code = MEDIA_BUS_FMT_UYVY8_2X8, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_JPEG, }; static const u16 vs6624_p1[] = { 0x8104, 0x03, 0x8105, 0x01, 0xc900, 0x03, 0xc904, 0x47, 0xc905, 0x10, 0xc906, 0x80, 0xc907, 0x3a, 0x903a, 0x02, 0x903b, 0x47, 0x903c, 0x15, 0xc908, 0x31, 0xc909, 0xdc, 0xc90a, 0x80, 0xc90b, 0x44, 0x9044, 0x02, 0x9045, 0x31, 0x9046, 0xe2, 0xc90c, 0x07, 0xc90d, 0xe0, 0xc90e, 0x80, 0xc90f, 0x47, 0x9047, 0x90, 0x9048, 0x83, 0x9049, 0x81, 0x904a, 0xe0, 0x904b, 0x60, 0x904c, 0x08, 0x904d, 0x90, 0x904e, 0xc0, 0x904f, 0x43, 0x9050, 0x74, 0x9051, 0x01, 0x9052, 0xf0, 0x9053, 0x80, 0x9054, 0x05, 0x9055, 0xE4, 0x9056, 0x90, 0x9057, 0xc0, 0x9058, 0x43, 0x9059, 0xf0, 0x905a, 0x02, 0x905b, 0x07, 0x905c, 0xec, 0xc910, 0x5d, 0xc911, 0xca, 0xc912, 0x80, 0xc913, 0x5d, 0x905d, 0xa3, 0x905e, 0x04, 0x905f, 0xf0, 0x9060, 0xa3, 0x9061, 0x04, 0x9062, 0xf0, 0x9063, 0x22, 0xc914, 0x72, 0xc915, 0x92, 0xc916, 0x80, 0xc917, 0x64, 0x9064, 0x74, 0x9065, 0x01, 0x9066, 0x02, 0x9067, 0x72, 0x9068, 0x95, 0xc918, 0x47, 0xc919, 0xf2, 0xc91a, 0x81, 0xc91b, 0x69, 0x9169, 0x74, 0x916a, 0x02, 0x916b, 0xf0, 0x916c, 0xec, 0x916d, 0xb4, 0x916e, 0x10, 0x916f, 0x0a, 0x9170, 0x90, 0x9171, 0x80, 0x9172, 0x16, 0x9173, 0xe0, 0x9174, 0x70, 0x9175, 0x04, 0x9176, 0x90, 0x9177, 0xd3, 0x9178, 0xc4, 0x9179, 0xf0, 0x917a, 0x22, 0xc91c, 0x0a, 0xc91d, 0xbe, 0xc91e, 0x80, 0xc91f, 0x73, 0x9073, 0xfc, 0x9074, 0xa3, 0x9075, 0xe0, 0x9076, 0xf5, 0x9077, 0x82, 0x9078, 0x8c, 0x9079, 0x83, 0x907a, 0xa3, 0x907b, 0xa3, 0x907c, 0xe0, 0x907d, 0xfc, 0x907e, 0xa3, 0x907f, 0xe0, 0x9080, 0xc3, 0x9081, 0x9f, 0x9082, 0xff, 0x9083, 0xec, 0x9084, 0x9e, 0x9085, 0xfe, 0x9086, 0x02, 0x9087, 0x0a, 0x9088, 0xea, 0xc920, 0x47, 0xc921, 0x38, 0xc922, 0x80, 0xc923, 0x89, 0x9089, 0xec, 0x908a, 0xd3, 0x908b, 0x94, 0x908c, 0x20, 0x908d, 0x40, 0x908e, 0x01, 0x908f, 0x1c, 0x9090, 0x90, 0x9091, 0xd3, 0x9092, 0xd4, 0x9093, 0xec, 0x9094, 0xf0, 0x9095, 0x02, 0x9096, 0x47, 0x9097, 0x3d, 0xc924, 0x45, 0xc925, 0xca, 0xc926, 0x80, 0xc927, 0x98, 0x9098, 0x12, 
0x9099, 0x77, 0x909a, 0xd6, 0x909b, 0x02, 0x909c, 0x45, 0x909d, 0xcd, 0xc928, 0x20, 0xc929, 0xd5, 0xc92a, 0x80, 0xc92b, 0x9e, 0x909e, 0x90, 0x909f, 0x82, 0x90a0, 0x18, 0x90a1, 0xe0, 0x90a2, 0xb4, 0x90a3, 0x03, 0x90a4, 0x0e, 0x90a5, 0x90, 0x90a6, 0x83, 0x90a7, 0xbf, 0x90a8, 0xe0, 0x90a9, 0x60, 0x90aa, 0x08, 0x90ab, 0x90, 0x90ac, 0x81, 0x90ad, 0xfc, 0x90ae, 0xe0, 0x90af, 0xff, 0x90b0, 0xc3, 0x90b1, 0x13, 0x90b2, 0xf0, 0x90b3, 0x90, 0x90b4, 0x81, 0x90b5, 0xfc, 0x90b6, 0xe0, 0x90b7, 0xff, 0x90b8, 0x02, 0x90b9, 0x20, 0x90ba, 0xda, 0xc92c, 0x70, 0xc92d, 0xbc, 0xc92e, 0x80, 0xc92f, 0xbb, 0x90bb, 0x90, 0x90bc, 0x82, 0x90bd, 0x18, 0x90be, 0xe0, 0x90bf, 0xb4, 0x90c0, 0x03, 0x90c1, 0x06, 0x90c2, 0x90, 0x90c3, 0xc1, 0x90c4, 0x06, 0x90c5, 0x74, 0x90c6, 0x05, 0x90c7, 0xf0, 0x90c8, 0x90, 0x90c9, 0xd3, 0x90ca, 0xa0, 0x90cb, 0x02, 0x90cc, 0x70, 0x90cd, 0xbf, 0xc930, 0x72, 0xc931, 0x21, 0xc932, 0x81, 0xc933, 0x3b, 0x913b, 0x7d, 0x913c, 0x02, 0x913d, 0x7f, 0x913e, 0x7b, 0x913f, 0x02, 0x9140, 0x72, 0x9141, 0x25, 0xc934, 0x28, 0xc935, 0xae, 0xc936, 0x80, 0xc937, 0xd2, 0x90d2, 0xf0, 0x90d3, 0x90, 0x90d4, 0xd2, 0x90d5, 0x0a, 0x90d6, 0x02, 0x90d7, 0x28, 0x90d8, 0xb4, 0xc938, 0x28, 0xc939, 0xb1, 0xc93a, 0x80, 0xc93b, 0xd9, 0x90d9, 0x90, 0x90da, 0x83, 0x90db, 0xba, 0x90dc, 0xe0, 0x90dd, 0xff, 0x90de, 0x90, 0x90df, 0xd2, 0x90e0, 0x08, 0x90e1, 0xe0, 0x90e2, 0xe4, 0x90e3, 0xef, 0x90e4, 0xf0, 0x90e5, 0xa3, 0x90e6, 0xe0, 0x90e7, 0x74, 0x90e8, 0xff, 0x90e9, 0xf0, 0x90ea, 0x90, 0x90eb, 0xd2, 0x90ec, 0x0a, 0x90ed, 0x02, 0x90ee, 0x28, 0x90ef, 0xb4, 0xc93c, 0x29, 0xc93d, 0x79, 0xc93e, 0x80, 0xc93f, 0xf0, 0x90f0, 0xf0, 0x90f1, 0x90, 0x90f2, 0xd2, 0x90f3, 0x0e, 0x90f4, 0x02, 0x90f5, 0x29, 0x90f6, 0x7f, 0xc940, 0x29, 0xc941, 0x7c, 0xc942, 0x80, 0xc943, 0xf7, 0x90f7, 0x90, 0x90f8, 0x83, 0x90f9, 0xba, 0x90fa, 0xe0, 0x90fb, 0xff, 0x90fc, 0x90, 0x90fd, 0xd2, 0x90fe, 0x0c, 0x90ff, 0xe0, 0x9100, 0xe4, 0x9101, 0xef, 0x9102, 0xf0, 0x9103, 0xa3, 0x9104, 0xe0, 0x9105, 0x74, 0x9106, 0xff, 0x9107, 0xf0, 0x9108, 0x90, 0x9109, 0xd2, 0x910a, 0x0e, 0x910b, 0x02, 0x910c, 0x29, 0x910d, 0x7f, 0xc944, 0x2a, 0xc945, 0x42, 0xc946, 0x81, 0xc947, 0x0e, 0x910e, 0xf0, 0x910f, 0x90, 0x9110, 0xd2, 0x9111, 0x12, 0x9112, 0x02, 0x9113, 0x2a, 0x9114, 0x48, 0xc948, 0x2a, 0xc949, 0x45, 0xc94a, 0x81, 0xc94b, 0x15, 0x9115, 0x90, 0x9116, 0x83, 0x9117, 0xba, 0x9118, 0xe0, 0x9119, 0xff, 0x911a, 0x90, 0x911b, 0xd2, 0x911c, 0x10, 0x911d, 0xe0, 0x911e, 0xe4, 0x911f, 0xef, 0x9120, 0xf0, 0x9121, 0xa3, 0x9122, 0xe0, 0x9123, 0x74, 0x9124, 0xff, 0x9125, 0xf0, 0x9126, 0x90, 0x9127, 0xd2, 0x9128, 0x12, 0x9129, 0x02, 0x912a, 0x2a, 0x912b, 0x48, 0xc900, 0x01, 0x0000, 0x00, }; static const u16 vs6624_p2[] = { 0x806f, 0x01, 0x058c, 0x01, 0x0000, 0x00, }; static const u16 vs6624_run_setup[] = { 0x1d18, 0x00, /* Enableconstrainedwhitebalance */ VS6624_PEAK_MIN_OUT_G_MSB, 0x3c, /* Damper PeakGain Output MSB */ VS6624_PEAK_MIN_OUT_G_LSB, 0x66, /* Damper PeakGain Output LSB */ VS6624_CM_LOW_THR_MSB, 0x65, /* Damper Low MSB */ VS6624_CM_LOW_THR_LSB, 0xd1, /* Damper Low LSB */ VS6624_CM_HIGH_THR_MSB, 0x66, /* Damper High MSB */ VS6624_CM_HIGH_THR_LSB, 0x62, /* Damper High LSB */ VS6624_CM_MIN_OUT_MSB, 0x00, /* Damper Min output MSB */ VS6624_CM_MIN_OUT_LSB, 0x00, /* Damper Min output LSB */ VS6624_NORA_DISABLE, 0x00, /* Nora fDisable */ VS6624_NORA_USAGE, 0x04, /* Nora usage */ VS6624_NORA_LOW_THR_MSB, 0x63, /* Damper Low MSB Changed 0x63 to 0x65 */ VS6624_NORA_LOW_THR_LSB, 0xd1, /* Damper Low LSB */ VS6624_NORA_HIGH_THR_MSB, 0x68, /* Damper High MSB */ VS6624_NORA_HIGH_THR_LSB, 0xdd, 
/* Damper High LSB */ VS6624_NORA_MIN_OUT_MSB, 0x3a, /* Damper Min output MSB */ VS6624_NORA_MIN_OUT_LSB, 0x00, /* Damper Min output LSB */ VS6624_F2B_DISABLE, 0x00, /* Disable */ 0x1d8a, 0x30, /* MAXWeightHigh */ 0x1d91, 0x62, /* fpDamperLowThresholdHigh MSB */ 0x1d92, 0x4a, /* fpDamperLowThresholdHigh LSB */ 0x1d95, 0x65, /* fpDamperHighThresholdHigh MSB */ 0x1d96, 0x0e, /* fpDamperHighThresholdHigh LSB */ 0x1da1, 0x3a, /* fpMinimumDamperOutputLow MSB */ 0x1da2, 0xb8, /* fpMinimumDamperOutputLow LSB */ 0x1e08, 0x06, /* MAXWeightLow */ 0x1e0a, 0x0a, /* MAXWeightHigh */ 0x1601, 0x3a, /* Red A MSB */ 0x1602, 0x14, /* Red A LSB */ 0x1605, 0x3b, /* Blue A MSB */ 0x1606, 0x85, /* Blue A LSB */ 0x1609, 0x3b, /* Red B MSB */ 0x160a, 0x85, /* Red B LSB */ 0x160d, 0x3a, /* Blue B MSB */ 0x160e, 0x14, /* Blue B LSB */ 0x1611, 0x30, /* Max Distance from Locus MSB */ 0x1612, 0x8f, /* Max Distance from Locus LSB */ 0x1614, 0x01, /* Enable constrainer */ 0x0000, 0x00, }; static const u16 vs6624_default[] = { VS6624_CONTRAST0, 0x84, VS6624_SATURATION0, 0x75, VS6624_GAMMA0, 0x11, VS6624_CONTRAST1, 0x84, VS6624_SATURATION1, 0x75, VS6624_GAMMA1, 0x11, VS6624_MAN_RG, 0x80, VS6624_MAN_GG, 0x80, VS6624_MAN_BG, 0x80, VS6624_WB_MODE, 0x1, VS6624_EXPO_COMPENSATION, 0xfe, VS6624_EXPO_METER, 0x0, VS6624_LIGHT_FREQ, 0x64, VS6624_PEAK_GAIN, 0xe, VS6624_PEAK_LOW_THR, 0x28, VS6624_HMIRROR0, 0x0, VS6624_VFLIP0, 0x0, VS6624_ZOOM_HSTEP0_MSB, 0x0, VS6624_ZOOM_HSTEP0_LSB, 0x1, VS6624_ZOOM_VSTEP0_MSB, 0x0, VS6624_ZOOM_VSTEP0_LSB, 0x1, VS6624_PAN_HSTEP0_MSB, 0x0, VS6624_PAN_HSTEP0_LSB, 0xf, VS6624_PAN_VSTEP0_MSB, 0x0, VS6624_PAN_VSTEP0_LSB, 0xf, VS6624_SENSOR_MODE, 0x1, VS6624_SYNC_CODE_SETUP, 0x21, VS6624_DISABLE_FR_DAMPER, 0x0, VS6624_FR_DEN, 0x1, VS6624_FR_NUM_LSB, 0xf, VS6624_INIT_PIPE_SETUP, 0x0, VS6624_IMG_FMT0, 0x0, VS6624_YUV_SETUP, 0x1, VS6624_IMAGE_SIZE0, 0x2, 0x0000, 0x00, }; static inline struct vs6624 *to_vs6624(struct v4l2_subdev *sd) { return container_of(sd, struct vs6624, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct vs6624, hdl)->sd; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int vs6624_read(struct v4l2_subdev *sd, u16 index) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 buf[2]; buf[0] = index >> 8; buf[1] = index; i2c_master_send(client, buf, 2); i2c_master_recv(client, buf, 1); return buf[0]; } #endif static int vs6624_write(struct v4l2_subdev *sd, u16 index, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 buf[3]; buf[0] = index >> 8; buf[1] = index; buf[2] = value; return i2c_master_send(client, buf, 3); } static int vs6624_writeregs(struct v4l2_subdev *sd, const u16 *regs) { u16 reg; u8 data; while (*regs != 0x00) { reg = *regs++; data = *regs++; vs6624_write(sd, reg, data); } return 0; } static int vs6624_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); switch (ctrl->id) { case V4L2_CID_CONTRAST: vs6624_write(sd, VS6624_CONTRAST0, ctrl->val); break; case V4L2_CID_SATURATION: vs6624_write(sd, VS6624_SATURATION0, ctrl->val); break; case V4L2_CID_HFLIP: vs6624_write(sd, VS6624_HMIRROR0, ctrl->val); break; case V4L2_CID_VFLIP: vs6624_write(sd, VS6624_VFLIP0, ctrl->val); break; default: return -EINVAL; } return 0; } static int vs6624_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) { if (code->pad || code->index >= ARRAY_SIZE(vs6624_formats)) return -EINVAL; code->code = vs6624_formats[code->index].mbus_code; return 0; } 
static int vs6624_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct vs6624 *sensor = to_vs6624(sd); int index; if (format->pad) return -EINVAL; for (index = 0; index < ARRAY_SIZE(vs6624_formats); index++) if (vs6624_formats[index].mbus_code == fmt->code) break; if (index >= ARRAY_SIZE(vs6624_formats)) { /* default to first format */ index = 0; fmt->code = vs6624_formats[0].mbus_code; } /* sensor mode is VGA */ if (fmt->width > VGA_WIDTH) fmt->width = VGA_WIDTH; if (fmt->height > VGA_HEIGHT) fmt->height = VGA_HEIGHT; fmt->width = fmt->width & (~3); fmt->height = fmt->height & (~3); fmt->field = V4L2_FIELD_NONE; fmt->colorspace = vs6624_formats[index].colorspace; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { cfg->try_fmt = *fmt; return 0; } /* set image format */ switch (fmt->code) { case MEDIA_BUS_FMT_UYVY8_2X8: vs6624_write(sd, VS6624_IMG_FMT0, 0x0); vs6624_write(sd, VS6624_YUV_SETUP, 0x1); break; case MEDIA_BUS_FMT_YUYV8_2X8: vs6624_write(sd, VS6624_IMG_FMT0, 0x0); vs6624_write(sd, VS6624_YUV_SETUP, 0x3); break; case MEDIA_BUS_FMT_RGB565_2X8_LE: vs6624_write(sd, VS6624_IMG_FMT0, 0x4); vs6624_write(sd, VS6624_RGB_SETUP, 0x0); break; default: return -EINVAL; } /* set image size */ if ((fmt->width == VGA_WIDTH) && (fmt->height == VGA_HEIGHT)) vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x2); else if ((fmt->width == QVGA_WIDTH) && (fmt->height == QVGA_HEIGHT)) vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x4); else if ((fmt->width == QQVGA_WIDTH) && (fmt->height == QQVGA_HEIGHT)) vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x6); else if ((fmt->width == CIF_WIDTH) && (fmt->height == CIF_HEIGHT)) vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x3); else if ((fmt->width == QCIF_WIDTH) && (fmt->height == QCIF_HEIGHT)) vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x5); else if ((fmt->width == QQCIF_WIDTH) && (fmt->height == QQCIF_HEIGHT)) vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x7); else { vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x8); vs6624_write(sd, VS6624_MAN_HSIZE0_MSB, fmt->width >> 8); vs6624_write(sd, VS6624_MAN_HSIZE0_LSB, fmt->width & 0xFF); vs6624_write(sd, VS6624_MAN_VSIZE0_MSB, fmt->height >> 8); vs6624_write(sd, VS6624_MAN_VSIZE0_LSB, fmt->height & 0xFF); vs6624_write(sd, VS6624_CROP_CTRL0, 0x1); } sensor->fmt = *fmt; return 0; } static int vs6624_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) { struct vs6624 *sensor = to_vs6624(sd); if (format->pad) return -EINVAL; format->format = sensor->fmt; return 0; } static int vs6624_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) { struct vs6624 *sensor = to_vs6624(sd); struct v4l2_captureparm *cp = &parms->parm.capture; if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; memset(cp, 0, sizeof(*cp)); cp->capability = V4L2_CAP_TIMEPERFRAME; cp->timeperframe.numerator = sensor->frame_rate.denominator; cp->timeperframe.denominator = sensor->frame_rate.numerator; return 0; } static int vs6624_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms) { struct vs6624 *sensor = to_vs6624(sd); struct v4l2_captureparm *cp = &parms->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (cp->extendedmode != 0) return -EINVAL; if (tpf->numerator == 0 || tpf->denominator == 0 || (tpf->denominator > tpf->numerator * MAX_FRAME_RATE)) { /* reset to max frame rate */ tpf->numerator = 1; tpf->denominator = MAX_FRAME_RATE; } 
sensor->frame_rate.numerator = tpf->denominator; sensor->frame_rate.denominator = tpf->numerator; vs6624_write(sd, VS6624_DISABLE_FR_DAMPER, 0x0); vs6624_write(sd, VS6624_FR_NUM_MSB, sensor->frame_rate.numerator >> 8); vs6624_write(sd, VS6624_FR_NUM_LSB, sensor->frame_rate.numerator & 0xFF); vs6624_write(sd, VS6624_FR_DEN, sensor->frame_rate.denominator & 0xFF); return 0; } static int vs6624_s_stream(struct v4l2_subdev *sd, int enable) { if (enable) vs6624_write(sd, VS6624_USER_CMD, 0x2); else vs6624_write(sd, VS6624_USER_CMD, 0x4); udelay(100); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int vs6624_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { reg->val = vs6624_read(sd, reg->reg & 0xffff); reg->size = 1; return 0; } static int vs6624_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { vs6624_write(sd, reg->reg & 0xffff, reg->val & 0xff); return 0; } #endif static const struct v4l2_ctrl_ops vs6624_ctrl_ops = { .s_ctrl = vs6624_s_ctrl, }; static const struct v4l2_subdev_core_ops vs6624_core_ops = { #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = vs6624_g_register, .s_register = vs6624_s_register, #endif }; static const struct v4l2_subdev_video_ops vs6624_video_ops = { .s_parm = vs6624_s_parm, .g_parm = vs6624_g_parm, .s_stream = vs6624_s_stream, }; static const struct v4l2_subdev_pad_ops vs6624_pad_ops = { .enum_mbus_code = vs6624_enum_mbus_code, .get_fmt = vs6624_get_fmt, .set_fmt = vs6624_set_fmt, }; static const struct v4l2_subdev_ops vs6624_ops = { .core = &vs6624_core_ops, .video = &vs6624_video_ops, .pad = &vs6624_pad_ops, }; static int vs6624_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct vs6624 *sensor; struct v4l2_subdev *sd; struct v4l2_ctrl_handler *hdl; const unsigned *ce; int ret; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -EIO; ce = client->dev.platform_data; if (ce == NULL) return -EINVAL; ret = devm_gpio_request_one(&client->dev, *ce, GPIOF_OUT_INIT_HIGH, "VS6624 Chip Enable"); if (ret) { v4l_err(client, "failed to request GPIO %d\n", *ce); return ret; } /* wait 100ms before any further i2c writes are performed */ mdelay(100); sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL); if (sensor == NULL) return -ENOMEM; sd = &sensor->sd; v4l2_i2c_subdev_init(sd, client, &vs6624_ops); vs6624_writeregs(sd, vs6624_p1); vs6624_write(sd, VS6624_MICRO_EN, 0x2); vs6624_write(sd, VS6624_DIO_EN, 0x1); mdelay(10); vs6624_writeregs(sd, vs6624_p2); vs6624_writeregs(sd, vs6624_default); vs6624_write(sd, VS6624_HSYNC_SETUP, 0xF); vs6624_writeregs(sd, vs6624_run_setup); /* set frame rate */ sensor->frame_rate.numerator = MAX_FRAME_RATE; sensor->frame_rate.denominator = 1; vs6624_write(sd, VS6624_DISABLE_FR_DAMPER, 0x0); vs6624_write(sd, VS6624_FR_NUM_MSB, sensor->frame_rate.numerator >> 8); vs6624_write(sd, VS6624_FR_NUM_LSB, sensor->frame_rate.numerator & 0xFF); vs6624_write(sd, VS6624_FR_DEN, sensor->frame_rate.denominator & 0xFF); sensor->fmt = vs6624_default_fmt; sensor->ce_pin = *ce; v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); hdl = &sensor->hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops, V4L2_CID_CONTRAST, 0, 0xFF, 1, 0x87); v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops, V4L2_CID_SATURATION, 0, 0xFF, 1, 0x78); v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); /* hook 
the control handler into the driver */ sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); return err; } /* initialize the hardware to the default control values */ ret = v4l2_ctrl_handler_setup(hdl); if (ret) v4l2_ctrl_handler_free(hdl); return ret; } static int vs6624_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); return 0; } static const struct i2c_device_id vs6624_id[] = { {"vs6624", 0}, {}, }; MODULE_DEVICE_TABLE(i2c, vs6624_id); static struct i2c_driver vs6624_driver = { .driver = { .name = "vs6624", }, .probe = vs6624_probe, .remove = vs6624_remove, .id_table = vs6624_id, }; module_i2c_driver(vs6624_driver); MODULE_DESCRIPTION("VS6624 sensor driver"); MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>"); MODULE_LICENSE("GPL v2");
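Editor's note: every register access in the vs6624 record above goes through the same three-byte I2C payload, which is worth keeping in mind when reading the long register tables. The sketch below is a stand-alone, user-space model of that packing (16-bit register index sent big-endian, then the data byte); pack_vs6624_write() and the main() harness are names invented for this illustration and are not part of the driver.

/* Minimal sketch, assuming only that the payload layout matches the
 * driver's vs6624_write(): index MSB, index LSB, then the value. */
#include <stdint.h>
#include <stdio.h>

static void pack_vs6624_write(uint16_t index, uint8_t value, uint8_t buf[3])
{
	buf[0] = (uint8_t)(index >> 8); /* register index, high byte first */
	buf[1] = (uint8_t)index;        /* register index, low byte */
	buf[2] = value;                 /* data byte */
}

int main(void)
{
	uint8_t buf[3];

	/* First entry of the vs6624_p1 table: register 0x8104 <- 0x03. */
	pack_vs6624_write(0x8104, 0x03, buf);
	printf("i2c payload: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
	return 0;
}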
null
null
null
null
108,547
23,029
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
188,024
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * LTC2952 (PowerPath) driver * * Copyright (C) 2014, Xsens Technologies BV <info@xsens.com> * Maintainer: René Moll <linux@r-moll.nl> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * ---------------------------------------- * - Description * ---------------------------------------- * * This driver is to be used with an external PowerPath Controller (LTC2952). * Its function is to determine when an external shut down is triggered * and react by properly shutting down the system. * * This driver expects a device tree with an ltc2952 entry for pin mapping. * * ---------------------------------------- * - GPIO * ---------------------------------------- * * The following GPIOs are used: * - trigger (input) * A level change indicates the shut-down trigger. If its state reverts * within the time-out defined by trigger_delay, the shut down is not * executed. If no pin is assigned to this input, the driver will start the * watchdog toggle immediately. The chip will only power off the system if * it is requested to do so through the kill line. * * - watchdog (output) * Once a shut down is triggered, the driver will toggle this signal * at an interval (wde_interval) to stall the hardware shut down. * * - kill (output) * The last action during shut down is asserting this signal, such * that the PowerPath Control will power down the hardware. * * ---------------------------------------- * - Interrupts * ---------------------------------------- * * The driver requires a non-shared, edge-triggered interrupt on the trigger * GPIO. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/gpio/consumer.h> #include <linux/reboot.h> struct ltc2952_poweroff { struct hrtimer timer_trigger; struct hrtimer timer_wde; ktime_t trigger_delay; ktime_t wde_interval; struct device *dev; struct gpio_desc *gpio_trigger; struct gpio_desc *gpio_watchdog; struct gpio_desc *gpio_kill; bool kernel_panic; struct notifier_block panic_notifier; }; #define to_ltc2952(p, m) container_of(p, struct ltc2952_poweroff, m) /* * This global variable is only needed for pm_power_off. We should * remove it entirely once we don't need the global state anymore. 
*/ static struct ltc2952_poweroff *ltc2952_data; /** * ltc2952_poweroff_timer_wde - Timer callback * Toggles the watchdog reset signal each wde_interval * * @timer: corresponding timer * * Returns HRTIMER_RESTART for an infinite loop which will only stop when the * machine actually shuts down */ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer) { ktime_t now; int state; unsigned long overruns; struct ltc2952_poweroff *data = to_ltc2952(timer, timer_wde); if (data->kernel_panic) return HRTIMER_NORESTART; state = gpiod_get_value(data->gpio_watchdog); gpiod_set_value(data->gpio_watchdog, !state); now = hrtimer_cb_get_time(timer); overruns = hrtimer_forward(timer, now, data->wde_interval); return HRTIMER_RESTART; } static void ltc2952_poweroff_start_wde(struct ltc2952_poweroff *data) { hrtimer_start(&data->timer_wde, data->wde_interval, HRTIMER_MODE_REL); } static enum hrtimer_restart ltc2952_poweroff_timer_trigger(struct hrtimer *timer) { struct ltc2952_poweroff *data = to_ltc2952(timer, timer_trigger); ltc2952_poweroff_start_wde(data); dev_info(data->dev, "executing shutdown\n"); orderly_poweroff(true); return HRTIMER_NORESTART; } /** * ltc2952_poweroff_handler - Interrupt handler * Triggered each time the trigger signal changes state and (de)activates a * time-out (timer_trigger). Once the time-out is actually reached the shut * down is executed. * * @irq: IRQ number * @dev_id: pointer to the main data structure */ static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id) { struct ltc2952_poweroff *data = dev_id; if (data->kernel_panic || hrtimer_active(&data->timer_wde)) { /* shutdown is already triggered, nothing to do any more */ return IRQ_HANDLED; } if (gpiod_get_value(data->gpio_trigger)) { hrtimer_start(&data->timer_trigger, data->trigger_delay, HRTIMER_MODE_REL); } else { hrtimer_cancel(&data->timer_trigger); } return IRQ_HANDLED; } static void ltc2952_poweroff_kill(void) { gpiod_set_value(ltc2952_data->gpio_kill, 1); } static void ltc2952_poweroff_default(struct ltc2952_poweroff *data) { data->wde_interval = 300L * 1E6L; data->trigger_delay = ktime_set(2, 500L*1E6L); hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL); data->timer_trigger.function = ltc2952_poweroff_timer_trigger; hrtimer_init(&data->timer_wde, CLOCK_MONOTONIC, HRTIMER_MODE_REL); data->timer_wde.function = ltc2952_poweroff_timer_wde; } static int ltc2952_poweroff_init(struct platform_device *pdev) { int ret; struct ltc2952_poweroff *data = platform_get_drvdata(pdev); ltc2952_poweroff_default(data); data->gpio_watchdog = devm_gpiod_get(&pdev->dev, "watchdog", GPIOD_OUT_LOW); if (IS_ERR(data->gpio_watchdog)) { ret = PTR_ERR(data->gpio_watchdog); dev_err(&pdev->dev, "unable to claim gpio \"watchdog\"\n"); return ret; } data->gpio_kill = devm_gpiod_get(&pdev->dev, "kill", GPIOD_OUT_LOW); if (IS_ERR(data->gpio_kill)) { ret = PTR_ERR(data->gpio_kill); dev_err(&pdev->dev, "unable to claim gpio \"kill\"\n"); return ret; } data->gpio_trigger = devm_gpiod_get_optional(&pdev->dev, "trigger", GPIOD_IN); if (IS_ERR(data->gpio_trigger)) { /* * It's not a problem if the trigger gpio isn't available, but * it is worth a warning if its use was defined in the device * tree. 
*/ dev_err(&pdev->dev, "unable to claim gpio \"trigger\"\n"); data->gpio_trigger = NULL; } if (devm_request_irq(&pdev->dev, gpiod_to_irq(data->gpio_trigger), ltc2952_poweroff_handler, (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING), "ltc2952-poweroff", data)) { /* * Some things may have happened: * - No trigger input was defined * - Claiming the GPIO failed * - We could not map to an IRQ * - We couldn't register an interrupt handler * * None of these really are problems, but all of them * disqualify the push button from controlling the power. * * It is therefore important to note that if the ltc2952 * detects a button press for long enough, it will still start * its own powerdown window and cut the power on us if we don't * start the watchdog trigger. */ if (data->gpio_trigger) { dev_warn(&pdev->dev, "unable to configure the trigger interrupt\n"); devm_gpiod_put(&pdev->dev, data->gpio_trigger); data->gpio_trigger = NULL; } dev_info(&pdev->dev, "power down trigger input will not be used\n"); ltc2952_poweroff_start_wde(data); } return 0; } static int ltc2952_poweroff_notify_panic(struct notifier_block *nb, unsigned long code, void *unused) { struct ltc2952_poweroff *data = to_ltc2952(nb, panic_notifier); data->kernel_panic = true; return NOTIFY_DONE; } static int ltc2952_poweroff_probe(struct platform_device *pdev) { int ret; struct ltc2952_poweroff *data; if (pm_power_off) { dev_err(&pdev->dev, "pm_power_off already registered"); return -EBUSY; } data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->dev = &pdev->dev; platform_set_drvdata(pdev, data); ret = ltc2952_poweroff_init(pdev); if (ret) return ret; /* TODO: remove ltc2952_data */ ltc2952_data = data; pm_power_off = ltc2952_poweroff_kill; data->panic_notifier.notifier_call = ltc2952_poweroff_notify_panic; atomic_notifier_chain_register(&panic_notifier_list, &data->panic_notifier); dev_info(&pdev->dev, "probe successful\n"); return 0; } static int ltc2952_poweroff_remove(struct platform_device *pdev) { struct ltc2952_poweroff *data = platform_get_drvdata(pdev); pm_power_off = NULL; hrtimer_cancel(&data->timer_trigger); hrtimer_cancel(&data->timer_wde); atomic_notifier_chain_unregister(&panic_notifier_list, &data->panic_notifier); return 0; } static const struct of_device_id of_ltc2952_poweroff_match[] = { { .compatible = "lltc,ltc2952"}, {}, }; MODULE_DEVICE_TABLE(of, of_ltc2952_poweroff_match); static struct platform_driver ltc2952_poweroff_driver = { .probe = ltc2952_poweroff_probe, .remove = ltc2952_poweroff_remove, .driver = { .name = "ltc2952-poweroff", .of_match_table = of_ltc2952_poweroff_match, }, }; module_platform_driver(ltc2952_poweroff_driver); MODULE_AUTHOR("René Moll <rene.moll@xsens.com>"); MODULE_DESCRIPTION("LTC PowerPath power-off driver"); MODULE_LICENSE("GPL v2");
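Editor's note: the ltc2952 watchdog callback above keeps itself periodic by calling hrtimer_forward() before returning HRTIMER_RESTART. The user-space model below sketches only that re-arming arithmetic, treating expiry times as plain nanosecond counters; forward_expiry() and the sample numbers are illustrative assumptions, not kernel code.

/* A minimal sketch of the hrtimer_forward() contract: push "expires"
 * past "now" in whole interval steps and report how many steps (the
 * overrun count) were needed. */
#include <stdint.h>
#include <stdio.h>

static uint64_t forward_expiry(uint64_t *expires, uint64_t now, uint64_t interval)
{
	uint64_t overruns = 0;

	if (now >= *expires) {
		overruns = (now - *expires) / interval + 1;
		*expires += overruns * interval;
	}
	return overruns;
}

int main(void)
{
	uint64_t expires = 0;
	uint64_t interval = 300000000ULL; /* 300 ms, like wde_interval above */

	/* Callback ran 700 ms after the old expiry: three 300 ms steps are
	 * needed to move the expiry past "now", landing at the 900 ms mark. */
	uint64_t overruns = forward_expiry(&expires, 700000000ULL, interval);

	printf("overruns = %llu, next expiry = %llu ns\n",
	       (unsigned long long)overruns, (unsigned long long)expires);
	return 0;
}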
null
null
null
null
96,371
59,061
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
59,061
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/extensions/activity_log/activity_log.h" #include <stddef.h> #include <memory> #include "base/command_line.h" #include "base/macros.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/synchronization/waitable_event.h" #include "build/build_config.h" #include "chrome/browser/extensions/activity_log/activity_action_constants.h" #include "chrome/browser/extensions/activity_log/activity_log_task_runner.h" #include "chrome/browser/extensions/extension_service.h" #include "chrome/browser/extensions/test_extension_system.h" #include "chrome/browser/prerender/prerender_handle.h" #include "chrome/browser/prerender/prerender_manager.h" #include "chrome/browser/prerender/prerender_manager_factory.h" #include "chrome/browser/prerender/prerender_test_utils.h" #include "chrome/common/chrome_constants.h" #include "chrome/common/chrome_switches.h" #include "chrome/test/base/chrome_render_view_host_test_harness.h" #include "chrome/test/base/testing_profile.h" #include "content/public/browser/web_contents.h" #include "content/public/test/test_browser_thread_bundle.h" #include "extensions/browser/extension_registry.h" #include "extensions/browser/uninstall_reason.h" #include "extensions/common/dom_action_types.h" #include "extensions/common/extension_builder.h" #include "testing/gtest/include/gtest/gtest.h" namespace { const char kExtensionId[] = "abc"; const char* const kUrlApiCalls[] = { "HTMLButtonElement.formAction", "HTMLEmbedElement.src", "HTMLFormElement.action", "HTMLFrameElement.src", "HTMLHtmlElement.manifest", "HTMLIFrameElement.src", "HTMLImageElement.longDesc", "HTMLImageElement.src", "HTMLImageElement.lowsrc", "HTMLInputElement.formAction", "HTMLInputElement.src", "HTMLLinkElement.href", "HTMLMediaElement.src", "HTMLMediaElement.currentSrc", "HTMLModElement.cite", "HTMLObjectElement.data", "HTMLQuoteElement.cite", "HTMLScriptElement.src", "HTMLSourceElement.src", "HTMLTrackElement.src", "HTMLVideoElement.poster"}; } // namespace namespace extensions { class ActivityLogTest : public ChromeRenderViewHostTestHarness { protected: virtual bool enable_activity_logging_switch() const { return true; } void SetUp() override { ChromeRenderViewHostTestHarness::SetUp(); SetActivityLogTaskRunnerForTesting( base::ThreadTaskRunnerHandle::Get().get()); base::CommandLine command_line(base::CommandLine::NO_PROGRAM); if (enable_activity_logging_switch()) { base::CommandLine::ForCurrentProcess()->AppendSwitch( switches::kEnableExtensionActivityLogging); } base::CommandLine::ForCurrentProcess()->AppendSwitch( switches::kEnableExtensionActivityLogTesting); extension_service_ = static_cast<TestExtensionSystem*>( ExtensionSystem::Get(profile()))->CreateExtensionService (&command_line, base::FilePath(), false); base::RunLoop().RunUntilIdle(); } void TearDown() override { base::RunLoop().RunUntilIdle(); SetActivityLogTaskRunnerForTesting(nullptr); ChromeRenderViewHostTestHarness::TearDown(); } static void RetrieveActions_LogAndFetchActions0( std::unique_ptr<std::vector<scoped_refptr<Action>>> i) { ASSERT_EQ(0, static_cast<int>(i->size())); } static void RetrieveActions_LogAndFetchActions2( std::unique_ptr<std::vector<scoped_refptr<Action>>> i) { ASSERT_EQ(2, static_cast<int>(i->size())); } void SetPolicy(bool log_arguments) { ActivityLog* activity_log = ActivityLog::GetInstance(profile()); if 
(log_arguments) activity_log->SetDatabasePolicy(ActivityLogPolicy::POLICY_FULLSTREAM); else activity_log->SetDatabasePolicy(ActivityLogPolicy::POLICY_COUNTS); } bool GetDatabaseEnabled() { ActivityLog* activity_log = ActivityLog::GetInstance(profile()); return activity_log->IsDatabaseEnabled(); } bool GetWatchdogActive() { ActivityLog* activity_log = ActivityLog::GetInstance(profile()); return activity_log->IsWatchdogAppActive(); } static void Arguments_Prerender( std::unique_ptr<std::vector<scoped_refptr<Action>>> i) { ASSERT_EQ(1U, i->size()); scoped_refptr<Action> last = i->front(); ASSERT_EQ("odlameecjipmbmbejkplpemijjgpljce", last->extension_id()); ASSERT_EQ(Action::ACTION_CONTENT_SCRIPT, last->action_type()); ASSERT_EQ("[\"script\"]", ActivityLogPolicy::Util::Serialize(last->args())); ASSERT_EQ("http://www.google.com/", last->SerializePageUrl()); ASSERT_EQ("{\"prerender\":true}", ActivityLogPolicy::Util::Serialize(last->other())); ASSERT_EQ("", last->api_name()); ASSERT_EQ("", last->page_title()); ASSERT_EQ("", last->SerializeArgUrl()); } static void RetrieveActions_ArgUrlExtraction( std::unique_ptr<std::vector<scoped_refptr<Action>>> i) { const base::DictionaryValue* other = NULL; int dom_verb = -1; ASSERT_EQ(4U, i->size()); scoped_refptr<Action> action = i->at(0); ASSERT_EQ("XMLHttpRequest.open", action->api_name()); ASSERT_EQ("[\"POST\",\"\\u003Carg_url>\"]", ActivityLogPolicy::Util::Serialize(action->args())); ASSERT_EQ("http://api.google.com/", action->arg_url().spec()); // Test that the dom_verb field was changed to XHR (from METHOD). This // could be tested on all retrieved XHR actions but it would be redundant, // so just test once. other = action->other(); ASSERT_TRUE(other); ASSERT_TRUE(other->GetInteger(activity_log_constants::kActionDomVerb, &dom_verb)); ASSERT_EQ(DomActionType::XHR, dom_verb); action = i->at(1); ASSERT_EQ("XMLHttpRequest.open", action->api_name()); ASSERT_EQ("[\"POST\",\"\\u003Carg_url>\"]", ActivityLogPolicy::Util::Serialize(action->args())); ASSERT_EQ("http://www.google.com/api/", action->arg_url().spec()); action = i->at(2); ASSERT_EQ("XMLHttpRequest.open", action->api_name()); ASSERT_EQ("[\"POST\",\"/api/\"]", ActivityLogPolicy::Util::Serialize(action->args())); ASSERT_FALSE(action->arg_url().is_valid()); action = i->at(3); ASSERT_EQ("windows.create", action->api_name()); ASSERT_EQ("[{\"url\":\"\\u003Carg_url>\"}]", ActivityLogPolicy::Util::Serialize(action->args())); ASSERT_EQ("http://www.google.co.uk/", action->arg_url().spec()); } static void RetrieveActions_ArgUrlApiCalls( std::unique_ptr<std::vector<scoped_refptr<Action>>> actions) { size_t api_calls_size = arraysize(kUrlApiCalls); const base::DictionaryValue* other = NULL; int dom_verb = -1; ASSERT_EQ(api_calls_size, actions->size()); for (size_t i = 0; i < actions->size(); i++) { scoped_refptr<Action> action = actions->at(i); ASSERT_EQ(kExtensionId, action->extension_id()); ASSERT_EQ(Action::ACTION_DOM_ACCESS, action->action_type()); ASSERT_EQ(kUrlApiCalls[i], action->api_name()); ASSERT_EQ("[\"\\u003Carg_url>\"]", ActivityLogPolicy::Util::Serialize(action->args())); ASSERT_EQ("http://www.google.co.uk/", action->arg_url().spec()); other = action->other(); ASSERT_TRUE(other); ASSERT_TRUE( other->GetInteger(activity_log_constants::kActionDomVerb, &dom_verb)); ASSERT_EQ(DomActionType::SETTER, dom_verb); } } ExtensionService* extension_service_; }; TEST_F(ActivityLogTest, Construct) { ASSERT_TRUE(GetDatabaseEnabled()); ASSERT_FALSE(GetWatchdogActive()); } TEST_F(ActivityLogTest, 
LogAndFetchActions) { ActivityLog* activity_log = ActivityLog::GetInstance(profile()); std::unique_ptr<base::ListValue> args(new base::ListValue()); ASSERT_TRUE(GetDatabaseEnabled()); // Write some API calls scoped_refptr<Action> action = new Action(kExtensionId, base::Time::Now(), Action::ACTION_API_CALL, "tabs.testMethod"); activity_log->LogAction(action); action = new Action(kExtensionId, base::Time::Now(), Action::ACTION_DOM_ACCESS, "document.write"); action->set_page_url(GURL("http://www.google.com")); activity_log->LogAction(action); activity_log->GetFilteredActions( kExtensionId, Action::ACTION_ANY, "", "", "", 0, base::BindOnce(ActivityLogTest::RetrieveActions_LogAndFetchActions2)); } TEST_F(ActivityLogTest, LogPrerender) { scoped_refptr<const Extension> extension = ExtensionBuilder() .SetManifest(DictionaryBuilder() .Set("name", "Test extension") .Set("version", "1.0.0") .Set("manifest_version", 2) .Build()) .Build(); extension_service_->AddExtension(extension.get()); ActivityLog* activity_log = ActivityLog::GetInstance(profile()); EXPECT_TRUE(activity_log->ShouldLog(extension->id())); ASSERT_TRUE(GetDatabaseEnabled()); GURL url("http://www.google.com"); prerender::test_utils::RestorePrerenderMode restore_prerender_mode; prerender::PrerenderManager::SetOmniboxMode( prerender::PrerenderManager::PRERENDER_MODE_ENABLED); prerender::PrerenderManager* prerender_manager = prerender::PrerenderManagerFactory::GetForBrowserContext(profile()); const gfx::Size kSize(640, 480); std::unique_ptr<prerender::PrerenderHandle> prerender_handle( prerender_manager->AddPrerenderFromOmnibox( url, web_contents()->GetController().GetDefaultSessionStorageNamespace(), kSize)); const std::vector<content::WebContents*> contentses = prerender_manager->GetAllPrerenderingContents(); ASSERT_EQ(1U, contentses.size()); content::WebContents *contents = contentses[0]; ASSERT_TRUE(prerender_manager->IsWebContentsPrerendering(contents, NULL)); ScriptExecutionObserver::ExecutingScriptsMap executing_scripts; executing_scripts[extension->id()].insert("script"); static_cast<ScriptExecutionObserver*>(activity_log) ->OnScriptsExecuted(contents, executing_scripts, url); activity_log->GetFilteredActions( extension->id(), Action::ACTION_ANY, "", "", "", 0, base::BindOnce(ActivityLogTest::Arguments_Prerender)); prerender_manager->CancelAllPrerenders(); } TEST_F(ActivityLogTest, ArgUrlExtraction) { ActivityLog* activity_log = ActivityLog::GetInstance(profile()); std::unique_ptr<base::ListValue> args(new base::ListValue()); base::Time now = base::Time::Now(); // Submit a DOM API call which should have its URL extracted into the arg_url // field. EXPECT_TRUE(activity_log->ShouldLog(kExtensionId)); scoped_refptr<Action> action = new Action(kExtensionId, now, Action::ACTION_DOM_ACCESS, "XMLHttpRequest.open"); action->set_page_url(GURL("http://www.google.com/")); action->mutable_args()->AppendString("POST"); action->mutable_args()->AppendString("http://api.google.com/"); action->mutable_other()->SetInteger(activity_log_constants::kActionDomVerb, DomActionType::METHOD); activity_log->LogAction(action); // Submit a DOM API call with a relative URL in the argument, which should be // resolved relative to the page URL. 
action = new Action(kExtensionId, now - base::TimeDelta::FromSeconds(1), Action::ACTION_DOM_ACCESS, "XMLHttpRequest.open"); action->set_page_url(GURL("http://www.google.com/")); action->mutable_args()->AppendString("POST"); action->mutable_args()->AppendString("/api/"); action->mutable_other()->SetInteger(activity_log_constants::kActionDomVerb, DomActionType::METHOD); activity_log->LogAction(action); // Submit a DOM API call with a relative URL but no base page URL against // which to resolve. action = new Action(kExtensionId, now - base::TimeDelta::FromSeconds(2), Action::ACTION_DOM_ACCESS, "XMLHttpRequest.open"); action->mutable_args()->AppendString("POST"); action->mutable_args()->AppendString("/api/"); action->mutable_other()->SetInteger(activity_log_constants::kActionDomVerb, DomActionType::METHOD); activity_log->LogAction(action); // Submit an API call with an embedded URL. action = new Action(kExtensionId, now - base::TimeDelta::FromSeconds(3), Action::ACTION_API_CALL, "windows.create"); action->set_args( ListBuilder() .Append( DictionaryBuilder().Set("url", "http://www.google.co.uk").Build()) .Build()); activity_log->LogAction(action); activity_log->GetFilteredActions( kExtensionId, Action::ACTION_ANY, "", "", "", -1, base::BindOnce(ActivityLogTest::RetrieveActions_ArgUrlExtraction)); } TEST_F(ActivityLogTest, UninstalledExtension) { scoped_refptr<const Extension> extension = ExtensionBuilder() .SetManifest(DictionaryBuilder() .Set("name", "Test extension") .Set("version", "1.0.0") .Set("manifest_version", 2) .Build()) .Build(); ActivityLog* activity_log = ActivityLog::GetInstance(profile()); std::unique_ptr<base::ListValue> args(new base::ListValue()); ASSERT_TRUE(GetDatabaseEnabled()); // Write some API calls scoped_refptr<Action> action = new Action(extension->id(), base::Time::Now(), Action::ACTION_API_CALL, "tabs.testMethod"); activity_log->LogAction(action); action = new Action(extension->id(), base::Time::Now(), Action::ACTION_DOM_ACCESS, "document.write"); action->set_page_url(GURL("http://www.google.com")); activity_log->OnExtensionUninstalled( NULL, extension.get(), extensions::UNINSTALL_REASON_FOR_TESTING); activity_log->GetFilteredActions( extension->id(), Action::ACTION_ANY, "", "", "", -1, base::BindOnce(ActivityLogTest::RetrieveActions_LogAndFetchActions0)); } TEST_F(ActivityLogTest, ArgUrlApiCalls) { ActivityLog* activity_log = ActivityLog::GetInstance(profile()); std::unique_ptr<base::ListValue> args(new base::ListValue()); base::Time now = base::Time::Now(); int api_calls_size = arraysize(kUrlApiCalls); scoped_refptr<Action> action; for (int i = 0; i < api_calls_size; i++) { action = new Action(kExtensionId, now - base::TimeDelta::FromSeconds(i), Action::ACTION_DOM_ACCESS, kUrlApiCalls[i]); action->mutable_args()->AppendString("http://www.google.co.uk"); action->mutable_other()->SetInteger(activity_log_constants::kActionDomVerb, DomActionType::SETTER); activity_log->LogAction(action); } activity_log->GetFilteredActions( kExtensionId, Action::ACTION_ANY, "", "", "", -1, base::BindOnce(ActivityLogTest::RetrieveActions_ArgUrlApiCalls)); } class ActivityLogTestWithoutSwitch : public ActivityLogTest { public: ActivityLogTestWithoutSwitch() {} ~ActivityLogTestWithoutSwitch() override {} bool enable_activity_logging_switch() const override { return false; } }; TEST_F(ActivityLogTestWithoutSwitch, TestShouldLog) { static_cast<TestExtensionSystem*>( ExtensionSystem::Get(profile()))->SetReady(); ActivityLog* activity_log = ActivityLog::GetInstance(profile()); 
scoped_refptr<const Extension> empty_extension = ExtensionBuilder("Test").Build(); extension_service_->AddExtension(empty_extension.get()); // Since the command line switch for logging isn't enabled and there's no // watchdog app active, the activity log shouldn't log anything. EXPECT_FALSE(activity_log->ShouldLog(empty_extension->id())); const char kWhitelistedExtensionId[] = "eplckmlabaanikjjcgnigddmagoglhmp"; scoped_refptr<const Extension> activity_log_extension = ExtensionBuilder("Test").SetID(kWhitelistedExtensionId).Build(); extension_service_->AddExtension(activity_log_extension.get()); // Loading a watchdog app means the activity log should log other extension // activities... EXPECT_TRUE(activity_log->ShouldLog(empty_extension->id())); // ... but not those of the watchdog app. EXPECT_FALSE(activity_log->ShouldLog(activity_log_extension->id())); extension_service_->DisableExtension(activity_log_extension->id(), disable_reason::DISABLE_USER_ACTION); // Disabling the watchdog app means that we're back to never logging anything. EXPECT_FALSE(activity_log->ShouldLog(empty_extension->id())); } } // namespace extensions
null
null
null
null
55,924
3,152
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
168,147
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * linux/fs/lockd/host.c * * Management for NLM peer hosts. The nlm_host struct is shared * between client and server implementation. The only reason to * do so is to reduce code bloat. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <linux/mutex.h> #include <linux/sunrpc/svc_xprt.h> #include <net/ipv6.h> #include "netns.h" #define NLMDBG_FACILITY NLMDBG_HOSTCACHE #define NLM_HOST_NRHASH 32 #define NLM_HOST_REBIND (60 * HZ) #define NLM_HOST_EXPIRE (300 * HZ) #define NLM_HOST_COLLECT (120 * HZ) static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH]; static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH]; #define for_each_host(host, chain, table) \ for ((chain) = (table); \ (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ hlist_for_each_entry((host), (chain), h_hash) #define for_each_host_safe(host, next, chain, table) \ for ((chain) = (table); \ (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ hlist_for_each_entry_safe((host), (next), \ (chain), h_hash) static unsigned long nrhosts; static DEFINE_MUTEX(nlm_host_mutex); static void nlm_gc_hosts(struct net *net); struct nlm_lookup_host_info { const int server; /* search for server|client */ const struct sockaddr *sap; /* address to search for */ const size_t salen; /* its length */ const unsigned short protocol; /* transport to search for */ const u32 version; /* NLM version to search for */ const char *hostname; /* remote's hostname */ const size_t hostname_len; /* its length */ const int noresvport; /* use non-priv port */ struct net *net; /* network namespace to bind */ }; /* * Hash function must work well on big- and little-endian platforms */ static unsigned int __nlm_hash32(const __be32 n) { unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16); return hash ^ (hash >> 8); } static unsigned int __nlm_hash_addr4(const struct sockaddr *sap) { const struct sockaddr_in *sin = (struct sockaddr_in *)sap; return __nlm_hash32(sin->sin_addr.s_addr); } static unsigned int __nlm_hash_addr6(const struct sockaddr *sap) { const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; const struct in6_addr addr = sin6->sin6_addr; return __nlm_hash32(addr.s6_addr32[0]) ^ __nlm_hash32(addr.s6_addr32[1]) ^ __nlm_hash32(addr.s6_addr32[2]) ^ __nlm_hash32(addr.s6_addr32[3]); } static unsigned int nlm_hash_address(const struct sockaddr *sap) { unsigned int hash; switch (sap->sa_family) { case AF_INET: hash = __nlm_hash_addr4(sap); break; case AF_INET6: hash = __nlm_hash_addr6(sap); break; default: hash = 0; } return hash & (NLM_HOST_NRHASH - 1); } /* * Allocate and initialize an nlm_host. Common to both client and server. 
*/ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni, struct nsm_handle *nsm) { struct nlm_host *host = NULL; unsigned long now = jiffies; if (nsm != NULL) atomic_inc(&nsm->sm_count); else { host = NULL; nsm = nsm_get_handle(ni->net, ni->sap, ni->salen, ni->hostname, ni->hostname_len); if (unlikely(nsm == NULL)) { dprintk("lockd: %s failed; no nsm handle\n", __func__); goto out; } } host = kmalloc(sizeof(*host), GFP_KERNEL); if (unlikely(host == NULL)) { dprintk("lockd: %s failed; no memory\n", __func__); nsm_release(nsm); goto out; } memcpy(nlm_addr(host), ni->sap, ni->salen); host->h_addrlen = ni->salen; rpc_set_port(nlm_addr(host), 0); host->h_srcaddrlen = 0; host->h_rpcclnt = NULL; host->h_name = nsm->sm_name; host->h_version = ni->version; host->h_proto = ni->protocol; host->h_reclaiming = 0; host->h_server = ni->server; host->h_noresvport = ni->noresvport; host->h_inuse = 0; init_waitqueue_head(&host->h_gracewait); init_rwsem(&host->h_rwsem); host->h_state = 0; host->h_nsmstate = 0; host->h_pidcount = 0; atomic_set(&host->h_count, 1); mutex_init(&host->h_mutex); host->h_nextrebind = now + NLM_HOST_REBIND; host->h_expires = now + NLM_HOST_EXPIRE; INIT_LIST_HEAD(&host->h_lockowners); spin_lock_init(&host->h_lock); INIT_LIST_HEAD(&host->h_granted); INIT_LIST_HEAD(&host->h_reclaim); host->h_nsmhandle = nsm; host->h_addrbuf = nsm->sm_addrbuf; host->net = ni->net; strlcpy(host->nodename, utsname()->nodename, sizeof(host->nodename)); out: return host; } /* * Destroy an nlm_host and free associated resources * * Caller must hold nlm_host_mutex. */ static void nlm_destroy_host_locked(struct nlm_host *host) { struct rpc_clnt *clnt; struct lockd_net *ln = net_generic(host->net, lockd_net_id); dprintk("lockd: destroy host %s\n", host->h_name); hlist_del_init(&host->h_hash); nsm_unmonitor(host); nsm_release(host->h_nsmhandle); clnt = host->h_rpcclnt; if (clnt != NULL) rpc_shutdown_client(clnt); kfree(host); ln->nrhosts--; nrhosts--; } /** * nlmclnt_lookup_host - Find an NLM host handle matching a remote server * @sap: network address of server * @salen: length of server address * @protocol: transport protocol to use * @version: NLM protocol version * @hostname: '\0'-terminated hostname of server * @noresvport: 1 if non-privileged port should be used * * Returns an nlm_host structure that matches the passed-in * [server address, transport protocol, NLM version, server hostname]. * If one doesn't already exist in the host cache, a new handle is * created and returned. */ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, const u32 version, const char *hostname, int noresvport, struct net *net) { struct nlm_lookup_host_info ni = { .server = 0, .sap = sap, .salen = salen, .protocol = protocol, .version = version, .hostname = hostname, .hostname_len = strlen(hostname), .noresvport = noresvport, .net = net, }; struct hlist_head *chain; struct nlm_host *host; struct nsm_handle *nsm = NULL; struct lockd_net *ln = net_generic(net, lockd_net_id); dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, (hostname ? hostname : "<none>"), version, (protocol == IPPROTO_UDP ? "udp" : "tcp")); mutex_lock(&nlm_host_mutex); chain = &nlm_client_hosts[nlm_hash_address(sap)]; hlist_for_each_entry(host, chain, h_hash) { if (host->net != net) continue; if (!rpc_cmp_addr(nlm_addr(host), sap)) continue; /* Same address. 
Share an NSM handle if we already have one */ if (nsm == NULL) nsm = host->h_nsmhandle; if (host->h_proto != protocol) continue; if (host->h_version != version) continue; nlm_get_host(host); dprintk("lockd: %s found host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); goto out; } host = nlm_alloc_host(&ni, nsm); if (unlikely(host == NULL)) goto out; hlist_add_head(&host->h_hash, chain); ln->nrhosts++; nrhosts++; dprintk("lockd: %s created host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); out: mutex_unlock(&nlm_host_mutex); return host; } /** * nlmclnt_release_host - release client nlm_host * @host: nlm_host to release * */ void nlmclnt_release_host(struct nlm_host *host) { if (host == NULL) return; dprintk("lockd: release client host %s\n", host->h_name); WARN_ON_ONCE(host->h_server); if (atomic_dec_and_test(&host->h_count)) { WARN_ON_ONCE(!list_empty(&host->h_lockowners)); WARN_ON_ONCE(!list_empty(&host->h_granted)); WARN_ON_ONCE(!list_empty(&host->h_reclaim)); mutex_lock(&nlm_host_mutex); nlm_destroy_host_locked(host); mutex_unlock(&nlm_host_mutex); } } /** * nlmsvc_lookup_host - Find an NLM host handle matching a remote client * @rqstp: incoming NLM request * @hostname: name of client host * @hostname_len: length of client hostname * * Returns an nlm_host structure that matches the [client address, * transport protocol, NLM version, client hostname] of the passed-in * NLM request. If one doesn't already exist in the host cache, a * new handle is created and returned. * * Before possibly creating a new nlm_host, construct a sockaddr * for a specific source address in case the local system has * multiple network addresses. The family of the address in * rq_daddr is guaranteed to be the same as the family of the * address in rq_addr, so it's safe to use the same family for * the source address. */ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len) { struct hlist_head *chain; struct nlm_host *host = NULL; struct nsm_handle *nsm = NULL; struct sockaddr *src_sap = svc_daddr(rqstp); size_t src_len = rqstp->rq_daddrlen; struct net *net = SVC_NET(rqstp); struct nlm_lookup_host_info ni = { .server = 1, .sap = svc_addr(rqstp), .salen = rqstp->rq_addrlen, .protocol = rqstp->rq_prot, .version = rqstp->rq_vers, .hostname = hostname, .hostname_len = hostname_len, .net = net, }; struct lockd_net *ln = net_generic(net, lockd_net_id); dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__, (int)hostname_len, hostname, rqstp->rq_vers, (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp")); mutex_lock(&nlm_host_mutex); if (time_after_eq(jiffies, ln->next_gc)) nlm_gc_hosts(net); chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; hlist_for_each_entry(host, chain, h_hash) { if (host->net != net) continue; if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) continue; /* Same address. Share an NSM handle if we already have one */ if (nsm == NULL) nsm = host->h_nsmhandle; if (host->h_proto != ni.protocol) continue; if (host->h_version != ni.version) continue; if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap)) continue; /* Move to head of hash chain. 
*/ hlist_del(&host->h_hash); hlist_add_head(&host->h_hash, chain); nlm_get_host(host); dprintk("lockd: %s found host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); goto out; } host = nlm_alloc_host(&ni, nsm); if (unlikely(host == NULL)) goto out; memcpy(nlm_srcaddr(host), src_sap, src_len); host->h_srcaddrlen = src_len; hlist_add_head(&host->h_hash, chain); ln->nrhosts++; nrhosts++; dprintk("lockd: %s created host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); out: mutex_unlock(&nlm_host_mutex); return host; } /** * nlmsvc_release_host - release server nlm_host * @host: nlm_host to release * * Host is destroyed later in nlm_gc_host(). */ void nlmsvc_release_host(struct nlm_host *host) { if (host == NULL) return; dprintk("lockd: release server host %s\n", host->h_name); WARN_ON_ONCE(!host->h_server); atomic_dec(&host->h_count); } /* * Create the NLM RPC client for an NLM peer */ struct rpc_clnt * nlm_bind_host(struct nlm_host *host) { struct rpc_clnt *clnt; dprintk("lockd: nlm_bind_host %s (%s)\n", host->h_name, host->h_addrbuf); /* Lock host handle */ mutex_lock(&host->h_mutex); /* If we've already created an RPC client, check whether * RPC rebind is required */ if ((clnt = host->h_rpcclnt) != NULL) { if (time_after_eq(jiffies, host->h_nextrebind)) { rpc_force_rebind(clnt); host->h_nextrebind = jiffies + NLM_HOST_REBIND; dprintk("lockd: next rebind in %lu jiffies\n", host->h_nextrebind - jiffies); } } else { unsigned long increment = nlmsvc_timeout; struct rpc_timeout timeparms = { .to_initval = increment, .to_increment = increment, .to_maxval = increment * 6UL, .to_retries = 5U, }; struct rpc_create_args args = { .net = host->net, .protocol = host->h_proto, .address = nlm_addr(host), .addrsize = host->h_addrlen, .timeout = &timeparms, .servername = host->h_name, .program = &nlm_program, .version = host->h_version, .authflavor = RPC_AUTH_UNIX, .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_AUTOBIND), }; /* * lockd retries server side blocks automatically so we want * those to be soft RPC calls. Client side calls need to be * hard RPC tasks. 
*/ if (!host->h_server) args.flags |= RPC_CLNT_CREATE_HARDRTRY; if (host->h_noresvport) args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; if (host->h_srcaddrlen) args.saddress = nlm_srcaddr(host); clnt = rpc_create(&args); if (!IS_ERR(clnt)) host->h_rpcclnt = clnt; else { printk("lockd: couldn't create RPC handle for %s\n", host->h_name); clnt = NULL; } } mutex_unlock(&host->h_mutex); return clnt; } /* * Force a portmap lookup of the remote lockd port */ void nlm_rebind_host(struct nlm_host *host) { dprintk("lockd: rebind host %s\n", host->h_name); if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { rpc_force_rebind(host->h_rpcclnt); host->h_nextrebind = jiffies + NLM_HOST_REBIND; } } /* * Increment NLM host count */ struct nlm_host * nlm_get_host(struct nlm_host *host) { if (host) { dprintk("lockd: get host %s\n", host->h_name); atomic_inc(&host->h_count); host->h_expires = jiffies + NLM_HOST_EXPIRE; } return host; } static struct nlm_host *next_host_state(struct hlist_head *cache, struct nsm_handle *nsm, const struct nlm_reboot *info) { struct nlm_host *host; struct hlist_head *chain; mutex_lock(&nlm_host_mutex); for_each_host(host, chain, cache) { if (host->h_nsmhandle == nsm && host->h_nsmstate != info->state) { host->h_nsmstate = info->state; host->h_state++; nlm_get_host(host); mutex_unlock(&nlm_host_mutex); return host; } } mutex_unlock(&nlm_host_mutex); return NULL; } /** * nlm_host_rebooted - Release all resources held by rebooted host * @net: network namespace * @info: pointer to decoded results of NLM_SM_NOTIFY call * * We were notified that the specified host has rebooted. Release * all resources held by that peer. */ void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info) { struct nsm_handle *nsm; struct nlm_host *host; nsm = nsm_reboot_lookup(net, info); if (unlikely(nsm == NULL)) return; /* Mark all hosts tied to this NSM state as having rebooted. * We run the loop repeatedly, because we drop the host table * lock for this. * To avoid processing a host several times, we match the nsmstate. 
*/ while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) { nlmsvc_free_host_resources(host); nlmsvc_release_host(host); } while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) { nlmclnt_recovery(host); nlmclnt_release_host(host); } nsm_release(nsm); } static void nlm_complain_hosts(struct net *net) { struct hlist_head *chain; struct nlm_host *host; if (net) { struct lockd_net *ln = net_generic(net, lockd_net_id); if (ln->nrhosts == 0) return; printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net); dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net); } else { if (nrhosts == 0) return; printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); dprintk("lockd: %lu hosts left:\n", nrhosts); } for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; dprintk(" %s (cnt %d use %d exp %ld net %p)\n", host->h_name, atomic_read(&host->h_count), host->h_inuse, host->h_expires, host->net); } } void nlm_shutdown_hosts_net(struct net *net) { struct hlist_head *chain; struct nlm_host *host; mutex_lock(&nlm_host_mutex); /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts in net %p...\n", net); for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; host->h_expires = jiffies - 1; if (host->h_rpcclnt) { rpc_shutdown_client(host->h_rpcclnt); host->h_rpcclnt = NULL; } } /* Then, perform a garbage collection pass */ nlm_gc_hosts(net); mutex_unlock(&nlm_host_mutex); nlm_complain_hosts(net); } /* * Shut down the hosts module. * Note that this routine is called only at server shutdown time. */ void nlm_shutdown_hosts(void) { dprintk("lockd: shutting down host module\n"); nlm_shutdown_hosts_net(NULL); } /* * Garbage collect any unused NLM hosts. * This GC combines reference counting for async operations with * mark & sweep for resources held by remote clients. */ static void nlm_gc_hosts(struct net *net) { struct hlist_head *chain; struct hlist_node *next; struct nlm_host *host; dprintk("lockd: host garbage collection for net %p\n", net); for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; host->h_inuse = 0; } /* Mark all hosts that hold locks, blocks or shares */ nlmsvc_mark_resources(net); for_each_host_safe(host, next, chain, nlm_server_hosts) { if (net && host->net != net) continue; if (atomic_read(&host->h_count) || host->h_inuse || time_before(jiffies, host->h_expires)) { dprintk("nlm_gc_hosts skipping %s " "(cnt %d use %d exp %ld net %p)\n", host->h_name, atomic_read(&host->h_count), host->h_inuse, host->h_expires, host->net); continue; } nlm_destroy_host_locked(host); } if (net) { struct lockd_net *ln = net_generic(net, lockd_net_id); ln->next_gc = jiffies + NLM_HOST_COLLECT; } }
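Editor's note: the host-table lookups in the lockd record above rely on the endian-stable fold documented at __nlm_hash32(). The stand-alone snippet below reproduces just that fold for one 32-bit word and masks it to a bucket, mirroring nlm_hash_address(); the sample address and main() harness are illustrative, and the kernel feeds the fold the address in network byte order.

/* Minimal sketch of lockd's hash fold: XOR the 16-bit halves, then the
 * 8-bit halves, and mask to the NLM_HOST_NRHASH-bucket table. */
#include <stdint.h>
#include <stdio.h>

#define NLM_HOST_NRHASH 32

static unsigned int nlm_hash32(uint32_t n)
{
	unsigned int hash = n ^ (n >> 16);

	return hash ^ (hash >> 8);
}

int main(void)
{
	uint32_t addr = 0xc0a80101; /* 192.168.1.1, shown in host order here */

	printf("bucket = %u\n", nlm_hash32(addr) & (NLM_HOST_NRHASH - 1));
	return 0;
}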
null
null
null
null
76,495
30,345
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
195,340
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * property.c - Unified device property interface. * * Copyright (C) 2014, Intel Corporation * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/acpi.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/property.h> #include <linux/etherdevice.h> #include <linux/phy.h> struct property_set { struct fwnode_handle fwnode; const struct property_entry *properties; }; static inline bool is_pset_node(struct fwnode_handle *fwnode) { return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA; } static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) { return is_pset_node(fwnode) ? container_of(fwnode, struct property_set, fwnode) : NULL; } static const struct property_entry *pset_prop_get(struct property_set *pset, const char *name) { const struct property_entry *prop; if (!pset || !pset->properties) return NULL; for (prop = pset->properties; prop->name; prop++) if (!strcmp(name, prop->name)) return prop; return NULL; } static const void *pset_prop_find(struct property_set *pset, const char *propname, size_t length) { const struct property_entry *prop; const void *pointer; prop = pset_prop_get(pset, propname); if (!prop) return ERR_PTR(-EINVAL); if (prop->is_array) pointer = prop->pointer.raw_data; else pointer = &prop->value.raw_data; if (!pointer) return ERR_PTR(-ENODATA); if (length > prop->length) return ERR_PTR(-EOVERFLOW); return pointer; } static int pset_prop_read_u8_array(struct property_set *pset, const char *propname, u8 *values, size_t nval) { const void *pointer; size_t length = nval * sizeof(*values); pointer = pset_prop_find(pset, propname, length); if (IS_ERR(pointer)) return PTR_ERR(pointer); memcpy(values, pointer, length); return 0; } static int pset_prop_read_u16_array(struct property_set *pset, const char *propname, u16 *values, size_t nval) { const void *pointer; size_t length = nval * sizeof(*values); pointer = pset_prop_find(pset, propname, length); if (IS_ERR(pointer)) return PTR_ERR(pointer); memcpy(values, pointer, length); return 0; } static int pset_prop_read_u32_array(struct property_set *pset, const char *propname, u32 *values, size_t nval) { const void *pointer; size_t length = nval * sizeof(*values); pointer = pset_prop_find(pset, propname, length); if (IS_ERR(pointer)) return PTR_ERR(pointer); memcpy(values, pointer, length); return 0; } static int pset_prop_read_u64_array(struct property_set *pset, const char *propname, u64 *values, size_t nval) { const void *pointer; size_t length = nval * sizeof(*values); pointer = pset_prop_find(pset, propname, length); if (IS_ERR(pointer)) return PTR_ERR(pointer); memcpy(values, pointer, length); return 0; } static int pset_prop_count_elems_of_size(struct property_set *pset, const char *propname, size_t length) { const struct property_entry *prop; prop = pset_prop_get(pset, propname); if (!prop) return -EINVAL; return prop->length / length; } static int pset_prop_read_string_array(struct property_set *pset, const char *propname, const char **strings, size_t nval) { const void *pointer; size_t length = nval * sizeof(*strings); pointer = pset_prop_find(pset, propname, length); if (IS_ERR(pointer)) return PTR_ERR(pointer); memcpy(strings, pointer, length); return 0; } static 
int pset_prop_read_string(struct property_set *pset, const char *propname, const char **strings) { const struct property_entry *prop; const char * const *pointer; prop = pset_prop_get(pset, propname); if (!prop) return -EINVAL; if (!prop->is_string) return -EILSEQ; if (prop->is_array) { pointer = prop->pointer.str; if (!pointer) return -ENODATA; } else { pointer = &prop->value.str; if (*pointer && strnlen(*pointer, prop->length) >= prop->length) return -EILSEQ; } *strings = *pointer; return 0; } static inline struct fwnode_handle *dev_fwnode(struct device *dev) { return IS_ENABLED(CONFIG_OF) && dev->of_node ? &dev->of_node->fwnode : dev->fwnode; } /** * device_property_present - check if a property of a device is present * @dev: Device whose property is being checked * @propname: Name of the property * * Check if property @propname is present in the device firmware description. */ bool device_property_present(struct device *dev, const char *propname) { return fwnode_property_present(dev_fwnode(dev), propname); } EXPORT_SYMBOL_GPL(device_property_present); static bool __fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) { if (is_of_node(fwnode)) return of_property_read_bool(to_of_node(fwnode), propname); else if (is_acpi_node(fwnode)) return !acpi_node_prop_get(fwnode, propname, NULL); else if (is_pset_node(fwnode)) return !!pset_prop_get(to_pset_node(fwnode), propname); return false; } /** * fwnode_property_present - check if a property of a firmware node is present * @fwnode: Firmware node whose property to check * @propname: Name of the property */ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) { bool ret; ret = __fwnode_property_present(fwnode, propname); if (ret == false && !IS_ERR_OR_NULL(fwnode) && !IS_ERR_OR_NULL(fwnode->secondary)) ret = __fwnode_property_present(fwnode->secondary, propname); return ret; } EXPORT_SYMBOL_GPL(fwnode_property_present); /** * device_property_read_u8_array - return a u8 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u8 properties with @propname from the device * firmware description and stores them to @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_u8_array(struct device *dev, const char *propname, u8 *val, size_t nval) { return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u8_array); /** * device_property_read_u16_array - return a u16 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u16 properties with @propname from the device * firmware description and stores them to @val if found. 
* * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_u16_array(struct device *dev, const char *propname, u16 *val, size_t nval) { return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u16_array); /** * device_property_read_u32_array - return a u32 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u32 properties with @propname from the device * firmware description and stores them to @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_u32_array(struct device *dev, const char *propname, u32 *val, size_t nval) { return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u32_array); /** * device_property_read_u64_array - return a u64 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u64 properties with @propname from the device * firmware description and stores them to @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_u64_array(struct device *dev, const char *propname, u64 *val, size_t nval) { return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u64_array); /** * device_property_read_string_array - return a string array property of device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of string properties with @propname from the device * firmware description and stores them to @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property is not an array of strings, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. 
*/ int device_property_read_string_array(struct device *dev, const char *propname, const char **val, size_t nval) { return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_string_array); /** * device_property_read_string - return a string property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The value is stored here * * Function reads property @propname from the device firmware description and * stores the value into @val if found. The value is checked to be a string. * * Return: %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property type is not a string. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_string(struct device *dev, const char *propname, const char **val) { return fwnode_property_read_string(dev_fwnode(dev), propname, val); } EXPORT_SYMBOL_GPL(device_property_read_string); /** * device_property_match_string - find a string in an array and return index * @dev: Device to get the property of * @propname: Name of the property holding the array * @string: String to look for * * Find a given string in a string array and, if found, return its index. * * Return: index, starting at %0, of the string in the array if found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, * %-ENXIO if no suitable firmware interface is present. */ int device_property_match_string(struct device *dev, const char *propname, const char *string) { return fwnode_property_match_string(dev_fwnode(dev), propname, string); } EXPORT_SYMBOL_GPL(device_property_match_string); #define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \ (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \ : of_property_count_elems_of_size((node), (propname), sizeof(type)) #define PSET_PROP_READ_ARRAY(node, propname, type, val, nval) \ (val) ? pset_prop_read_##type##_array((node), (propname), (val), (nval)) \ : pset_prop_count_elems_of_size((node), (propname), sizeof(type)) #define FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ ({ \ int _ret_; \ if (is_of_node(_fwnode_)) \ _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \ _type_, _val_, _nval_); \ else if (is_acpi_node(_fwnode_)) \ _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \ _val_, _nval_); \ else if (is_pset_node(_fwnode_)) \ _ret_ = PSET_PROP_READ_ARRAY(to_pset_node(_fwnode_), _propname_, \ _type_, _val_, _nval_); \ else \ _ret_ = -ENXIO; \ _ret_; \ }) #define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ ({ \ int _ret_; \ _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \ _val_, _nval_); \ if (_ret_ == -EINVAL && !IS_ERR_OR_NULL(_fwnode_) && \ !IS_ERR_OR_NULL(_fwnode_->secondary)) \ _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \ _proptype_, _val_, _nval_); \ _ret_; \ }) /** * fwnode_property_read_u8_array - return a u8 array property of firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u8 properties with @propname from @fwnode and store them to * @val if found. 
* * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval) { return FWNODE_PROP_READ_ARRAY(fwnode, propname, u8, DEV_PROP_U8, val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); /** * fwnode_property_read_u16_array - return a u16 array property of firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u16 properties with @propname from @fwnode and store them to * @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, const char *propname, u16 *val, size_t nval) { return FWNODE_PROP_READ_ARRAY(fwnode, propname, u16, DEV_PROP_U16, val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); /** * fwnode_property_read_u32_array - return a u32 array property of firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u32 properties with @propname from @fwnode and store them to * @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, const char *propname, u32 *val, size_t nval) { return FWNODE_PROP_READ_ARRAY(fwnode, propname, u32, DEV_PROP_U32, val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); /** * fwnode_property_read_u64_array - return a u64 array property of firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u64 properties with @propname from @fwnode and store them to * @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. 
*/ int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, const char *propname, u64 *val, size_t nval) { return FWNODE_PROP_READ_ARRAY(fwnode, propname, u64, DEV_PROP_U64, val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); static int __fwnode_property_read_string_array(struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval) { if (is_of_node(fwnode)) return val ? of_property_read_string_array(to_of_node(fwnode), propname, val, nval) : of_property_count_strings(to_of_node(fwnode), propname); else if (is_acpi_node(fwnode)) return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, val, nval); else if (is_pset_node(fwnode)) return val ? pset_prop_read_string_array(to_pset_node(fwnode), propname, val, nval) : pset_prop_count_elems_of_size(to_pset_node(fwnode), propname, sizeof(const char *)); return -ENXIO; } static int __fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val) { if (is_of_node(fwnode)) return of_property_read_string(to_of_node(fwnode), propname, val); else if (is_acpi_node(fwnode)) return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, val, 1); else if (is_pset_node(fwnode)) return pset_prop_read_string(to_pset_node(fwnode), propname, val); return -ENXIO; } /** * fwnode_property_read_string_array - return string array property of a node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read a string list property @propname from the given firmware node and store * them to @val if found. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval) { int ret; ret = __fwnode_property_read_string_array(fwnode, propname, val, nval); if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && !IS_ERR_OR_NULL(fwnode->secondary)) ret = __fwnode_property_read_string_array(fwnode->secondary, propname, val, nval); return ret; } EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); /** * fwnode_property_read_string - return a string property of a firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The value is stored here * * Read property @propname from the given firmware node and store the value into * @val if found. The value is checked to be a string. * * Return: %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property is not a string, * %-ENXIO if no suitable firmware interface is present. 
*/ int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val) { int ret; ret = __fwnode_property_read_string(fwnode, propname, val); if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && !IS_ERR_OR_NULL(fwnode->secondary)) ret = __fwnode_property_read_string(fwnode->secondary, propname, val); return ret; } EXPORT_SYMBOL_GPL(fwnode_property_read_string); /** * fwnode_property_match_string - find a string in an array and return index * @fwnode: Firmware node to get the property of * @propname: Name of the property holding the array * @string: String to look for * * Find a given string in a string array and, if found, return its index. * * Return: index, starting at %0, of the string in the array if found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_match_string(struct fwnode_handle *fwnode, const char *propname, const char *string) { const char **values; int nval, ret; nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0); if (nval < 0) return nval; if (nval == 0) return -ENODATA; values = kcalloc(nval, sizeof(*values), GFP_KERNEL); if (!values) return -ENOMEM; ret = fwnode_property_read_string_array(fwnode, propname, values, nval); if (ret < 0) goto out; ret = match_string(values, nval, string); if (ret < 0) ret = -ENODATA; out: kfree(values); return ret; } EXPORT_SYMBOL_GPL(fwnode_property_match_string); static int property_copy_string_array(struct property_entry *dst, const struct property_entry *src) { char **d; size_t nval = src->length / sizeof(*d); int i; d = kcalloc(nval, sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; for (i = 0; i < nval; i++) { d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL); if (!d[i] && src->pointer.str[i]) { while (--i >= 0) kfree(d[i]); kfree(d); return -ENOMEM; } } dst->pointer.raw_data = d; return 0; } static int property_entry_copy_data(struct property_entry *dst, const struct property_entry *src) { int error; dst->name = kstrdup(src->name, GFP_KERNEL); if (!dst->name) return -ENOMEM; if (src->is_array) { if (!src->length) { error = -ENODATA; goto out_free_name; } if (src->is_string) { error = property_copy_string_array(dst, src); if (error) goto out_free_name; } else { dst->pointer.raw_data = kmemdup(src->pointer.raw_data, src->length, GFP_KERNEL); if (!dst->pointer.raw_data) { error = -ENOMEM; goto out_free_name; } } } else if (src->is_string) { dst->value.str = kstrdup(src->value.str, GFP_KERNEL); if (!dst->value.str && src->value.str) { error = -ENOMEM; goto out_free_name; } } else { dst->value.raw_data = src->value.raw_data; } dst->length = src->length; dst->is_array = src->is_array; dst->is_string = src->is_string; return 0; out_free_name: kfree(dst->name); return error; } static void property_entry_free_data(const struct property_entry *p) { size_t i, nval; if (p->is_array) { if (p->is_string && p->pointer.str) { nval = p->length / sizeof(const char *); for (i = 0; i < nval; i++) kfree(p->pointer.str[i]); } kfree(p->pointer.raw_data); } else if (p->is_string) { kfree(p->value.str); } kfree(p->name); } /** * property_entries_dup - duplicate array of properties * @properties: array of properties to copy * * This function creates a deep copy of the given NULL-terminated array * of property entries. 
*/ struct property_entry * property_entries_dup(const struct property_entry *properties) { struct property_entry *p; int i, n = 0; while (properties[n].name) n++; p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); for (i = 0; i < n; i++) { int ret = property_entry_copy_data(&p[i], &properties[i]); if (ret) { while (--i >= 0) property_entry_free_data(&p[i]); kfree(p); return ERR_PTR(ret); } } return p; } EXPORT_SYMBOL_GPL(property_entries_dup); /** * property_entries_free - free previously allocated array of properties * @properties: array of properties to destroy * * This function frees given NULL-terminated array of property entries, * along with their data. */ void property_entries_free(const struct property_entry *properties) { const struct property_entry *p; for (p = properties; p->name; p++) property_entry_free_data(p); kfree(properties); } EXPORT_SYMBOL_GPL(property_entries_free); /** * pset_free_set - releases memory allocated for copied property set * @pset: Property set to release * * Function takes previously copied property set and releases all the * memory allocated to it. */ static void pset_free_set(struct property_set *pset) { if (!pset) return; property_entries_free(pset->properties); kfree(pset); } /** * pset_copy_set - copies property set * @pset: Property set to copy * * This function takes a deep copy of the given property set and returns * pointer to the copy. Call pset_free_set() to free resources * allocated in this function. * * Return: Pointer to the new property set or error pointer. */ static struct property_set *pset_copy_set(const struct property_set *pset) { struct property_entry *properties; struct property_set *p; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); properties = property_entries_dup(pset->properties); if (IS_ERR(properties)) { kfree(p); return ERR_CAST(properties); } p->properties = properties; return p; } /** * device_remove_properties - Remove properties from a device object. * @dev: Device whose properties to remove. * * The function removes properties previously associated with the device * secondary firmware node with device_add_properties(). Memory allocated * to the properties will also be released. */ void device_remove_properties(struct device *dev) { struct fwnode_handle *fwnode; fwnode = dev_fwnode(dev); if (!fwnode) return; /* * Pick either primary or secondary node depending on which one holds * the pset. If there is no real firmware node (ACPI/DT) primary * will hold the pset. */ if (is_pset_node(fwnode)) { set_primary_fwnode(dev, NULL); pset_free_set(to_pset_node(fwnode)); } else { fwnode = fwnode->secondary; if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { set_secondary_fwnode(dev, NULL); pset_free_set(to_pset_node(fwnode)); } } } EXPORT_SYMBOL_GPL(device_remove_properties); /** * device_add_properties - Add a collection of properties to a device object. * @dev: Device to add properties to. * @properties: Collection of properties to add. * * Associate a collection of device properties represented by @properties with * @dev as its secondary firmware node. The function takes a copy of * @properties. 
*/ int device_add_properties(struct device *dev, const struct property_entry *properties) { struct property_set *p, pset; if (!properties) return -EINVAL; pset.properties = properties; p = pset_copy_set(&pset); if (IS_ERR(p)) return PTR_ERR(p); p->fwnode.type = FWNODE_PDATA; set_secondary_fwnode(dev, &p->fwnode); return 0; } EXPORT_SYMBOL_GPL(device_add_properties); /** * device_get_next_child_node - Return the next child node handle for a device * @dev: Device to find the next child node for. * @child: Handle to one of the device's child nodes or a null handle. */ struct fwnode_handle *device_get_next_child_node(struct device *dev, struct fwnode_handle *child) { if (IS_ENABLED(CONFIG_OF) && dev->of_node) { struct device_node *node; node = of_get_next_available_child(dev->of_node, to_of_node(child)); if (node) return &node->fwnode; } else if (IS_ENABLED(CONFIG_ACPI)) { return acpi_get_next_subnode(dev, child); } return NULL; } EXPORT_SYMBOL_GPL(device_get_next_child_node); /** * device_get_named_child_node - Return first matching named child node handle * @dev: Device to find the named child node for. * @childname: String to match child node name against. */ struct fwnode_handle *device_get_named_child_node(struct device *dev, const char *childname) { struct fwnode_handle *child; /* * Find first matching named child node of this device. * For ACPI this will be a data only sub-node. */ device_for_each_child_node(dev, child) { if (is_of_node(child)) { if (!of_node_cmp(to_of_node(child)->name, childname)) return child; } else if (is_acpi_data_node(child)) { if (acpi_data_node_match(child, childname)) return child; } } return NULL; } EXPORT_SYMBOL_GPL(device_get_named_child_node); /** * fwnode_handle_put - Drop reference to a device node * @fwnode: Pointer to the device node to drop the reference to. * * This has to be used when terminating device_for_each_child_node() iteration * with break or return to prevent stale device node references from being left * behind. */ void fwnode_handle_put(struct fwnode_handle *fwnode) { if (is_of_node(fwnode)) of_node_put(to_of_node(fwnode)); } EXPORT_SYMBOL_GPL(fwnode_handle_put); /** * device_get_child_node_count - return the number of child nodes for device * @dev: Device to count the child nodes for */ unsigned int device_get_child_node_count(struct device *dev) { struct fwnode_handle *child; unsigned int count = 0; device_for_each_child_node(dev, child) count++; return count; } EXPORT_SYMBOL_GPL(device_get_child_node_count); bool device_dma_supported(struct device *dev) { /* For DT, this is always supported. * For ACPI, this depends on CCA, which * is determined by the acpi_dma_supported(). */ if (IS_ENABLED(CONFIG_OF) && dev->of_node) return true; return acpi_dma_supported(ACPI_COMPANION(dev)); } EXPORT_SYMBOL_GPL(device_dma_supported); enum dev_dma_attr device_get_dma_attr(struct device *dev) { enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED; if (IS_ENABLED(CONFIG_OF) && dev->of_node) { if (of_dma_is_coherent(dev->of_node)) attr = DEV_DMA_COHERENT; else attr = DEV_DMA_NON_COHERENT; } else attr = acpi_get_dma_attr(ACPI_COMPANION(dev)); return attr; } EXPORT_SYMBOL_GPL(device_get_dma_attr); /** * device_get_phy_mode - Get phy mode for given device * @dev: Pointer to the given device * * The function gets phy interface string from property 'phy-mode' or * 'phy-connection-type', and returns its index in phy_modes table, or errno in * error case. 
*/ int device_get_phy_mode(struct device *dev) { const char *pm; int err, i; err = device_property_read_string(dev, "phy-mode", &pm); if (err < 0) err = device_property_read_string(dev, "phy-connection-type", &pm); if (err < 0) return err; for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) if (!strcasecmp(pm, phy_modes(i))) return i; return -ENODEV; } EXPORT_SYMBOL_GPL(device_get_phy_mode); static void *device_get_mac_addr(struct device *dev, const char *name, char *addr, int alen) { int ret = device_property_read_u8_array(dev, name, addr, alen); if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr)) return addr; return NULL; } /** * device_get_mac_address - Get the MAC for a given device * @dev: Pointer to the device * @addr: Address of buffer to store the MAC in * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN * * Search the firmware node for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain the "most recent" MAC * address. If that isn't set, then 'local-mac-address' is checked next, * because that is the default address. If that isn't set, then the obsolete * 'address' is checked, just in case we're using an old device tree. * * Note that the 'address' property is supposed to contain a virtual address of * the register set, but some DTS files have redefined that property to be the * MAC address. * * All-zero MAC addresses are rejected, because those could be properties that * exist in the firmware tables, but were not updated by the firmware. For * example, the DTS could define 'mac-address' and 'local-mac-address', with * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'. * In this case, the real MAC is in 'local-mac-address', and 'mac-address' * exists but is all zeros. */ void *device_get_mac_address(struct device *dev, char *addr, int alen) { char *res; res = device_get_mac_addr(dev, "mac-address", addr, alen); if (res) return res; res = device_get_mac_addr(dev, "local-mac-address", addr, alen); if (res) return res; return device_get_mac_addr(dev, "address", addr, alen); } EXPORT_SYMBOL(device_get_mac_address);
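The property.c listing above ends here. As a hedged illustration of the consumer side of this API (not part of the file; the device pointer, property names, and fallback values are hypothetical), a minimal probe-time reader might look like the sketch below. It only uses calls defined or exported above or declared in <linux/property.h>: device_property_present(), device_property_read_u32(), and device_property_read_string().

/* Illustrative sketch only -- not part of property.c. */
#include <linux/device.h>
#include <linux/property.h>

static int example_read_props(struct device *dev)
{
	const char *label;
	u32 clock_hz = 100000;	/* hypothetical fallback if the property is absent */

	/* Same call regardless of whether the node comes from DT, ACPI or a pset. */
	if (device_property_present(dev, "example,clock-frequency"))
		device_property_read_u32(dev, "example,clock-frequency",
					 &clock_hz);

	if (device_property_read_string(dev, "label", &label))
		label = "unlabeled";	/* string property missing or invalid */

	dev_info(dev, "%s running at %u Hz\n", label, clock_hz);
	return 0;
}

Because the fwnode helpers fall back to the secondary node, the same sketch works whether the properties come from DT, ACPI, or a property set attached via device_add_properties().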
null
null
null
null
103,687
55,041
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
55,041
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/renderer/safe_browsing/phishing_url_feature_extractor.h" #include <algorithm> #include <string> #include <vector> #include "base/logging.h" #include "base/metrics/histogram_macros.h" #include "base/strings/string_split.h" #include "base/strings/string_util.h" #include "base/timer/elapsed_timer.h" #include "chrome/renderer/safe_browsing/features.h" #include "net/base/registry_controlled_domains/registry_controlled_domain.h" #include "url/gurl.h" namespace safe_browsing { PhishingUrlFeatureExtractor::PhishingUrlFeatureExtractor() {} PhishingUrlFeatureExtractor::~PhishingUrlFeatureExtractor() {} bool PhishingUrlFeatureExtractor::ExtractFeatures(const GURL& url, FeatureMap* features) { base::ElapsedTimer timer; if (url.HostIsIPAddress()) { if (!features->AddBooleanFeature(features::kUrlHostIsIpAddress)) return false; } else { // Remove any leading/trailing dots. std::string host; base::TrimString(url.host(), ".", &host); // TODO(bryner): Ensure that the url encoding is consistent with // the features in the model. // Disallow unknown registries so that we don't classify // partial hostnames (e.g. "www.subdomain"). size_t registry_length = net::registry_controlled_domains::GetCanonicalHostRegistryLength( host, net::registry_controlled_domains::EXCLUDE_UNKNOWN_REGISTRIES, net::registry_controlled_domains::EXCLUDE_PRIVATE_REGISTRIES); if (registry_length == 0 || registry_length == std::string::npos) { DVLOG(1) << "Could not find TLD for host: " << host; return false; } DCHECK_LT(registry_length, host.size()) << "Non-zero registry length, but " "host is only a TLD: " << host; size_t tld_start = host.size() - registry_length; if (!features->AddBooleanFeature(features::kUrlTldToken + host.substr(tld_start))) return false; // Pull off the TLD and the preceding dot. host.erase(tld_start - 1); std::vector<std::string> host_tokens = base::SplitString( host, ".", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); if (host_tokens.empty()) { DVLOG(1) << "Could not find domain for host: " << host; return false; } if (!features->AddBooleanFeature(features::kUrlDomainToken + host_tokens.back())) return false; host_tokens.pop_back(); // Now we're just left with the "other" host tokens. for (std::vector<std::string>::iterator it = host_tokens.begin(); it != host_tokens.end(); ++it) { if (!features->AddBooleanFeature(features::kUrlOtherHostToken + *it)) return false; } if (host_tokens.size() > 1) { if (!features->AddBooleanFeature(features::kUrlNumOtherHostTokensGTOne)) return false; if (host_tokens.size() > 3) { if (!features->AddBooleanFeature( features::kUrlNumOtherHostTokensGTThree)) return false; } } } std::vector<std::string> long_tokens; SplitStringIntoLongAlphanumTokens(url.path(), &long_tokens); for (const std::string& token : long_tokens) { if (!features->AddBooleanFeature(features::kUrlPathToken + token)) return false; } UMA_HISTOGRAM_TIMES("SBClientPhishing.URLFeatureTime", timer.Elapsed()); return true; } // static void PhishingUrlFeatureExtractor::SplitStringIntoLongAlphanumTokens( const std::string& full, std::vector<std::string>* tokens) { // Split on common non-alphanumerics. // TODO(bryner): Split on all(?) non-alphanumerics and handle %XX properly. 
static const char kTokenSeparators[] = ".,\\/_-|=%:!&"; for (const base::StringPiece& token : base::SplitStringPiece(full, kTokenSeparators, base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY)) { // Copy over only the splits that are 3 or more chars long. // TODO(bryner): Determine a meaningful min size. if (token.length() >= kMinPathComponentLength) tokens->push_back(token.as_string()); } } } // namespace safe_browsing
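End of the phishing_url_feature_extractor.cc listing. To make the path tokenization concrete, here is a hedged, dependency-free C++ sketch (not Chromium code) that mirrors the intent of SplitStringIntoLongAlphanumTokens(): it splits on the same separator set and keeps only tokens of three or more characters. The function name, the main() driver, and the sample URL path are illustrative assumptions.

// Illustrative sketch only -- not Chromium code.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> SplitLongTokens(const std::string& full) {
  static const std::string kSeparators = ".,\\/_-|=%:!&";
  const std::size_t kMinLen = 3;  // stands in for kMinPathComponentLength
  std::vector<std::string> tokens;
  std::size_t start = 0;
  while (start <= full.size()) {
    std::size_t end = full.find_first_of(kSeparators, start);
    if (end == std::string::npos)
      end = full.size();
    if (end - start >= kMinLen)  // keep only splits of 3+ characters
      tokens.push_back(full.substr(start, end - start));
    start = end + 1;
  }
  return tokens;
}

int main() {
  // Expected output: images, 2018, login, page, html
  for (const auto& token : SplitLongTokens("/images/2018/login-page.html"))
    std::cout << token << '\n';
  return 0;
}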
null
null
null
null
51,904
21,558
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
21,558
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/renderer/media/stream/webmediaplayer_ms.h" #include <stddef.h> #include <limits> #include <string> #include <utility> #include "base/bind.h" #include "base/callback.h" #include "base/threading/thread_task_runner_handle.h" #include "build/build_config.h" #include "cc/blink/web_layer_impl.h" #include "cc/layers/video_frame_provider_client_impl.h" #include "cc/layers/video_layer.h" #include "content/child/child_process.h" #include "content/public/common/content_features.h" #include "content/public/renderer/media_stream_audio_renderer.h" #include "content/public/renderer/media_stream_renderer_factory.h" #include "content/public/renderer/media_stream_video_renderer.h" #include "content/renderer/media/stream/media_stream_audio_track.h" #include "content/renderer/media/stream/media_stream_video_track.h" #include "content/renderer/media/stream/webmediaplayer_ms_compositor.h" #include "content/renderer/media/web_media_element_source_utils.h" #include "content/renderer/media/webrtc_logging.h" #include "content/renderer/render_frame_impl.h" #include "content/renderer/render_thread_impl.h" #include "media/base/bind_to_current_loop.h" #include "media/base/media_content_type.h" #include "media/base/media_log.h" #include "media/base/video_frame.h" #include "media/base/video_rotation.h" #include "media/base/video_types.h" #include "media/blink/webmediaplayer_util.h" #include "media/video/gpu_memory_buffer_video_frame_pool.h" #include "services/ui/public/cpp/gpu/context_provider_command_buffer.h" #include "third_party/blink/public/platform/web_media_player_client.h" #include "third_party/blink/public/platform/web_media_player_source.h" #include "third_party/blink/public/platform/web_rect.h" #include "third_party/blink/public/platform/web_size.h" #include "third_party/blink/public/web/web_local_frame.h" namespace { enum class RendererReloadAction { KEEP_RENDERER, REMOVE_RENDERER, NEW_RENDERER }; } // namespace namespace content { // FrameDeliverer is responsible for delivering frames received on // the IO thread by calling the EnqueueFrame() method of |compositor_|. // // It is created on the main thread, but methods should be called and the // class should be destructed on the IO thread. 
class WebMediaPlayerMS::FrameDeliverer { public: FrameDeliverer(const base::WeakPtr<WebMediaPlayerMS>& player, const MediaStreamVideoRenderer::RepaintCB& enqueue_frame_cb, scoped_refptr<base::SingleThreadTaskRunner> media_task_runner, scoped_refptr<base::TaskRunner> worker_task_runner, media::GpuVideoAcceleratorFactories* gpu_factories) : last_frame_opaque_(true), last_frame_rotation_(media::VIDEO_ROTATION_0), received_first_frame_(false), main_task_runner_(base::ThreadTaskRunnerHandle::Get()), player_(player), enqueue_frame_cb_(enqueue_frame_cb), media_task_runner_(media_task_runner), weak_factory_for_pool_(this), weak_factory_(this) { io_thread_checker_.DetachFromThread(); if (gpu_factories && gpu_factories->ShouldUseGpuMemoryBuffersForVideoFrames() && base::FeatureList::IsEnabled( features::kWebRtcUseGpuMemoryBufferVideoFrames)) { gpu_memory_buffer_pool_.reset(new media::GpuMemoryBufferVideoFramePool( media_task_runner, worker_task_runner, gpu_factories)); } } ~FrameDeliverer() { DCHECK(io_thread_checker_.CalledOnValidThread()); if (gpu_memory_buffer_pool_) { DropCurrentPoolTasks(); media_task_runner_->DeleteSoon(FROM_HERE, gpu_memory_buffer_pool_.release()); } } void OnVideoFrame(scoped_refptr<media::VideoFrame> frame) { DCHECK(io_thread_checker_.CalledOnValidThread()); // On Android, stop passing frames. #if defined(OS_ANDROID) if (render_frame_suspended_) return; #endif // defined(OS_ANDROID) if (!gpu_memory_buffer_pool_) { EnqueueFrame(std::move(frame)); return; } // If |render_frame_suspended_|, we can keep passing the frames to keep the // latest frame in compositor up to date. However, creating GMB backed // frames is unnecessary, because the frames are not going to be shown for // the time period. if (render_frame_suspended_) { EnqueueFrame(std::move(frame)); // If there are any existing MaybeCreateHardwareFrame() calls, we do not // want those frames to be placed after the current one, so just drop // them. DropCurrentPoolTasks(); return; } // |gpu_memory_buffer_pool_| deletion is going to be posted to // |media_task_runner_|. base::Unretained() usage is fine since // |gpu_memory_buffer_pool_| outlives the task. 
media_task_runner_->PostTask( FROM_HERE, base::BindOnce( &media::GpuMemoryBufferVideoFramePool::MaybeCreateHardwareFrame, base::Unretained(gpu_memory_buffer_pool_.get()), frame, media::BindToCurrentLoop( base::BindOnce(&FrameDeliverer::EnqueueFrame, weak_factory_for_pool_.GetWeakPtr())))); } void SetRenderFrameSuspended(bool render_frame_suspended) { DCHECK(io_thread_checker_.CalledOnValidThread()); render_frame_suspended_ = render_frame_suspended; } MediaStreamVideoRenderer::RepaintCB GetRepaintCallback() { return base::Bind(&FrameDeliverer::OnVideoFrame, weak_factory_.GetWeakPtr()); } private: friend class WebMediaPlayerMS; void EnqueueFrame(const scoped_refptr<media::VideoFrame>& frame) { DCHECK(io_thread_checker_.CalledOnValidThread()); base::TimeTicks render_time; if (frame->metadata()->GetTimeTicks( media::VideoFrameMetadata::REFERENCE_TIME, &render_time)) { TRACE_EVENT1("media", "EnqueueFrame", "Ideal Render Instant", render_time.ToInternalValue()); } else { TRACE_EVENT0("media", "EnqueueFrame"); } const bool is_opaque = media::IsOpaque(frame->format()); media::VideoRotation video_rotation = media::VIDEO_ROTATION_0; ignore_result(frame->metadata()->GetRotation( media::VideoFrameMetadata::ROTATION, &video_rotation)); if (!received_first_frame_) { received_first_frame_ = true; last_frame_opaque_ = is_opaque; last_frame_rotation_ = video_rotation; main_task_runner_->PostTask( FROM_HERE, base::BindOnce(&WebMediaPlayerMS::OnFirstFrameReceived, player_, video_rotation, is_opaque)); } else { if (last_frame_opaque_ != is_opaque) { last_frame_opaque_ = is_opaque; main_task_runner_->PostTask( FROM_HERE, base::BindOnce(&WebMediaPlayerMS::OnOpacityChanged, player_, is_opaque)); } if (last_frame_rotation_ != video_rotation) { last_frame_rotation_ = video_rotation; main_task_runner_->PostTask( FROM_HERE, base::BindOnce(&WebMediaPlayerMS::OnRotationChanged, player_, video_rotation, is_opaque)); } } enqueue_frame_cb_.Run(frame); } void DropCurrentPoolTasks() { DCHECK(io_thread_checker_.CalledOnValidThread()); DCHECK(gpu_memory_buffer_pool_); if (!weak_factory_for_pool_.HasWeakPtrs()) return; // |gpu_memory_buffer_pool_| deletion is going to be posted to // |media_task_runner_|. base::Unretained() usage is fine since // |gpu_memory_buffer_pool_| outlives the task. media_task_runner_->PostTask( FROM_HERE, base::BindOnce(&media::GpuMemoryBufferVideoFramePool::Abort, base::Unretained(gpu_memory_buffer_pool_.get()))); weak_factory_for_pool_.InvalidateWeakPtrs(); } bool last_frame_opaque_; media::VideoRotation last_frame_rotation_; bool received_first_frame_; bool render_frame_suspended_ = false; const scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_; const base::WeakPtr<WebMediaPlayerMS> player_; const MediaStreamVideoRenderer::RepaintCB enqueue_frame_cb_; // Pool of GpuMemoryBuffers and resources used to create hardware frames. std::unique_ptr<media::GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_; const scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_; // Used for DCHECKs to ensure method calls are executed on the correct thread. 
base::ThreadChecker io_thread_checker_; base::WeakPtrFactory<FrameDeliverer> weak_factory_for_pool_; base::WeakPtrFactory<FrameDeliverer> weak_factory_; DISALLOW_COPY_AND_ASSIGN(FrameDeliverer); }; WebMediaPlayerMS::WebMediaPlayerMS( blink::WebLocalFrame* frame, blink::WebMediaPlayerClient* client, media::WebMediaPlayerDelegate* delegate, std::unique_ptr<media::MediaLog> media_log, std::unique_ptr<MediaStreamRendererFactory> factory, scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, scoped_refptr<base::SingleThreadTaskRunner> compositor_task_runner, scoped_refptr<base::SingleThreadTaskRunner> media_task_runner, scoped_refptr<base::TaskRunner> worker_task_runner, media::GpuVideoAcceleratorFactories* gpu_factories, const blink::WebString& sink_id, const blink::WebSecurityOrigin& security_origin) : frame_(frame), network_state_(WebMediaPlayer::kNetworkStateEmpty), ready_state_(WebMediaPlayer::kReadyStateHaveNothing), buffered_(static_cast<size_t>(0)), client_(client), delegate_(delegate), delegate_id_(0), paused_(true), video_rotation_(media::VIDEO_ROTATION_0), media_log_(std::move(media_log)), renderer_factory_(std::move(factory)), io_task_runner_(io_task_runner), compositor_task_runner_(compositor_task_runner), media_task_runner_(media_task_runner), worker_task_runner_(worker_task_runner), gpu_factories_(gpu_factories), initial_audio_output_device_id_(sink_id.Utf8()), initial_security_origin_(security_origin.IsNull() ? url::Origin() : url::Origin(security_origin)), volume_(1.0), volume_multiplier_(1.0), should_play_upon_shown_(false) { DVLOG(1) << __func__; DCHECK(client); DCHECK(delegate_); delegate_id_ = delegate_->AddObserver(this); media_log_->AddEvent( media_log_->CreateEvent(media::MediaLogEvent::WEBMEDIAPLAYER_CREATED)); } WebMediaPlayerMS::~WebMediaPlayerMS() { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); if (!web_stream_.IsNull()) web_stream_.RemoveObserver(this); // Destruct compositor resources in the proper order. get_client()->SetWebLayer(nullptr); if (video_weblayer_) static_cast<cc::VideoLayer*>(video_weblayer_->layer())->StopUsingProvider(); if (frame_deliverer_) io_task_runner_->DeleteSoon(FROM_HERE, frame_deliverer_.release()); if (compositor_) compositor_->StopUsingProvider(); if (video_frame_provider_) video_frame_provider_->Stop(); if (audio_renderer_) audio_renderer_->Stop(); media_log_->AddEvent( media_log_->CreateEvent(media::MediaLogEvent::WEBMEDIAPLAYER_DESTROYED)); delegate_->PlayerGone(delegate_id_); delegate_->RemoveObserver(delegate_id_); } void WebMediaPlayerMS::Load(LoadType load_type, const blink::WebMediaPlayerSource& source, CORSMode /*cors_mode*/) { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); // TODO(acolwell): Change this to DCHECK_EQ(load_type, LoadTypeMediaStream) // once Blink-side changes land. DCHECK_NE(load_type, kLoadTypeMediaSource); web_stream_ = GetWebMediaStreamFromWebMediaPlayerSource(source); if (!web_stream_.IsNull()) web_stream_.AddObserver(this); compositor_ = new WebMediaPlayerMSCompositor( compositor_task_runner_, io_task_runner_, web_stream_, AsWeakPtr()); SetNetworkState(WebMediaPlayer::kNetworkStateLoading); SetReadyState(WebMediaPlayer::kReadyStateHaveNothing); std::string stream_id = web_stream_.IsNull() ? 
std::string() : web_stream_.Id().Utf8(); media_log_->AddEvent(media_log_->CreateLoadEvent(stream_id)); frame_deliverer_.reset(new WebMediaPlayerMS::FrameDeliverer( AsWeakPtr(), base::BindRepeating(&WebMediaPlayerMSCompositor::EnqueueFrame, compositor_), media_task_runner_, worker_task_runner_, gpu_factories_)); video_frame_provider_ = renderer_factory_->GetVideoRenderer( web_stream_, media::BindToCurrentLoop( base::Bind(&WebMediaPlayerMS::OnSourceError, AsWeakPtr())), frame_deliverer_->GetRepaintCallback(), io_task_runner_); RenderFrame* const frame = RenderFrame::FromWebFrame(frame_); int routing_id = MSG_ROUTING_NONE; GURL url = source.IsURL() ? GURL(source.GetAsURL()) : GURL(); if (frame) { // Report UMA and RAPPOR metrics. media::ReportMetrics(load_type, url, frame_->GetSecurityOrigin(), media_log_.get()); routing_id = frame->GetRoutingID(); } audio_renderer_ = renderer_factory_->GetAudioRenderer( web_stream_, routing_id, initial_audio_output_device_id_, initial_security_origin_); if (!audio_renderer_) WebRtcLogMessage("Warning: Failed to instantiate audio renderer."); if (!video_frame_provider_ && !audio_renderer_) { SetNetworkState(WebMediaPlayer::kNetworkStateNetworkError); return; } if (audio_renderer_) { audio_renderer_->SetVolume(volume_); audio_renderer_->Start(); // Store the ID of audio track being played in |current_audio_track_id_| blink::WebVector<blink::WebMediaStreamTrack> audio_tracks; if (!web_stream_.IsNull()) { web_stream_.AudioTracks(audio_tracks); DCHECK_GT(audio_tracks.size(), 0U); current_audio_track_id_ = audio_tracks[0].Id(); } } if (video_frame_provider_) { video_frame_provider_->Start(); // Store the ID of video track being played in |current_video_track_id_| if (!web_stream_.IsNull()) { blink::WebVector<blink::WebMediaStreamTrack> video_tracks; web_stream_.VideoTracks(video_tracks); DCHECK_GT(video_tracks.size(), 0U); current_video_track_id_ = video_tracks[0].Id(); } } // When associated with an <audio> element, we don't want to wait for the // first video frame to become available as we do for <video> elements // (<audio> elements can also be assigned video tracks). // For more details, see crbug.com/738379 if (audio_renderer_ && (client_->IsAudioElement() || !video_frame_provider_)) { // This is audio-only mode. SetReadyState(WebMediaPlayer::kReadyStateHaveMetadata); SetReadyState(WebMediaPlayer::kReadyStateHaveEnoughData); } } void WebMediaPlayerMS::TrackAdded(const blink::WebMediaStreamTrack& track) { Reload(); } void WebMediaPlayerMS::TrackRemoved(const blink::WebMediaStreamTrack& track) { Reload(); } void WebMediaPlayerMS::Reload() { DCHECK(thread_checker_.CalledOnValidThread()); if (web_stream_.IsNull()) return; ReloadVideo(); ReloadAudio(); } void WebMediaPlayerMS::ReloadVideo() { DCHECK(thread_checker_.CalledOnValidThread()); DCHECK(!web_stream_.IsNull()); blink::WebVector<blink::WebMediaStreamTrack> video_tracks; // VideoTracks() is a getter. 
web_stream_.VideoTracks(video_tracks); RendererReloadAction renderer_action = RendererReloadAction::KEEP_RENDERER; if (video_tracks.IsEmpty()) { if (video_frame_provider_) renderer_action = RendererReloadAction::REMOVE_RENDERER; current_video_track_id_ = blink::WebString(); } else if (video_tracks[0].Id() != current_video_track_id_) { renderer_action = RendererReloadAction::NEW_RENDERER; current_video_track_id_ = video_tracks[0].Id(); } switch (renderer_action) { case RendererReloadAction::NEW_RENDERER: if (video_frame_provider_) video_frame_provider_->Stop(); video_frame_provider_ = renderer_factory_->GetVideoRenderer( web_stream_, media::BindToCurrentLoop( base::Bind(&WebMediaPlayerMS::OnSourceError, AsWeakPtr())), frame_deliverer_->GetRepaintCallback(), io_task_runner_); DCHECK(video_frame_provider_); video_frame_provider_->Start(); break; case RendererReloadAction::REMOVE_RENDERER: video_frame_provider_->Stop(); video_frame_provider_ = nullptr; break; default: return; } DCHECK_NE(renderer_action, RendererReloadAction::KEEP_RENDERER); if (!paused_) delegate_->DidPlayerSizeChange(delegate_id_, NaturalSize()); } void WebMediaPlayerMS::ReloadAudio() { DCHECK(thread_checker_.CalledOnValidThread()); DCHECK(!web_stream_.IsNull()); RenderFrame* const frame = RenderFrame::FromWebFrame(frame_); if (!frame) return; blink::WebVector<blink::WebMediaStreamTrack> audio_tracks; // AudioTracks() is a getter. web_stream_.AudioTracks(audio_tracks); RendererReloadAction renderer_action = RendererReloadAction::KEEP_RENDERER; if (audio_tracks.IsEmpty()) { if (audio_renderer_) renderer_action = RendererReloadAction::REMOVE_RENDERER; current_audio_track_id_ = blink::WebString(); } else if (audio_tracks[0].Id() != current_audio_track_id_) { renderer_action = RendererReloadAction::NEW_RENDERER; current_audio_track_id_ = audio_tracks[0].Id(); } switch (renderer_action) { case RendererReloadAction::NEW_RENDERER: if (audio_renderer_) audio_renderer_->Stop(); audio_renderer_ = renderer_factory_->GetAudioRenderer( web_stream_, frame->GetRoutingID(), initial_audio_output_device_id_, initial_security_origin_); audio_renderer_->SetVolume(volume_); audio_renderer_->Start(); audio_renderer_->Play(); break; case RendererReloadAction::REMOVE_RENDERER: audio_renderer_->Stop(); audio_renderer_ = nullptr; break; default: break; } } void WebMediaPlayerMS::Play() { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); media_log_->AddEvent(media_log_->CreateEvent(media::MediaLogEvent::PLAY)); if (!paused_) return; if (video_frame_provider_) video_frame_provider_->Resume(); compositor_->StartRendering(); if (audio_renderer_) audio_renderer_->Play(); if (HasVideo()) delegate_->DidPlayerSizeChange(delegate_id_, NaturalSize()); // |delegate_| expects the notification only if there is at least one track // actually playing. A media stream might have none since tracks can be // removed from the stream. if (HasAudio() || HasVideo()) { // TODO(perkj, magjed): We use OneShot focus type here so that it takes // audio focus once it starts, and then will not respond to further audio // focus changes. See http://crbug.com/596516 for more details. 
delegate_->DidPlay(delegate_id_, HasVideo(), HasAudio(), media::MediaContentType::OneShot); } delegate_->SetIdle(delegate_id_, false); paused_ = false; } void WebMediaPlayerMS::Pause() { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); should_play_upon_shown_ = false; media_log_->AddEvent(media_log_->CreateEvent(media::MediaLogEvent::PAUSE)); if (paused_) return; if (video_frame_provider_) video_frame_provider_->Pause(); compositor_->StopRendering(); compositor_->ReplaceCurrentFrameWithACopy(); if (audio_renderer_) audio_renderer_->Pause(); delegate_->DidPause(delegate_id_); delegate_->SetIdle(delegate_id_, true); paused_ = true; } void WebMediaPlayerMS::Seek(double seconds) { DCHECK(thread_checker_.CalledOnValidThread()); } void WebMediaPlayerMS::SetRate(double rate) { DCHECK(thread_checker_.CalledOnValidThread()); } void WebMediaPlayerMS::SetVolume(double volume) { DVLOG(1) << __func__ << "(volume=" << volume << ")"; DCHECK(thread_checker_.CalledOnValidThread()); volume_ = volume; if (audio_renderer_.get()) audio_renderer_->SetVolume(volume_ * volume_multiplier_); delegate_->DidPlayerMutedStatusChange(delegate_id_, volume == 0.0); } void WebMediaPlayerMS::EnterPictureInPicture() { NOTIMPLEMENTED(); // TODO(apacible): Implement after video in surfaces is supported for // WebMediaPlayerMS. See http://crbug/746182. } void WebMediaPlayerMS::ExitPictureInPicture() { NOTIMPLEMENTED(); // TODO(apacible): Implement after video in surfaces is supported for // WebMediaPlayerMS. See http://crbug/746182. } void WebMediaPlayerMS::SetSinkId( const blink::WebString& sink_id, const blink::WebSecurityOrigin& security_origin, blink::WebSetSinkIdCallbacks* web_callback) { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); const media::OutputDeviceStatusCB callback = media::ConvertToOutputDeviceStatusCB(web_callback); if (audio_renderer_) { audio_renderer_->SwitchOutputDevice(sink_id.Utf8(), security_origin, callback); } else { callback.Run(media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL); } } void WebMediaPlayerMS::SetPreload(WebMediaPlayer::Preload preload) { DCHECK(thread_checker_.CalledOnValidThread()); } bool WebMediaPlayerMS::HasVideo() const { DCHECK(thread_checker_.CalledOnValidThread()); return (video_frame_provider_.get() != nullptr); } bool WebMediaPlayerMS::HasAudio() const { DCHECK(thread_checker_.CalledOnValidThread()); return (audio_renderer_.get() != nullptr); } blink::WebSize WebMediaPlayerMS::NaturalSize() const { DCHECK(thread_checker_.CalledOnValidThread()); if (!video_frame_provider_) return blink::WebSize(); if (video_rotation_ == media::VIDEO_ROTATION_90 || video_rotation_ == media::VideoRotation::VIDEO_ROTATION_270) { const gfx::Size& current_size = compositor_->GetCurrentSize(); return blink::WebSize(current_size.height(), current_size.width()); } return blink::WebSize(compositor_->GetCurrentSize()); } blink::WebSize WebMediaPlayerMS::VisibleRect() const { DCHECK(thread_checker_.CalledOnValidThread()); scoped_refptr<media::VideoFrame> video_frame = compositor_->GetCurrentFrameWithoutUpdatingStatistics(); if (!video_frame) return blink::WebSize(); const gfx::Rect& visible_rect = video_frame->visible_rect(); if (video_rotation_ == media::VIDEO_ROTATION_90 || video_rotation_ == media::VideoRotation::VIDEO_ROTATION_270) { return blink::WebSize(visible_rect.height(), visible_rect.width()); } return blink::WebSize(visible_rect.width(), visible_rect.height()); } bool WebMediaPlayerMS::Paused() const { DCHECK(thread_checker_.CalledOnValidThread()); return 
paused_; } bool WebMediaPlayerMS::Seeking() const { DCHECK(thread_checker_.CalledOnValidThread()); return false; } double WebMediaPlayerMS::Duration() const { DCHECK(thread_checker_.CalledOnValidThread()); return std::numeric_limits<double>::infinity(); } double WebMediaPlayerMS::CurrentTime() const { DCHECK(thread_checker_.CalledOnValidThread()); const base::TimeDelta current_time = compositor_->GetCurrentTime(); if (current_time.ToInternalValue() != 0) return current_time.InSecondsF(); else if (audio_renderer_.get()) return audio_renderer_->GetCurrentRenderTime().InSecondsF(); return 0.0; } blink::WebMediaPlayer::NetworkState WebMediaPlayerMS::GetNetworkState() const { DVLOG(1) << __func__ << ", state:" << network_state_; DCHECK(thread_checker_.CalledOnValidThread()); return network_state_; } blink::WebMediaPlayer::ReadyState WebMediaPlayerMS::GetReadyState() const { DVLOG(1) << __func__ << ", state:" << ready_state_; DCHECK(thread_checker_.CalledOnValidThread()); return ready_state_; } blink::WebString WebMediaPlayerMS::GetErrorMessage() const { return blink::WebString::FromUTF8(media_log_->GetErrorMessage()); } blink::WebTimeRanges WebMediaPlayerMS::Buffered() const { DCHECK(thread_checker_.CalledOnValidThread()); return buffered_; } blink::WebTimeRanges WebMediaPlayerMS::Seekable() const { DCHECK(thread_checker_.CalledOnValidThread()); return blink::WebTimeRanges(); } bool WebMediaPlayerMS::DidLoadingProgress() { DCHECK(thread_checker_.CalledOnValidThread()); return true; } void WebMediaPlayerMS::Paint(blink::WebCanvas* canvas, const blink::WebRect& rect, cc::PaintFlags& flags, int already_uploaded_id, VideoFrameUploadMetadata* out_metadata) { DVLOG(3) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); const scoped_refptr<media::VideoFrame> frame = compositor_->GetCurrentFrameWithoutUpdatingStatistics(); media::Context3D context_3d; if (frame && frame->HasTextures()) { auto* provider = RenderThreadImpl::current()->SharedMainThreadContextProvider().get(); // GPU Process crashed. if (!provider) return; context_3d = media::Context3D(provider->ContextGL(), provider->GrContext()); DCHECK(context_3d.gl); } const gfx::RectF dest_rect(rect.x, rect.y, rect.width, rect.height); video_renderer_.Paint(frame, canvas, dest_rect, flags, video_rotation_, context_3d); } bool WebMediaPlayerMS::DidGetOpaqueResponseFromServiceWorker() const { DCHECK(thread_checker_.CalledOnValidThread()); return false; } bool WebMediaPlayerMS::HasSingleSecurityOrigin() const { DCHECK(thread_checker_.CalledOnValidThread()); return true; } bool WebMediaPlayerMS::DidPassCORSAccessCheck() const { DCHECK(thread_checker_.CalledOnValidThread()); return true; } double WebMediaPlayerMS::MediaTimeForTimeValue(double timeValue) const { return base::TimeDelta::FromSecondsD(timeValue).InSecondsF(); } unsigned WebMediaPlayerMS::DecodedFrameCount() const { DCHECK(thread_checker_.CalledOnValidThread()); return compositor_->total_frame_count(); } unsigned WebMediaPlayerMS::DroppedFrameCount() const { DCHECK(thread_checker_.CalledOnValidThread()); return compositor_->dropped_frame_count(); } size_t WebMediaPlayerMS::AudioDecodedByteCount() const { DCHECK(thread_checker_.CalledOnValidThread()); NOTIMPLEMENTED(); return 0; } size_t WebMediaPlayerMS::VideoDecodedByteCount() const { DCHECK(thread_checker_.CalledOnValidThread()); NOTIMPLEMENTED(); return 0; } void WebMediaPlayerMS::OnFrameHidden() { DCHECK(thread_checker_.CalledOnValidThread()); // This method is called when the RenderFrame is sent to background or // suspended. 
During undoable tab closures OnHidden() may be called back to // back, so we can't rely on |render_frame_suspended_| being false here. if (frame_deliverer_) { io_task_runner_->PostTask( FROM_HERE, base::BindOnce(&FrameDeliverer::SetRenderFrameSuspended, base::Unretained(frame_deliverer_.get()), true)); } // On Android, substitute the displayed VideoFrame with a copy to avoid holding // onto it unnecessarily. #if defined(OS_ANDROID) if (!paused_) compositor_->ReplaceCurrentFrameWithACopy(); #endif // defined(OS_ANDROID) } void WebMediaPlayerMS::OnFrameClosed() { DCHECK(thread_checker_.CalledOnValidThread()); // On Android, pause the video completely for this time period. #if defined(OS_ANDROID) if (!paused_) { Pause(); should_play_upon_shown_ = true; } delegate_->PlayerGone(delegate_id_); #endif // defined(OS_ANDROID) if (frame_deliverer_) { io_task_runner_->PostTask( FROM_HERE, base::BindOnce(&FrameDeliverer::SetRenderFrameSuspended, base::Unretained(frame_deliverer_.get()), true)); } } void WebMediaPlayerMS::OnFrameShown() { DCHECK(thread_checker_.CalledOnValidThread()); if (frame_deliverer_) { io_task_runner_->PostTask( FROM_HERE, base::BindOnce(&FrameDeliverer::SetRenderFrameSuspended, base::Unretained(frame_deliverer_.get()), false)); } // On Android, resume playback on visibility. play() clears // |should_play_upon_shown_|. #if defined(OS_ANDROID) if (should_play_upon_shown_) Play(); #endif // defined(OS_ANDROID) } void WebMediaPlayerMS::OnIdleTimeout() {} void WebMediaPlayerMS::OnPlay() { // TODO(perkj, magjed): It's not clear how WebRTC should work with an // MediaSession, until these issues are resolved, disable session controls. // http://crbug.com/595297. } void WebMediaPlayerMS::OnPause() { // TODO(perkj, magjed): See TODO in OnPlay(). } void WebMediaPlayerMS::OnSeekForward(double seconds) { // TODO(perkj, magjed): See TODO in OnPlay(). } void WebMediaPlayerMS::OnSeekBackward(double seconds) { // TODO(perkj, magjed): See TODO in OnPlay(). } void WebMediaPlayerMS::OnVolumeMultiplierUpdate(double multiplier) { // TODO(perkj, magjed): See TODO in OnPlay(). } void WebMediaPlayerMS::OnBecamePersistentVideo(bool value) { get_client()->OnBecamePersistentVideo(value); } bool WebMediaPlayerMS::CopyVideoTextureToPlatformTexture( gpu::gles2::GLES2Interface* gl, unsigned target, unsigned int texture, unsigned internal_format, unsigned format, unsigned type, int level, bool premultiply_alpha, bool flip_y, int already_uploaded_id, VideoFrameUploadMetadata* out_metadata) { TRACE_EVENT0("media", "copyVideoTextureToPlatformTexture"); DCHECK(thread_checker_.CalledOnValidThread()); scoped_refptr<media::VideoFrame> video_frame = compositor_->GetCurrentFrameWithoutUpdatingStatistics(); if (!video_frame.get() || !video_frame->HasTextures()) return false; media::Context3D context_3d; auto* provider = RenderThreadImpl::current()->SharedMainThreadContextProvider().get(); // GPU Process crashed. 
if (!provider) return false; context_3d = media::Context3D(provider->ContextGL(), provider->GrContext()); DCHECK(context_3d.gl); return video_renderer_.CopyVideoFrameTexturesToGLTexture( context_3d, gl, video_frame.get(), target, texture, internal_format, format, type, level, premultiply_alpha, flip_y); } bool WebMediaPlayerMS::TexImageImpl(TexImageFunctionID functionID, unsigned target, gpu::gles2::GLES2Interface* gl, unsigned int texture, int level, int internalformat, unsigned format, unsigned type, int xoffset, int yoffset, int zoffset, bool flip_y, bool premultiply_alpha) { TRACE_EVENT0("media", "texImageImpl"); DCHECK(thread_checker_.CalledOnValidThread()); const scoped_refptr<media::VideoFrame> video_frame = compositor_->GetCurrentFrameWithoutUpdatingStatistics(); if (!video_frame || !video_frame->IsMappable() || video_frame->HasTextures() || video_frame->format() != media::PIXEL_FORMAT_Y16) { return false; } if (functionID == kTexImage2D) { auto* provider = RenderThreadImpl::current()->SharedMainThreadContextProvider().get(); // GPU Process crashed. if (!provider) return false; return media::PaintCanvasVideoRenderer::TexImage2D( target, texture, gl, provider->ContextCapabilities(), video_frame.get(), level, internalformat, format, type, flip_y, premultiply_alpha); } else if (functionID == kTexSubImage2D) { return media::PaintCanvasVideoRenderer::TexSubImage2D( target, gl, video_frame.get(), level, format, type, xoffset, yoffset, flip_y, premultiply_alpha); } return false; } void WebMediaPlayerMS::OnFirstFrameReceived(media::VideoRotation video_rotation, bool is_opaque) { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); SetReadyState(WebMediaPlayer::kReadyStateHaveMetadata); SetReadyState(WebMediaPlayer::kReadyStateHaveEnoughData); OnRotationChanged(video_rotation, is_opaque); } void WebMediaPlayerMS::OnOpacityChanged(bool is_opaque) { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); // Opacity can be changed during the session without resetting // |video_weblayer_|. video_weblayer_->layer()->SetContentsOpaque(is_opaque); } void WebMediaPlayerMS::OnRotationChanged(media::VideoRotation video_rotation, bool is_opaque) { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); video_rotation_ = video_rotation; std::unique_ptr<cc_blink::WebLayerImpl> rotated_weblayer = base::WrapUnique(new cc_blink::WebLayerImpl( cc::VideoLayer::Create(compositor_.get(), video_rotation))); rotated_weblayer->layer()->SetContentsOpaque(is_opaque); rotated_weblayer->SetContentsOpaqueIsFixed(true); get_client()->SetWebLayer(rotated_weblayer.get()); video_weblayer_ = std::move(rotated_weblayer); } void WebMediaPlayerMS::RepaintInternal() { DVLOG(1) << __func__; DCHECK(thread_checker_.CalledOnValidThread()); get_client()->Repaint(); } void WebMediaPlayerMS::OnSourceError() { DCHECK(thread_checker_.CalledOnValidThread()); SetNetworkState(WebMediaPlayer::kNetworkStateFormatError); RepaintInternal(); } void WebMediaPlayerMS::SetNetworkState(WebMediaPlayer::NetworkState state) { DCHECK(thread_checker_.CalledOnValidThread()); network_state_ = state; // Always notify to ensure client has the latest value. get_client()->NetworkStateChanged(); } void WebMediaPlayerMS::SetReadyState(WebMediaPlayer::ReadyState state) { DCHECK(thread_checker_.CalledOnValidThread()); ready_state_ = state; // Always notify to ensure client has the latest value. 
get_client()->ReadyStateChanged(); } media::PaintCanvasVideoRenderer* WebMediaPlayerMS::GetPaintCanvasVideoRenderer() { return &video_renderer_; } void WebMediaPlayerMS::ResetCanvasCache() { DCHECK(thread_checker_.CalledOnValidThread()); video_renderer_.ResetCache(); } void WebMediaPlayerMS::TriggerResize() { if (HasVideo()) get_client()->SizeChanged(); delegate_->DidPlayerSizeChange(delegate_id_, NaturalSize()); } void WebMediaPlayerMS::SetGpuMemoryBufferVideoForTesting( media::GpuMemoryBufferVideoFramePool* gpu_memory_buffer_pool) { CHECK(frame_deliverer_); frame_deliverer_->gpu_memory_buffer_pool_.reset(gpu_memory_buffer_pool); } } // namespace content
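// Editor's note: WebMediaPlayerMS above never mutates FrameDeliverer state
// directly from the main thread; it posts SetRenderFrameSuspended() to the
// IO task runner so the flag is only ever touched on one thread, and
// base::Unretained() is safe only because the deliverer outlives the posted
// tasks. A minimal standalone sketch of that hand-off pattern in plain C++
// (std::thread stands in for the IO task runner; every name here is an
// illustrative stand-in, not Chromium's):
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class SingleThreadTaskRunner {
 public:
  SingleThreadTaskRunner() : worker_([this] { Loop(); }) {}
  ~SingleThreadTaskRunner() {
    PostTask(nullptr);  // a null task is the quit sentinel
    worker_.join();
  }
  void PostTask(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      tasks_.push(std::move(task));
    }
    cv_.notify_one();
  }

 private:
  void Loop() {
    for (;;) {
      std::unique_lock<std::mutex> lock(mutex_);
      cv_.wait(lock, [this] { return !tasks_.empty(); });
      std::function<void()> task = std::move(tasks_.front());
      tasks_.pop();
      lock.unlock();
      if (!task)
        return;
      task();  // runs on the worker thread, like the IO thread above
    }
  }
  std::mutex mutex_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> tasks_;
  std::thread worker_;
};

struct FrameDeliverer {
  bool render_frame_suspended = false;  // touched only on the worker thread
};

int main() {
  FrameDeliverer deliverer;
  {
    SingleThreadTaskRunner io_task_runner;
    io_task_runner.PostTask(
        [&deliverer] { deliverer.render_frame_suspended = true; });
  }  // the runner drains its queue before joining, so the write completed
  return deliverer.render_frame_suspended ? 0 : 1;
}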
null
null
null
null
18,421
32,814
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
197,809
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Amlogic Meson Successive Approximation Register (SAR) A/D Converter * * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/iio/iio.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #define MESON_SAR_ADC_REG0 0x00 #define MESON_SAR_ADC_REG0_PANEL_DETECT BIT(31) #define MESON_SAR_ADC_REG0_BUSY_MASK GENMASK(30, 28) #define MESON_SAR_ADC_REG0_DELTA_BUSY BIT(30) #define MESON_SAR_ADC_REG0_AVG_BUSY BIT(29) #define MESON_SAR_ADC_REG0_SAMPLE_BUSY BIT(28) #define MESON_SAR_ADC_REG0_FIFO_FULL BIT(27) #define MESON_SAR_ADC_REG0_FIFO_EMPTY BIT(26) #define MESON_SAR_ADC_REG0_FIFO_COUNT_MASK GENMASK(25, 21) #define MESON_SAR_ADC_REG0_ADC_BIAS_CTRL_MASK GENMASK(20, 19) #define MESON_SAR_ADC_REG0_CURR_CHAN_ID_MASK GENMASK(18, 16) #define MESON_SAR_ADC_REG0_ADC_TEMP_SEN_SEL BIT(15) #define MESON_SAR_ADC_REG0_SAMPLING_STOP BIT(14) #define MESON_SAR_ADC_REG0_CHAN_DELTA_EN_MASK GENMASK(13, 12) #define MESON_SAR_ADC_REG0_DETECT_IRQ_POL BIT(10) #define MESON_SAR_ADC_REG0_DETECT_IRQ_EN BIT(9) #define MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK GENMASK(8, 4) #define MESON_SAR_ADC_REG0_FIFO_IRQ_EN BIT(3) #define MESON_SAR_ADC_REG0_SAMPLING_START BIT(2) #define MESON_SAR_ADC_REG0_CONTINUOUS_EN BIT(1) #define MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE BIT(0) #define MESON_SAR_ADC_CHAN_LIST 0x04 #define MESON_SAR_ADC_CHAN_LIST_MAX_INDEX_MASK GENMASK(26, 24) #define MESON_SAR_ADC_CHAN_LIST_ENTRY_MASK(_chan) \ (GENMASK(2, 0) << ((_chan) * 3)) #define MESON_SAR_ADC_AVG_CNTL 0x08 #define MESON_SAR_ADC_AVG_CNTL_AVG_MODE_SHIFT(_chan) \ (16 + ((_chan) * 2)) #define MESON_SAR_ADC_AVG_CNTL_AVG_MODE_MASK(_chan) \ (GENMASK(17, 16) << ((_chan) * 2)) #define MESON_SAR_ADC_AVG_CNTL_NUM_SAMPLES_SHIFT(_chan) \ (0 + ((_chan) * 2)) #define MESON_SAR_ADC_AVG_CNTL_NUM_SAMPLES_MASK(_chan) \ (GENMASK(1, 0) << ((_chan) * 2)) #define MESON_SAR_ADC_REG3 0x0c #define MESON_SAR_ADC_REG3_CNTL_USE_SC_DLY BIT(31) #define MESON_SAR_ADC_REG3_CLK_EN BIT(30) #define MESON_SAR_ADC_REG3_BL30_INITIALIZED BIT(28) #define MESON_SAR_ADC_REG3_CTRL_CONT_RING_COUNTER_EN BIT(27) #define MESON_SAR_ADC_REG3_CTRL_SAMPLING_CLOCK_PHASE BIT(26) #define MESON_SAR_ADC_REG3_CTRL_CHAN7_MUX_SEL_MASK GENMASK(25, 23) #define MESON_SAR_ADC_REG3_DETECT_EN BIT(22) #define MESON_SAR_ADC_REG3_ADC_EN BIT(21) #define MESON_SAR_ADC_REG3_PANEL_DETECT_COUNT_MASK GENMASK(20, 18) #define MESON_SAR_ADC_REG3_PANEL_DETECT_FILTER_TB_MASK GENMASK(17, 16) #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_SHIFT 10 #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 5 #define MESON_SAR_ADC_REG3_BLOCK_DLY_SEL_MASK GENMASK(9, 8) #define MESON_SAR_ADC_REG3_BLOCK_DLY_MASK GENMASK(7, 0) #define MESON_SAR_ADC_DELAY 0x10 #define MESON_SAR_ADC_DELAY_INPUT_DLY_SEL_MASK GENMASK(25, 24) #define MESON_SAR_ADC_DELAY_BL30_BUSY BIT(15) #define MESON_SAR_ADC_DELAY_KERNEL_BUSY BIT(14) #define MESON_SAR_ADC_DELAY_INPUT_DLY_CNT_MASK GENMASK(23, 16) #define MESON_SAR_ADC_DELAY_SAMPLE_DLY_SEL_MASK GENMASK(9, 8) 
#define MESON_SAR_ADC_DELAY_SAMPLE_DLY_CNT_MASK GENMASK(7, 0) #define MESON_SAR_ADC_LAST_RD 0x14 #define MESON_SAR_ADC_LAST_RD_LAST_CHANNEL1_MASK GENMASK(23, 16) #define MESON_SAR_ADC_LAST_RD_LAST_CHANNEL0_MASK GENMASK(9, 0) #define MESON_SAR_ADC_FIFO_RD 0x18 #define MESON_SAR_ADC_FIFO_RD_CHAN_ID_MASK GENMASK(14, 12) #define MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK GENMASK(11, 0) #define MESON_SAR_ADC_AUX_SW 0x1c #define MESON_SAR_ADC_AUX_SW_MUX_SEL_CHAN_MASK(_chan) \ (GENMASK(10, 8) << (((_chan) - 2) * 2)) #define MESON_SAR_ADC_AUX_SW_VREF_P_MUX BIT(6) #define MESON_SAR_ADC_AUX_SW_VREF_N_MUX BIT(5) #define MESON_SAR_ADC_AUX_SW_MODE_SEL BIT(4) #define MESON_SAR_ADC_AUX_SW_YP_DRIVE_SW BIT(3) #define MESON_SAR_ADC_AUX_SW_XP_DRIVE_SW BIT(2) #define MESON_SAR_ADC_AUX_SW_YM_DRIVE_SW BIT(1) #define MESON_SAR_ADC_AUX_SW_XM_DRIVE_SW BIT(0) #define MESON_SAR_ADC_CHAN_10_SW 0x20 #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_MUX_SEL_MASK GENMASK(25, 23) #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_VREF_P_MUX BIT(22) #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_VREF_N_MUX BIT(21) #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_MODE_SEL BIT(20) #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_YP_DRIVE_SW BIT(19) #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_XP_DRIVE_SW BIT(18) #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_YM_DRIVE_SW BIT(17) #define MESON_SAR_ADC_CHAN_10_SW_CHAN1_XM_DRIVE_SW BIT(16) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_MUX_SEL_MASK GENMASK(9, 7) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_VREF_P_MUX BIT(6) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_VREF_N_MUX BIT(5) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_MODE_SEL BIT(4) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_YP_DRIVE_SW BIT(3) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_XP_DRIVE_SW BIT(2) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_YM_DRIVE_SW BIT(1) #define MESON_SAR_ADC_CHAN_10_SW_CHAN0_XM_DRIVE_SW BIT(0) #define MESON_SAR_ADC_DETECT_IDLE_SW 0x24 #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_SW_EN BIT(26) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_MUX_MASK GENMASK(25, 23) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_VREF_P_MUX BIT(22) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_VREF_N_MUX BIT(21) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_MODE_SEL BIT(20) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_YP_DRIVE_SW BIT(19) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_XP_DRIVE_SW BIT(18) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_YM_DRIVE_SW BIT(17) #define MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_XM_DRIVE_SW BIT(16) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_MUX_SEL_MASK GENMASK(9, 7) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_VREF_P_MUX BIT(6) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_VREF_N_MUX BIT(5) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_MODE_SEL BIT(4) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_YP_DRIVE_SW BIT(3) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_XP_DRIVE_SW BIT(2) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_YM_DRIVE_SW BIT(1) #define MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_XM_DRIVE_SW BIT(0) #define MESON_SAR_ADC_DELTA_10 0x28 #define MESON_SAR_ADC_DELTA_10_TEMP_SEL BIT(27) #define MESON_SAR_ADC_DELTA_10_TS_REVE1 BIT(26) #define MESON_SAR_ADC_DELTA_10_CHAN1_DELTA_VALUE_MASK GENMASK(25, 16) #define MESON_SAR_ADC_DELTA_10_TS_REVE0 BIT(15) #define MESON_SAR_ADC_DELTA_10_TS_C_SHIFT 11 #define MESON_SAR_ADC_DELTA_10_TS_C_MASK GENMASK(14, 11) #define MESON_SAR_ADC_DELTA_10_TS_VBG_EN BIT(10) #define MESON_SAR_ADC_DELTA_10_CHAN0_DELTA_VALUE_MASK GENMASK(9, 0) /* * NOTE: registers from here are undocumented (the vendor Linux kernel driver * and u-boot source served as reference). 
These only seem to be relevant on * GXBB and newer. */ #define MESON_SAR_ADC_REG11 0x2c #define MESON_SAR_ADC_REG11_BANDGAP_EN BIT(13) #define MESON_SAR_ADC_REG13 0x34 #define MESON_SAR_ADC_REG13_12BIT_CALIBRATION_MASK GENMASK(13, 8) #define MESON_SAR_ADC_MAX_FIFO_SIZE 32 #define MESON_SAR_ADC_CHAN(_chan) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = _chan, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_AVERAGE_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .datasheet_name = "SAR_ADC_CH"#_chan, \ } /* * TODO: the hardware supports IIO_TEMP for channel 6 as well which is * currently not supported by this driver. */ static const struct iio_chan_spec meson_sar_adc_iio_channels[] = { MESON_SAR_ADC_CHAN(0), MESON_SAR_ADC_CHAN(1), MESON_SAR_ADC_CHAN(2), MESON_SAR_ADC_CHAN(3), MESON_SAR_ADC_CHAN(4), MESON_SAR_ADC_CHAN(5), MESON_SAR_ADC_CHAN(6), MESON_SAR_ADC_CHAN(7), IIO_CHAN_SOFT_TIMESTAMP(8), }; enum meson_sar_adc_avg_mode { NO_AVERAGING = 0x0, MEAN_AVERAGING = 0x1, MEDIAN_AVERAGING = 0x2, }; enum meson_sar_adc_num_samples { ONE_SAMPLE = 0x0, TWO_SAMPLES = 0x1, FOUR_SAMPLES = 0x2, EIGHT_SAMPLES = 0x3, }; enum meson_sar_adc_chan7_mux_sel { CHAN7_MUX_VSS = 0x0, CHAN7_MUX_VDD_DIV4 = 0x1, CHAN7_MUX_VDD_DIV2 = 0x2, CHAN7_MUX_VDD_MUL3_DIV4 = 0x3, CHAN7_MUX_VDD = 0x4, CHAN7_MUX_CH7_INPUT = 0x7, }; struct meson_sar_adc_data { unsigned int resolution; const char *name; }; struct meson_sar_adc_priv { struct regmap *regmap; struct regulator *vref; const struct meson_sar_adc_data *data; struct clk *clkin; struct clk *core_clk; struct clk *sana_clk; struct clk *adc_sel_clk; struct clk *adc_clk; struct clk_gate clk_gate; struct clk *adc_div_clk; struct clk_divider clk_div; }; static const struct regmap_config meson_sar_adc_regmap_config = { .reg_bits = 8, .val_bits = 32, .reg_stride = 4, .max_register = MESON_SAR_ADC_REG13, }; static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); u32 regval; regmap_read(priv->regmap, MESON_SAR_ADC_REG0, &regval); return FIELD_GET(MESON_SAR_ADC_REG0_FIFO_COUNT_MASK, regval); } static int meson_sar_adc_wait_busy_clear(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int regval, timeout = 10000; /* * NOTE: we need a small delay before reading the status, otherwise * the sample engine may not have started internally (which would * seem to us that sampling is already finished). 
*/ do { udelay(1); regmap_read(priv->regmap, MESON_SAR_ADC_REG0, &regval); } while (FIELD_GET(MESON_SAR_ADC_REG0_BUSY_MASK, regval) && timeout--); if (timeout < 0) return -ETIMEDOUT; return 0; } static int meson_sar_adc_read_raw_sample(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, int *val) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int ret, regval, fifo_chan, fifo_val, sum = 0, count = 0; ret = meson_sar_adc_wait_busy_clear(indio_dev); if (ret) return ret; while (meson_sar_adc_get_fifo_count(indio_dev) > 0 && count < MESON_SAR_ADC_MAX_FIFO_SIZE) { regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &regval); fifo_chan = FIELD_GET(MESON_SAR_ADC_FIFO_RD_CHAN_ID_MASK, regval); if (fifo_chan != chan->channel) continue; fifo_val = FIELD_GET(MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK, regval); fifo_val &= (BIT(priv->data->resolution) - 1); sum += fifo_val; count++; } if (!count) return -ENOENT; *val = sum / count; return 0; } static void meson_sar_adc_set_averaging(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, enum meson_sar_adc_avg_mode mode, enum meson_sar_adc_num_samples samples) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int val, channel = chan->channel; val = samples << MESON_SAR_ADC_AVG_CNTL_NUM_SAMPLES_SHIFT(channel); regmap_update_bits(priv->regmap, MESON_SAR_ADC_AVG_CNTL, MESON_SAR_ADC_AVG_CNTL_NUM_SAMPLES_MASK(channel), val); val = mode << MESON_SAR_ADC_AVG_CNTL_AVG_MODE_SHIFT(channel); regmap_update_bits(priv->regmap, MESON_SAR_ADC_AVG_CNTL, MESON_SAR_ADC_AVG_CNTL_AVG_MODE_MASK(channel), val); } static void meson_sar_adc_enable_channel(struct iio_dev *indio_dev, const struct iio_chan_spec *chan) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); u32 regval; /* * the SAR ADC engine allows sampling multiple channels at the same * time. to keep it simple we're only working with one *internal* * channel, which starts counting at index 0 (which means: count = 1). 
*/ regval = FIELD_PREP(MESON_SAR_ADC_CHAN_LIST_MAX_INDEX_MASK, 0); regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_LIST, MESON_SAR_ADC_CHAN_LIST_MAX_INDEX_MASK, regval); /* map channel index 0 to the channel which we want to read */ regval = FIELD_PREP(MESON_SAR_ADC_CHAN_LIST_ENTRY_MASK(0), chan->channel); regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_LIST, MESON_SAR_ADC_CHAN_LIST_ENTRY_MASK(0), regval); regval = FIELD_PREP(MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_MUX_MASK, chan->channel); regmap_update_bits(priv->regmap, MESON_SAR_ADC_DETECT_IDLE_SW, MESON_SAR_ADC_DETECT_IDLE_SW_DETECT_MUX_MASK, regval); regval = FIELD_PREP(MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_MUX_SEL_MASK, chan->channel); regmap_update_bits(priv->regmap, MESON_SAR_ADC_DETECT_IDLE_SW, MESON_SAR_ADC_DETECT_IDLE_SW_IDLE_MUX_SEL_MASK, regval); if (chan->channel == 6) regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10, MESON_SAR_ADC_DELTA_10_TEMP_SEL, 0); } static void meson_sar_adc_set_chan7_mux(struct iio_dev *indio_dev, enum meson_sar_adc_chan7_mux_sel sel) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); u32 regval; regval = FIELD_PREP(MESON_SAR_ADC_REG3_CTRL_CHAN7_MUX_SEL_MASK, sel); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_CTRL_CHAN7_MUX_SEL_MASK, regval); usleep_range(10, 20); } static void meson_sar_adc_start_sample_engine(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE, MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_SAMPLING_START, MESON_SAR_ADC_REG0_SAMPLING_START); } static void meson_sar_adc_stop_sample_engine(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_SAMPLING_STOP, MESON_SAR_ADC_REG0_SAMPLING_STOP); /* wait until all modules are stopped */ meson_sar_adc_wait_busy_clear(indio_dev); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE, 0); } static int meson_sar_adc_lock(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int val, timeout = 10000; mutex_lock(&indio_dev->mlock); /* prevent BL30 from using the SAR ADC while we are using it */ regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY, MESON_SAR_ADC_DELAY_KERNEL_BUSY, MESON_SAR_ADC_DELAY_KERNEL_BUSY); /* wait until BL30 releases its lock (so we can use the SAR ADC) */ do { udelay(1); regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); if (timeout < 0) return -ETIMEDOUT; return 0; } static void meson_sar_adc_unlock(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); /* allow BL30 to use the SAR ADC again */ regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY, MESON_SAR_ADC_DELAY_KERNEL_BUSY, 0); mutex_unlock(&indio_dev->mlock); } static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int count; for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { if (!meson_sar_adc_get_fifo_count(indio_dev)) break; regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); } } static int meson_sar_adc_get_sample(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, enum meson_sar_adc_avg_mode avg_mode, enum meson_sar_adc_num_samples avg_samples, int *val) { int ret; ret =
meson_sar_adc_lock(indio_dev); if (ret) return ret; /* clear the FIFO to make sure we're not reading old values */ meson_sar_adc_clear_fifo(indio_dev); meson_sar_adc_set_averaging(indio_dev, chan, avg_mode, avg_samples); meson_sar_adc_enable_channel(indio_dev, chan); meson_sar_adc_start_sample_engine(indio_dev); ret = meson_sar_adc_read_raw_sample(indio_dev, chan, val); meson_sar_adc_stop_sample_engine(indio_dev); meson_sar_adc_unlock(indio_dev); if (ret) { dev_warn(indio_dev->dev.parent, "failed to read sample for channel %d: %d\n", chan->channel, ret); return ret; } return IIO_VAL_INT; } static int meson_sar_adc_iio_info_read_raw(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, int *val, int *val2, long mask) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int ret; switch (mask) { case IIO_CHAN_INFO_RAW: return meson_sar_adc_get_sample(indio_dev, chan, NO_AVERAGING, ONE_SAMPLE, val); break; case IIO_CHAN_INFO_AVERAGE_RAW: return meson_sar_adc_get_sample(indio_dev, chan, MEAN_AVERAGING, EIGHT_SAMPLES, val); break; case IIO_CHAN_INFO_SCALE: ret = regulator_get_voltage(priv->vref); if (ret < 0) { dev_err(indio_dev->dev.parent, "failed to get vref voltage: %d\n", ret); return ret; } *val = ret / 1000; *val2 = priv->data->resolution; return IIO_VAL_FRACTIONAL_LOG2; default: return -EINVAL; } } static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, void __iomem *base) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); struct clk_init_data init; const char *clk_parents[1]; init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div", of_node_full_name(indio_dev->dev.of_node)); init.flags = 0; init.ops = &clk_divider_ops; clk_parents[0] = __clk_get_name(priv->clkin); init.parent_names = clk_parents; init.num_parents = 1; priv->clk_div.reg = base + MESON_SAR_ADC_REG3; priv->clk_div.shift = MESON_SAR_ADC_REG3_ADC_CLK_DIV_SHIFT; priv->clk_div.width = MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH; priv->clk_div.hw.init = &init; priv->clk_div.flags = 0; priv->adc_div_clk = devm_clk_register(&indio_dev->dev, &priv->clk_div.hw); if (WARN_ON(IS_ERR(priv->adc_div_clk))) return PTR_ERR(priv->adc_div_clk); init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en", of_node_full_name(indio_dev->dev.of_node)); init.flags = CLK_SET_RATE_PARENT; init.ops = &clk_gate_ops; clk_parents[0] = __clk_get_name(priv->adc_div_clk); init.parent_names = clk_parents; init.num_parents = 1; priv->clk_gate.reg = base + MESON_SAR_ADC_REG3; priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN); priv->clk_gate.hw.init = &init; priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw); if (WARN_ON(IS_ERR(priv->adc_clk))) return PTR_ERR(priv->adc_clk); return 0; } static int meson_sar_adc_init(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int regval, ret; /* * make sure we start at CH7 input since the other muxes are only used * for internal calibration. */ meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_CH7_INPUT); /* * leave sampling delay and the input clocks as configured by BL30 to * make sure BL30 gets the values it expects when reading the * temperature sensor. 
*/ regmap_read(priv->regmap, MESON_SAR_ADC_REG3, &regval); if (regval & MESON_SAR_ADC_REG3_BL30_INITIALIZED) return 0; meson_sar_adc_stop_sample_engine(indio_dev); /* update the channel 6 MUX to select the temperature sensor */ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_ADC_TEMP_SEN_SEL, MESON_SAR_ADC_REG0_ADC_TEMP_SEN_SEL); /* disable all channels by default */ regmap_write(priv->regmap, MESON_SAR_ADC_CHAN_LIST, 0x0); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_CTRL_SAMPLING_CLOCK_PHASE, 0); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_CNTL_USE_SC_DLY, MESON_SAR_ADC_REG3_CNTL_USE_SC_DLY); /* delay between two samples = (10+1) * 1uS */ regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY, MESON_SAR_ADC_DELAY_INPUT_DLY_CNT_MASK, FIELD_PREP(MESON_SAR_ADC_DELAY_SAMPLE_DLY_CNT_MASK, 10)); regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY, MESON_SAR_ADC_DELAY_SAMPLE_DLY_SEL_MASK, FIELD_PREP(MESON_SAR_ADC_DELAY_SAMPLE_DLY_SEL_MASK, 0)); /* delay between two samples = (10+1) * 1uS */ regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY, MESON_SAR_ADC_DELAY_INPUT_DLY_CNT_MASK, FIELD_PREP(MESON_SAR_ADC_DELAY_INPUT_DLY_CNT_MASK, 10)); regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY, MESON_SAR_ADC_DELAY_INPUT_DLY_SEL_MASK, FIELD_PREP(MESON_SAR_ADC_DELAY_INPUT_DLY_SEL_MASK, 1)); ret = clk_set_parent(priv->adc_sel_clk, priv->clkin); if (ret) { dev_err(indio_dev->dev.parent, "failed to set adc parent to clkin\n"); return ret; } ret = clk_set_rate(priv->adc_clk, 1200000); if (ret) { dev_err(indio_dev->dev.parent, "failed to set adc clock rate\n"); return ret; } return 0; } static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int ret; ret = meson_sar_adc_lock(indio_dev); if (ret) goto err_lock; ret = regulator_enable(priv->vref); if (ret < 0) { dev_err(indio_dev->dev.parent, "failed to enable vref regulator\n"); goto err_vref; } ret = clk_prepare_enable(priv->core_clk); if (ret) { dev_err(indio_dev->dev.parent, "failed to enable core clk\n"); goto err_core_clk; } ret = clk_prepare_enable(priv->sana_clk); if (ret) { dev_err(indio_dev->dev.parent, "failed to enable sana clk\n"); goto err_sana_clk; } regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, MESON_SAR_ADC_REG11_BANDGAP_EN, MESON_SAR_ADC_REG11_BANDGAP_EN); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, MESON_SAR_ADC_REG3_ADC_EN); udelay(5); ret = clk_prepare_enable(priv->adc_clk); if (ret) { dev_err(indio_dev->dev.parent, "failed to enable adc clk\n"); goto err_adc_clk; } meson_sar_adc_unlock(indio_dev); return 0; err_adc_clk: regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, MESON_SAR_ADC_REG11_BANDGAP_EN, 0); clk_disable_unprepare(priv->sana_clk); err_sana_clk: clk_disable_unprepare(priv->core_clk); err_core_clk: regulator_disable(priv->vref); err_vref: meson_sar_adc_unlock(indio_dev); err_lock: return ret; } static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); int ret; ret = meson_sar_adc_lock(indio_dev); if (ret) return ret; clk_disable_unprepare(priv->adc_clk); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, MESON_SAR_ADC_REG11_BANDGAP_EN, 0); clk_disable_unprepare(priv->sana_clk); 
clk_disable_unprepare(priv->core_clk); regulator_disable(priv->vref); meson_sar_adc_unlock(indio_dev); return 0; } static const struct iio_info meson_sar_adc_iio_info = { .read_raw = meson_sar_adc_iio_info_read_raw, .driver_module = THIS_MODULE, }; struct meson_sar_adc_data meson_sar_adc_gxbb_data = { .resolution = 10, .name = "meson-gxbb-saradc", }; struct meson_sar_adc_data meson_sar_adc_gxl_data = { .resolution = 12, .name = "meson-gxl-saradc", }; struct meson_sar_adc_data meson_sar_adc_gxm_data = { .resolution = 12, .name = "meson-gxm-saradc", }; static const struct of_device_id meson_sar_adc_of_match[] = { { .compatible = "amlogic,meson-gxbb-saradc", .data = &meson_sar_adc_gxbb_data, }, { .compatible = "amlogic,meson-gxl-saradc", .data = &meson_sar_adc_gxl_data, }, { .compatible = "amlogic,meson-gxm-saradc", .data = &meson_sar_adc_gxm_data, }, {}, }; MODULE_DEVICE_TABLE(of, meson_sar_adc_of_match); static int meson_sar_adc_probe(struct platform_device *pdev) { struct meson_sar_adc_priv *priv; struct iio_dev *indio_dev; struct resource *res; void __iomem *base; const struct of_device_id *match; int ret; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv)); if (!indio_dev) { dev_err(&pdev->dev, "failed allocating iio device\n"); return -ENOMEM; } priv = iio_priv(indio_dev); match = of_match_device(meson_sar_adc_of_match, &pdev->dev); priv->data = match->data; indio_dev->name = priv->data->name; indio_dev->dev.parent = &pdev->dev; indio_dev->dev.of_node = pdev->dev.of_node; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &meson_sar_adc_iio_info; indio_dev->channels = meson_sar_adc_iio_channels; indio_dev->num_channels = ARRAY_SIZE(meson_sar_adc_iio_channels); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &meson_sar_adc_regmap_config); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); priv->clkin = devm_clk_get(&pdev->dev, "clkin"); if (IS_ERR(priv->clkin)) { dev_err(&pdev->dev, "failed to get clkin\n"); return PTR_ERR(priv->clkin); } priv->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(priv->core_clk)) { dev_err(&pdev->dev, "failed to get core clk\n"); return PTR_ERR(priv->core_clk); } priv->sana_clk = devm_clk_get(&pdev->dev, "sana"); if (IS_ERR(priv->sana_clk)) { if (PTR_ERR(priv->sana_clk) == -ENOENT) { priv->sana_clk = NULL; } else { dev_err(&pdev->dev, "failed to get sana clk\n"); return PTR_ERR(priv->sana_clk); } } priv->adc_clk = devm_clk_get(&pdev->dev, "adc_clk"); if (IS_ERR(priv->adc_clk)) { if (PTR_ERR(priv->adc_clk) == -ENOENT) { priv->adc_clk = NULL; } else { dev_err(&pdev->dev, "failed to get adc clk\n"); return PTR_ERR(priv->adc_clk); } } priv->adc_sel_clk = devm_clk_get(&pdev->dev, "adc_sel"); if (IS_ERR(priv->adc_sel_clk)) { if (PTR_ERR(priv->adc_sel_clk) == -ENOENT) { priv->adc_sel_clk = NULL; } else { dev_err(&pdev->dev, "failed to get adc_sel clk\n"); return PTR_ERR(priv->adc_sel_clk); } } /* on pre-GXBB SoCs the SAR ADC itself provides the ADC clock: */ if (!priv->adc_clk) { ret = meson_sar_adc_clk_init(indio_dev, base); if (ret) return ret; } priv->vref = devm_regulator_get(&pdev->dev, "vref"); if (IS_ERR(priv->vref)) { dev_err(&pdev->dev, "failed to get vref regulator\n"); return PTR_ERR(priv->vref); } ret = meson_sar_adc_init(indio_dev); if (ret) goto err; ret = meson_sar_adc_hw_enable(indio_dev); if (ret) goto err; platform_set_drvdata(pdev, indio_dev); ret = 
iio_device_register(indio_dev); if (ret) goto err_hw; return 0; err_hw: meson_sar_adc_hw_disable(indio_dev); err: return ret; } static int meson_sar_adc_remove(struct platform_device *pdev) { struct iio_dev *indio_dev = platform_get_drvdata(pdev); iio_device_unregister(indio_dev); return meson_sar_adc_hw_disable(indio_dev); } static int __maybe_unused meson_sar_adc_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); return meson_sar_adc_hw_disable(indio_dev); } static int __maybe_unused meson_sar_adc_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); return meson_sar_adc_hw_enable(indio_dev); } static SIMPLE_DEV_PM_OPS(meson_sar_adc_pm_ops, meson_sar_adc_suspend, meson_sar_adc_resume); static struct platform_driver meson_sar_adc_driver = { .probe = meson_sar_adc_probe, .remove = meson_sar_adc_remove, .driver = { .name = "meson-saradc", .of_match_table = meson_sar_adc_of_match, .pm = &meson_sar_adc_pm_ops, }, }; module_platform_driver(meson_sar_adc_driver); MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); MODULE_DESCRIPTION("Amlogic Meson SAR ADC driver"); MODULE_LICENSE("GPL v2");
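/*
 * Editor's note: the driver above leans on the kernel's GENMASK() /
 * FIELD_PREP() / FIELD_GET() helpers for register bitfields. A minimal
 * user-space restatement for readers without <linux/bitfield.h> at hand
 * (assumption: 32-bit registers and a contiguous, non-zero mask; the my_*
 * names are illustrative, not kernel API):
 */
#include <cassert>
#include <cstdint>
#include <cstdio>

#define MY_GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

static unsigned int my_field_shift(uint32_t mask) {
  unsigned int shift = 0;
  while (!(mask & 1)) {  // find the field's lowest set bit
    mask >>= 1;
    ++shift;
  }
  return shift;
}

static uint32_t my_field_prep(uint32_t mask, uint32_t val) {
  return (val << my_field_shift(mask)) & mask;  // place value in the field
}

static uint32_t my_field_get(uint32_t mask, uint32_t reg) {
  return (reg & mask) >> my_field_shift(mask);  // extract it back out
}

int main() {
  /* Same layout as MESON_SAR_ADC_FIFO_RD_CHAN_ID_MASK, i.e. bits 14:12. */
  uint32_t chan_mask = MY_GENMASK(14, 12);
  uint32_t reg = my_field_prep(chan_mask, 5);

  assert(chan_mask == 0x7000u);
  assert(my_field_get(chan_mask, reg) == 5);
  std::printf("reg = 0x%08x\n", reg);
  return 0;
}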
null
null
null
null
106,156
45,364
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
45,364
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/display/mirror_window_controller.h" #include "ash/display/mirror_window_test_api.h" #include "ash/display/window_tree_host_manager.h" #include "ash/public/cpp/config.h" #include "ash/shell.h" #include "ash/test/ash_test_base.h" #include "ash/wm/cursor_manager_test_api.h" #include "base/command_line.h" #include "base/strings/stringprintf.h" #include "ui/aura/env.h" #include "ui/aura/test/test_window_delegate.h" #include "ui/aura/test/test_windows.h" #include "ui/aura/window.h" #include "ui/aura/window_event_dispatcher.h" #include "ui/base/hit_test.h" #include "ui/display/display_switches.h" #include "ui/display/manager/display_manager.h" #include "ui/display/test/display_manager_test_api.h" #include "ui/events/test/event_generator.h" namespace ash { namespace { display::ManagedDisplayInfo CreateDisplayInfo(int64_t id, const gfx::Rect& bounds) { display::ManagedDisplayInfo info( id, base::StringPrintf("x-%d", static_cast<int>(id)), false); info.SetBounds(bounds); return info; } class MirrorOnBootTest : public AshTestBase { public: MirrorOnBootTest() = default; ~MirrorOnBootTest() override = default; void SetUp() override { base::CommandLine::ForCurrentProcess()->AppendSwitchASCII( ::switches::kHostWindowBounds, "1+1-300x300,1+301-300x300"); base::CommandLine::ForCurrentProcess()->AppendSwitch( ::switches::kEnableSoftwareMirroring); AshTestBase::SetUp(); } void TearDown() override { AshTestBase::TearDown(); } private: DISALLOW_COPY_AND_ASSIGN(MirrorOnBootTest); }; } using MirrorWindowControllerTest = AshTestBase; class MirrorWindowControllerTestDisableMultiMirroring : public AshTestBase { public: MirrorWindowControllerTestDisableMultiMirroring() = default; ~MirrorWindowControllerTestDisableMultiMirroring() override = default; void SetUp() override { base::CommandLine::ForCurrentProcess()->AppendSwitch( ::switches::kDisableMultiMirroring); AshTestBase::SetUp(); } private: DISALLOW_COPY_AND_ASSIGN(MirrorWindowControllerTestDisableMultiMirroring); }; // TODO(weidongg/774795) Remove this test when multi mirroring is enabled by // default, because cursor compositing will be enabled for software mirroring. TEST_F(MirrorWindowControllerTestDisableMultiMirroring, MirrorCursorBasic) { // MirrorWindowController is not used in the MUS or MASH configs. if (Shell::GetAshConfig() != Config::CLASSIC) return; MirrorWindowTestApi test_api; aura::test::TestWindowDelegate test_window_delegate; test_window_delegate.set_window_component(HTTOP); UpdateDisplay("400x400,400x400"); display_manager()->SetMirrorMode(display::MirrorMode::kNormal, base::nullopt); RunAllPendingInMessageLoop(); aura::Window* root = Shell::Get()->GetPrimaryRootWindow(); std::unique_ptr<aura::Window> window(aura::test::CreateTestWindowWithDelegate( &test_window_delegate, 0, gfx::Rect(50, 50, 100, 100), root)); window->Show(); window->SetName("foo"); EXPECT_TRUE(test_api.GetCursorWindow()); EXPECT_EQ("50,50 100x100", window->bounds().ToString()); ui::test::EventGenerator generator(root); generator.MoveMouseTo(10, 10); // Test if cursor movement is properly reflected in mirror window.
EXPECT_EQ("4,4", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("10,10", test_api.GetCursorHotPointLocationInRootWindow().ToString()); EXPECT_EQ(ui::CursorType::kNull, test_api.GetCurrentCursorType()); EXPECT_TRUE(test_api.GetCursorWindow()->IsVisible()); // Test if cursor type change is propertly reflected in mirror window. generator.MoveMouseTo(100, 100); EXPECT_EQ("100,100", test_api.GetCursorHotPointLocationInRootWindow().ToString()); EXPECT_EQ(ui::CursorType::kNorthResize, test_api.GetCurrentCursorType()); // Test if visibility change is propertly reflected in mirror window. // A key event hides cursor. generator.PressKey(ui::VKEY_A, 0); generator.ReleaseKey(ui::VKEY_A, 0); EXPECT_FALSE(test_api.GetCursorWindow()->IsVisible()); // Mouse event makes it visible again. generator.MoveMouseTo(300, 300); EXPECT_EQ("300,300", test_api.GetCursorHotPointLocationInRootWindow().ToString()); EXPECT_EQ(ui::CursorType::kNull, test_api.GetCurrentCursorType()); EXPECT_TRUE(test_api.GetCursorWindow()->IsVisible()); } // TODO(weidongg/774795) Remove this test when multi mirroring is enabled by // default, because cursor compositing will be enabled for software mirroring. TEST_F(MirrorWindowControllerTestDisableMultiMirroring, MirrorCursorRotate) { // MirrorWindowController is not used in the MUS or MASH configs. if (Shell::GetAshConfig() != Config::CLASSIC) return; MirrorWindowTestApi test_api; aura::test::TestWindowDelegate test_window_delegate; test_window_delegate.set_window_component(HTTOP); UpdateDisplay("400x400,400x400"); display_manager()->SetMirrorMode(display::MirrorMode::kNormal, base::nullopt); RunAllPendingInMessageLoop(); aura::Window* root = Shell::Get()->GetPrimaryRootWindow(); std::unique_ptr<aura::Window> window(aura::test::CreateTestWindowWithDelegate( &test_window_delegate, 0, gfx::Rect(50, 50, 100, 100), root)); window->Show(); window->SetName("foo"); EXPECT_TRUE(test_api.GetCursorWindow()); EXPECT_EQ("50,50 100x100", window->bounds().ToString()); ui::test::EventGenerator generator(root); generator.MoveMouseToInHost(100, 100); // Test if cursor movement is propertly reflected in mirror window. EXPECT_EQ("11,12", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("100,100", test_api.GetCursorHotPointLocationInRootWindow().ToString()); EXPECT_EQ(ui::CursorType::kNorthResize, test_api.GetCurrentCursorType()); UpdateDisplay("400x400/r,400x400"); // 90 degrees. generator.MoveMouseToInHost(300, 100); EXPECT_EQ(ui::CursorType::kNorthResize, test_api.GetCurrentCursorType()); // The size of cursor image is 25x25, so the rotated hot point must // be (25-12, 11). EXPECT_EQ("13,11", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("300,100", test_api.GetCursorHotPointLocationInRootWindow().ToString()); UpdateDisplay("400x400/u,400x400"); // 180 degrees. generator.MoveMouseToInHost(300, 300); EXPECT_EQ(ui::CursorType::kNorthResize, test_api.GetCurrentCursorType()); // Rotated hot point must be (25-11, 25-12). EXPECT_EQ("14,13", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("300,300", test_api.GetCursorHotPointLocationInRootWindow().ToString()); UpdateDisplay("400x400/l,400x400"); // 270 degrees. generator.MoveMouseToInHost(100, 300); EXPECT_EQ(ui::CursorType::kNorthResize, test_api.GetCurrentCursorType()); // Rotated hot point must be (12, 25-11). 
EXPECT_EQ("12,14", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("100,300", test_api.GetCursorHotPointLocationInRootWindow().ToString()); } // Make sure that the mirror cursor's location is same as // the source display's host location in the mirror root window's // coordinates. // TODO(weidongg/774795) Remove this test when multi mirroring is enabled by // default, because cursor compositing will be enabled for software mirroring. TEST_F(MirrorWindowControllerTestDisableMultiMirroring, MirrorCursorLocations) { // MirrorWindowController is not used in the MUS or MASH configs. if (Shell::GetAshConfig() != Config::CLASSIC) return; MirrorWindowTestApi test_api; // Test with device scale factor. UpdateDisplay("400x600*2,400x600"); display_manager()->SetMirrorMode(display::MirrorMode::kNormal, base::nullopt); RunAllPendingInMessageLoop(); aura::Window* root = Shell::Get()->GetPrimaryRootWindow(); ui::test::EventGenerator generator(root); generator.MoveMouseToInHost(10, 20); EXPECT_EQ("7,7", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("10,20", test_api.GetCursorHotPointLocationInRootWindow().ToString()); // Test with ui scale UpdateDisplay("400x600@0.5,400x600"); generator.MoveMouseToInHost(20, 30); EXPECT_EQ("4,4", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("20,30", test_api.GetCursorHotPointLocationInRootWindow().ToString()); // Test with rotation UpdateDisplay("400x600/r,400x600"); generator.MoveMouseToInHost(30, 40); EXPECT_EQ("21,4", test_api.GetCursorHotPoint().ToString()); EXPECT_EQ("30,40", test_api.GetCursorHotPointLocationInRootWindow().ToString()); } // Test the behavior of the cursor when entering software mirror mode swaps the // cursor's display. // TODO(weidongg/774795) Remove this test when multi mirroring is enabled by // default, because cursor compositing will be enabled for software mirroring. TEST_F(MirrorWindowControllerTestDisableMultiMirroring, MirrorCursorMoveOnEnter) { // MirrorWindowController is not used in the MUS or MASH configs. if (Shell::GetAshConfig() != Config::CLASSIC) return; aura::Env* env = aura::Env::GetInstance(); Shell* shell = Shell::Get(); WindowTreeHostManager* window_tree_host_manager = shell->window_tree_host_manager(); UpdateDisplay("400x400*2/r,400x400"); int64_t primary_display_id = window_tree_host_manager->GetPrimaryDisplayId(); int64_t secondary_display_id = display_manager()->GetSecondaryDisplay().id(); display::test::ScopedSetInternalDisplayId set_internal(display_manager(), primary_display_id); // Chrome uses the internal display as the source display for software mirror // mode. Move the cursor to the external display. aura::Window* secondary_root_window = window_tree_host_manager->GetRootWindowForDisplayId(secondary_display_id); secondary_root_window->MoveCursorTo(gfx::Point(100, 200)); EXPECT_EQ("300,200", env->last_mouse_location().ToString()); CursorManagerTestApi cursor_test_api(shell->cursor_manager()); EXPECT_EQ(1.0f, cursor_test_api.GetCurrentCursor().device_scale_factor()); EXPECT_EQ(display::Display::ROTATE_0, cursor_test_api.GetCurrentCursorRotation()); UpdateDisplay("400x400*2/r,400x400"); display_manager()->SetMirrorMode(display::MirrorMode::kNormal, base::nullopt); RunAllPendingInMessageLoop(); // Entering mirror mode should have centered the cursor on the primary display // because the cursor's previous position is out of bounds. // Check real cursor's position and properties. 
EXPECT_EQ("100,100", env->last_mouse_location().ToString()); EXPECT_EQ(2.0f, cursor_test_api.GetCurrentCursor().device_scale_factor()); EXPECT_EQ(display::Display::ROTATE_90, cursor_test_api.GetCurrentCursorRotation()); // Check mirrored cursor's location. MirrorWindowTestApi test_api; // The hot point location depends on the specific cursor. EXPECT_EQ(ui::CursorType::kNull, test_api.GetCurrentCursorType()); // Rotated hot point must be (25-7, 7). EXPECT_EQ("18,7", test_api.GetCursorHotPoint().ToString()); // New coordinates are not (200,200) because (200,200) is not the center of // the display. EXPECT_EQ("200,200", test_api.GetCursorHotPointLocationInRootWindow().ToString()); } // Make sure that the compositor based mirroring can switch // from/to dock mode. TEST_F(MirrorWindowControllerTest, DockMode) { const int64_t internal_id = 1; const int64_t external_id = 2; const display::ManagedDisplayInfo internal_display_info = CreateDisplayInfo(internal_id, gfx::Rect(0, 0, 500, 500)); const display::ManagedDisplayInfo external_display_info = CreateDisplayInfo(external_id, gfx::Rect(1, 1, 100, 100)); std::vector<display::ManagedDisplayInfo> display_info_list; // software mirroring. display_info_list.push_back(internal_display_info); display_info_list.push_back(external_display_info); display_manager()->OnNativeDisplaysChanged(display_info_list); const int64_t internal_display_id = display::test::DisplayManagerTestApi(display_manager()) .SetFirstDisplayAsInternalDisplay(); EXPECT_EQ(internal_id, internal_display_id); display_manager()->SetMirrorMode(display::MirrorMode::kNormal, base::nullopt); RunAllPendingInMessageLoop(); EXPECT_EQ(1U, display_manager()->GetNumDisplays()); EXPECT_TRUE(display_manager()->IsInSoftwareMirrorMode()); EXPECT_EQ(external_id, display_manager()->GetMirroringDestinationDisplayIdList()[0]); // dock mode. display_info_list.clear(); display_info_list.push_back(external_display_info); display_manager()->OnNativeDisplaysChanged(display_info_list); EXPECT_EQ(1U, display_manager()->GetNumDisplays()); EXPECT_FALSE(display_manager()->IsInMirrorMode()); // back to software mirroring. display_info_list.clear(); display_info_list.push_back(internal_display_info); display_info_list.push_back(external_display_info); display_manager()->OnNativeDisplaysChanged(display_info_list); EXPECT_EQ(1U, display_manager()->GetNumDisplays()); EXPECT_TRUE(display_manager()->IsInMirrorMode()); EXPECT_EQ(external_id, display_manager()->GetMirroringDestinationDisplayIdList()[0]); } TEST_F(MirrorOnBootTest, MirrorOnBoot) { EXPECT_TRUE(display_manager()->IsInMirrorMode()); // MirrorWindowController is not used in the MUS or MASH configs. if (Shell::GetAshConfig() != Config::CLASSIC) return; RunAllPendingInMessageLoop(); MirrorWindowTestApi test_api; EXPECT_EQ(1U, test_api.GetHosts().size()); } } // namespace ash
null
null
null
null
42,227
33,022
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
33,022
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/inspector/resolve_node.h" #include "third_party/blink/renderer/bindings/core/v8/binding_security.h" #include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h" #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/dom/node.h" #include "third_party/blink/renderer/core/frame/local_frame.h" #include "third_party/blink/renderer/core/inspector/v8_inspector_string.h" namespace blink { v8::Local<v8::Value> NodeV8Value(v8::Local<v8::Context> context, Node* node) { v8::Isolate* isolate = context->GetIsolate(); if (!node || !BindingSecurity::ShouldAllowAccessTo( CurrentDOMWindow(isolate), node, BindingSecurity::ErrorReportOption::kDoNotReport)) return v8::Null(isolate); return ToV8(node, context->Global(), isolate); } std::unique_ptr<v8_inspector::protocol::Runtime::API::RemoteObject> ResolveNode( v8_inspector::V8InspectorSession* v8_session, Node* node, const String& object_group) { if (!node) return nullptr; Document* document = node->IsDocumentNode() ? &node->GetDocument() : node->ownerDocument(); LocalFrame* frame = document ? document->GetFrame() : nullptr; if (!frame) return nullptr; ScriptState* script_state = ToScriptStateForMainWorld(frame); if (!script_state) return nullptr; ScriptState::Scope scope(script_state); return v8_session->wrapObject( script_state->GetContext(), NodeV8Value(script_state->GetContext(), node), ToV8InspectorStringView(object_group), false /* generatePreview */); } std::unique_ptr<v8_inspector::protocol::Runtime::API::RemoteObject> NullRemoteObject(v8_inspector::V8InspectorSession* v8_session, LocalFrame* frame, const String& object_group) { if (!frame) return nullptr; ScriptState* script_state = ToScriptStateForMainWorld(frame); if (!script_state) return nullptr; ScriptState::Scope scope(script_state); return v8_session->wrapObject( script_state->GetContext(), NodeV8Value(script_state->GetContext(), nullptr), ToV8InspectorStringView(object_group), false /* generatePreview */); } } // namespace blink
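// Editor's note: NodeV8Value() above degrades a node the caller may not
// access to v8::Null rather than failing the whole wrapObject() call. A
// standalone sketch of that "guard, then degrade to null" shape (the
// origin strings and Wrap() helper are hypothetical stand-ins, not Blink
// or V8 API):
#include <iostream>
#include <string>

struct RemoteValue {
  std::string repr;  // "null" when the node is absent or access is denied
};

bool ShouldAllowAccessTo(const std::string& caller_origin,
                         const std::string& node_origin) {
  return caller_origin == node_origin;  // stand-in for BindingSecurity
}

RemoteValue Wrap(const std::string& caller_origin, const std::string* node,
                 const std::string& node_origin) {
  if (!node || !ShouldAllowAccessTo(caller_origin, node_origin))
    return {"null"};  // mirrors returning v8::Null(isolate) above
  return {*node};
}

int main() {
  std::string node = "<div id=\"x\">";
  std::cout << Wrap("https://a.test", &node, "https://a.test").repr << "\n";
  std::cout << Wrap("https://a.test", &node, "https://b.test").repr << "\n";
  return 0;
}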
null
null
null
null
29,885
33,650
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
198,645
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* STV6110(A) Silicon tuner driver Copyright (C) Manu Abraham <abraham.manu@gmail.com> Copyright (C) ST Microelectronics This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __STV6110x_PRIV_H #define __STV6110x_PRIV_H #define FE_ERROR 0 #define FE_NOTICE 1 #define FE_INFO 2 #define FE_DEBUG 3 #define FE_DEBUGREG 4 #define dprintk(__y, __z, format, arg...) do { \ if (__z) { \ if ((verbose > FE_ERROR) && (verbose > __y)) \ printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \ else if ((verbose > FE_NOTICE) && (verbose > __y)) \ printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \ else if ((verbose > FE_INFO) && (verbose > __y)) \ printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \ else if ((verbose > FE_DEBUG) && (verbose > __y)) \ printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \ } else { \ if (verbose > __y) \ printk(format, ##arg); \ } \ } while (0) #define STV6110x_SETFIELD(mask, bitf, val) \ (mask = (mask & (~(((1 << STV6110x_WIDTH_##bitf) - 1) << \ STV6110x_OFFST_##bitf))) | \ (val << STV6110x_OFFST_##bitf)) #define STV6110x_GETFIELD(bitf, val) \ ((val >> STV6110x_OFFST_##bitf) & \ ((1 << STV6110x_WIDTH_##bitf) - 1)) #define MAKEWORD16(a, b) (((a) << 8) | (b)) #define LSB(x) ((x & 0xff)) #define MSB(y) ((y >> 8) & 0xff) #define TRIALS 10 #define R_DIV(__div) (1 << (__div + 1)) #define REFCLOCK_kHz (stv6110x->config->refclk / 1000) #define REFCLOCK_MHz (stv6110x->config->refclk / 1000000) struct stv6110x_state { struct i2c_adapter *i2c; const struct stv6110x_config *config; u8 regs[8]; const struct stv6110x_devctl *devctl; }; #endif /* __STV6110x_PRIV_H */
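// Editor's note: a small self-contained check (not driver code) that the
// MAKEWORD16()/LSB()/MSB() helpers defined above round-trip a 16-bit word.
// The copies below parenthesize the macro parameters, which the originals
// omit; that is ordinary macro hygiene, not a behavior change for the
// driver's simple call sites.
#include <cassert>
#include <cstdio>

#define MY_MAKEWORD16(a, b) (((a) << 8) | (b))
#define MY_LSB(x) ((x) & 0xff)
#define MY_MSB(y) (((y) >> 8) & 0xff)

int main() {
  unsigned int word = MY_MAKEWORD16(0x12, 0x34);

  assert(word == 0x1234u);
  assert(MY_MSB(word) == 0x12);
  assert(MY_LSB(word) == 0x34);
  std::printf("0x%04x -> msb 0x%02x lsb 0x%02x\n", word, MY_MSB(word),
              MY_LSB(word));
  return 0;
}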
null
null
null
null
106,992
28,302
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
28,302
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Protocol Buffers - Google's data interchange format // Copyright 2012 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // This file is an internal atomic implementation, use atomicops.h instead. // // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_ #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_ namespace google { namespace protobuf { namespace internal { // 0xffff0fc0 is the hard coded address of a function provided by // the kernel which implements an atomic compare-exchange. On older // ARM architecture revisions (pre-v6) this may be implemented using // a syscall. This address is stable, and in active use (hard coded) // by at least glibc-2.7 and the Android C library. typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, Atomic32 new_value, volatile Atomic32* ptr); LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = (LinuxKernelCmpxchgFunc) 0xffff0fc0; typedef void (*LinuxKernelMemoryBarrierFunc)(void); LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value = *ptr; do { if (!pLinuxKernelCmpxchg(old_value, new_value, const_cast<Atomic32*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 old_value; do { old_value = *ptr; } while (pLinuxKernelCmpxchg(old_value, new_value, const_cast<Atomic32*>(ptr))); return old_value; } inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { return Barrier_AtomicIncrement(ptr, increment); } inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { for (;;) { // Atomic exchange the old value with an incremented one. 
Atomic32 old_value = *ptr; Atomic32 new_value = old_value + increment; if (pLinuxKernelCmpxchg(old_value, new_value, const_cast<Atomic32*>(ptr)) == 0) { // The exchange took place as expected. return new_value; } // Otherwise, *ptr changed mid-loop and we need to retry. } } inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline void MemoryBarrierInternal() { pLinuxKernelMemoryBarrier(); } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrierInternal(); } inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrierInternal(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; MemoryBarrierInternal(); return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrierInternal(); return *ptr; } } // namespace internal } // namespace protobuf } // namespace google #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
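// Editor's note: a portable restatement (std::atomic, not the protobuf
// internals above) of Barrier_AtomicIncrement's loop: keep retrying the
// compare-and-swap until no other thread raced the read-modify-write.
#include <atomic>
#include <cassert>

int BarrierAtomicIncrement(std::atomic<int>* ptr, int increment) {
  int old_value = ptr->load();
  // On failure, compare_exchange_weak reloads old_value with the current
  // contents of *ptr, so each retry recomputes the incremented value.
  while (!ptr->compare_exchange_weak(old_value, old_value + increment)) {
  }
  return old_value + increment;
}

int main() {
  std::atomic<int> value{40};
  assert(BarrierAtomicIncrement(&value, 2) == 42);
  return 0;
}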
null
null
null
null
25,165
64,687
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
64,687
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_COCOA_EXTENSIONS_MEDIA_GALLERIES_DIALOG_COCOA_H_ #define CHROME_BROWSER_UI_COCOA_EXTENSIONS_MEDIA_GALLERIES_DIALOG_COCOA_H_ #import <Cocoa/Cocoa.h> #include "base/gtest_prod_util.h" #include "base/macros.h" #include "chrome/browser/media_galleries/media_galleries_dialog_controller.h" #import "chrome/browser/ui/cocoa/constrained_window/constrained_window_mac.h" #import "chrome/browser/ui/cocoa/extensions/media_gallery_list_entry_view.h" @class ConstrainedWindowAlert; @class MediaGalleriesCocoaController; @class NSString; class MediaGalleriesDialogBrowserTest; class MediaGalleriesDialogTest; namespace ui { class MenuModel; } // This class displays an alert that can be used to manage lists of media // galleries. class MediaGalleriesDialogCocoa : public ConstrainedWindowMacDelegate, public MediaGalleriesDialog, public MediaGalleryListEntryController { public: MediaGalleriesDialogCocoa( MediaGalleriesDialogController* controller, MediaGalleriesCocoaController* delegate); ~MediaGalleriesDialogCocoa() override; // Called when the user clicks the accept button. void OnAcceptClicked(); // Called when the user clicks the cancel button. void OnCancelClicked(); // Called when the user clicks the auxiliary button. void OnAuxiliaryButtonClicked(); // MediaGalleriesDialog implementation: void UpdateGalleries() override; // ConstrainedWindowMacDelegate implementation. void OnConstrainedWindowClosed(ConstrainedWindowMac* window) override; // MediaGalleryListEntryController implementation. void OnCheckboxToggled(MediaGalleryPrefId pref_id, bool checked) override; ui::MenuModel* GetContextMenu(MediaGalleryPrefId pref_id) override; private: FRIEND_TEST_ALL_PREFIXES(MediaGalleriesDialogBrowserTest, Close); FRIEND_TEST_ALL_PREFIXES(MediaGalleriesDialogTest, InitializeCheckboxes); FRIEND_TEST_ALL_PREFIXES(MediaGalleriesDialogTest, ToggleCheckboxes); FRIEND_TEST_ALL_PREFIXES(MediaGalleriesDialogTest, UpdateAdds); FRIEND_TEST_ALL_PREFIXES(MediaGalleriesDialogTest, ForgetDeletes); // MediaGalleriesDialog implementation: void AcceptDialogForTesting() override; void InitDialogControls(); CGFloat CreateCheckboxes( CGFloat y_pos, const MediaGalleriesDialogController::Entries& entries); CGFloat CreateCheckboxSeparator(CGFloat y_pos, NSString* header); MediaGalleriesDialogController* controller_; // weak std::unique_ptr<ConstrainedWindowMac> window_; // The alert that the dialog is being displayed as. base::scoped_nsobject<ConstrainedWindowAlert> alert_; // True if the user has pressed accept. bool accepted_; // Container view for checkboxes. base::scoped_nsobject<NSView> checkbox_container_; // Container view for the main dialog contents. base::scoped_nsobject<NSBox> main_container_; // An Objective-C class to route callbacks from Cocoa code. base::scoped_nsobject<MediaGalleriesCocoaController> cocoa_controller_; DISALLOW_COPY_AND_ASSIGN(MediaGalleriesDialogCocoa); }; #endif // CHROME_BROWSER_UI_COCOA_EXTENSIONS_MEDIA_GALLERIES_DIALOG_COCOA_H_
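// Editor's note: DISALLOW_COPY_AND_ASSIGN(TypeName), used by the dialog
// class above, comes from Chromium's base/macros.h of this era and deletes
// the copy constructor and copy assignment. A standalone equivalent for
// readers outside the Chromium tree (the class name is illustrative):
#include <type_traits>

class DialogLike {
 public:
  DialogLike() = default;
  // Roughly what DISALLOW_COPY_AND_ASSIGN(DialogLike) expands to:
  DialogLike(const DialogLike&) = delete;
  DialogLike& operator=(const DialogLike&) = delete;
};

static_assert(!std::is_copy_constructible<DialogLike>::value,
              "copying is disabled, as the macro intends");

int main() { return 0; }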
null
null
null
null
61,550
50,704
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
50,704
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef UI_OZONE_COMMON_STUB_CLIENT_NATIVE_PIXMAP_FACTORY_H_ #define UI_OZONE_COMMON_STUB_CLIENT_NATIVE_PIXMAP_FACTORY_H_ #include "ui/gfx/client_native_pixmap_factory.h" namespace ui { // Platforms which don't need to share native pixmap use this. // The caller takes ownership of the instance. gfx::ClientNativePixmapFactory* CreateStubClientNativePixmapFactory(); } // namespace ui #endif // UI_OZONE_COMMON_STUB_CLIENT_NATIVE_PIXMAP_FACTORY_H_
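// Editor's note: "the caller takes ownership of the instance", as the
// comment above says, is usually enforced by wrapping the raw pointer in a
// std::unique_ptr at the call site. A standalone illustration with a
// hypothetical Widget factory (not the ui:: API):
#include <memory>

struct Widget {
  int id = 0;
};

// Factory in the style of CreateStubClientNativePixmapFactory(): returns a
// heap allocation the caller must delete.
Widget* CreateWidget() {
  return new Widget{1};
}

int main() {
  // Capturing ownership immediately means no early return can leak it.
  std::unique_ptr<Widget> widget(CreateWidget());
  return widget->id == 1 ? 0 : 1;
}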
null
null
null
null
47,567
69,116
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
69,116
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <fcntl.h> #include <gmock/gmock.h> #include <ppapi/c/ppb_file_io.h> #include <ppapi/c/pp_errors.h> #include <ppapi/c/pp_instance.h> #include <sys/stat.h> #include <sys/types.h> #include "fake_ppapi/fake_pepper_interface_url_loader.h" #include "nacl_io/dir_node.h" #include "nacl_io/httpfs/http_fs.h" #include "nacl_io/kernel_handle.h" #include "nacl_io/kernel_intercept.h" #include "nacl_io/osdirent.h" #include "nacl_io/osunistd.h" using namespace nacl_io; namespace { class HttpFsForTesting : public HttpFs { public: HttpFsForTesting(StringMap_t map, PepperInterface* ppapi) { FsInitArgs args(1); args.string_map = map; args.ppapi = ppapi; EXPECT_EQ(0, Init(args)); } using HttpFs::GetNodeCacheForTesting; using HttpFs::ParseManifest; using HttpFs::FindOrCreateDir; }; enum { kStringMapParamCacheNone = 0, kStringMapParamCacheContent = 1, kStringMapParamCacheStat = 2, kStringMapParamCacheContentStat = kStringMapParamCacheContent | kStringMapParamCacheStat, }; typedef uint32_t StringMapParam; StringMap_t MakeStringMap(StringMapParam param) { StringMap_t smap; if (param & kStringMapParamCacheContent) smap["cache_content"] = "true"; else smap["cache_content"] = "false"; if (param & kStringMapParamCacheStat) smap["cache_stat"] = "true"; else smap["cache_stat"] = "false"; return smap; } class HttpFsTest : public ::testing::TestWithParam<StringMapParam> { public: HttpFsTest(); protected: FakePepperInterfaceURLLoader ppapi_; HttpFsForTesting fs_; }; HttpFsTest::HttpFsTest() : fs_(MakeStringMap(GetParam()), &ppapi_) {} class HttpFsLargeFileTest : public HttpFsTest { public: HttpFsLargeFileTest() {} }; } // namespace TEST_P(HttpFsTest, OpenAndCloseServerError) { EXPECT_TRUE(ppapi_.server_template()->AddError("file", 500)); ScopedNode node; ASSERT_EQ(EIO, fs_.Open(Path("/file"), O_RDONLY, &node)); } TEST_P(HttpFsTest, ReadPartial) { const char contents[] = "0123456789abcdefg"; ASSERT_TRUE(ppapi_.server_template()->AddEntity("file", contents, NULL)); ppapi_.server_template()->set_allow_partial(true); int result_bytes = 0; char buf[10]; memset(&buf[0], 0, sizeof(buf)); ScopedNode node; ASSERT_EQ(0, fs_.Open(Path("/file"), O_RDONLY, &node)); HandleAttr attr; EXPECT_EQ(0, node->Read(attr, buf, sizeof(buf) - 1, &result_bytes)); EXPECT_EQ(sizeof(buf) - 1, result_bytes); EXPECT_STREQ("012345678", &buf[0]); // Read is clamped when reading past the end of the file. attr.offs = 10; ASSERT_EQ(0, node->Read(attr, buf, sizeof(buf) - 1, &result_bytes)); ASSERT_EQ(strlen("abcdefg"), result_bytes); buf[result_bytes] = 0; EXPECT_STREQ("abcdefg", &buf[0]); // Read nothing when starting past the end of the file. attr.offs = 100; EXPECT_EQ(0, node->Read(attr, &buf[0], sizeof(buf), &result_bytes)); EXPECT_EQ(0, result_bytes); } TEST_P(HttpFsTest, ReadPartialNoServerSupport) { const char contents[] = "0123456789abcdefg"; ASSERT_TRUE(ppapi_.server_template()->AddEntity("file", contents, NULL)); ppapi_.server_template()->set_allow_partial(false); int result_bytes = 0; char buf[10]; memset(&buf[0], 0, sizeof(buf)); ScopedNode node; ASSERT_EQ(0, fs_.Open(Path("/file"), O_RDONLY, &node)); HandleAttr attr; EXPECT_EQ(0, node->Read(attr, buf, sizeof(buf) - 1, &result_bytes)); EXPECT_EQ(sizeof(buf) - 1, result_bytes); EXPECT_STREQ("012345678", &buf[0]); // Read is clamped when reading past the end of the file. 
attr.offs = 10; ASSERT_EQ(0, node->Read(attr, buf, sizeof(buf) - 1, &result_bytes)); ASSERT_EQ(strlen("abcdefg"), result_bytes); buf[result_bytes] = 0; EXPECT_STREQ("abcdefg", &buf[0]); // Read nothing when starting past the end of the file. attr.offs = 100; EXPECT_EQ(0, node->Read(attr, &buf[0], sizeof(buf), &result_bytes)); EXPECT_EQ(0, result_bytes); } TEST_P(HttpFsTest, Write) { const char contents[] = "contents"; ASSERT_TRUE(ppapi_.server_template()->AddEntity("file", contents, NULL)); ScopedNode node; ASSERT_EQ(0, fs_.Open(Path("/file"), O_WRONLY, &node)); // Writing always fails. HandleAttr attr; attr.offs = 3; int bytes_written = 1; // Set to a non-zero value. EXPECT_EQ(EACCES, node->Write(attr, "struct", 6, &bytes_written)); EXPECT_EQ(0, bytes_written); } TEST_P(HttpFsTest, GetStat) { const char contents[] = "contents"; ASSERT_TRUE(ppapi_.server_template()->AddEntity("file", contents, NULL)); ScopedNode node; ASSERT_EQ(0, fs_.Open(Path("/file"), O_RDONLY, &node)); struct stat statbuf; EXPECT_EQ(0, node->GetStat(&statbuf)); EXPECT_EQ(S_IFREG | S_IRUSR | S_IRGRP | S_IROTH, statbuf.st_mode); EXPECT_EQ(strlen(contents), statbuf.st_size); // These are not currently set. EXPECT_EQ(0, statbuf.st_atime); EXPECT_EQ(0, statbuf.st_ctime); EXPECT_EQ(0, statbuf.st_mtime); } TEST_P(HttpFsTest, FTruncate) { const char contents[] = "contents"; ASSERT_TRUE(ppapi_.server_template()->AddEntity("file", contents, NULL)); ScopedNode node; ASSERT_EQ(0, fs_.Open(Path("/file"), O_RDWR, &node)); EXPECT_EQ(EACCES, node->FTruncate(4)); } // Instantiate the above tests for all caching types. INSTANTIATE_TEST_CASE_P( Default, HttpFsTest, ::testing::Values((uint32_t)kStringMapParamCacheNone, (uint32_t)kStringMapParamCacheContent, (uint32_t)kStringMapParamCacheStat, (uint32_t)kStringMapParamCacheContentStat)); TEST_P(HttpFsLargeFileTest, ReadPartial) { const char contents[] = "0123456789abcdefg"; off_t size = 0x110000000ll; ASSERT_TRUE( ppapi_.server_template()->AddEntity("file", contents, size, NULL)); ppapi_.server_template()->set_send_content_length(true); ppapi_.server_template()->set_allow_partial(true); int result_bytes = 0; char buf[10]; memset(&buf[0], 0, sizeof(buf)); ScopedNode node; ASSERT_EQ(0, fs_.Open(Path("/file"), O_RDONLY, &node)); HandleAttr attr; EXPECT_EQ(0, node->Read(attr, buf, sizeof(buf) - 1, &result_bytes)); EXPECT_EQ(sizeof(buf) - 1, result_bytes); EXPECT_STREQ("012345678", &buf[0]); // Read is clamped when reading past the end of the file. attr.offs = size - 7; ASSERT_EQ(0, node->Read(attr, buf, sizeof(buf) - 1, &result_bytes)); ASSERT_EQ(strlen("abcdefg"), result_bytes); buf[result_bytes] = 0; EXPECT_STREQ("abcdefg", &buf[0]); // Read nothing when starting past the end of the file. attr.offs = size + 100; EXPECT_EQ(0, node->Read(attr, &buf[0], sizeof(buf), &result_bytes)); EXPECT_EQ(0, result_bytes); } TEST_P(HttpFsLargeFileTest, GetStat) { const char contents[] = "contents"; off_t size = 0x110000000ll; ASSERT_TRUE( ppapi_.server_template()->AddEntity("file", contents, size, NULL)); // TODO(binji): If the server doesn't send the content length, this operation // will be incredibly slow; it will attempt to read all of the data from the // server to find the file length. Can we do anything smarter? 
ppapi_.server_template()->set_send_content_length(true); ScopedNode node; ASSERT_EQ(0, fs_.Open(Path("/file"), O_RDONLY, &node)); struct stat statbuf; EXPECT_EQ(0, node->GetStat(&statbuf)); EXPECT_TRUE(S_ISREG(statbuf.st_mode)); EXPECT_EQ(S_IRUSR | S_IRGRP | S_IROTH, statbuf.st_mode & S_MODEBITS); EXPECT_EQ(size, statbuf.st_size); // These are not currently set. EXPECT_EQ(0, statbuf.st_atime); EXPECT_EQ(0, statbuf.st_ctime); EXPECT_EQ(0, statbuf.st_mtime); } // Instantiate the large file tests, only when cache content is off. // TODO(binji): make cache content smarter, so it doesn't try to cache enormous // files. See http://crbug.com/369279. INSTANTIATE_TEST_CASE_P(Default, HttpFsLargeFileTest, ::testing::Values((uint32_t)kStringMapParamCacheNone, (uint32_t)kStringMapParamCacheStat)); TEST(HttpFsDirTest, Root) { StringMap_t args; HttpFsForTesting fs(args, NULL); // Check root node is directory ScopedNode node; ASSERT_EQ(0, fs.Open(Path("/"), O_RDONLY, &node)); ASSERT_TRUE(node->IsaDir()); // We have to r+w access to the root node struct stat buf; ASSERT_EQ(0, node->GetStat(&buf)); ASSERT_EQ(S_IXUSR | S_IRUSR, buf.st_mode & S_IRWXU); } TEST(HttpFsDirTest, Mkdir) { StringMap_t args; HttpFsForTesting fs(args, NULL); char manifest[] = "-r-- 123 /mydir/foo\n-rw- 234 /thatdir/bar\n"; ASSERT_EQ(0, fs.ParseManifest(manifest)); // mkdir of existing directories should give "File exists". EXPECT_EQ(EEXIST, fs.Mkdir(Path("/"), 0)); EXPECT_EQ(EEXIST, fs.Mkdir(Path("/mydir"), 0)); // mkdir of non-existent directories should give "Permission denied". EXPECT_EQ(EACCES, fs.Mkdir(Path("/non_existent"), 0)); } TEST(HttpFsDirTest, Rmdir) { StringMap_t args; HttpFsForTesting fs(args, NULL); char manifest[] = "-r-- 123 /mydir/foo\n-rw- 234 /thatdir/bar\n"; ASSERT_EQ(0, fs.ParseManifest(manifest)); // Rmdir on existing dirs should give "Permission Denied" EXPECT_EQ(EACCES, fs.Rmdir(Path("/"))); EXPECT_EQ(EACCES, fs.Rmdir(Path("/mydir"))); // Rmdir on existing files should give "Not a direcotory" EXPECT_EQ(ENOTDIR, fs.Rmdir(Path("/mydir/foo"))); // Rmdir on non-existent files should give "No such file or directory" EXPECT_EQ(ENOENT, fs.Rmdir(Path("/non_existent"))); } TEST(HttpFsDirTest, Unlink) { StringMap_t args; HttpFsForTesting fs(args, NULL); char manifest[] = "-r-- 123 /mydir/foo\n-rw- 234 /thatdir/bar\n"; ASSERT_EQ(0, fs.ParseManifest(manifest)); // Unlink of existing files should give "Permission Denied" EXPECT_EQ(EACCES, fs.Unlink(Path("/mydir/foo"))); // Unlink of existing directory should give "Is a directory" EXPECT_EQ(EISDIR, fs.Unlink(Path("/mydir"))); // Unlink of non-existent files should give "No such file or directory" EXPECT_EQ(ENOENT, fs.Unlink(Path("/non_existent"))); } TEST(HttpFsDirTest, Remove) { StringMap_t args; HttpFsForTesting fs(args, NULL); char manifest[] = "-r-- 123 /mydir/foo\n-rw- 234 /thatdir/bar\n"; ASSERT_EQ(0, fs.ParseManifest(manifest)); // Remove of existing files should give "Permission Denied" EXPECT_EQ(EACCES, fs.Remove(Path("/mydir/foo"))); // Remove of existing directory should give "Permission Denied" EXPECT_EQ(EACCES, fs.Remove(Path("/mydir"))); // Unlink of non-existent files should give "No such file or directory" EXPECT_EQ(ENOENT, fs.Remove(Path("/non_existent"))); } TEST(HttpFsDirTest, ParseManifest) { StringMap_t args; off_t result_size = 0; HttpFsForTesting fs(args, NULL); // Multiple consecutive newlines or spaces should be ignored. 
char manifest[] = "-r-- 123 /mydir/foo\n\n-rw- 234 /thatdir/bar\n"; ASSERT_EQ(0, fs.ParseManifest(manifest)); ScopedNode root; EXPECT_EQ(0, fs.FindOrCreateDir(Path("/"), &root)); ASSERT_NE((Node*)NULL, root.get()); EXPECT_EQ(2, root->ChildCount()); ScopedNode dir; EXPECT_EQ(0, fs.FindOrCreateDir(Path("/mydir"), &dir)); ASSERT_NE((Node*)NULL, dir.get()); EXPECT_EQ(1, dir->ChildCount()); Node* node = (*fs.GetNodeCacheForTesting())["/mydir/foo"].get(); EXPECT_NE((Node*)NULL, node); EXPECT_EQ(0, node->GetSize(&result_size)); EXPECT_EQ(123, result_size); // Since these files are cached thanks to the manifest, we can open them // without accessing the PPAPI URL API. ScopedNode foo; ASSERT_EQ(0, fs.Open(Path("/mydir/foo"), O_RDONLY, &foo)); ScopedNode bar; ASSERT_EQ(0, fs.Open(Path("/thatdir/bar"), O_RDWR, &bar)); struct stat sfoo; struct stat sbar; EXPECT_FALSE(foo->GetStat(&sfoo)); EXPECT_FALSE(bar->GetStat(&sbar)); EXPECT_EQ(123, sfoo.st_size); EXPECT_EQ(S_IFREG | S_IRALL, sfoo.st_mode); EXPECT_EQ(234, sbar.st_size); EXPECT_EQ(S_IFREG | S_IRALL | S_IWALL, sbar.st_mode); } TEST(HttpFsBlobUrlTest, Basic) { const char* kUrl = "blob:http://example.com/6b87a5a6-713e"; const char* kContent = "hello"; FakePepperInterfaceURLLoader ppapi; ASSERT_TRUE(ppapi.server_template()->SetBlobEntity(kUrl, kContent, NULL)); StringMap_t args; args["SOURCE"] = kUrl; HttpFsForTesting fs(args, &ppapi); // Any other path than / should fail. ScopedNode node; ASSERT_EQ(ENOENT, fs.Open(Path("/blah"), R_OK, &node)); // Check access to blob file ASSERT_EQ(0, fs.Open(Path("/"), O_RDONLY, &node)); ASSERT_EQ(true, node->IsaFile()); // Verify file size and permissions struct stat buf; ASSERT_EQ(0, node->GetStat(&buf)); ASSERT_EQ(S_IRUSR, buf.st_mode & S_IRWXU); ASSERT_EQ(strlen(kContent), buf.st_size); }
null
null
null
null
65,979
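Every ReadPartial variant in the record above (small file, no-server-support, and large-file) asserts the same end-of-file clamping contract: a read starting at or past EOF returns zero bytes, and a read straddling EOF is truncated to the bytes that remain. A minimal sketch of that clamping arithmetic, with a hypothetical helper name (this is not nacl_io's actual implementation):

#include <algorithm>
#include <cstdint>

// For a file of `size` bytes, a read of `count` bytes at offset `offs`
// transfers min(count, size - offs) bytes, and 0 once offs >= size.
int64_t ClampedReadCount(int64_t offs, int64_t count, int64_t size) {
  if (offs >= size)
    return 0;                           // starting past EOF reads nothing
  return std::min(count, size - offs);  // straddling EOF is truncated
}

// With size == 17 ("0123456789abcdefg"): offs 10, count 9 -> 7 ("abcdefg");
// offs 100 -> 0. These match the EXPECTs in the ReadPartial tests above.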
60,489
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
60,489
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/vr/service/vr_service_impl.h"

#include <utility>

#include "base/bind.h"
#include "chrome/browser/vr/service/vr_device_manager.h"
#include "chrome/browser/vr/service/vr_display_host.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/render_widget_host.h"
#include "content/public/browser/render_widget_host_view.h"
#include "content/public/browser/web_contents.h"
#include "device/vr/vr_device.h"
#include "device/vr/vr_display_impl.h"

namespace vr {

VRServiceImpl::VRServiceImpl(content::RenderFrameHost* render_frame_host)
    : WebContentsObserver(
          content::WebContents::FromRenderFrameHost(render_frame_host)),
      render_frame_host_(render_frame_host) {
  DCHECK(render_frame_host_);
  // TODO(crbug/701027): make sure that client_ is never null by initializing it
  // in the constructor.
}

// Constructor for testing.
VRServiceImpl::VRServiceImpl() : render_frame_host_(nullptr) {}

void VRServiceImpl::SetBinding(mojo::StrongBindingPtr<VRService> binding) {
  binding_ = std::move(binding);
}

VRServiceImpl::~VRServiceImpl() {
  // Destroy VRDisplay before calling RemoveService below. RemoveService might
  // implicitly trigger destory VRDevice which VRDisplay needs to access in its
  // dtor.
  displays_.clear();
  VRDeviceManager::GetInstance()->RemoveService(this);
}

void VRServiceImpl::Create(content::RenderFrameHost* render_frame_host,
                           device::mojom::VRServiceRequest request) {
  std::unique_ptr<VRServiceImpl> vr_service_impl =
      std::make_unique<VRServiceImpl>(render_frame_host);

  VRServiceImpl* impl = vr_service_impl.get();
  impl->SetBinding(
      mojo::MakeStrongBinding(std::move(vr_service_impl), std::move(request)));
}

void VRServiceImpl::SetClient(device::mojom::VRServiceClientPtr service_client,
                              SetClientCallback callback) {
  DCHECK(!client_.get());

  client_ = std::move(service_client);
  set_client_callback_ = std::move(callback);
  // Once a client has been connected AddService will force any VRDisplays to
  // send ConnectDevice to it so that it's populated with the currently active
  // displays. Thereafter it will stay up to date by virtue of listening for new
  // connected events.
  VRDeviceManager::GetInstance()->AddService(this);
}

void VRServiceImpl::InitializationComplete() {
  DCHECK(!set_client_callback_.is_null());
  base::ResetAndReturn(&set_client_callback_).Run();
}

// Creates a VRDisplayImpl unique to this service so that the associated page
// can communicate with the VRDevice.
void VRServiceImpl::ConnectDevice(device::VRDevice* device) {
  // Client should always be set as this is called through SetClient.
  DCHECK(client_);
  DCHECK(displays_.find(device) == displays_.end());
  device::mojom::VRDisplayInfoPtr display_info = device->GetVRDisplayInfo();
  DCHECK(display_info);
  if (!display_info)
    return;
  displays_[device] = std::make_unique<VRDisplayHost>(
      device, render_frame_host_, client_.get(), std::move(display_info));
}

void VRServiceImpl::RemoveDevice(device::VRDevice* device) {
  DCHECK(client_);
  auto it = displays_.find(device);
  DCHECK(it != displays_.end());
  displays_.erase(it);
}

void VRServiceImpl::SetListeningForActivate(bool listening) {
  for (const auto& display : displays_)
    display.second->SetListeningForActivate(listening);
}

void VRServiceImpl::OnWebContentsFocused(content::RenderWidgetHost* host) {
  OnWebContentsFocusChanged(host, true);
}

void VRServiceImpl::OnWebContentsLostFocus(content::RenderWidgetHost* host) {
  OnWebContentsFocusChanged(host, false);
}

void VRServiceImpl::RenderFrameDeleted(content::RenderFrameHost* host) {
  if (host != render_frame_host_)
    return;
  // Binding should always be live here, as this is a StrongBinding.
  // Close the binding (and delete this VrServiceImpl) when the RenderFrameHost
  // is deleted.
  DCHECK(binding_.get());
  binding_->Close();
}

void VRServiceImpl::OnWebContentsFocusChanged(content::RenderWidgetHost* host,
                                              bool focused) {
  if (!render_frame_host_->GetView() ||
      render_frame_host_->GetView()->GetRenderWidgetHost() != host) {
    return;
  }
  for (const auto& display : displays_)
    display.second->SetInFocusedFrame(focused);
}

}  // namespace vr
null
null
null
null
57,352
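VRServiceImpl above owns one VRDisplayHost per device through a std::map keyed on the raw device pointer; ConnectDevice and RemoveDevice lean on that map to keep the one-host-per-device invariant. A reduced, dependency-free sketch of the same ownership pattern (all names here are hypothetical stand-ins, not the Chromium types):

#include <cassert>
#include <map>
#include <memory>

struct Device {};
struct DisplayHost {
  explicit DisplayHost(Device*) {}
};

class Service {
 public:
  void Connect(Device* d) {
    assert(displays_.find(d) == displays_.end());  // one host per device, max
    displays_[d] = std::make_unique<DisplayHost>(d);
  }
  void Remove(Device* d) {
    auto it = displays_.find(d);
    assert(it != displays_.end());  // must have been connected first
    displays_.erase(it);            // unique_ptr destroys the host here
  }

 private:
  std::map<Device*, std::unique_ptr<DisplayHost>> displays_;
};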
2,650
null
train_val
1b0d3845b454eaaac0b2064c78926ca4d739a080
265,218
qemu
0
https://github.com/bonzini/qemu
2016-10-18 11:40:27+01:00
/* * QEMU crypto TLS x509 credential support * * Copyright (c) 2015 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. * */ #include "qemu/osdep.h" #include "crypto/tlscredsx509.h" #include "crypto/tlscredspriv.h" #include "crypto/secret.h" #include "qapi/error.h" #include "qom/object_interfaces.h" #include "trace.h" #ifdef CONFIG_GNUTLS #include <gnutls/x509.h> static int qcrypto_tls_creds_check_cert_times(gnutls_x509_crt_t cert, const char *certFile, bool isServer, bool isCA, Error **errp) { time_t now = time(NULL); if (now == ((time_t)-1)) { error_setg_errno(errp, errno, "cannot get current time"); return -1; } if (gnutls_x509_crt_get_expiration_time(cert) < now) { error_setg(errp, (isCA ? "The CA certificate %s has expired" : (isServer ? "The server certificate %s has expired" : "The client certificate %s has expired")), certFile); return -1; } if (gnutls_x509_crt_get_activation_time(cert) > now) { error_setg(errp, (isCA ? "The CA certificate %s is not yet active" : (isServer ? "The server certificate %s is not yet active" : "The client certificate %s is not yet active")), certFile); return -1; } return 0; } #if LIBGNUTLS_VERSION_NUMBER >= 2 /* * The gnutls_x509_crt_get_basic_constraints function isn't * available in GNUTLS 1.0.x branches. This isn't critical * though, since gnutls_certificate_verify_peers2 will do * pretty much the same check at runtime, so we can just * disable this code */ static int qcrypto_tls_creds_check_cert_basic_constraints(QCryptoTLSCredsX509 *creds, gnutls_x509_crt_t cert, const char *certFile, bool isServer, bool isCA, Error **errp) { int status; status = gnutls_x509_crt_get_basic_constraints(cert, NULL, NULL, NULL); trace_qcrypto_tls_creds_x509_check_basic_constraints( creds, certFile, status); if (status > 0) { /* It is a CA cert */ if (!isCA) { error_setg(errp, isServer ? 
"The certificate %s basic constraints show a CA, " "but we need one for a server" : "The certificate %s basic constraints show a CA, " "but we need one for a client", certFile); return -1; } } else if (status == 0) { /* It is not a CA cert */ if (isCA) { error_setg(errp, "The certificate %s basic constraints do not " "show a CA", certFile); return -1; } } else if (status == GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE) { /* Missing basicConstraints */ if (isCA) { error_setg(errp, "The certificate %s is missing basic constraints " "for a CA", certFile); return -1; } } else { /* General error */ error_setg(errp, "Unable to query certificate %s basic constraints: %s", certFile, gnutls_strerror(status)); return -1; } return 0; } #endif static int qcrypto_tls_creds_check_cert_key_usage(QCryptoTLSCredsX509 *creds, gnutls_x509_crt_t cert, const char *certFile, bool isCA, Error **errp) { int status; unsigned int usage = 0; unsigned int critical = 0; status = gnutls_x509_crt_get_key_usage(cert, &usage, &critical); trace_qcrypto_tls_creds_x509_check_key_usage( creds, certFile, status, usage, critical); if (status < 0) { if (status == GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE) { usage = isCA ? GNUTLS_KEY_KEY_CERT_SIGN : GNUTLS_KEY_DIGITAL_SIGNATURE|GNUTLS_KEY_KEY_ENCIPHERMENT; } else { error_setg(errp, "Unable to query certificate %s key usage: %s", certFile, gnutls_strerror(status)); return -1; } } if (isCA) { if (!(usage & GNUTLS_KEY_KEY_CERT_SIGN)) { if (critical) { error_setg(errp, "Certificate %s usage does not permit " "certificate signing", certFile); return -1; } } } else { if (!(usage & GNUTLS_KEY_DIGITAL_SIGNATURE)) { if (critical) { error_setg(errp, "Certificate %s usage does not permit digital " "signature", certFile); return -1; } } if (!(usage & GNUTLS_KEY_KEY_ENCIPHERMENT)) { if (critical) { error_setg(errp, "Certificate %s usage does not permit key " "encipherment", certFile); return -1; } } } return 0; } static int qcrypto_tls_creds_check_cert_key_purpose(QCryptoTLSCredsX509 *creds, gnutls_x509_crt_t cert, const char *certFile, bool isServer, Error **errp) { int status; size_t i; unsigned int purposeCritical; unsigned int critical; char *buffer = NULL; size_t size; bool allowClient = false, allowServer = false; critical = 0; for (i = 0; ; i++) { size = 0; status = gnutls_x509_crt_get_key_purpose_oid(cert, i, buffer, &size, NULL); if (status == GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE) { /* If there is no data at all, then we must allow client/server to pass */ if (i == 0) { allowServer = allowClient = true; } break; } if (status != GNUTLS_E_SHORT_MEMORY_BUFFER) { error_setg(errp, "Unable to query certificate %s key purpose: %s", certFile, gnutls_strerror(status)); return -1; } buffer = g_new0(char, size); status = gnutls_x509_crt_get_key_purpose_oid(cert, i, buffer, &size, &purposeCritical); if (status < 0) { trace_qcrypto_tls_creds_x509_check_key_purpose( creds, certFile, status, "<none>", purposeCritical); g_free(buffer); error_setg(errp, "Unable to query certificate %s key purpose: %s", certFile, gnutls_strerror(status)); return -1; } trace_qcrypto_tls_creds_x509_check_key_purpose( creds, certFile, status, buffer, purposeCritical); if (purposeCritical) { critical = true; } if (g_str_equal(buffer, GNUTLS_KP_TLS_WWW_SERVER)) { allowServer = true; } else if (g_str_equal(buffer, GNUTLS_KP_TLS_WWW_CLIENT)) { allowClient = true; } else if (g_str_equal(buffer, GNUTLS_KP_ANY)) { allowServer = allowClient = true; } g_free(buffer); buffer = NULL; } if (isServer) { if (!allowServer) { if (critical) { 
error_setg(errp, "Certificate %s purpose does not allow " "use with a TLS server", certFile); return -1; } } } else { if (!allowClient) { if (critical) { error_setg(errp, "Certificate %s purpose does not allow use " "with a TLS client", certFile); return -1; } } } return 0; } static int qcrypto_tls_creds_check_cert(QCryptoTLSCredsX509 *creds, gnutls_x509_crt_t cert, const char *certFile, bool isServer, bool isCA, Error **errp) { if (qcrypto_tls_creds_check_cert_times(cert, certFile, isServer, isCA, errp) < 0) { return -1; } #if LIBGNUTLS_VERSION_NUMBER >= 2 if (qcrypto_tls_creds_check_cert_basic_constraints(creds, cert, certFile, isServer, isCA, errp) < 0) { return -1; } #endif if (qcrypto_tls_creds_check_cert_key_usage(creds, cert, certFile, isCA, errp) < 0) { return -1; } if (!isCA && qcrypto_tls_creds_check_cert_key_purpose(creds, cert, certFile, isServer, errp) < 0) { return -1; } return 0; } static int qcrypto_tls_creds_check_cert_pair(gnutls_x509_crt_t cert, const char *certFile, gnutls_x509_crt_t *cacerts, size_t ncacerts, const char *cacertFile, bool isServer, Error **errp) { unsigned int status; if (gnutls_x509_crt_list_verify(&cert, 1, cacerts, ncacerts, NULL, 0, 0, &status) < 0) { error_setg(errp, isServer ? "Unable to verify server certificate %s against " "CA certificate %s" : "Unable to verify client certificate %s against " "CA certificate %s", certFile, cacertFile); return -1; } if (status != 0) { const char *reason = "Invalid certificate"; if (status & GNUTLS_CERT_INVALID) { reason = "The certificate is not trusted"; } if (status & GNUTLS_CERT_SIGNER_NOT_FOUND) { reason = "The certificate hasn't got a known issuer"; } if (status & GNUTLS_CERT_REVOKED) { reason = "The certificate has been revoked"; } #ifndef GNUTLS_1_0_COMPAT if (status & GNUTLS_CERT_INSECURE_ALGORITHM) { reason = "The certificate uses an insecure algorithm"; } #endif error_setg(errp, "Our own certificate %s failed validation against %s: %s", certFile, cacertFile, reason); return -1; } return 0; } static gnutls_x509_crt_t qcrypto_tls_creds_load_cert(QCryptoTLSCredsX509 *creds, const char *certFile, bool isServer, Error **errp) { gnutls_datum_t data; gnutls_x509_crt_t cert = NULL; char *buf = NULL; gsize buflen; GError *gerr; int ret = -1; int err; trace_qcrypto_tls_creds_x509_load_cert(creds, isServer, certFile); err = gnutls_x509_crt_init(&cert); if (err < 0) { error_setg(errp, "Unable to initialize certificate: %s", gnutls_strerror(err)); goto cleanup; } if (!g_file_get_contents(certFile, &buf, &buflen, &gerr)) { error_setg(errp, "Cannot load CA cert list %s: %s", certFile, gerr->message); g_error_free(gerr); goto cleanup; } data.data = (unsigned char *)buf; data.size = strlen(buf); err = gnutls_x509_crt_import(cert, &data, GNUTLS_X509_FMT_PEM); if (err < 0) { error_setg(errp, isServer ? 
"Unable to import server certificate %s: %s" : "Unable to import client certificate %s: %s", certFile, gnutls_strerror(err)); goto cleanup; } ret = 0; cleanup: if (ret != 0) { gnutls_x509_crt_deinit(cert); cert = NULL; } g_free(buf); return cert; } static int qcrypto_tls_creds_load_ca_cert_list(QCryptoTLSCredsX509 *creds, const char *certFile, gnutls_x509_crt_t *certs, unsigned int certMax, size_t *ncerts, Error **errp) { gnutls_datum_t data; char *buf = NULL; gsize buflen; int ret = -1; GError *gerr = NULL; *ncerts = 0; trace_qcrypto_tls_creds_x509_load_cert_list(creds, certFile); if (!g_file_get_contents(certFile, &buf, &buflen, &gerr)) { error_setg(errp, "Cannot load CA cert list %s: %s", certFile, gerr->message); g_error_free(gerr); goto cleanup; } data.data = (unsigned char *)buf; data.size = strlen(buf); if (gnutls_x509_crt_list_import(certs, &certMax, &data, GNUTLS_X509_FMT_PEM, 0) < 0) { error_setg(errp, "Unable to import CA certificate list %s", certFile); goto cleanup; } *ncerts = certMax; ret = 0; cleanup: g_free(buf); return ret; } #define MAX_CERTS 16 static int qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, bool isServer, const char *cacertFile, const char *certFile, Error **errp) { gnutls_x509_crt_t cert = NULL; gnutls_x509_crt_t cacerts[MAX_CERTS]; size_t ncacerts = 0; size_t i; int ret = -1; memset(cacerts, 0, sizeof(cacerts)); if (certFile && access(certFile, R_OK) == 0) { cert = qcrypto_tls_creds_load_cert(creds, certFile, isServer, errp); if (!cert) { goto cleanup; } } if (access(cacertFile, R_OK) == 0) { if (qcrypto_tls_creds_load_ca_cert_list(creds, cacertFile, cacerts, MAX_CERTS, &ncacerts, errp) < 0) { goto cleanup; } } if (cert && qcrypto_tls_creds_check_cert(creds, cert, certFile, isServer, false, errp) < 0) { goto cleanup; } for (i = 0; i < ncacerts; i++) { if (qcrypto_tls_creds_check_cert(creds, cacerts[i], cacertFile, isServer, true, errp) < 0) { goto cleanup; } } if (cert && ncacerts && qcrypto_tls_creds_check_cert_pair(cert, certFile, cacerts, ncacerts, cacertFile, isServer, errp) < 0) { goto cleanup; } ret = 0; cleanup: if (cert) { gnutls_x509_crt_deinit(cert); } for (i = 0; i < ncacerts; i++) { gnutls_x509_crt_deinit(cacerts[i]); } return ret; } static int qcrypto_tls_creds_x509_load(QCryptoTLSCredsX509 *creds, Error **errp) { char *cacert = NULL, *cacrl = NULL, *cert = NULL, *key = NULL, *dhparams = NULL; int ret; int rv = -1; trace_qcrypto_tls_creds_x509_load(creds, creds->parent_obj.dir ? 
creds->parent_obj.dir : "<nodir>"); if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { if (qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_X509_CA_CERT, true, &cacert, errp) < 0 || qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_X509_CA_CRL, false, &cacrl, errp) < 0 || qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_X509_SERVER_CERT, true, &cert, errp) < 0 || qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_X509_SERVER_KEY, true, &key, errp) < 0 || qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_DH_PARAMS, false, &dhparams, errp) < 0) { goto cleanup; } } else { if (qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_X509_CA_CERT, true, &cacert, errp) < 0 || qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_X509_CLIENT_CERT, false, &cert, errp) < 0 || qcrypto_tls_creds_get_path(&creds->parent_obj, QCRYPTO_TLS_CREDS_X509_CLIENT_KEY, false, &key, errp) < 0) { goto cleanup; } } if (creds->sanityCheck && qcrypto_tls_creds_x509_sanity_check(creds, creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER, cacert, cert, errp) < 0) { goto cleanup; } ret = gnutls_certificate_allocate_credentials(&creds->data); if (ret < 0) { error_setg(errp, "Cannot allocate credentials: '%s'", gnutls_strerror(ret)); goto cleanup; } ret = gnutls_certificate_set_x509_trust_file(creds->data, cacert, GNUTLS_X509_FMT_PEM); if (ret < 0) { error_setg(errp, "Cannot load CA certificate '%s': %s", cacert, gnutls_strerror(ret)); goto cleanup; } if (cert != NULL && key != NULL) { #if LIBGNUTLS_VERSION_NUMBER >= 0x030111 char *password = NULL; if (creds->passwordid) { password = qcrypto_secret_lookup_as_utf8(creds->passwordid, errp); if (!password) { goto cleanup; } } ret = gnutls_certificate_set_x509_key_file2(creds->data, cert, key, GNUTLS_X509_FMT_PEM, password, 0); g_free(password); #else /* LIBGNUTLS_VERSION_NUMBER < 0x030111 */ if (creds->passwordid) { error_setg(errp, "PKCS8 decryption requires GNUTLS >= 3.1.11"); goto cleanup; } ret = gnutls_certificate_set_x509_key_file(creds->data, cert, key, GNUTLS_X509_FMT_PEM); #endif if (ret < 0) { error_setg(errp, "Cannot load certificate '%s' & key '%s': %s", cert, key, gnutls_strerror(ret)); goto cleanup; } } if (cacrl != NULL) { ret = gnutls_certificate_set_x509_crl_file(creds->data, cacrl, GNUTLS_X509_FMT_PEM); if (ret < 0) { error_setg(errp, "Cannot load CRL '%s': %s", cacrl, gnutls_strerror(ret)); goto cleanup; } } if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { if (qcrypto_tls_creds_get_dh_params_file(&creds->parent_obj, dhparams, &creds->parent_obj.dh_params, errp) < 0) { goto cleanup; } gnutls_certificate_set_dh_params(creds->data, creds->parent_obj.dh_params); } rv = 0; cleanup: g_free(cacert); g_free(cacrl); g_free(cert); g_free(key); g_free(dhparams); return rv; } static void qcrypto_tls_creds_x509_unload(QCryptoTLSCredsX509 *creds) { if (creds->data) { gnutls_certificate_free_credentials(creds->data); creds->data = NULL; } if (creds->parent_obj.dh_params) { gnutls_dh_params_deinit(creds->parent_obj.dh_params); creds->parent_obj.dh_params = NULL; } } #else /* ! CONFIG_GNUTLS */ static void qcrypto_tls_creds_x509_load(QCryptoTLSCredsX509 *creds G_GNUC_UNUSED, Error **errp) { error_setg(errp, "TLS credentials support requires GNUTLS"); } static void qcrypto_tls_creds_x509_unload(QCryptoTLSCredsX509 *creds G_GNUC_UNUSED) { /* nada */ } #endif /* ! 
CONFIG_GNUTLS */ static void qcrypto_tls_creds_x509_prop_set_loaded(Object *obj, bool value, Error **errp) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); if (value) { qcrypto_tls_creds_x509_load(creds, errp); } else { qcrypto_tls_creds_x509_unload(creds); } } #ifdef CONFIG_GNUTLS static bool qcrypto_tls_creds_x509_prop_get_loaded(Object *obj, Error **errp G_GNUC_UNUSED) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); return creds->data != NULL; } #else /* ! CONFIG_GNUTLS */ static bool qcrypto_tls_creds_x509_prop_get_loaded(Object *obj G_GNUC_UNUSED, Error **errp G_GNUC_UNUSED) { return false; } #endif /* ! CONFIG_GNUTLS */ static void qcrypto_tls_creds_x509_prop_set_sanity(Object *obj, bool value, Error **errp G_GNUC_UNUSED) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); creds->sanityCheck = value; } static void qcrypto_tls_creds_x509_prop_set_passwordid(Object *obj, const char *value, Error **errp G_GNUC_UNUSED) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); creds->passwordid = g_strdup(value); } static char * qcrypto_tls_creds_x509_prop_get_passwordid(Object *obj, Error **errp G_GNUC_UNUSED) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); return g_strdup(creds->passwordid); } static bool qcrypto_tls_creds_x509_prop_get_sanity(Object *obj, Error **errp G_GNUC_UNUSED) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); return creds->sanityCheck; } static void qcrypto_tls_creds_x509_complete(UserCreatable *uc, Error **errp) { object_property_set_bool(OBJECT(uc), true, "loaded", errp); } static void qcrypto_tls_creds_x509_init(Object *obj) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); creds->sanityCheck = true; } static void qcrypto_tls_creds_x509_finalize(Object *obj) { QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); g_free(creds->passwordid); qcrypto_tls_creds_x509_unload(creds); } static void qcrypto_tls_creds_x509_class_init(ObjectClass *oc, void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); ucc->complete = qcrypto_tls_creds_x509_complete; object_class_property_add_bool(oc, "loaded", qcrypto_tls_creds_x509_prop_get_loaded, qcrypto_tls_creds_x509_prop_set_loaded, NULL); object_class_property_add_bool(oc, "sanity-check", qcrypto_tls_creds_x509_prop_get_sanity, qcrypto_tls_creds_x509_prop_set_sanity, NULL); object_class_property_add_str(oc, "passwordid", qcrypto_tls_creds_x509_prop_get_passwordid, qcrypto_tls_creds_x509_prop_set_passwordid, NULL); } static const TypeInfo qcrypto_tls_creds_x509_info = { .parent = TYPE_QCRYPTO_TLS_CREDS, .name = TYPE_QCRYPTO_TLS_CREDS_X509, .instance_size = sizeof(QCryptoTLSCredsX509), .instance_init = qcrypto_tls_creds_x509_init, .instance_finalize = qcrypto_tls_creds_x509_finalize, .class_size = sizeof(QCryptoTLSCredsX509Class), .class_init = qcrypto_tls_creds_x509_class_init, .interfaces = (InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } }; static void qcrypto_tls_creds_x509_register_types(void) { type_register_static(&qcrypto_tls_creds_x509_info); } type_init(qcrypto_tls_creds_x509_register_types);
null
null
null
null
123,342
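qcrypto_tls_creds_check_cert_times in the record above is the canonical shape of a certificate time check: expiration and not-yet-active must both be compared against the same `now`, and a failed clock read is itself an error. A standalone sketch of those three checks using the same GnuTLS calls (error reporting simplified to a return code; assumes `cert` was already imported as in the loader above):

#include <ctime>
#include <gnutls/x509.h>

// Returns 0 if `cert` is valid right now, -1 otherwise.
int check_cert_times(gnutls_x509_crt_t cert) {
  time_t now = time(nullptr);
  if (now == (time_t)-1)
    return -1;  // cannot read the clock
  if (gnutls_x509_crt_get_expiration_time(cert) < now)
    return -1;  // certificate has expired
  if (gnutls_x509_crt_get_activation_time(cert) > now)
    return -1;  // certificate is not yet active
  return 0;
}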
62,768
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
62,768
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/views/find_bar_host.h" #include <algorithm> #include "build/build_config.h" #include "chrome/browser/ui/find_bar/find_bar_controller.h" #include "chrome/browser/ui/find_bar/find_tab_helper.h" #include "chrome/browser/ui/view_ids.h" #include "chrome/browser/ui/views/find_bar_view.h" #include "chrome/browser/ui/views/frame/browser_view.h" #include "chrome/browser/ui/views/location_bar/background_with_1_px_border.h" #include "chrome/browser/ui/views/location_bar/location_bar_view.h" #include "content/public/browser/render_view_host.h" #include "content/public/browser/render_widget_host.h" #include "content/public/browser/web_contents.h" #include "ui/events/event.h" #include "ui/events/keycodes/keyboard_codes.h" #include "ui/views/border.h" #include "ui/views/focus/external_focus_tracker.h" #include "ui/views/widget/root_view.h" #include "ui/views/widget/widget.h" using content::NativeWebKeyboardEvent; //////////////////////////////////////////////////////////////////////////////// // FindBarHost, public: FindBarHost::FindBarHost(BrowserView* browser_view) : DropdownBarHost(browser_view), find_bar_controller_(NULL), audible_alerts_(0) { FindBarView* find_bar_view = new FindBarView(this); Init(browser_view->find_bar_host_view(), find_bar_view, find_bar_view); } FindBarHost::~FindBarHost() { } bool FindBarHost::MaybeForwardKeyEventToWebpage( const ui::KeyEvent& key_event) { switch (key_event.key_code()) { case ui::VKEY_DOWN: case ui::VKEY_UP: case ui::VKEY_PRIOR: case ui::VKEY_NEXT: break; case ui::VKEY_HOME: case ui::VKEY_END: if (key_event.IsControlDown()) break; FALLTHROUGH; default: return false; } content::WebContents* contents = find_bar_controller_->web_contents(); if (!contents) return false; // Make sure we don't have a text field element interfering with keyboard // input. Otherwise Up and Down arrow key strokes get eaten. "Nom Nom Nom". contents->ClearFocusedElement(); NativeWebKeyboardEvent event(key_event); contents->GetRenderViewHost() ->GetWidget() ->ForwardKeyboardEventWithLatencyInfo(event, *key_event.latency()); return true; } FindBarController* FindBarHost::GetFindBarController() const { return find_bar_controller_; } void FindBarHost::SetFindBarController(FindBarController* find_bar_controller) { find_bar_controller_ = find_bar_controller; } void FindBarHost::Show(bool animate) { DropdownBarHost::Show(animate); } void FindBarHost::Hide(bool animate) { DropdownBarHost::Hide(animate); } void FindBarHost::SetFocusAndSelection() { DropdownBarHost::SetFocusAndSelection(); } void FindBarHost::ClearResults(const FindNotificationDetails& results) { find_bar_view()->UpdateForResult(results, base::string16()); } void FindBarHost::StopAnimation() { DropdownBarHost::StopAnimation(); } void FindBarHost::MoveWindowIfNecessary(const gfx::Rect& selection_rect) { // We only move the window if one is active for the current WebContents. If we // don't check this, then SetDialogPosition below will end up making the Find // Bar visible. 
content::WebContents* web_contents = find_bar_controller_->web_contents(); if (!web_contents) return; FindTabHelper* find_tab_helper = FindTabHelper::FromWebContents(web_contents); if (!find_tab_helper || !find_tab_helper->find_ui_active()) return; gfx::Rect new_pos = GetDialogPosition(selection_rect); SetDialogPosition(new_pos); // May need to redraw our frame to accommodate bookmark bar styles. view()->Layout(); // Bounds may have changed. view()->SchedulePaint(); } void FindBarHost::SetFindTextAndSelectedRange( const base::string16& find_text, const gfx::Range& selected_range) { find_bar_view()->SetFindTextAndSelectedRange(find_text, selected_range); } base::string16 FindBarHost::GetFindText() { return find_bar_view()->GetFindText(); } gfx::Range FindBarHost::GetSelectedRange() { return find_bar_view()->GetSelectedRange(); } void FindBarHost::UpdateUIForFindResult(const FindNotificationDetails& result, const base::string16& find_text) { if (!find_text.empty()) find_bar_view()->UpdateForResult(result, find_text); else find_bar_view()->ClearMatchCount(); // We now need to check if the window is obscuring the search results. MoveWindowIfNecessary(result.selection_rect()); // Once we find a match we no longer want to keep track of what had // focus. EndFindSession will then set the focus to the page content. if (result.number_of_matches() > 0) ResetFocusTracker(); } void FindBarHost::AudibleAlert() { ++audible_alerts_; #if defined(OS_WIN) MessageBeep(MB_OK); #endif } bool FindBarHost::IsFindBarVisible() { return DropdownBarHost::IsVisible(); } void FindBarHost::RestoreSavedFocus() { if (focus_tracker() == NULL) { // TODO(brettw): Focus() should be on WebContentsView. find_bar_controller_->web_contents()->Focus(); } else { focus_tracker()->FocusLastFocusedExternalView(); } } bool FindBarHost::HasGlobalFindPasteboard() { return false; } void FindBarHost::UpdateFindBarForChangedWebContents() { } FindBarTesting* FindBarHost::GetFindBarTesting() { return this; } //////////////////////////////////////////////////////////////////////////////// // FindBarWin, ui::AcceleratorTarget implementation: bool FindBarHost::AcceleratorPressed(const ui::Accelerator& accelerator) { ui::KeyboardCode key = accelerator.key_code(); if (key == ui::VKEY_RETURN && accelerator.IsCtrlDown()) { // Ctrl+Enter closes the Find session and navigates any link that is active. find_bar_controller_->EndFindSession( FindBarController::kActivateSelectionOnPage, FindBarController::kClearResultsInFindBox); return true; } else if (key == ui::VKEY_ESCAPE) { // This will end the Find session and hide the window, causing it to loose // focus and in the process unregister us as the handler for the Escape // accelerator through the OnWillChangeFocus event. find_bar_controller_->EndFindSession( FindBarController::kKeepSelectionOnPage, FindBarController::kKeepResultsInFindBox); return true; } else { NOTREACHED() << "Unknown accelerator"; } return false; } bool FindBarHost::CanHandleAccelerators() const { return true; } //////////////////////////////////////////////////////////////////////////////// // FindBarTesting implementation: bool FindBarHost::GetFindBarWindowInfo(gfx::Point* position, bool* fully_visible) { if (!find_bar_controller_ || #if defined(OS_WIN) && !defined(USE_AURA) !::IsWindow(host()->GetNativeView())) { #else false) { // TODO(sky): figure out linux side. // This is tricky due to asynchronous nature of x11. // See bug http://crbug.com/28629. 
#endif if (position) *position = gfx::Point(); if (fully_visible) *fully_visible = false; return false; } gfx::Rect window_rect = host()->GetWindowBoundsInScreen(); if (position) *position = window_rect.origin(); if (fully_visible) *fully_visible = IsVisible() && !IsAnimating(); return true; } base::string16 FindBarHost::GetFindSelectedText() { return find_bar_view()->GetFindSelectedText(); } base::string16 FindBarHost::GetMatchCountText() { return find_bar_view()->GetMatchCountText(); } int FindBarHost::GetWidth() { return view()->width(); } size_t FindBarHost::GetAudibleAlertCount() { return audible_alerts_; } //////////////////////////////////////////////////////////////////////////////// // Overridden from DropdownBarHost: gfx::Rect FindBarHost::GetDialogPosition(gfx::Rect avoid_overlapping_rect) { // Find the area we have to work with (after accounting for scrollbars, etc). gfx::Rect widget_bounds; GetWidgetBounds(&widget_bounds); if (widget_bounds.IsEmpty()) return gfx::Rect(); gfx::Insets insets = view()->border()->GetInsets() - gfx::Insets(0, BackgroundWith1PxBorder::kLocationBarBorderThicknessDip); // Ask the view how large an area it needs to draw on. gfx::Size prefsize = view()->GetPreferredSize(); // Limit width to the available area. if (widget_bounds.width() < prefsize.width()) prefsize.set_width(widget_bounds.width()); // Don't show the find bar if |widget_bounds| is not tall enough. if (widget_bounds.height() < prefsize.height()) return gfx::Rect(); // Place the view in the top right corner of the widget boundaries (top left // for RTL languages). Adjust for the view insets to ensure the border lines // up with the location bar. gfx::Rect view_location; int x = widget_bounds.x() - insets.left(); if (!base::i18n::IsRTL()) x += widget_bounds.width() - prefsize.width() + insets.width(); int y = widget_bounds.y() - insets.top(); view_location.SetRect(x, y, prefsize.width(), prefsize.height()); // When we get Find results back, we specify a selection rect, which we // should strive to avoid overlapping. But first, we need to offset the // selection rect (if one was provided). if (!avoid_overlapping_rect.IsEmpty()) { // For comparison (with the Intersects function below) we need to account // for the fact that we draw the Find widget relative to the Chrome frame, // whereas the selection rect is relative to the page. GetWidgetPositionNative(&avoid_overlapping_rect); } gfx::Rect new_pos = FindBarController::GetLocationForFindbarView( view_location, widget_bounds, avoid_overlapping_rect); return new_pos; } void FindBarHost::SetDialogPosition(const gfx::Rect& new_pos) { DropdownBarHost::SetDialogPosition(new_pos); if (new_pos.IsEmpty()) return; // Tell the immersive mode controller about the find bar's new bounds. The // immersive mode controller uses the bounds to keep the top-of-window views // revealed when the mouse is hovered over the find bar. browser_view()->immersive_mode_controller()->OnFindBarVisibleBoundsChanged( host()->GetWindowBoundsInScreen()); find_bar_controller_->FindBarVisibilityChanged(); } void FindBarHost::GetWidgetBounds(gfx::Rect* bounds) { DCHECK(bounds); // The BrowserView does Layout for the components that we care about // positioning relative to, so we ask it to tell us where we should go. *bounds = browser_view()->GetFindBarBoundingBox(); } void FindBarHost::RegisterAccelerators() { DropdownBarHost::RegisterAccelerators(); // Register for Ctrl+Return. 
ui::Accelerator escape(ui::VKEY_RETURN, ui::EF_CONTROL_DOWN); focus_manager()->RegisterAccelerator( escape, ui::AcceleratorManager::kNormalPriority, this); } void FindBarHost::UnregisterAccelerators() { // Unregister Ctrl+Return. ui::Accelerator escape(ui::VKEY_RETURN, ui::EF_CONTROL_DOWN); focus_manager()->UnregisterAccelerator(escape, this); DropdownBarHost::UnregisterAccelerators(); } void FindBarHost::OnVisibilityChanged() { // Tell the immersive mode controller about the find bar's bounds. The // immersive mode controller uses the bounds to keep the top-of-window views // revealed when the mouse is hovered over the find bar. gfx::Rect visible_bounds; if (IsVisible()) visible_bounds = host()->GetWindowBoundsInScreen(); browser_view()->immersive_mode_controller()->OnFindBarVisibleBoundsChanged( visible_bounds); find_bar_controller_->FindBarVisibilityChanged(); } //////////////////////////////////////////////////////////////////////////////// // private: void FindBarHost::GetWidgetPositionNative(gfx::Rect* avoid_overlapping_rect) { gfx::Rect frame_rect = host()->GetTopLevelWidget()->GetWindowBoundsInScreen(); gfx::Rect webcontents_rect = find_bar_controller_->web_contents()->GetViewBounds(); avoid_overlapping_rect->Offset(0, webcontents_rect.y() - frame_rect.y()); }
null
null
null
null
59,631
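GetDialogPosition above pins the find bar to the trailing corner of the widget bounds: the leading (left) x is used under RTL, and under LTR the view is shifted right by the width difference. A reduced sketch of just that corner selection, with the inset adjustments omitted (plain ints, no views dependencies; the helper name is invented):

// x origin that right-aligns a child of width `pref_w` inside a parent at
// `parent_x` of width `parent_w`, falling back to left alignment for RTL.
int FindBarX(int parent_x, int parent_w, int pref_w, bool is_rtl) {
  int x = parent_x;          // leading (left) edge, used as-is under RTL
  if (!is_rtl)
    x += parent_w - pref_w;  // trailing (right) edge under LTR
  return x;
}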
8,159
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
173,154
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _ASM_X86_NUMA_H
#define _ASM_X86_NUMA_H

#include <linux/nodemask.h>

#include <asm/topology.h>
#include <asm/apicdef.h>

#ifdef CONFIG_NUMA

#define NR_NODE_MEMBLKS		(MAX_NUMNODES*2)

/*
 * Too small node sizes may confuse the VM badly. Usually they
 * result from BIOS bugs. So dont recognize nodes as standalone
 * NUMA entities that have less than this amount of RAM listed:
 */
#define NODE_MIN_SIZE (4*1024*1024)

extern int numa_off;

/*
 * __apicid_to_node[] stores the raw mapping between physical apicid and
 * node and is used to initialize cpu_to_node mapping.
 *
 * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
 * should be accessed by the accessors - set_apicid_to_node() and
 * numa_cpu_node().
 */
extern s16 __apicid_to_node[MAX_LOCAL_APIC];
extern nodemask_t numa_nodes_parsed __initdata;

extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
extern void __init numa_set_distance(int from, int to, int distance);

static inline void set_apicid_to_node(int apicid, s16 node)
{
	__apicid_to_node[apicid] = node;
}

extern int numa_cpu_node(int cpu);

#else	/* CONFIG_NUMA */
static inline void set_apicid_to_node(int apicid, s16 node)
{
}

static inline int numa_cpu_node(int cpu)
{
	return NUMA_NO_NODE;
}
#endif	/* CONFIG_NUMA */

#ifdef CONFIG_X86_32
# include <asm/numa_32.h>
#endif

#ifdef CONFIG_NUMA
extern void numa_set_node(int cpu, int node);
extern void numa_clear_node(int cpu);
extern void __init init_cpu_to_node(void);
extern void numa_add_cpu(int cpu);
extern void numa_remove_cpu(int cpu);
#else	/* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node)	{ }
static inline void numa_clear_node(int cpu)		{ }
static inline void init_cpu_to_node(void)		{ }
static inline void numa_add_cpu(int cpu)		{ }
static inline void numa_remove_cpu(int cpu)		{ }
#endif	/* CONFIG_NUMA */

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
void debug_cpumask_set_cpu(int cpu, int node, bool enable);
#endif

#ifdef CONFIG_NUMA_EMU
#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
void numa_emu_cmdline(char *);
#endif /* CONFIG_NUMA_EMU */

#endif	/* _ASM_X86_NUMA_H */
null
null
null
null
81,501
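The header above pairs every CONFIG_NUMA entry point with an empty inline stub so call sites compile unchanged when NUMA is disabled. A minimal userspace rendition of that idiom, with a hypothetical HAVE_FEATURE flag standing in for CONFIG_NUMA:

#include <cstdio>

// Same call site builds in both configurations; the disabled path
// supplies a no-op inline stub that the compiler optimizes away.
#ifdef HAVE_FEATURE
void feature_add_cpu(int cpu) { std::printf("adding cpu %d\n", cpu); }
#else
static inline void feature_add_cpu(int /*cpu*/) {}
#endif

int main() {
  feature_add_cpu(0);  // legal whether or not HAVE_FEATURE is defined
  return 0;
}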
15,905
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
180,900
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/*
 * linux/arch/mips/include/asm/perf_event.h
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __MIPS_PERF_EVENT_H__
#define __MIPS_PERF_EVENT_H__
/* Leave it empty here. The file is required by linux/perf_event.h */
#endif /* __MIPS_PERF_EVENT_H__ */
null
null
null
null
89,247
59,678
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
59,678
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/extensions/api/music_manager_private/device_id.h" // Note: The order of header includes is important, as we want both pre-Vista // and post-Vista data structures to be defined, specifically // PIP_ADAPTER_ADDRESSES and PMIB_IF_ROW2. #include <limits.h> #include <stddef.h> #include <winsock2.h> #include <ws2def.h> #include <ws2ipdef.h> #include <iphlpapi.h> #include <string> #include "base/files/file_path.h" #include "base/logging.h" #include "base/scoped_native_library.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_util.h" #include "base/task_scheduler/post_task.h" #include "base/threading/thread_restrictions.h" #include "base/win/windows_version.h" #include "content/public/browser/browser_thread.h" #include "rlz/buildflags/buildflags.h" #if BUILDFLAG(ENABLE_RLZ) #include "rlz/lib/machine_id.h" #endif namespace { using extensions::api::DeviceId; typedef base::Callback<bool(const void* bytes, size_t size)> IsValidMacAddressCallback; class MacAddressProcessor { public: MacAddressProcessor(const IsValidMacAddressCallback& is_valid_mac_address) : is_valid_mac_address_(is_valid_mac_address), found_index_(ULONG_MAX) { } // Iterate through the interfaces, looking for the valid MAC address with the // lowest IfIndex. void ProcessAdapterAddress(PIP_ADAPTER_ADDRESSES address) { if (address->IfType == IF_TYPE_TUNNEL) return; ProcessPhysicalAddress(address->IfIndex, address->PhysicalAddress, address->PhysicalAddressLength); } void ProcessInterfaceRow(const PMIB_IF_ROW2 row) { if (row->Type == IF_TYPE_TUNNEL || !row->InterfaceAndOperStatusFlags.HardwareInterface) { return; } ProcessPhysicalAddress(row->InterfaceIndex, row->PhysicalAddress, row->PhysicalAddressLength); } std::string mac_address() const { return found_mac_address_; } private: void ProcessPhysicalAddress(NET_IFINDEX index, const void* bytes, size_t size) { if (index >= found_index_ || size == 0) return; if (!is_valid_mac_address_.Run(bytes, size)) return; found_mac_address_ = base::ToLowerASCII(base::HexEncode(bytes, size)); found_index_ = index; } const IsValidMacAddressCallback& is_valid_mac_address_; std::string found_mac_address_; NET_IFINDEX found_index_; }; std::string GetMacAddressFromGetAdaptersAddresses( const IsValidMacAddressCallback& is_valid_mac_address) { base::AssertBlockingAllowed(); // MS recommends a default size of 15k. ULONG bufferSize = 15 * 1024; // Disable as much as we can, since all we want is MAC addresses. 
ULONG flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_SKIP_FRIENDLY_NAME | GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_UNICAST; std::vector<unsigned char> buffer(bufferSize); PIP_ADAPTER_ADDRESSES adapterAddresses = reinterpret_cast<PIP_ADAPTER_ADDRESSES>(&buffer.front()); DWORD result = GetAdaptersAddresses(AF_UNSPEC, flags, 0, adapterAddresses, &bufferSize); if (result == ERROR_BUFFER_OVERFLOW) { buffer.resize(bufferSize); adapterAddresses = reinterpret_cast<PIP_ADAPTER_ADDRESSES>(&buffer.front()); result = GetAdaptersAddresses(AF_UNSPEC, flags, 0, adapterAddresses, &bufferSize); } if (result != NO_ERROR) { VLOG(ERROR) << "GetAdapatersAddresses failed with error " << result; return ""; } MacAddressProcessor processor(is_valid_mac_address); for (; adapterAddresses != NULL; adapterAddresses = adapterAddresses->Next) { processor.ProcessAdapterAddress(adapterAddresses); } return processor.mac_address(); } std::string GetMacAddressFromGetIfTable2( const IsValidMacAddressCallback& is_valid_mac_address) { base::AssertBlockingAllowed(); // This is available on Vista+ only. base::ScopedNativeLibrary library(base::FilePath(L"Iphlpapi.dll")); typedef DWORD (NETIOAPI_API_ *GetIfTablePtr)(PMIB_IF_TABLE2*); typedef void (NETIOAPI_API_ *FreeMibTablePtr)(PMIB_IF_TABLE2); GetIfTablePtr getIfTable = reinterpret_cast<GetIfTablePtr>( library.GetFunctionPointer("GetIfTable2")); FreeMibTablePtr freeMibTablePtr = reinterpret_cast<FreeMibTablePtr>( library.GetFunctionPointer("FreeMibTable")); if (getIfTable == NULL || freeMibTablePtr == NULL) { VLOG(ERROR) << "Could not get proc addresses for machine identifier."; return ""; } PMIB_IF_TABLE2 ifTable = NULL; DWORD result = getIfTable(&ifTable); if (result != NO_ERROR || ifTable == NULL) { VLOG(ERROR) << "GetIfTable failed with error " << result; return ""; } MacAddressProcessor processor(is_valid_mac_address); for (size_t i = 0; i < ifTable->NumEntries; i++) { processor.ProcessInterfaceRow(&(ifTable->Table[i])); } if (ifTable != NULL) { freeMibTablePtr(ifTable); ifTable = NULL; } return processor.mac_address(); } void GetMacAddress(const IsValidMacAddressCallback& is_valid_mac_address, const DeviceId::IdCallback& callback) { base::AssertBlockingAllowed(); std::string mac_address = GetMacAddressFromGetAdaptersAddresses(is_valid_mac_address); if (mac_address.empty()) mac_address = GetMacAddressFromGetIfTable2(is_valid_mac_address); static bool error_logged = false; if (mac_address.empty() && !error_logged) { error_logged = true; LOG(ERROR) << "Could not find appropriate MAC address."; } content::BrowserThread::PostTask( content::BrowserThread::UI, FROM_HERE, base::Bind(callback, mac_address)); } std::string GetRlzMachineId() { #if BUILDFLAG(ENABLE_RLZ) std::string machine_id; if (!rlz_lib::GetMachineId(&machine_id)) return std::string(); return machine_id; #else return std::string(); #endif } void GetMacAddressCallback(const DeviceId::IdCallback& callback, const std::string& mac_address) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); std::string machine_id = GetRlzMachineId(); if (mac_address.empty() || machine_id.empty()) { callback.Run(""); return; } callback.Run(mac_address + machine_id); } } // namespace namespace extensions { namespace api { // static void DeviceId::GetRawDeviceId(const IdCallback& callback) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); base::PostTaskWithTraits( FROM_HERE, traits(), base::Bind(&GetMacAddress, base::Bind(&DeviceId::IsValidMacAddress), base::Bind(&GetMacAddressCallback, callback))); } } // namespace api 
} // namespace extensions
null
null
null
null
56,541
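MacAddressProcessor in the record above scans every interface and keeps the valid MAC at the lowest interface index, so the result is stable regardless of enumeration order. A reduced sketch of that lowest-index selection with platform-free types (the struct and function names are invented for illustration):

#include <cstdint>
#include <string>
#include <vector>

struct Iface {
  uint32_t index;
  std::string mac;
  bool valid;
};

// Keep the valid MAC with the smallest interface index.
std::string PickMac(const std::vector<Iface>& ifaces) {
  uint32_t best = UINT32_MAX;
  std::string mac;
  for (const auto& i : ifaces) {
    if (!i.valid || i.mac.empty() || i.index >= best)
      continue;
    best = i.index;  // lower index wins, mirroring found_index_ above
    mac = i.mac;
  }
  return mac;
}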
17,542
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
182,537
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa * processor CORE configuration * * See <xtensa/config/core.h>, which includes this file, for more details. */ /* Xtensa processor core configuration information. Copyright (c) 1999-2015 Tensilica Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _XTENSA_CORE_CONFIGURATION_H #define _XTENSA_CORE_CONFIGURATION_H /**************************************************************************** Parameters Useful for Any Code, USER or PRIVILEGED ****************************************************************************/ /* * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is * configured, and a value of 0 otherwise. These macros are always defined. */ /*---------------------------------------------------------------------- ISA ----------------------------------------------------------------------*/ #define XCHAL_HAVE_BE 0 /* big-endian byte ordering */ #define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */ #define XCHAL_NUM_AREGS 32 /* num of physical addr regs */ #define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */ #define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */ #define XCHAL_HAVE_DEBUG 1 /* debug option */ #define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */ #define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */ #define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. 
loop instr buffer size */ #define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */ #define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */ #define XCHAL_HAVE_SEXT 1 /* SEXT instruction */ #define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */ #define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */ #define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */ #define XCHAL_HAVE_MUL32 1 /* MULL instruction */ #define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */ #define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */ #define XCHAL_HAVE_L32R 1 /* L32R instruction */ #define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */ #define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */ #define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */ #define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */ #define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */ #define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */ #define XCHAL_HAVE_ABS 1 /* ABS instruction */ /*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */ /*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */ #define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */ #define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */ #define XCHAL_HAVE_SPECULATION 0 /* speculation */ #define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */ #define XCHAL_NUM_CONTEXTS 1 /* */ #define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */ #define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */ #define XCHAL_HAVE_PRID 1 /* processor ID register */ #define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */ #define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */ #define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */ #define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */ #define XCHAL_HAVE_PSO 0 /* Power Shut-Off */ #define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */ #define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */ #define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */ #define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */ #define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */ #define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */ #define XCHAL_HAVE_MAC16 1 /* MAC16 package */ #define XCHAL_HAVE_FUSION 0 /* Fusion*/ #define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */ #define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */ #define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */ #define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */ #define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */ #define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */ #define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */ #define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */ #define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */ #define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */ #define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */ #define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */ #define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */ #define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */ #define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */ #define XCHAL_HAVE_HIFI_MINI 0 #define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */ #define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */ #define XCHAL_HAVE_USER_SPFPU 0 /* user DP floating-point pkg */ #define XCHAL_HAVE_FP 0 /* single prec floating point */ #define 
XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */ #define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */ #define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */ #define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */ #define XCHAL_HAVE_DFP 0 /* double precision FP pkg */ #define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */ #define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/ #define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */ #define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/ #define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */ #define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */ #define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */ #define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */ #define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */ #define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */ #define XCHAL_HAVE_PDX4 0 /* PDX4 */ #define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */ #define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */ #define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */ #define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */ #define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */ #define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */ #define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */ #define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */ #define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */ #define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */ #define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */ #define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */ #define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */ #define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */ #define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */ #define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */ /*---------------------------------------------------------------------- MISC ----------------------------------------------------------------------*/ #define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */ #define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */ #define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */ #define XCHAL_DATA_WIDTH 4 /* data width in bytes */ #define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay (1 = 5-stage, 2 = 7-stage) */ #define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */ #define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */ /* In T1050, applies to selected core load and store instructions (see ISA): */ #define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */ #define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/ #define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */ #define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/ #define XCHAL_SW_VERSION 1100002 /* sw version of this header */ #define XCHAL_CORE_ID "de212" /* alphanum core name (CoreID) set in the Xtensa Processor Generator */ #define XCHAL_BUILD_UNIQUE_ID 0x0005A985 /* 22-bit sw build ID */ /* * These definitions describe the hardware targeted by this software. 
*/ #define XCHAL_HW_CONFIGID0 0xC283DFFE /* ConfigID hi 32 bits*/ #define XCHAL_HW_CONFIGID1 0x1C85A985 /* ConfigID lo 32 bits*/ #define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */ #define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */ #define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */ #define XCHAL_HW_VERSION 260002 /* major*100+minor */ #define XCHAL_HW_REL_LX6 1 #define XCHAL_HW_REL_LX6_0 1 #define XCHAL_HW_REL_LX6_0_2 1 #define XCHAL_HW_CONFIGID_RELIABLE 1 /* If software targets a *range* of hardware versions, these are the bounds: */ #define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */ #define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */ #define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */ #define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */ #define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */ #define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */ /*---------------------------------------------------------------------- CACHE ----------------------------------------------------------------------*/ #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */ #define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */ #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */ #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */ #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */ #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */ #define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */ #define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */ #define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */ #define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */ #define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */ #define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */ #define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */ #define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */ #define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */ #define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */ #define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */ #define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */ /**************************************************************************** Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code ****************************************************************************/ #ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY /*---------------------------------------------------------------------- CACHE ----------------------------------------------------------------------*/ #define XCHAL_HAVE_PIF 1 /* any outbound PIF present */ /* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). 
*/ /* Number of cache sets in log2(lines per way): */ #define XCHAL_ICACHE_SETWIDTH 7 #define XCHAL_DCACHE_SETWIDTH 7 /* Cache set associativity (number of ways): */ #define XCHAL_ICACHE_WAYS 2 #define XCHAL_DCACHE_WAYS 2 /* Cache features: */ #define XCHAL_ICACHE_LINE_LOCKABLE 1 #define XCHAL_DCACHE_LINE_LOCKABLE 1 #define XCHAL_ICACHE_ECC_PARITY 0 #define XCHAL_DCACHE_ECC_PARITY 0 /* Cache access size in bytes (affects operation of SICW instruction): */ #define XCHAL_ICACHE_ACCESS_SIZE 4 #define XCHAL_DCACHE_ACCESS_SIZE 4 #define XCHAL_DCACHE_BANKS 1 /* number of banks */ /* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */ #define XCHAL_CA_BITS 4 /* Whether MEMCTL register has anything useful */ #define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \ XCHAL_DCACHE_IS_COHERENT || \ XCHAL_HAVE_ICACHE_DYN_WAYS || \ XCHAL_HAVE_DCACHE_DYN_WAYS) && \ (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0)) /*---------------------------------------------------------------------- INTERNAL I/D RAM/ROMs and XLMI ----------------------------------------------------------------------*/ #define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */ #define XCHAL_NUM_INSTRAM 1 /* number of core instr. RAMs */ #define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */ #define XCHAL_NUM_DATARAM 1 /* number of core data RAMs */ #define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/ #define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */ /* Instruction RAM 0: */ #define XCHAL_INSTRAM0_VADDR 0x40000000 /* virtual address */ #define XCHAL_INSTRAM0_PADDR 0x40000000 /* physical address */ #define XCHAL_INSTRAM0_SIZE 131072 /* size in bytes */ #define XCHAL_INSTRAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */ /* Data RAM 0: */ #define XCHAL_DATARAM0_VADDR 0x3FFE0000 /* virtual address */ #define XCHAL_DATARAM0_PADDR 0x3FFE0000 /* physical address */ #define XCHAL_DATARAM0_SIZE 131072 /* size in bytes */ #define XCHAL_DATARAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */ #define XCHAL_DATARAM0_BANKS 1 /* number of banks */ /* XLMI Port 0: */ #define XCHAL_XLMI0_VADDR 0x3FFC0000 /* virtual address */ #define XCHAL_XLMI0_PADDR 0x3FFC0000 /* physical address */ #define XCHAL_XLMI0_SIZE 131072 /* size in bytes */ #define XCHAL_XLMI0_ECC_PARITY 0 /* ECC/parity type, 0=none */ #define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/ /*---------------------------------------------------------------------- INTERRUPTS and TIMERS ----------------------------------------------------------------------*/ #define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */ #define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */ #define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */ #define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */ #define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */ #define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */ #define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */ #define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */ #define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels (not including level zero) */ #define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */ /* (always 1 in XEA1; levels 2 .. 
EXCM_LEVEL are "medium priority") */ /* Masks of interrupts at each interrupt level: */ #define XCHAL_INTLEVEL1_MASK 0x001F80FF #define XCHAL_INTLEVEL2_MASK 0x00000100 #define XCHAL_INTLEVEL3_MASK 0x00200E00 #define XCHAL_INTLEVEL4_MASK 0x00001000 #define XCHAL_INTLEVEL5_MASK 0x00002000 #define XCHAL_INTLEVEL6_MASK 0x00000000 #define XCHAL_INTLEVEL7_MASK 0x00004000 /* Masks of interrupts at each range 1..n of interrupt levels: */ #define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF #define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF #define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF #define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF #define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF #define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF #define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF /* Level of each interrupt: */ #define XCHAL_INT0_LEVEL 1 #define XCHAL_INT1_LEVEL 1 #define XCHAL_INT2_LEVEL 1 #define XCHAL_INT3_LEVEL 1 #define XCHAL_INT4_LEVEL 1 #define XCHAL_INT5_LEVEL 1 #define XCHAL_INT6_LEVEL 1 #define XCHAL_INT7_LEVEL 1 #define XCHAL_INT8_LEVEL 2 #define XCHAL_INT9_LEVEL 3 #define XCHAL_INT10_LEVEL 3 #define XCHAL_INT11_LEVEL 3 #define XCHAL_INT12_LEVEL 4 #define XCHAL_INT13_LEVEL 5 #define XCHAL_INT14_LEVEL 7 #define XCHAL_INT15_LEVEL 1 #define XCHAL_INT16_LEVEL 1 #define XCHAL_INT17_LEVEL 1 #define XCHAL_INT18_LEVEL 1 #define XCHAL_INT19_LEVEL 1 #define XCHAL_INT20_LEVEL 1 #define XCHAL_INT21_LEVEL 3 #define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */ #define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */ #define XCHAL_NMILEVEL 7 /* NMI "level" (for use with EXCSAVE/EPS/EPC_n, RFI n) */ /* Type of each interrupt: */ #define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER #define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE #define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER #define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE #define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL #define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER #define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI #define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE #define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE #define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE #define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE #define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE #define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE #define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE /* Masks of interrupts for each type of interrupt: */ #define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000 #define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880 #define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000 #define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F #define XCHAL_INTTYPE_MASK_TIMER 0x00002440 #define XCHAL_INTTYPE_MASK_NMI 0x00004000 #define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000 #define XCHAL_INTTYPE_MASK_PROFILING 0x00000000 /* Interrupt numbers assigned to specific interrupt sources: */ #define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */ #define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */ #define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */ #define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED #define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */ /* Interrupt numbers for levels at which only 
one interrupt is configured: */ #define XCHAL_INTLEVEL2_NUM 8 #define XCHAL_INTLEVEL4_NUM 12 #define XCHAL_INTLEVEL5_NUM 13 #define XCHAL_INTLEVEL7_NUM 14 /* (There are many interrupts each at level(s) 1, 3.) */ /* * External interrupt mapping. * These macros describe how Xtensa processor interrupt numbers * (as numbered internally, eg. in INTERRUPT and INTENABLE registers) * map to external BInterrupt<n> pins, for those interrupts * configured as external (level-triggered, edge-triggered, or NMI). * See the Xtensa processor databook for more details. */ /* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */ #define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */ #define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */ #define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */ #define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */ #define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */ #define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */ #define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */ #define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */ #define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */ #define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */ #define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */ #define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */ #define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */ #define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */ #define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */ #define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */ #define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */ /* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */ #define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */ #define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */ #define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */ #define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */ #define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */ #define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */ #define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */ #define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */ #define XCHAL_INT12_EXTNUM 8 /* (intlevel 4) */ #define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */ #define XCHAL_INT15_EXTNUM 10 /* (intlevel 1) */ #define XCHAL_INT16_EXTNUM 11 /* (intlevel 1) */ #define XCHAL_INT17_EXTNUM 12 /* (intlevel 1) */ #define XCHAL_INT18_EXTNUM 13 /* (intlevel 1) */ #define XCHAL_INT19_EXTNUM 14 /* (intlevel 1) */ #define XCHAL_INT20_EXTNUM 15 /* (intlevel 1) */ #define XCHAL_INT21_EXTNUM 16 /* (intlevel 3) */ /*---------------------------------------------------------------------- EXCEPTIONS and VECTORS ----------------------------------------------------------------------*/ #define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture number: 1 == XEA1 (old) 2 == XEA2 (new) 0 == XEAX (extern) or TX */ #define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */ #define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */ #define XCHAL_HAVE_XEAX 0 /* External Exception Arch. 
*/ #define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */ #define XCHAL_HAVE_HALT 0 /* halt architecture option */ #define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */ #define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */ #define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */ #define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */ #define XCHAL_VECBASE_RESET_VADDR 0x60000000 /* VECBASE reset value */ #define XCHAL_VECBASE_RESET_PADDR 0x60000000 #define XCHAL_RESET_VECBASE_OVERLAP 0 #define XCHAL_RESET_VECTOR0_VADDR 0x50000000 #define XCHAL_RESET_VECTOR0_PADDR 0x50000000 #define XCHAL_RESET_VECTOR1_VADDR 0x40000400 #define XCHAL_RESET_VECTOR1_PADDR 0x40000400 #define XCHAL_RESET_VECTOR_VADDR 0x50000000 #define XCHAL_RESET_VECTOR_PADDR 0x50000000 #define XCHAL_USER_VECOFS 0x00000340 #define XCHAL_USER_VECTOR_VADDR 0x60000340 #define XCHAL_USER_VECTOR_PADDR 0x60000340 #define XCHAL_KERNEL_VECOFS 0x00000300 #define XCHAL_KERNEL_VECTOR_VADDR 0x60000300 #define XCHAL_KERNEL_VECTOR_PADDR 0x60000300 #define XCHAL_DOUBLEEXC_VECOFS 0x000003C0 #define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x600003C0 #define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x600003C0 #define XCHAL_WINDOW_OF4_VECOFS 0x00000000 #define XCHAL_WINDOW_UF4_VECOFS 0x00000040 #define XCHAL_WINDOW_OF8_VECOFS 0x00000080 #define XCHAL_WINDOW_UF8_VECOFS 0x000000C0 #define XCHAL_WINDOW_OF12_VECOFS 0x00000100 #define XCHAL_WINDOW_UF12_VECOFS 0x00000140 #define XCHAL_WINDOW_VECTORS_VADDR 0x60000000 #define XCHAL_WINDOW_VECTORS_PADDR 0x60000000 #define XCHAL_INTLEVEL2_VECOFS 0x00000180 #define XCHAL_INTLEVEL2_VECTOR_VADDR 0x60000180 #define XCHAL_INTLEVEL2_VECTOR_PADDR 0x60000180 #define XCHAL_INTLEVEL3_VECOFS 0x000001C0 #define XCHAL_INTLEVEL3_VECTOR_VADDR 0x600001C0 #define XCHAL_INTLEVEL3_VECTOR_PADDR 0x600001C0 #define XCHAL_INTLEVEL4_VECOFS 0x00000200 #define XCHAL_INTLEVEL4_VECTOR_VADDR 0x60000200 #define XCHAL_INTLEVEL4_VECTOR_PADDR 0x60000200 #define XCHAL_INTLEVEL5_VECOFS 0x00000240 #define XCHAL_INTLEVEL5_VECTOR_VADDR 0x60000240 #define XCHAL_INTLEVEL5_VECTOR_PADDR 0x60000240 #define XCHAL_INTLEVEL6_VECOFS 0x00000280 #define XCHAL_INTLEVEL6_VECTOR_VADDR 0x60000280 #define XCHAL_INTLEVEL6_VECTOR_PADDR 0x60000280 #define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS #define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR #define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR #define XCHAL_NMI_VECOFS 0x000002C0 #define XCHAL_NMI_VECTOR_VADDR 0x600002C0 #define XCHAL_NMI_VECTOR_PADDR 0x600002C0 #define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS #define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR #define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR /*---------------------------------------------------------------------- DEBUG MODULE ----------------------------------------------------------------------*/ /* Misc */ #define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */ #define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */ #define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */ /* On-Chip Debug (OCD) */ #define XCHAL_HAVE_OCD 1 /* OnChipDebug option */ #define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */ #define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */ #define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */ #define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */ /* TRAX (in core) */ #define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */ #define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */ #define XCHAL_TRAX_MEM_SHAREABLE 0 /* start/end regs; ready sig. 
*/ #define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */ #define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */ /* Perf counters */ #define XCHAL_NUM_PERF_COUNTERS 0 /* performance counters */ /*---------------------------------------------------------------------- MMU ----------------------------------------------------------------------*/ /* See core-matmap.h header file for more details. */ #define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */ #define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */ #define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */ #define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */ #define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */ #define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */ #define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */ #define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table [autorefill] and protection) usable for an MMU-based OS */ /* If none of the above last 4 are set, it's a custom TLB configuration. */ #define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */ #define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */ #define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */ #endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ #endif /* _XTENSA_CORE_CONFIGURATION_H */
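The configuration above encodes several relationships that can be checked mechanically: the cache comment in the privileged section states size == ways * 2^(linewidth + setwidth), each XCHAL_INTLEVELn_MASK is the OR of one bit per interrupt assigned to level n, and each relocatable vector's VADDR is VECBASE plus its VECOFS. Below is a minimal standalone sketch verifying all three against the de212 values quoted above; the constants are copied in by hand as assumptions, since the real <xtensa/config/core-isa.h> only exists inside an Xtensa toolchain and is not included here:

#include <cassert>
#include <cstdio>
#include <initializer_list>

int main() {
  // Cache geometry: size == ways * 2^(linewidth + setwidth).
  // Constants copied from XCHAL_ICACHE_WAYS/_LINEWIDTH/_SETWIDTH above.
  const unsigned ways = 2, linewidth = 5, setwidth = 7;
  const unsigned icache_size = ways << (linewidth + setwidth);
  assert(icache_size == 8192);                  // XCHAL_ICACHE_SIZE

  // Interrupt level mask: OR of (1 << n) for every interrupt n at the level.
  // Per the level table above, level-1 interrupts are 0..7 and 15..20.
  unsigned mask = 0;
  for (int n : {0, 1, 2, 3, 4, 5, 6, 7, 15, 16, 17, 18, 19, 20})
    mask |= 1u << n;
  assert(mask == 0x001F80FFu);                  // XCHAL_INTLEVEL1_MASK

  // Relocatable vectors: vector address = VECBASE + per-vector offset.
  assert(0x60000000u + 0x340u == 0x60000340u);  // XCHAL_USER_VECTOR_VADDR

  std::printf("icache %u bytes, level-1 mask 0x%08X\n", icache_size, mask);
  return 0;
}

The same arithmetic reproduces the remaining XCHAL_INTLEVELn_MASK and vector VADDR values from the tables above.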
null
null
null
null
90,884
55,505
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
55,505
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/utility/importer/bookmark_html_reader.h" #include <stddef.h> #include <stdint.h> #include "base/callback.h" #include "base/files/file_util.h" #include "base/i18n/icu_string_conversions.h" #include "base/macros.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_split.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "base/time/time.h" #include "chrome/common/importer/imported_bookmark_entry.h" #include "chrome/utility/importer/favicon_reencode.h" #include "components/search_engines/search_terms_data.h" #include "components/search_engines/template_url.h" #include "net/base/data_url.h" #include "net/base/escape.h" #include "url/gurl.h" #include "url/url_constants.h" namespace { // Fetches the given |attribute| value from the |attribute_list|. Returns true // if successful, and |value| will contain the value. bool GetAttribute(const std::string& attribute_list, const std::string& attribute, std::string* value) { const char kQuote[] = "\""; size_t begin = attribute_list.find(attribute + "=" + kQuote); if (begin == std::string::npos) return false; // Can't find the attribute. begin += attribute.size() + 2; size_t end = begin + 1; while (end < attribute_list.size()) { if (attribute_list[end] == '"' && attribute_list[end - 1] != '\\') { break; } end++; } if (end == attribute_list.size()) return false; // The value is not quoted. *value = attribute_list.substr(begin, end - begin); return true; } // Given the URL of a page and a favicon data URL, adds an appropriate record // to the given favicon usage vector. void DataURLToFaviconUsage(const GURL& link_url, const GURL& favicon_data, favicon_base::FaviconUsageDataList* favicons) { if (!link_url.is_valid() || !favicon_data.is_valid() || !favicon_data.SchemeIs(url::kDataScheme)) return; // Parse the data URL. std::string mime_type, char_set, data; if (!net::DataURL::Parse(favicon_data, &mime_type, &char_set, &data) || data.empty()) return; favicon_base::FaviconUsageData usage; if (!importer::ReencodeFavicon( reinterpret_cast<const unsigned char*>(&data[0]), data.size(), &usage.png_data)) return; // Unable to decode. // We need to make up a URL for the favicon. We use a version of the page's // URL so that we can be sure it will not collide. usage.favicon_url = GURL(std::string("made-up-favicon:") + link_url.spec()); // We only have one URL per favicon for Firefox 2 bookmarks. usage.urls.insert(link_url); favicons->push_back(usage); } } // namespace namespace bookmark_html_reader { static std::string stripDt(const std::string& lineDt) { // Remove "<DT>" if the line starts with "<DT>". This may not occur if // "<DT>" was on the previous line. Liberally accept entries that do not // have an opening "<DT>" at all. 
std::string line = lineDt; static const char kDtTag[] = "<DT>"; if (base::StartsWith(line, kDtTag, base::CompareCase::INSENSITIVE_ASCII)) { line.erase(0, arraysize(kDtTag) - 1); base::TrimString(line, " ", &line); } return line; } void ImportBookmarksFile( const base::Callback<bool(void)>& cancellation_callback, const base::Callback<bool(const GURL&)>& valid_url_callback, const base::FilePath& file_path, std::vector<ImportedBookmarkEntry>* bookmarks, std::vector<importer::SearchEngineInfo>* search_engines, favicon_base::FaviconUsageDataList* favicons) { std::string content; base::ReadFileToString(file_path, &content); std::vector<std::string> lines = base::SplitString( content, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL); base::string16 last_folder; bool last_folder_on_toolbar = false; bool last_folder_is_empty = true; bool has_subfolder = false; bool has_last_folder = false; base::Time last_folder_add_date; std::vector<base::string16> path; size_t toolbar_folder_index = 0; std::string charset = "UTF-8"; // If no charset is specified, assume utf-8. for (size_t i = 0; i < lines.size() && (cancellation_callback.is_null() || !cancellation_callback.Run()); ++i) { std::string line; base::TrimString(lines[i], " ", &line); // Remove "<HR>" if |line| starts with it. "<HR>" is the bookmark entries // separator in Firefox that Chrome does not support. Note that there can be // multiple "<HR>" tags at the beginning of a single line. // See http://crbug.com/257474. static const char kHrTag[] = "<HR>"; while (base::StartsWith(line, kHrTag, base::CompareCase::INSENSITIVE_ASCII)) { line.erase(0, arraysize(kHrTag) - 1); base::TrimString(line, " ", &line); } // Get the encoding of the bookmark file. if (internal::ParseCharsetFromLine(line, &charset)) continue; // Get the folder name. if (internal::ParseFolderNameFromLine(line, charset, &last_folder, &last_folder_on_toolbar, &last_folder_add_date)) { has_last_folder = true; continue; } // Get the bookmark entry. base::string16 title; base::string16 shortcut; GURL url, favicon; base::Time add_date; base::string16 post_data; bool is_bookmark; // TODO(jcampan): http://b/issue?id=1196285 we do not support POST based // keywords yet. is_bookmark = internal::ParseBookmarkFromLine(line, charset, &title, &url, &favicon, &shortcut, &add_date, &post_data) || internal::ParseMinimumBookmarkFromLine(line, charset, &title, &url); // If bookmark contains a valid replaceable url and a keyword then import // it as search engine. std::string search_engine_url; if (is_bookmark && post_data.empty() && CanImportURLAsSearchEngine(url, &search_engine_url) && !shortcut.empty()) { importer::SearchEngineInfo search_engine_info; search_engine_info.url.assign(base::UTF8ToUTF16(search_engine_url)); search_engine_info.keyword = shortcut; search_engine_info.display_name = title; search_engines->push_back(search_engine_info); continue; } if (is_bookmark) last_folder_is_empty = false; if (is_bookmark && post_data.empty() && (valid_url_callback.is_null() || valid_url_callback.Run(url))) { if (toolbar_folder_index > path.size() && !path.empty()) { NOTREACHED(); // error in parsing. break; } ImportedBookmarkEntry entry; entry.creation_time = add_date; entry.url = url; entry.title = title; if (toolbar_folder_index) { // The toolbar folder should be at the top level. entry.in_toolbar = true; entry.path.assign(path.begin() + toolbar_folder_index - 1, path.end()); } else { // Add this bookmark to the list of |bookmarks|. 
if (!has_subfolder && has_last_folder) { path.push_back(last_folder); has_last_folder = false; last_folder.clear(); } entry.path.assign(path.begin(), path.end()); } bookmarks->push_back(entry); // Save the favicon. DataURLToFaviconUsage will handle the case where // there is no favicon. if (favicons) DataURLToFaviconUsage(url, favicon, favicons); continue; } // Bookmarks in sub-folder are encapsulated with <DL> tag. if (base::StartsWith(line, "<DL>", base::CompareCase::INSENSITIVE_ASCII)) { has_subfolder = true; if (has_last_folder) { path.push_back(last_folder); has_last_folder = false; last_folder.clear(); } if (last_folder_on_toolbar && !toolbar_folder_index) toolbar_folder_index = path.size(); // Mark next folder empty as initial state. last_folder_is_empty = true; } else if (base::StartsWith(line, "</DL>", base::CompareCase::INSENSITIVE_ASCII)) { if (path.empty()) break; // Mismatch <DL>. base::string16 folder_title = path.back(); path.pop_back(); if (last_folder_is_empty) { // Empty folder should be added explicitly. ImportedBookmarkEntry entry; entry.is_folder = true; entry.creation_time = last_folder_add_date; entry.title = folder_title; if (toolbar_folder_index) { // The toolbar folder should be at the top level. // Make sure we don't add the toolbar folder itself if it is empty. if (toolbar_folder_index <= path.size()) { entry.in_toolbar = true; entry.path.assign(path.begin() + toolbar_folder_index - 1, path.end()); bookmarks->push_back(entry); } } else { // Add this folder to the list of |bookmarks|. entry.path.assign(path.begin(), path.end()); bookmarks->push_back(entry); } // Parent folder include current one, so it's not empty. last_folder_is_empty = false; } if (toolbar_folder_index > path.size()) toolbar_folder_index = 0; } } } bool CanImportURLAsSearchEngine(const GURL& url, std::string* search_engine_url) { std::string url_spec = url.possibly_invalid_spec(); if (url_spec.empty()) return false; url_spec = net::UnescapeURLComponent( url_spec, net::UnescapeRule::URL_SPECIAL_CHARS_EXCEPT_PATH_SEPARATORS); // Replace replacement terms ("%s") in |url_spec| with {searchTerms}. url_spec = TemplateURLRef::DisplayURLToURLRef(base::UTF8ToUTF16(url_spec)); TemplateURLData data; data.SetURL(url_spec); *search_engine_url = url_spec; return TemplateURL(data).SupportsReplacement(SearchTermsData()); } namespace internal { bool ParseCharsetFromLine(const std::string& line, std::string* charset) { if (!base::StartsWith(line, "<META", base::CompareCase::INSENSITIVE_ASCII) || (line.find("CONTENT=\"") == std::string::npos && line.find("content=\"") == std::string::npos)) { return false; } const char kCharset[] = "charset="; size_t begin = line.find(kCharset); if (begin == std::string::npos) return false; begin += sizeof(kCharset) - 1; size_t end = line.find_first_of('\"', begin); *charset = line.substr(begin, end - begin); return true; } bool ParseFolderNameFromLine(const std::string& lineDt, const std::string& charset, base::string16* folder_name, bool* is_toolbar_folder, base::Time* add_date) { const char kFolderOpen[] = "<H3"; const char kFolderClose[] = "</H3>"; const char kToolbarFolderAttribute[] = "PERSONAL_TOOLBAR_FOLDER"; const char kAddDateAttribute[] = "ADD_DATE"; std::string line = stripDt(lineDt); if (!base::StartsWith(line, kFolderOpen, base::CompareCase::SENSITIVE)) return false; size_t end = line.find(kFolderClose); size_t tag_end = line.rfind('>', end) + 1; // If no end tag or start tag is broken, we skip to find the folder name. 
if (end == std::string::npos || tag_end < arraysize(kFolderOpen)) return false; base::CodepageToUTF16(line.substr(tag_end, end - tag_end), charset.c_str(), base::OnStringConversionError::SKIP, folder_name); *folder_name = net::UnescapeForHTML(*folder_name); std::string attribute_list = line.substr(arraysize(kFolderOpen), tag_end - arraysize(kFolderOpen) - 1); std::string value; // Add date if (GetAttribute(attribute_list, kAddDateAttribute, &value)) { int64_t time; base::StringToInt64(value, &time); // Upper bound it at 32 bits. if (0 < time && time < (1LL << 32)) *add_date = base::Time::FromTimeT(time); } if (GetAttribute(attribute_list, kToolbarFolderAttribute, &value) && base::LowerCaseEqualsASCII(value, "true")) *is_toolbar_folder = true; else *is_toolbar_folder = false; return true; } bool ParseBookmarkFromLine(const std::string& lineDt, const std::string& charset, base::string16* title, GURL* url, GURL* favicon, base::string16* shortcut, base::Time* add_date, base::string16* post_data) { const char kItemOpen[] = "<A"; const char kItemClose[] = "</A>"; const char kFeedURLAttribute[] = "FEEDURL"; const char kHrefAttribute[] = "HREF"; const char kIconAttribute[] = "ICON"; const char kShortcutURLAttribute[] = "SHORTCUTURL"; const char kAddDateAttribute[] = "ADD_DATE"; const char kPostDataAttribute[] = "POST_DATA"; std::string line = stripDt(lineDt); title->clear(); *url = GURL(); *favicon = GURL(); shortcut->clear(); post_data->clear(); *add_date = base::Time(); if (!base::StartsWith(line, kItemOpen, base::CompareCase::SENSITIVE)) return false; size_t end = line.find(kItemClose); size_t tag_end = line.rfind('>', end) + 1; if (end == std::string::npos || tag_end < arraysize(kItemOpen)) return false; // No end tag or start tag is broken. std::string attribute_list = line.substr(arraysize(kItemOpen), tag_end - arraysize(kItemOpen) - 1); // We don't import Live Bookmark folders, which is Firefox's RSS reading // feature, since the user never necessarily bookmarked them and we don't // have this feature to update their contents. std::string value; if (GetAttribute(attribute_list, kFeedURLAttribute, &value)) return false; // Title base::CodepageToUTF16(line.substr(tag_end, end - tag_end), charset.c_str(), base::OnStringConversionError::SKIP, title); *title = net::UnescapeForHTML(*title); // URL if (GetAttribute(attribute_list, kHrefAttribute, &value)) { base::string16 url16; base::CodepageToUTF16(value, charset.c_str(), base::OnStringConversionError::SKIP, &url16); url16 = net::UnescapeForHTML(url16); *url = GURL(url16); } // Favicon if (GetAttribute(attribute_list, kIconAttribute, &value)) *favicon = GURL(value); // Keyword if (GetAttribute(attribute_list, kShortcutURLAttribute, &value)) { base::CodepageToUTF16(value, charset.c_str(), base::OnStringConversionError::SKIP, shortcut); *shortcut = net::UnescapeForHTML(*shortcut); } // Add date if (GetAttribute(attribute_list, kAddDateAttribute, &value)) { int64_t time; base::StringToInt64(value, &time); // Upper bound it at 32 bits. if (0 < time && time < (1LL << 32)) *add_date = base::Time::FromTimeT(time); } // Post data. 
if (GetAttribute(attribute_list, kPostDataAttribute, &value)) { base::CodepageToUTF16(value, charset.c_str(), base::OnStringConversionError::SKIP, post_data); *post_data = net::UnescapeForHTML(*post_data); } return true; } bool ParseMinimumBookmarkFromLine(const std::string& lineDt, const std::string& charset, base::string16* title, GURL* url) { const char kItemOpen[] = "<A"; const char kItemClose[] = "</"; const char kHrefAttributeUpper[] = "HREF"; const char kHrefAttributeLower[] = "href"; std::string line = stripDt(lineDt); title->clear(); *url = GURL(); // Case-insensitive check of open tag. if (!base::StartsWith(line, kItemOpen, base::CompareCase::INSENSITIVE_ASCII)) return false; // Find any close tag. size_t end = line.find(kItemClose); size_t tag_end = line.rfind('>', end) + 1; if (end == std::string::npos || tag_end < arraysize(kItemOpen)) return false; // No end tag or start tag is broken. std::string attribute_list = line.substr(arraysize(kItemOpen), tag_end - arraysize(kItemOpen) - 1); // Title base::CodepageToUTF16(line.substr(tag_end, end - tag_end), charset.c_str(), base::OnStringConversionError::SKIP, title); *title = net::UnescapeForHTML(*title); // URL std::string value; if (GetAttribute(attribute_list, kHrefAttributeUpper, &value) || GetAttribute(attribute_list, kHrefAttributeLower, &value)) { if (charset.length() != 0) { base::string16 url16; base::CodepageToUTF16(value, charset.c_str(), base::OnStringConversionError::SKIP, &url16); url16 = net::UnescapeForHTML(url16); *url = GURL(url16); } else { *url = GURL(value); } } return true; } } // namespace internal } // namespace bookmark_html_reader
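The heart of this reader is the attribute scan in GetAttribute(): locate ATTR=" in the tag's attribute list, then walk forward to the next double quote that is not preceded by a backslash. The sketch below mirrors that loop outside Chromium, with no base:: or GURL dependencies; the helper name and the sample bookmark line are illustrative only, not part of the real reader:

#include <iostream>
#include <string>

// Mirror of the scanning convention used by GetAttribute() above:
// locate `attribute="` and copy up to the next unescaped double quote.
static bool ExtractAttribute(const std::string& attribute_list,
                             const std::string& attribute,
                             std::string* value) {
  std::string::size_type begin = attribute_list.find(attribute + "=\"");
  if (begin == std::string::npos)
    return false;                    // attribute not present
  begin += attribute.size() + 2;     // skip past ATTR="
  std::string::size_type end = begin + 1;
  while (end < attribute_list.size()) {
    if (attribute_list[end] == '"' && attribute_list[end - 1] != '\\')
      break;                         // closing, unescaped quote
    ++end;
  }
  if (end == attribute_list.size())
    return false;                    // no closing quote found
  *value = attribute_list.substr(begin, end - begin);
  return true;
}

int main() {
  // Hypothetical attribute list from a line such as:
  //   <DT><A HREF="https://example.com/" ADD_DATE="1200000000">Example</A>
  const std::string attrs =
      "HREF=\"https://example.com/\" ADD_DATE=\"1200000000\"";
  std::string href, add_date;
  if (ExtractAttribute(attrs, "HREF", &href) &&
      ExtractAttribute(attrs, "ADD_DATE", &add_date))
    std::cout << href << " bookmarked at time_t " << add_date << "\n";
  return 0;
}

Note that, like the original, the scan starts at begin + 1 and so never treats the first value character's position as the closing quote; an empty value (ATTR="") would mis-parse, which is harmless for the non-empty values bookmark exports normally carry.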
null
null
null
null
52,368
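A second detail of the reader worth isolating: a bookmark whose URL contains a "%s" replacement slot and which carries a SHORTCUTURL keyword is imported as a search engine, with the slot rewritten to {searchTerms}. The real code delegates this to TemplateURLRef::DisplayURLToURLRef() and TemplateURL::SupportsReplacement(); the sketch below shows only the placeholder-rewriting idea, with an illustrative helper name and URL:

#include <iostream>
#include <string>

// Standalone sketch of the keyword-URL rewriting behind
// CanImportURLAsSearchEngine(): "%s" marks where search terms go,
// and the template form uses {searchTerms} instead.
static bool ToSearchTemplate(const std::string& bookmark_url,
                             std::string* template_url) {
  std::string out = bookmark_url;
  std::string::size_type pos = out.find("%s");
  if (pos == std::string::npos)
    return false;  // no replacement slot, so not a keyword search
  out.replace(pos, 2, "{searchTerms}");
  *template_url = out;
  return true;
}

int main() {
  std::string tmpl;
  if (ToSearchTemplate("https://example.com/search?q=%s", &tmpl))
    std::cout << tmpl << "\n";  // https://example.com/search?q={searchTerms}
  return 0;
}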
14,684
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
179,679
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Copyright 2007-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #ifndef _CDEF_BF548_H #define _CDEF_BF548_H /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ #include "cdefBF54x_base.h" /* The BF548 is like the BF547, but has additional CANs */ #include "cdefBF547.h" /* CAN Controller 1 Config 1 Registers */ #define bfin_read_CAN1_MC1() bfin_read16(CAN1_MC1) #define bfin_write_CAN1_MC1(val) bfin_write16(CAN1_MC1, val) #define bfin_read_CAN1_MD1() bfin_read16(CAN1_MD1) #define bfin_write_CAN1_MD1(val) bfin_write16(CAN1_MD1, val) #define bfin_read_CAN1_TRS1() bfin_read16(CAN1_TRS1) #define bfin_write_CAN1_TRS1(val) bfin_write16(CAN1_TRS1, val) #define bfin_read_CAN1_TRR1() bfin_read16(CAN1_TRR1) #define bfin_write_CAN1_TRR1(val) bfin_write16(CAN1_TRR1, val) #define bfin_read_CAN1_TA1() bfin_read16(CAN1_TA1) #define bfin_write_CAN1_TA1(val) bfin_write16(CAN1_TA1, val) #define bfin_read_CAN1_AA1() bfin_read16(CAN1_AA1) #define bfin_write_CAN1_AA1(val) bfin_write16(CAN1_AA1, val) #define bfin_read_CAN1_RMP1() bfin_read16(CAN1_RMP1) #define bfin_write_CAN1_RMP1(val) bfin_write16(CAN1_RMP1, val) #define bfin_read_CAN1_RML1() bfin_read16(CAN1_RML1) #define bfin_write_CAN1_RML1(val) bfin_write16(CAN1_RML1, val) #define bfin_read_CAN1_MBTIF1() bfin_read16(CAN1_MBTIF1) #define bfin_write_CAN1_MBTIF1(val) bfin_write16(CAN1_MBTIF1, val) #define bfin_read_CAN1_MBRIF1() bfin_read16(CAN1_MBRIF1) #define bfin_write_CAN1_MBRIF1(val) bfin_write16(CAN1_MBRIF1, val) #define bfin_read_CAN1_MBIM1() bfin_read16(CAN1_MBIM1) #define bfin_write_CAN1_MBIM1(val) bfin_write16(CAN1_MBIM1, val) #define bfin_read_CAN1_RFH1() bfin_read16(CAN1_RFH1) #define bfin_write_CAN1_RFH1(val) bfin_write16(CAN1_RFH1, val) #define bfin_read_CAN1_OPSS1() bfin_read16(CAN1_OPSS1) #define bfin_write_CAN1_OPSS1(val) bfin_write16(CAN1_OPSS1, val) /* CAN Controller 1 Config 2 Registers */ #define bfin_read_CAN1_MC2() bfin_read16(CAN1_MC2) #define bfin_write_CAN1_MC2(val) bfin_write16(CAN1_MC2, val) #define bfin_read_CAN1_MD2() bfin_read16(CAN1_MD2) #define bfin_write_CAN1_MD2(val) bfin_write16(CAN1_MD2, val) #define bfin_read_CAN1_TRS2() bfin_read16(CAN1_TRS2) #define bfin_write_CAN1_TRS2(val) bfin_write16(CAN1_TRS2, val) #define bfin_read_CAN1_TRR2() bfin_read16(CAN1_TRR2) #define bfin_write_CAN1_TRR2(val) bfin_write16(CAN1_TRR2, val) #define bfin_read_CAN1_TA2() bfin_read16(CAN1_TA2) #define bfin_write_CAN1_TA2(val) bfin_write16(CAN1_TA2, val) #define bfin_read_CAN1_AA2() bfin_read16(CAN1_AA2) #define bfin_write_CAN1_AA2(val) bfin_write16(CAN1_AA2, val) #define bfin_read_CAN1_RMP2() bfin_read16(CAN1_RMP2) #define bfin_write_CAN1_RMP2(val) bfin_write16(CAN1_RMP2, val) #define bfin_read_CAN1_RML2() bfin_read16(CAN1_RML2) #define bfin_write_CAN1_RML2(val) bfin_write16(CAN1_RML2, val) #define bfin_read_CAN1_MBTIF2() bfin_read16(CAN1_MBTIF2) #define bfin_write_CAN1_MBTIF2(val) bfin_write16(CAN1_MBTIF2, val) #define bfin_read_CAN1_MBRIF2() bfin_read16(CAN1_MBRIF2) #define bfin_write_CAN1_MBRIF2(val) bfin_write16(CAN1_MBRIF2, val) #define bfin_read_CAN1_MBIM2() bfin_read16(CAN1_MBIM2) #define bfin_write_CAN1_MBIM2(val) bfin_write16(CAN1_MBIM2, val) #define bfin_read_CAN1_RFH2() bfin_read16(CAN1_RFH2) #define bfin_write_CAN1_RFH2(val) bfin_write16(CAN1_RFH2, val) #define bfin_read_CAN1_OPSS2() bfin_read16(CAN1_OPSS2) #define bfin_write_CAN1_OPSS2(val) bfin_write16(CAN1_OPSS2, val) /* CAN Controller 1 Clock/Interrupt/Counter Registers */ #define
bfin_read_CAN1_CLOCK() bfin_read16(CAN1_CLOCK) #define bfin_write_CAN1_CLOCK(val) bfin_write16(CAN1_CLOCK, val) #define bfin_read_CAN1_TIMING() bfin_read16(CAN1_TIMING) #define bfin_write_CAN1_TIMING(val) bfin_write16(CAN1_TIMING, val) #define bfin_read_CAN1_DEBUG() bfin_read16(CAN1_DEBUG) #define bfin_write_CAN1_DEBUG(val) bfin_write16(CAN1_DEBUG, val) #define bfin_read_CAN1_STATUS() bfin_read16(CAN1_STATUS) #define bfin_write_CAN1_STATUS(val) bfin_write16(CAN1_STATUS, val) #define bfin_read_CAN1_CEC() bfin_read16(CAN1_CEC) #define bfin_write_CAN1_CEC(val) bfin_write16(CAN1_CEC, val) #define bfin_read_CAN1_GIS() bfin_read16(CAN1_GIS) #define bfin_write_CAN1_GIS(val) bfin_write16(CAN1_GIS, val) #define bfin_read_CAN1_GIM() bfin_read16(CAN1_GIM) #define bfin_write_CAN1_GIM(val) bfin_write16(CAN1_GIM, val) #define bfin_read_CAN1_GIF() bfin_read16(CAN1_GIF) #define bfin_write_CAN1_GIF(val) bfin_write16(CAN1_GIF, val) #define bfin_read_CAN1_CONTROL() bfin_read16(CAN1_CONTROL) #define bfin_write_CAN1_CONTROL(val) bfin_write16(CAN1_CONTROL, val) #define bfin_read_CAN1_INTR() bfin_read16(CAN1_INTR) #define bfin_write_CAN1_INTR(val) bfin_write16(CAN1_INTR, val) #define bfin_read_CAN1_MBTD() bfin_read16(CAN1_MBTD) #define bfin_write_CAN1_MBTD(val) bfin_write16(CAN1_MBTD, val) #define bfin_read_CAN1_EWR() bfin_read16(CAN1_EWR) #define bfin_write_CAN1_EWR(val) bfin_write16(CAN1_EWR, val) #define bfin_read_CAN1_ESR() bfin_read16(CAN1_ESR) #define bfin_write_CAN1_ESR(val) bfin_write16(CAN1_ESR, val) #define bfin_read_CAN1_UCCNT() bfin_read16(CAN1_UCCNT) #define bfin_write_CAN1_UCCNT(val) bfin_write16(CAN1_UCCNT, val) #define bfin_read_CAN1_UCRC() bfin_read16(CAN1_UCRC) #define bfin_write_CAN1_UCRC(val) bfin_write16(CAN1_UCRC, val) #define bfin_read_CAN1_UCCNF() bfin_read16(CAN1_UCCNF) #define bfin_write_CAN1_UCCNF(val) bfin_write16(CAN1_UCCNF, val) /* CAN Controller 1 Mailbox Acceptance Registers */ #define bfin_read_CAN1_AM00L() bfin_read16(CAN1_AM00L) #define bfin_write_CAN1_AM00L(val) bfin_write16(CAN1_AM00L, val) #define bfin_read_CAN1_AM00H() bfin_read16(CAN1_AM00H) #define bfin_write_CAN1_AM00H(val) bfin_write16(CAN1_AM00H, val) #define bfin_read_CAN1_AM01L() bfin_read16(CAN1_AM01L) #define bfin_write_CAN1_AM01L(val) bfin_write16(CAN1_AM01L, val) #define bfin_read_CAN1_AM01H() bfin_read16(CAN1_AM01H) #define bfin_write_CAN1_AM01H(val) bfin_write16(CAN1_AM01H, val) #define bfin_read_CAN1_AM02L() bfin_read16(CAN1_AM02L) #define bfin_write_CAN1_AM02L(val) bfin_write16(CAN1_AM02L, val) #define bfin_read_CAN1_AM02H() bfin_read16(CAN1_AM02H) #define bfin_write_CAN1_AM02H(val) bfin_write16(CAN1_AM02H, val) #define bfin_read_CAN1_AM03L() bfin_read16(CAN1_AM03L) #define bfin_write_CAN1_AM03L(val) bfin_write16(CAN1_AM03L, val) #define bfin_read_CAN1_AM03H() bfin_read16(CAN1_AM03H) #define bfin_write_CAN1_AM03H(val) bfin_write16(CAN1_AM03H, val) #define bfin_read_CAN1_AM04L() bfin_read16(CAN1_AM04L) #define bfin_write_CAN1_AM04L(val) bfin_write16(CAN1_AM04L, val) #define bfin_read_CAN1_AM04H() bfin_read16(CAN1_AM04H) #define bfin_write_CAN1_AM04H(val) bfin_write16(CAN1_AM04H, val) #define bfin_read_CAN1_AM05L() bfin_read16(CAN1_AM05L) #define bfin_write_CAN1_AM05L(val) bfin_write16(CAN1_AM05L, val) #define bfin_read_CAN1_AM05H() bfin_read16(CAN1_AM05H) #define bfin_write_CAN1_AM05H(val) bfin_write16(CAN1_AM05H, val) #define bfin_read_CAN1_AM06L() bfin_read16(CAN1_AM06L) #define bfin_write_CAN1_AM06L(val) bfin_write16(CAN1_AM06L, val) #define bfin_read_CAN1_AM06H() bfin_read16(CAN1_AM06H) #define
bfin_write_CAN1_AM06H(val) bfin_write16(CAN1_AM06H, val) #define bfin_read_CAN1_AM07L() bfin_read16(CAN1_AM07L) #define bfin_write_CAN1_AM07L(val) bfin_write16(CAN1_AM07L, val) #define bfin_read_CAN1_AM07H() bfin_read16(CAN1_AM07H) #define bfin_write_CAN1_AM07H(val) bfin_write16(CAN1_AM07H, val) #define bfin_read_CAN1_AM08L() bfin_read16(CAN1_AM08L) #define bfin_write_CAN1_AM08L(val) bfin_write16(CAN1_AM08L, val) #define bfin_read_CAN1_AM08H() bfin_read16(CAN1_AM08H) #define bfin_write_CAN1_AM08H(val) bfin_write16(CAN1_AM08H, val) #define bfin_read_CAN1_AM09L() bfin_read16(CAN1_AM09L) #define bfin_write_CAN1_AM09L(val) bfin_write16(CAN1_AM09L, val) #define bfin_read_CAN1_AM09H() bfin_read16(CAN1_AM09H) #define bfin_write_CAN1_AM09H(val) bfin_write16(CAN1_AM09H, val) #define bfin_read_CAN1_AM10L() bfin_read16(CAN1_AM10L) #define bfin_write_CAN1_AM10L(val) bfin_write16(CAN1_AM10L, val) #define bfin_read_CAN1_AM10H() bfin_read16(CAN1_AM10H) #define bfin_write_CAN1_AM10H(val) bfin_write16(CAN1_AM10H, val) #define bfin_read_CAN1_AM11L() bfin_read16(CAN1_AM11L) #define bfin_write_CAN1_AM11L(val) bfin_write16(CAN1_AM11L, val) #define bfin_read_CAN1_AM11H() bfin_read16(CAN1_AM11H) #define bfin_write_CAN1_AM11H(val) bfin_write16(CAN1_AM11H, val) #define bfin_read_CAN1_AM12L() bfin_read16(CAN1_AM12L) #define bfin_write_CAN1_AM12L(val) bfin_write16(CAN1_AM12L, val) #define bfin_read_CAN1_AM12H() bfin_read16(CAN1_AM12H) #define bfin_write_CAN1_AM12H(val) bfin_write16(CAN1_AM12H, val) #define bfin_read_CAN1_AM13L() bfin_read16(CAN1_AM13L) #define bfin_write_CAN1_AM13L(val) bfin_write16(CAN1_AM13L, val) #define bfin_read_CAN1_AM13H() bfin_read16(CAN1_AM13H) #define bfin_write_CAN1_AM13H(val) bfin_write16(CAN1_AM13H, val) #define bfin_read_CAN1_AM14L() bfin_read16(CAN1_AM14L) #define bfin_write_CAN1_AM14L(val) bfin_write16(CAN1_AM14L, val) #define bfin_read_CAN1_AM14H() bfin_read16(CAN1_AM14H) #define bfin_write_CAN1_AM14H(val) bfin_write16(CAN1_AM14H, val) #define bfin_read_CAN1_AM15L() bfin_read16(CAN1_AM15L) #define bfin_write_CAN1_AM15L(val) bfin_write16(CAN1_AM15L, val) #define bfin_read_CAN1_AM15H() bfin_read16(CAN1_AM15H) #define bfin_write_CAN1_AM15H(val) bfin_write16(CAN1_AM15H, val) /* CAN Controller 1 Mailbox Acceptance Registers */ #define bfin_read_CAN1_AM16L() bfin_read16(CAN1_AM16L) #define bfin_write_CAN1_AM16L(val) bfin_write16(CAN1_AM16L, val) #define bfin_read_CAN1_AM16H() bfin_read16(CAN1_AM16H) #define bfin_write_CAN1_AM16H(val) bfin_write16(CAN1_AM16H, val) #define bfin_read_CAN1_AM17L() bfin_read16(CAN1_AM17L) #define bfin_write_CAN1_AM17L(val) bfin_write16(CAN1_AM17L, val) #define bfin_read_CAN1_AM17H() bfin_read16(CAN1_AM17H) #define bfin_write_CAN1_AM17H(val) bfin_write16(CAN1_AM17H, val) #define bfin_read_CAN1_AM18L() bfin_read16(CAN1_AM18L) #define bfin_write_CAN1_AM18L(val) bfin_write16(CAN1_AM18L, val) #define bfin_read_CAN1_AM18H() bfin_read16(CAN1_AM18H) #define bfin_write_CAN1_AM18H(val) bfin_write16(CAN1_AM18H, val) #define bfin_read_CAN1_AM19L() bfin_read16(CAN1_AM19L) #define bfin_write_CAN1_AM19L(val) bfin_write16(CAN1_AM19L, val) #define bfin_read_CAN1_AM19H() bfin_read16(CAN1_AM19H) #define bfin_write_CAN1_AM19H(val) bfin_write16(CAN1_AM19H, val) #define bfin_read_CAN1_AM20L() bfin_read16(CAN1_AM20L) #define bfin_write_CAN1_AM20L(val) bfin_write16(CAN1_AM20L, val) #define bfin_read_CAN1_AM20H() bfin_read16(CAN1_AM20H) #define bfin_write_CAN1_AM20H(val) bfin_write16(CAN1_AM20H, val) #define bfin_read_CAN1_AM21L() bfin_read16(CAN1_AM21L) #define
bfin_write_CAN1_AM21L(val) bfin_write16(CAN1_AM21L, val) #define bfin_read_CAN1_AM21H() bfin_read16(CAN1_AM21H) #define bfin_write_CAN1_AM21H(val) bfin_write16(CAN1_AM21H, val) #define bfin_read_CAN1_AM22L() bfin_read16(CAN1_AM22L) #define bfin_write_CAN1_AM22L(val) bfin_write16(CAN1_AM22L, val) #define bfin_read_CAN1_AM22H() bfin_read16(CAN1_AM22H) #define bfin_write_CAN1_AM22H(val) bfin_write16(CAN1_AM22H, val) #define bfin_read_CAN1_AM23L() bfin_read16(CAN1_AM23L) #define bfin_write_CAN1_AM23L(val) bfin_write16(CAN1_AM23L, val) #define bfin_read_CAN1_AM23H() bfin_read16(CAN1_AM23H) #define bfin_write_CAN1_AM23H(val) bfin_write16(CAN1_AM23H, val) #define bfin_read_CAN1_AM24L() bfin_read16(CAN1_AM24L) #define bfin_write_CAN1_AM24L(val) bfin_write16(CAN1_AM24L, val) #define bfin_read_CAN1_AM24H() bfin_read16(CAN1_AM24H) #define bfin_write_CAN1_AM24H(val) bfin_write16(CAN1_AM24H, val) #define bfin_read_CAN1_AM25L() bfin_read16(CAN1_AM25L) #define bfin_write_CAN1_AM25L(val) bfin_write16(CAN1_AM25L, val) #define bfin_read_CAN1_AM25H() bfin_read16(CAN1_AM25H) #define bfin_write_CAN1_AM25H(val) bfin_write16(CAN1_AM25H, val) #define bfin_read_CAN1_AM26L() bfin_read16(CAN1_AM26L) #define bfin_write_CAN1_AM26L(val) bfin_write16(CAN1_AM26L, val) #define bfin_read_CAN1_AM26H() bfin_read16(CAN1_AM26H) #define bfin_write_CAN1_AM26H(val) bfin_write16(CAN1_AM26H, val) #define bfin_read_CAN1_AM27L() bfin_read16(CAN1_AM27L) #define bfin_write_CAN1_AM27L(val) bfin_write16(CAN1_AM27L, val) #define bfin_read_CAN1_AM27H() bfin_read16(CAN1_AM27H) #define bfin_write_CAN1_AM27H(val) bfin_write16(CAN1_AM27H, val) #define bfin_read_CAN1_AM28L() bfin_read16(CAN1_AM28L) #define bfin_write_CAN1_AM28L(val) bfin_write16(CAN1_AM28L, val) #define bfin_read_CAN1_AM28H() bfin_read16(CAN1_AM28H) #define bfin_write_CAN1_AM28H(val) bfin_write16(CAN1_AM28H, val) #define bfin_read_CAN1_AM29L() bfin_read16(CAN1_AM29L) #define bfin_write_CAN1_AM29L(val) bfin_write16(CAN1_AM29L, val) #define bfin_read_CAN1_AM29H() bfin_read16(CAN1_AM29H) #define bfin_write_CAN1_AM29H(val) bfin_write16(CAN1_AM29H, val) #define bfin_read_CAN1_AM30L() bfin_read16(CAN1_AM30L) #define bfin_write_CAN1_AM30L(val) bfin_write16(CAN1_AM30L, val) #define bfin_read_CAN1_AM30H() bfin_read16(CAN1_AM30H) #define bfin_write_CAN1_AM30H(val) bfin_write16(CAN1_AM30H, val) #define bfin_read_CAN1_AM31L() bfin_read16(CAN1_AM31L) #define bfin_write_CAN1_AM31L(val) bfin_write16(CAN1_AM31L, val) #define bfin_read_CAN1_AM31H() bfin_read16(CAN1_AM31H) #define bfin_write_CAN1_AM31H(val) bfin_write16(CAN1_AM31H, val) /* CAN Controller 1 Mailbox Data Registers */ #define bfin_read_CAN1_MB00_DATA0() bfin_read16(CAN1_MB00_DATA0) #define bfin_write_CAN1_MB00_DATA0(val) bfin_write16(CAN1_MB00_DATA0, val) #define bfin_read_CAN1_MB00_DATA1() bfin_read16(CAN1_MB00_DATA1) #define bfin_write_CAN1_MB00_DATA1(val) bfin_write16(CAN1_MB00_DATA1, val) #define bfin_read_CAN1_MB00_DATA2() bfin_read16(CAN1_MB00_DATA2) #define bfin_write_CAN1_MB00_DATA2(val) bfin_write16(CAN1_MB00_DATA2, val) #define bfin_read_CAN1_MB00_DATA3() bfin_read16(CAN1_MB00_DATA3) #define bfin_write_CAN1_MB00_DATA3(val) bfin_write16(CAN1_MB00_DATA3, val) #define bfin_read_CAN1_MB00_LENGTH() bfin_read16(CAN1_MB00_LENGTH) #define bfin_write_CAN1_MB00_LENGTH(val) bfin_write16(CAN1_MB00_LENGTH, val) #define bfin_read_CAN1_MB00_TIMESTAMP() bfin_read16(CAN1_MB00_TIMESTAMP) #define bfin_write_CAN1_MB00_TIMESTAMP(val) bfin_write16(CAN1_MB00_TIMESTAMP, val) #define bfin_read_CAN1_MB00_ID0() bfin_read16(CAN1_MB00_ID0) #define 
bfin_write_CAN1_MB00_ID0(val) bfin_write16(CAN1_MB00_ID0, val) #define bfin_read_CAN1_MB00_ID1() bfin_read16(CAN1_MB00_ID1) #define bfin_write_CAN1_MB00_ID1(val) bfin_write16(CAN1_MB00_ID1, val) #define bfin_read_CAN1_MB01_DATA0() bfin_read16(CAN1_MB01_DATA0) #define bfin_write_CAN1_MB01_DATA0(val) bfin_write16(CAN1_MB01_DATA0, val) #define bfin_read_CAN1_MB01_DATA1() bfin_read16(CAN1_MB01_DATA1) #define bfin_write_CAN1_MB01_DATA1(val) bfin_write16(CAN1_MB01_DATA1, val) #define bfin_read_CAN1_MB01_DATA2() bfin_read16(CAN1_MB01_DATA2) #define bfin_write_CAN1_MB01_DATA2(val) bfin_write16(CAN1_MB01_DATA2, val) #define bfin_read_CAN1_MB01_DATA3() bfin_read16(CAN1_MB01_DATA3) #define bfin_write_CAN1_MB01_DATA3(val) bfin_write16(CAN1_MB01_DATA3, val) #define bfin_read_CAN1_MB01_LENGTH() bfin_read16(CAN1_MB01_LENGTH) #define bfin_write_CAN1_MB01_LENGTH(val) bfin_write16(CAN1_MB01_LENGTH, val) #define bfin_read_CAN1_MB01_TIMESTAMP() bfin_read16(CAN1_MB01_TIMESTAMP) #define bfin_write_CAN1_MB01_TIMESTAMP(val) bfin_write16(CAN1_MB01_TIMESTAMP, val) #define bfin_read_CAN1_MB01_ID0() bfin_read16(CAN1_MB01_ID0) #define bfin_write_CAN1_MB01_ID0(val) bfin_write16(CAN1_MB01_ID0, val) #define bfin_read_CAN1_MB01_ID1() bfin_read16(CAN1_MB01_ID1) #define bfin_write_CAN1_MB01_ID1(val) bfin_write16(CAN1_MB01_ID1, val) #define bfin_read_CAN1_MB02_DATA0() bfin_read16(CAN1_MB02_DATA0) #define bfin_write_CAN1_MB02_DATA0(val) bfin_write16(CAN1_MB02_DATA0, val) #define bfin_read_CAN1_MB02_DATA1() bfin_read16(CAN1_MB02_DATA1) #define bfin_write_CAN1_MB02_DATA1(val) bfin_write16(CAN1_MB02_DATA1, val) #define bfin_read_CAN1_MB02_DATA2() bfin_read16(CAN1_MB02_DATA2) #define bfin_write_CAN1_MB02_DATA2(val) bfin_write16(CAN1_MB02_DATA2, val) #define bfin_read_CAN1_MB02_DATA3() bfin_read16(CAN1_MB02_DATA3) #define bfin_write_CAN1_MB02_DATA3(val) bfin_write16(CAN1_MB02_DATA3, val) #define bfin_read_CAN1_MB02_LENGTH() bfin_read16(CAN1_MB02_LENGTH) #define bfin_write_CAN1_MB02_LENGTH(val) bfin_write16(CAN1_MB02_LENGTH, val) #define bfin_read_CAN1_MB02_TIMESTAMP() bfin_read16(CAN1_MB02_TIMESTAMP) #define bfin_write_CAN1_MB02_TIMESTAMP(val) bfin_write16(CAN1_MB02_TIMESTAMP, val) #define bfin_read_CAN1_MB02_ID0() bfin_read16(CAN1_MB02_ID0) #define bfin_write_CAN1_MB02_ID0(val) bfin_write16(CAN1_MB02_ID0, val) #define bfin_read_CAN1_MB02_ID1() bfin_read16(CAN1_MB02_ID1) #define bfin_write_CAN1_MB02_ID1(val) bfin_write16(CAN1_MB02_ID1, val) #define bfin_read_CAN1_MB03_DATA0() bfin_read16(CAN1_MB03_DATA0) #define bfin_write_CAN1_MB03_DATA0(val) bfin_write16(CAN1_MB03_DATA0, val) #define bfin_read_CAN1_MB03_DATA1() bfin_read16(CAN1_MB03_DATA1) #define bfin_write_CAN1_MB03_DATA1(val) bfin_write16(CAN1_MB03_DATA1, val) #define bfin_read_CAN1_MB03_DATA2() bfin_read16(CAN1_MB03_DATA2) #define bfin_write_CAN1_MB03_DATA2(val) bfin_write16(CAN1_MB03_DATA2, val) #define bfin_read_CAN1_MB03_DATA3() bfin_read16(CAN1_MB03_DATA3) #define bfin_write_CAN1_MB03_DATA3(val) bfin_write16(CAN1_MB03_DATA3, val) #define bfin_read_CAN1_MB03_LENGTH() bfin_read16(CAN1_MB03_LENGTH) #define bfin_write_CAN1_MB03_LENGTH(val) bfin_write16(CAN1_MB03_LENGTH, val) #define bfin_read_CAN1_MB03_TIMESTAMP() bfin_read16(CAN1_MB03_TIMESTAMP) #define bfin_write_CAN1_MB03_TIMESTAMP(val) bfin_write16(CAN1_MB03_TIMESTAMP, val) #define bfin_read_CAN1_MB03_ID0() bfin_read16(CAN1_MB03_ID0) #define bfin_write_CAN1_MB03_ID0(val) bfin_write16(CAN1_MB03_ID0, val) #define bfin_read_CAN1_MB03_ID1() bfin_read16(CAN1_MB03_ID1) #define bfin_write_CAN1_MB03_ID1(val) 
bfin_write16(CAN1_MB03_ID1, val) #define bfin_read_CAN1_MB04_DATA0() bfin_read16(CAN1_MB04_DATA0) #define bfin_write_CAN1_MB04_DATA0(val) bfin_write16(CAN1_MB04_DATA0, val) #define bfin_read_CAN1_MB04_DATA1() bfin_read16(CAN1_MB04_DATA1) #define bfin_write_CAN1_MB04_DATA1(val) bfin_write16(CAN1_MB04_DATA1, val) #define bfin_read_CAN1_MB04_DATA2() bfin_read16(CAN1_MB04_DATA2) #define bfin_write_CAN1_MB04_DATA2(val) bfin_write16(CAN1_MB04_DATA2, val) #define bfin_read_CAN1_MB04_DATA3() bfin_read16(CAN1_MB04_DATA3) #define bfin_write_CAN1_MB04_DATA3(val) bfin_write16(CAN1_MB04_DATA3, val) #define bfin_read_CAN1_MB04_LENGTH() bfin_read16(CAN1_MB04_LENGTH) #define bfin_write_CAN1_MB04_LENGTH(val) bfin_write16(CAN1_MB04_LENGTH, val) #define bfin_read_CAN1_MB04_TIMESTAMP() bfin_read16(CAN1_MB04_TIMESTAMP) #define bfin_write_CAN1_MB04_TIMESTAMP(val) bfin_write16(CAN1_MB04_TIMESTAMP, val) #define bfin_read_CAN1_MB04_ID0() bfin_read16(CAN1_MB04_ID0) #define bfin_write_CAN1_MB04_ID0(val) bfin_write16(CAN1_MB04_ID0, val) #define bfin_read_CAN1_MB04_ID1() bfin_read16(CAN1_MB04_ID1) #define bfin_write_CAN1_MB04_ID1(val) bfin_write16(CAN1_MB04_ID1, val) #define bfin_read_CAN1_MB05_DATA0() bfin_read16(CAN1_MB05_DATA0) #define bfin_write_CAN1_MB05_DATA0(val) bfin_write16(CAN1_MB05_DATA0, val) #define bfin_read_CAN1_MB05_DATA1() bfin_read16(CAN1_MB05_DATA1) #define bfin_write_CAN1_MB05_DATA1(val) bfin_write16(CAN1_MB05_DATA1, val) #define bfin_read_CAN1_MB05_DATA2() bfin_read16(CAN1_MB05_DATA2) #define bfin_write_CAN1_MB05_DATA2(val) bfin_write16(CAN1_MB05_DATA2, val) #define bfin_read_CAN1_MB05_DATA3() bfin_read16(CAN1_MB05_DATA3) #define bfin_write_CAN1_MB05_DATA3(val) bfin_write16(CAN1_MB05_DATA3, val) #define bfin_read_CAN1_MB05_LENGTH() bfin_read16(CAN1_MB05_LENGTH) #define bfin_write_CAN1_MB05_LENGTH(val) bfin_write16(CAN1_MB05_LENGTH, val) #define bfin_read_CAN1_MB05_TIMESTAMP() bfin_read16(CAN1_MB05_TIMESTAMP) #define bfin_write_CAN1_MB05_TIMESTAMP(val) bfin_write16(CAN1_MB05_TIMESTAMP, val) #define bfin_read_CAN1_MB05_ID0() bfin_read16(CAN1_MB05_ID0) #define bfin_write_CAN1_MB05_ID0(val) bfin_write16(CAN1_MB05_ID0, val) #define bfin_read_CAN1_MB05_ID1() bfin_read16(CAN1_MB05_ID1) #define bfin_write_CAN1_MB05_ID1(val) bfin_write16(CAN1_MB05_ID1, val) #define bfin_read_CAN1_MB06_DATA0() bfin_read16(CAN1_MB06_DATA0) #define bfin_write_CAN1_MB06_DATA0(val) bfin_write16(CAN1_MB06_DATA0, val) #define bfin_read_CAN1_MB06_DATA1() bfin_read16(CAN1_MB06_DATA1) #define bfin_write_CAN1_MB06_DATA1(val) bfin_write16(CAN1_MB06_DATA1, val) #define bfin_read_CAN1_MB06_DATA2() bfin_read16(CAN1_MB06_DATA2) #define bfin_write_CAN1_MB06_DATA2(val) bfin_write16(CAN1_MB06_DATA2, val) #define bfin_read_CAN1_MB06_DATA3() bfin_read16(CAN1_MB06_DATA3) #define bfin_write_CAN1_MB06_DATA3(val) bfin_write16(CAN1_MB06_DATA3, val) #define bfin_read_CAN1_MB06_LENGTH() bfin_read16(CAN1_MB06_LENGTH) #define bfin_write_CAN1_MB06_LENGTH(val) bfin_write16(CAN1_MB06_LENGTH, val) #define bfin_read_CAN1_MB06_TIMESTAMP() bfin_read16(CAN1_MB06_TIMESTAMP) #define bfin_write_CAN1_MB06_TIMESTAMP(val) bfin_write16(CAN1_MB06_TIMESTAMP, val) #define bfin_read_CAN1_MB06_ID0() bfin_read16(CAN1_MB06_ID0) #define bfin_write_CAN1_MB06_ID0(val) bfin_write16(CAN1_MB06_ID0, val) #define bfin_read_CAN1_MB06_ID1() bfin_read16(CAN1_MB06_ID1) #define bfin_write_CAN1_MB06_ID1(val) bfin_write16(CAN1_MB06_ID1, val) #define bfin_read_CAN1_MB07_DATA0() bfin_read16(CAN1_MB07_DATA0) #define bfin_write_CAN1_MB07_DATA0(val) bfin_write16(CAN1_MB07_DATA0, val) #define 
bfin_read_CAN1_MB07_DATA1() bfin_read16(CAN1_MB07_DATA1) #define bfin_write_CAN1_MB07_DATA1(val) bfin_write16(CAN1_MB07_DATA1, val) #define bfin_read_CAN1_MB07_DATA2() bfin_read16(CAN1_MB07_DATA2) #define bfin_write_CAN1_MB07_DATA2(val) bfin_write16(CAN1_MB07_DATA2, val) #define bfin_read_CAN1_MB07_DATA3() bfin_read16(CAN1_MB07_DATA3) #define bfin_write_CAN1_MB07_DATA3(val) bfin_write16(CAN1_MB07_DATA3, val) #define bfin_read_CAN1_MB07_LENGTH() bfin_read16(CAN1_MB07_LENGTH) #define bfin_write_CAN1_MB07_LENGTH(val) bfin_write16(CAN1_MB07_LENGTH, val) #define bfin_read_CAN1_MB07_TIMESTAMP() bfin_read16(CAN1_MB07_TIMESTAMP) #define bfin_write_CAN1_MB07_TIMESTAMP(val) bfin_write16(CAN1_MB07_TIMESTAMP, val) #define bfin_read_CAN1_MB07_ID0() bfin_read16(CAN1_MB07_ID0) #define bfin_write_CAN1_MB07_ID0(val) bfin_write16(CAN1_MB07_ID0, val) #define bfin_read_CAN1_MB07_ID1() bfin_read16(CAN1_MB07_ID1) #define bfin_write_CAN1_MB07_ID1(val) bfin_write16(CAN1_MB07_ID1, val) #define bfin_read_CAN1_MB08_DATA0() bfin_read16(CAN1_MB08_DATA0) #define bfin_write_CAN1_MB08_DATA0(val) bfin_write16(CAN1_MB08_DATA0, val) #define bfin_read_CAN1_MB08_DATA1() bfin_read16(CAN1_MB08_DATA1) #define bfin_write_CAN1_MB08_DATA1(val) bfin_write16(CAN1_MB08_DATA1, val) #define bfin_read_CAN1_MB08_DATA2() bfin_read16(CAN1_MB08_DATA2) #define bfin_write_CAN1_MB08_DATA2(val) bfin_write16(CAN1_MB08_DATA2, val) #define bfin_read_CAN1_MB08_DATA3() bfin_read16(CAN1_MB08_DATA3) #define bfin_write_CAN1_MB08_DATA3(val) bfin_write16(CAN1_MB08_DATA3, val) #define bfin_read_CAN1_MB08_LENGTH() bfin_read16(CAN1_MB08_LENGTH) #define bfin_write_CAN1_MB08_LENGTH(val) bfin_write16(CAN1_MB08_LENGTH, val) #define bfin_read_CAN1_MB08_TIMESTAMP() bfin_read16(CAN1_MB08_TIMESTAMP) #define bfin_write_CAN1_MB08_TIMESTAMP(val) bfin_write16(CAN1_MB08_TIMESTAMP, val) #define bfin_read_CAN1_MB08_ID0() bfin_read16(CAN1_MB08_ID0) #define bfin_write_CAN1_MB08_ID0(val) bfin_write16(CAN1_MB08_ID0, val) #define bfin_read_CAN1_MB08_ID1() bfin_read16(CAN1_MB08_ID1) #define bfin_write_CAN1_MB08_ID1(val) bfin_write16(CAN1_MB08_ID1, val) #define bfin_read_CAN1_MB09_DATA0() bfin_read16(CAN1_MB09_DATA0) #define bfin_write_CAN1_MB09_DATA0(val) bfin_write16(CAN1_MB09_DATA0, val) #define bfin_read_CAN1_MB09_DATA1() bfin_read16(CAN1_MB09_DATA1) #define bfin_write_CAN1_MB09_DATA1(val) bfin_write16(CAN1_MB09_DATA1, val) #define bfin_read_CAN1_MB09_DATA2() bfin_read16(CAN1_MB09_DATA2) #define bfin_write_CAN1_MB09_DATA2(val) bfin_write16(CAN1_MB09_DATA2, val) #define bfin_read_CAN1_MB09_DATA3() bfin_read16(CAN1_MB09_DATA3) #define bfin_write_CAN1_MB09_DATA3(val) bfin_write16(CAN1_MB09_DATA3, val) #define bfin_read_CAN1_MB09_LENGTH() bfin_read16(CAN1_MB09_LENGTH) #define bfin_write_CAN1_MB09_LENGTH(val) bfin_write16(CAN1_MB09_LENGTH, val) #define bfin_read_CAN1_MB09_TIMESTAMP() bfin_read16(CAN1_MB09_TIMESTAMP) #define bfin_write_CAN1_MB09_TIMESTAMP(val) bfin_write16(CAN1_MB09_TIMESTAMP, val) #define bfin_read_CAN1_MB09_ID0() bfin_read16(CAN1_MB09_ID0) #define bfin_write_CAN1_MB09_ID0(val) bfin_write16(CAN1_MB09_ID0, val) #define bfin_read_CAN1_MB09_ID1() bfin_read16(CAN1_MB09_ID1) #define bfin_write_CAN1_MB09_ID1(val) bfin_write16(CAN1_MB09_ID1, val) #define bfin_read_CAN1_MB10_DATA0() bfin_read16(CAN1_MB10_DATA0) #define bfin_write_CAN1_MB10_DATA0(val) bfin_write16(CAN1_MB10_DATA0, val) #define bfin_read_CAN1_MB10_DATA1() bfin_read16(CAN1_MB10_DATA1) #define bfin_write_CAN1_MB10_DATA1(val) bfin_write16(CAN1_MB10_DATA1, val) #define bfin_read_CAN1_MB10_DATA2() 
bfin_read16(CAN1_MB10_DATA2) #define bfin_write_CAN1_MB10_DATA2(val) bfin_write16(CAN1_MB10_DATA2, val) #define bfin_read_CAN1_MB10_DATA3() bfin_read16(CAN1_MB10_DATA3) #define bfin_write_CAN1_MB10_DATA3(val) bfin_write16(CAN1_MB10_DATA3, val) #define bfin_read_CAN1_MB10_LENGTH() bfin_read16(CAN1_MB10_LENGTH) #define bfin_write_CAN1_MB10_LENGTH(val) bfin_write16(CAN1_MB10_LENGTH, val) #define bfin_read_CAN1_MB10_TIMESTAMP() bfin_read16(CAN1_MB10_TIMESTAMP) #define bfin_write_CAN1_MB10_TIMESTAMP(val) bfin_write16(CAN1_MB10_TIMESTAMP, val) #define bfin_read_CAN1_MB10_ID0() bfin_read16(CAN1_MB10_ID0) #define bfin_write_CAN1_MB10_ID0(val) bfin_write16(CAN1_MB10_ID0, val) #define bfin_read_CAN1_MB10_ID1() bfin_read16(CAN1_MB10_ID1) #define bfin_write_CAN1_MB10_ID1(val) bfin_write16(CAN1_MB10_ID1, val) #define bfin_read_CAN1_MB11_DATA0() bfin_read16(CAN1_MB11_DATA0) #define bfin_write_CAN1_MB11_DATA0(val) bfin_write16(CAN1_MB11_DATA0, val) #define bfin_read_CAN1_MB11_DATA1() bfin_read16(CAN1_MB11_DATA1) #define bfin_write_CAN1_MB11_DATA1(val) bfin_write16(CAN1_MB11_DATA1, val) #define bfin_read_CAN1_MB11_DATA2() bfin_read16(CAN1_MB11_DATA2) #define bfin_write_CAN1_MB11_DATA2(val) bfin_write16(CAN1_MB11_DATA2, val) #define bfin_read_CAN1_MB11_DATA3() bfin_read16(CAN1_MB11_DATA3) #define bfin_write_CAN1_MB11_DATA3(val) bfin_write16(CAN1_MB11_DATA3, val) #define bfin_read_CAN1_MB11_LENGTH() bfin_read16(CAN1_MB11_LENGTH) #define bfin_write_CAN1_MB11_LENGTH(val) bfin_write16(CAN1_MB11_LENGTH, val) #define bfin_read_CAN1_MB11_TIMESTAMP() bfin_read16(CAN1_MB11_TIMESTAMP) #define bfin_write_CAN1_MB11_TIMESTAMP(val) bfin_write16(CAN1_MB11_TIMESTAMP, val) #define bfin_read_CAN1_MB11_ID0() bfin_read16(CAN1_MB11_ID0) #define bfin_write_CAN1_MB11_ID0(val) bfin_write16(CAN1_MB11_ID0, val) #define bfin_read_CAN1_MB11_ID1() bfin_read16(CAN1_MB11_ID1) #define bfin_write_CAN1_MB11_ID1(val) bfin_write16(CAN1_MB11_ID1, val) #define bfin_read_CAN1_MB12_DATA0() bfin_read16(CAN1_MB12_DATA0) #define bfin_write_CAN1_MB12_DATA0(val) bfin_write16(CAN1_MB12_DATA0, val) #define bfin_read_CAN1_MB12_DATA1() bfin_read16(CAN1_MB12_DATA1) #define bfin_write_CAN1_MB12_DATA1(val) bfin_write16(CAN1_MB12_DATA1, val) #define bfin_read_CAN1_MB12_DATA2() bfin_read16(CAN1_MB12_DATA2) #define bfin_write_CAN1_MB12_DATA2(val) bfin_write16(CAN1_MB12_DATA2, val) #define bfin_read_CAN1_MB12_DATA3() bfin_read16(CAN1_MB12_DATA3) #define bfin_write_CAN1_MB12_DATA3(val) bfin_write16(CAN1_MB12_DATA3, val) #define bfin_read_CAN1_MB12_LENGTH() bfin_read16(CAN1_MB12_LENGTH) #define bfin_write_CAN1_MB12_LENGTH(val) bfin_write16(CAN1_MB12_LENGTH, val) #define bfin_read_CAN1_MB12_TIMESTAMP() bfin_read16(CAN1_MB12_TIMESTAMP) #define bfin_write_CAN1_MB12_TIMESTAMP(val) bfin_write16(CAN1_MB12_TIMESTAMP, val) #define bfin_read_CAN1_MB12_ID0() bfin_read16(CAN1_MB12_ID0) #define bfin_write_CAN1_MB12_ID0(val) bfin_write16(CAN1_MB12_ID0, val) #define bfin_read_CAN1_MB12_ID1() bfin_read16(CAN1_MB12_ID1) #define bfin_write_CAN1_MB12_ID1(val) bfin_write16(CAN1_MB12_ID1, val) #define bfin_read_CAN1_MB13_DATA0() bfin_read16(CAN1_MB13_DATA0) #define bfin_write_CAN1_MB13_DATA0(val) bfin_write16(CAN1_MB13_DATA0, val) #define bfin_read_CAN1_MB13_DATA1() bfin_read16(CAN1_MB13_DATA1) #define bfin_write_CAN1_MB13_DATA1(val) bfin_write16(CAN1_MB13_DATA1, val) #define bfin_read_CAN1_MB13_DATA2() bfin_read16(CAN1_MB13_DATA2) #define bfin_write_CAN1_MB13_DATA2(val) bfin_write16(CAN1_MB13_DATA2, val) #define bfin_read_CAN1_MB13_DATA3() bfin_read16(CAN1_MB13_DATA3) #define 
bfin_write_CAN1_MB13_DATA3(val) bfin_write16(CAN1_MB13_DATA3, val) #define bfin_read_CAN1_MB13_LENGTH() bfin_read16(CAN1_MB13_LENGTH) #define bfin_write_CAN1_MB13_LENGTH(val) bfin_write16(CAN1_MB13_LENGTH, val) #define bfin_read_CAN1_MB13_TIMESTAMP() bfin_read16(CAN1_MB13_TIMESTAMP) #define bfin_write_CAN1_MB13_TIMESTAMP(val) bfin_write16(CAN1_MB13_TIMESTAMP, val) #define bfin_read_CAN1_MB13_ID0() bfin_read16(CAN1_MB13_ID0) #define bfin_write_CAN1_MB13_ID0(val) bfin_write16(CAN1_MB13_ID0, val) #define bfin_read_CAN1_MB13_ID1() bfin_read16(CAN1_MB13_ID1) #define bfin_write_CAN1_MB13_ID1(val) bfin_write16(CAN1_MB13_ID1, val) #define bfin_read_CAN1_MB14_DATA0() bfin_read16(CAN1_MB14_DATA0) #define bfin_write_CAN1_MB14_DATA0(val) bfin_write16(CAN1_MB14_DATA0, val) #define bfin_read_CAN1_MB14_DATA1() bfin_read16(CAN1_MB14_DATA1) #define bfin_write_CAN1_MB14_DATA1(val) bfin_write16(CAN1_MB14_DATA1, val) #define bfin_read_CAN1_MB14_DATA2() bfin_read16(CAN1_MB14_DATA2) #define bfin_write_CAN1_MB14_DATA2(val) bfin_write16(CAN1_MB14_DATA2, val) #define bfin_read_CAN1_MB14_DATA3() bfin_read16(CAN1_MB14_DATA3) #define bfin_write_CAN1_MB14_DATA3(val) bfin_write16(CAN1_MB14_DATA3, val) #define bfin_read_CAN1_MB14_LENGTH() bfin_read16(CAN1_MB14_LENGTH) #define bfin_write_CAN1_MB14_LENGTH(val) bfin_write16(CAN1_MB14_LENGTH, val) #define bfin_read_CAN1_MB14_TIMESTAMP() bfin_read16(CAN1_MB14_TIMESTAMP) #define bfin_write_CAN1_MB14_TIMESTAMP(val) bfin_write16(CAN1_MB14_TIMESTAMP, val) #define bfin_read_CAN1_MB14_ID0() bfin_read16(CAN1_MB14_ID0) #define bfin_write_CAN1_MB14_ID0(val) bfin_write16(CAN1_MB14_ID0, val) #define bfin_read_CAN1_MB14_ID1() bfin_read16(CAN1_MB14_ID1) #define bfin_write_CAN1_MB14_ID1(val) bfin_write16(CAN1_MB14_ID1, val) #define bfin_read_CAN1_MB15_DATA0() bfin_read16(CAN1_MB15_DATA0) #define bfin_write_CAN1_MB15_DATA0(val) bfin_write16(CAN1_MB15_DATA0, val) #define bfin_read_CAN1_MB15_DATA1() bfin_read16(CAN1_MB15_DATA1) #define bfin_write_CAN1_MB15_DATA1(val) bfin_write16(CAN1_MB15_DATA1, val) #define bfin_read_CAN1_MB15_DATA2() bfin_read16(CAN1_MB15_DATA2) #define bfin_write_CAN1_MB15_DATA2(val) bfin_write16(CAN1_MB15_DATA2, val) #define bfin_read_CAN1_MB15_DATA3() bfin_read16(CAN1_MB15_DATA3) #define bfin_write_CAN1_MB15_DATA3(val) bfin_write16(CAN1_MB15_DATA3, val) #define bfin_read_CAN1_MB15_LENGTH() bfin_read16(CAN1_MB15_LENGTH) #define bfin_write_CAN1_MB15_LENGTH(val) bfin_write16(CAN1_MB15_LENGTH, val) #define bfin_read_CAN1_MB15_TIMESTAMP() bfin_read16(CAN1_MB15_TIMESTAMP) #define bfin_write_CAN1_MB15_TIMESTAMP(val) bfin_write16(CAN1_MB15_TIMESTAMP, val) #define bfin_read_CAN1_MB15_ID0() bfin_read16(CAN1_MB15_ID0) #define bfin_write_CAN1_MB15_ID0(val) bfin_write16(CAN1_MB15_ID0, val) #define bfin_read_CAN1_MB15_ID1() bfin_read16(CAN1_MB15_ID1) #define bfin_write_CAN1_MB15_ID1(val) bfin_write16(CAN1_MB15_ID1, val) /* CAN Controller 1 Mailbox Data Registers */ #define bfin_read_CAN1_MB16_DATA0() bfin_read16(CAN1_MB16_DATA0) #define bfin_write_CAN1_MB16_DATA0(val) bfin_write16(CAN1_MB16_DATA0, val) #define bfin_read_CAN1_MB16_DATA1() bfin_read16(CAN1_MB16_DATA1) #define bfin_write_CAN1_MB16_DATA1(val) bfin_write16(CAN1_MB16_DATA1, val) #define bfin_read_CAN1_MB16_DATA2() bfin_read16(CAN1_MB16_DATA2) #define bfin_write_CAN1_MB16_DATA2(val) bfin_write16(CAN1_MB16_DATA2, val) #define bfin_read_CAN1_MB16_DATA3() bfin_read16(CAN1_MB16_DATA3) #define bfin_write_CAN1_MB16_DATA3(val) bfin_write16(CAN1_MB16_DATA3, val) #define bfin_read_CAN1_MB16_LENGTH() bfin_read16(CAN1_MB16_LENGTH) 
#define bfin_write_CAN1_MB16_LENGTH(val) bfin_write16(CAN1_MB16_LENGTH, val) #define bfin_read_CAN1_MB16_TIMESTAMP() bfin_read16(CAN1_MB16_TIMESTAMP) #define bfin_write_CAN1_MB16_TIMESTAMP(val) bfin_write16(CAN1_MB16_TIMESTAMP, val) #define bfin_read_CAN1_MB16_ID0() bfin_read16(CAN1_MB16_ID0) #define bfin_write_CAN1_MB16_ID0(val) bfin_write16(CAN1_MB16_ID0, val) #define bfin_read_CAN1_MB16_ID1() bfin_read16(CAN1_MB16_ID1) #define bfin_write_CAN1_MB16_ID1(val) bfin_write16(CAN1_MB16_ID1, val) #define bfin_read_CAN1_MB17_DATA0() bfin_read16(CAN1_MB17_DATA0) #define bfin_write_CAN1_MB17_DATA0(val) bfin_write16(CAN1_MB17_DATA0, val) #define bfin_read_CAN1_MB17_DATA1() bfin_read16(CAN1_MB17_DATA1) #define bfin_write_CAN1_MB17_DATA1(val) bfin_write16(CAN1_MB17_DATA1, val) #define bfin_read_CAN1_MB17_DATA2() bfin_read16(CAN1_MB17_DATA2) #define bfin_write_CAN1_MB17_DATA2(val) bfin_write16(CAN1_MB17_DATA2, val) #define bfin_read_CAN1_MB17_DATA3() bfin_read16(CAN1_MB17_DATA3) #define bfin_write_CAN1_MB17_DATA3(val) bfin_write16(CAN1_MB17_DATA3, val) #define bfin_read_CAN1_MB17_LENGTH() bfin_read16(CAN1_MB17_LENGTH) #define bfin_write_CAN1_MB17_LENGTH(val) bfin_write16(CAN1_MB17_LENGTH, val) #define bfin_read_CAN1_MB17_TIMESTAMP() bfin_read16(CAN1_MB17_TIMESTAMP) #define bfin_write_CAN1_MB17_TIMESTAMP(val) bfin_write16(CAN1_MB17_TIMESTAMP, val) #define bfin_read_CAN1_MB17_ID0() bfin_read16(CAN1_MB17_ID0) #define bfin_write_CAN1_MB17_ID0(val) bfin_write16(CAN1_MB17_ID0, val) #define bfin_read_CAN1_MB17_ID1() bfin_read16(CAN1_MB17_ID1) #define bfin_write_CAN1_MB17_ID1(val) bfin_write16(CAN1_MB17_ID1, val) #define bfin_read_CAN1_MB18_DATA0() bfin_read16(CAN1_MB18_DATA0) #define bfin_write_CAN1_MB18_DATA0(val) bfin_write16(CAN1_MB18_DATA0, val) #define bfin_read_CAN1_MB18_DATA1() bfin_read16(CAN1_MB18_DATA1) #define bfin_write_CAN1_MB18_DATA1(val) bfin_write16(CAN1_MB18_DATA1, val) #define bfin_read_CAN1_MB18_DATA2() bfin_read16(CAN1_MB18_DATA2) #define bfin_write_CAN1_MB18_DATA2(val) bfin_write16(CAN1_MB18_DATA2, val) #define bfin_read_CAN1_MB18_DATA3() bfin_read16(CAN1_MB18_DATA3) #define bfin_write_CAN1_MB18_DATA3(val) bfin_write16(CAN1_MB18_DATA3, val) #define bfin_read_CAN1_MB18_LENGTH() bfin_read16(CAN1_MB18_LENGTH) #define bfin_write_CAN1_MB18_LENGTH(val) bfin_write16(CAN1_MB18_LENGTH, val) #define bfin_read_CAN1_MB18_TIMESTAMP() bfin_read16(CAN1_MB18_TIMESTAMP) #define bfin_write_CAN1_MB18_TIMESTAMP(val) bfin_write16(CAN1_MB18_TIMESTAMP, val) #define bfin_read_CAN1_MB18_ID0() bfin_read16(CAN1_MB18_ID0) #define bfin_write_CAN1_MB18_ID0(val) bfin_write16(CAN1_MB18_ID0, val) #define bfin_read_CAN1_MB18_ID1() bfin_read16(CAN1_MB18_ID1) #define bfin_write_CAN1_MB18_ID1(val) bfin_write16(CAN1_MB18_ID1, val) #define bfin_read_CAN1_MB19_DATA0() bfin_read16(CAN1_MB19_DATA0) #define bfin_write_CAN1_MB19_DATA0(val) bfin_write16(CAN1_MB19_DATA0, val) #define bfin_read_CAN1_MB19_DATA1() bfin_read16(CAN1_MB19_DATA1) #define bfin_write_CAN1_MB19_DATA1(val) bfin_write16(CAN1_MB19_DATA1, val) #define bfin_read_CAN1_MB19_DATA2() bfin_read16(CAN1_MB19_DATA2) #define bfin_write_CAN1_MB19_DATA2(val) bfin_write16(CAN1_MB19_DATA2, val) #define bfin_read_CAN1_MB19_DATA3() bfin_read16(CAN1_MB19_DATA3) #define bfin_write_CAN1_MB19_DATA3(val) bfin_write16(CAN1_MB19_DATA3, val) #define bfin_read_CAN1_MB19_LENGTH() bfin_read16(CAN1_MB19_LENGTH) #define bfin_write_CAN1_MB19_LENGTH(val) bfin_write16(CAN1_MB19_LENGTH, val) #define bfin_read_CAN1_MB19_TIMESTAMP() bfin_read16(CAN1_MB19_TIMESTAMP) #define 
bfin_write_CAN1_MB19_TIMESTAMP(val) bfin_write16(CAN1_MB19_TIMESTAMP, val) #define bfin_read_CAN1_MB19_ID0() bfin_read16(CAN1_MB19_ID0) #define bfin_write_CAN1_MB19_ID0(val) bfin_write16(CAN1_MB19_ID0, val) #define bfin_read_CAN1_MB19_ID1() bfin_read16(CAN1_MB19_ID1) #define bfin_write_CAN1_MB19_ID1(val) bfin_write16(CAN1_MB19_ID1, val) #define bfin_read_CAN1_MB20_DATA0() bfin_read16(CAN1_MB20_DATA0) #define bfin_write_CAN1_MB20_DATA0(val) bfin_write16(CAN1_MB20_DATA0, val) #define bfin_read_CAN1_MB20_DATA1() bfin_read16(CAN1_MB20_DATA1) #define bfin_write_CAN1_MB20_DATA1(val) bfin_write16(CAN1_MB20_DATA1, val) #define bfin_read_CAN1_MB20_DATA2() bfin_read16(CAN1_MB20_DATA2) #define bfin_write_CAN1_MB20_DATA2(val) bfin_write16(CAN1_MB20_DATA2, val) #define bfin_read_CAN1_MB20_DATA3() bfin_read16(CAN1_MB20_DATA3) #define bfin_write_CAN1_MB20_DATA3(val) bfin_write16(CAN1_MB20_DATA3, val) #define bfin_read_CAN1_MB20_LENGTH() bfin_read16(CAN1_MB20_LENGTH) #define bfin_write_CAN1_MB20_LENGTH(val) bfin_write16(CAN1_MB20_LENGTH, val) #define bfin_read_CAN1_MB20_TIMESTAMP() bfin_read16(CAN1_MB20_TIMESTAMP) #define bfin_write_CAN1_MB20_TIMESTAMP(val) bfin_write16(CAN1_MB20_TIMESTAMP, val) #define bfin_read_CAN1_MB20_ID0() bfin_read16(CAN1_MB20_ID0) #define bfin_write_CAN1_MB20_ID0(val) bfin_write16(CAN1_MB20_ID0, val) #define bfin_read_CAN1_MB20_ID1() bfin_read16(CAN1_MB20_ID1) #define bfin_write_CAN1_MB20_ID1(val) bfin_write16(CAN1_MB20_ID1, val) #define bfin_read_CAN1_MB21_DATA0() bfin_read16(CAN1_MB21_DATA0) #define bfin_write_CAN1_MB21_DATA0(val) bfin_write16(CAN1_MB21_DATA0, val) #define bfin_read_CAN1_MB21_DATA1() bfin_read16(CAN1_MB21_DATA1) #define bfin_write_CAN1_MB21_DATA1(val) bfin_write16(CAN1_MB21_DATA1, val) #define bfin_read_CAN1_MB21_DATA2() bfin_read16(CAN1_MB21_DATA2) #define bfin_write_CAN1_MB21_DATA2(val) bfin_write16(CAN1_MB21_DATA2, val) #define bfin_read_CAN1_MB21_DATA3() bfin_read16(CAN1_MB21_DATA3) #define bfin_write_CAN1_MB21_DATA3(val) bfin_write16(CAN1_MB21_DATA3, val) #define bfin_read_CAN1_MB21_LENGTH() bfin_read16(CAN1_MB21_LENGTH) #define bfin_write_CAN1_MB21_LENGTH(val) bfin_write16(CAN1_MB21_LENGTH, val) #define bfin_read_CAN1_MB21_TIMESTAMP() bfin_read16(CAN1_MB21_TIMESTAMP) #define bfin_write_CAN1_MB21_TIMESTAMP(val) bfin_write16(CAN1_MB21_TIMESTAMP, val) #define bfin_read_CAN1_MB21_ID0() bfin_read16(CAN1_MB21_ID0) #define bfin_write_CAN1_MB21_ID0(val) bfin_write16(CAN1_MB21_ID0, val) #define bfin_read_CAN1_MB21_ID1() bfin_read16(CAN1_MB21_ID1) #define bfin_write_CAN1_MB21_ID1(val) bfin_write16(CAN1_MB21_ID1, val) #define bfin_read_CAN1_MB22_DATA0() bfin_read16(CAN1_MB22_DATA0) #define bfin_write_CAN1_MB22_DATA0(val) bfin_write16(CAN1_MB22_DATA0, val) #define bfin_read_CAN1_MB22_DATA1() bfin_read16(CAN1_MB22_DATA1) #define bfin_write_CAN1_MB22_DATA1(val) bfin_write16(CAN1_MB22_DATA1, val) #define bfin_read_CAN1_MB22_DATA2() bfin_read16(CAN1_MB22_DATA2) #define bfin_write_CAN1_MB22_DATA2(val) bfin_write16(CAN1_MB22_DATA2, val) #define bfin_read_CAN1_MB22_DATA3() bfin_read16(CAN1_MB22_DATA3) #define bfin_write_CAN1_MB22_DATA3(val) bfin_write16(CAN1_MB22_DATA3, val) #define bfin_read_CAN1_MB22_LENGTH() bfin_read16(CAN1_MB22_LENGTH) #define bfin_write_CAN1_MB22_LENGTH(val) bfin_write16(CAN1_MB22_LENGTH, val) #define bfin_read_CAN1_MB22_TIMESTAMP() bfin_read16(CAN1_MB22_TIMESTAMP) #define bfin_write_CAN1_MB22_TIMESTAMP(val) bfin_write16(CAN1_MB22_TIMESTAMP, val) #define bfin_read_CAN1_MB22_ID0() bfin_read16(CAN1_MB22_ID0) #define bfin_write_CAN1_MB22_ID0(val) 
bfin_write16(CAN1_MB22_ID0, val) #define bfin_read_CAN1_MB22_ID1() bfin_read16(CAN1_MB22_ID1) #define bfin_write_CAN1_MB22_ID1(val) bfin_write16(CAN1_MB22_ID1, val) #define bfin_read_CAN1_MB23_DATA0() bfin_read16(CAN1_MB23_DATA0) #define bfin_write_CAN1_MB23_DATA0(val) bfin_write16(CAN1_MB23_DATA0, val) #define bfin_read_CAN1_MB23_DATA1() bfin_read16(CAN1_MB23_DATA1) #define bfin_write_CAN1_MB23_DATA1(val) bfin_write16(CAN1_MB23_DATA1, val) #define bfin_read_CAN1_MB23_DATA2() bfin_read16(CAN1_MB23_DATA2) #define bfin_write_CAN1_MB23_DATA2(val) bfin_write16(CAN1_MB23_DATA2, val) #define bfin_read_CAN1_MB23_DATA3() bfin_read16(CAN1_MB23_DATA3) #define bfin_write_CAN1_MB23_DATA3(val) bfin_write16(CAN1_MB23_DATA3, val) #define bfin_read_CAN1_MB23_LENGTH() bfin_read16(CAN1_MB23_LENGTH) #define bfin_write_CAN1_MB23_LENGTH(val) bfin_write16(CAN1_MB23_LENGTH, val) #define bfin_read_CAN1_MB23_TIMESTAMP() bfin_read16(CAN1_MB23_TIMESTAMP) #define bfin_write_CAN1_MB23_TIMESTAMP(val) bfin_write16(CAN1_MB23_TIMESTAMP, val) #define bfin_read_CAN1_MB23_ID0() bfin_read16(CAN1_MB23_ID0) #define bfin_write_CAN1_MB23_ID0(val) bfin_write16(CAN1_MB23_ID0, val) #define bfin_read_CAN1_MB23_ID1() bfin_read16(CAN1_MB23_ID1) #define bfin_write_CAN1_MB23_ID1(val) bfin_write16(CAN1_MB23_ID1, val) #define bfin_read_CAN1_MB24_DATA0() bfin_read16(CAN1_MB24_DATA0) #define bfin_write_CAN1_MB24_DATA0(val) bfin_write16(CAN1_MB24_DATA0, val) #define bfin_read_CAN1_MB24_DATA1() bfin_read16(CAN1_MB24_DATA1) #define bfin_write_CAN1_MB24_DATA1(val) bfin_write16(CAN1_MB24_DATA1, val) #define bfin_read_CAN1_MB24_DATA2() bfin_read16(CAN1_MB24_DATA2) #define bfin_write_CAN1_MB24_DATA2(val) bfin_write16(CAN1_MB24_DATA2, val) #define bfin_read_CAN1_MB24_DATA3() bfin_read16(CAN1_MB24_DATA3) #define bfin_write_CAN1_MB24_DATA3(val) bfin_write16(CAN1_MB24_DATA3, val) #define bfin_read_CAN1_MB24_LENGTH() bfin_read16(CAN1_MB24_LENGTH) #define bfin_write_CAN1_MB24_LENGTH(val) bfin_write16(CAN1_MB24_LENGTH, val) #define bfin_read_CAN1_MB24_TIMESTAMP() bfin_read16(CAN1_MB24_TIMESTAMP) #define bfin_write_CAN1_MB24_TIMESTAMP(val) bfin_write16(CAN1_MB24_TIMESTAMP, val) #define bfin_read_CAN1_MB24_ID0() bfin_read16(CAN1_MB24_ID0) #define bfin_write_CAN1_MB24_ID0(val) bfin_write16(CAN1_MB24_ID0, val) #define bfin_read_CAN1_MB24_ID1() bfin_read16(CAN1_MB24_ID1) #define bfin_write_CAN1_MB24_ID1(val) bfin_write16(CAN1_MB24_ID1, val) #define bfin_read_CAN1_MB25_DATA0() bfin_read16(CAN1_MB25_DATA0) #define bfin_write_CAN1_MB25_DATA0(val) bfin_write16(CAN1_MB25_DATA0, val) #define bfin_read_CAN1_MB25_DATA1() bfin_read16(CAN1_MB25_DATA1) #define bfin_write_CAN1_MB25_DATA1(val) bfin_write16(CAN1_MB25_DATA1, val) #define bfin_read_CAN1_MB25_DATA2() bfin_read16(CAN1_MB25_DATA2) #define bfin_write_CAN1_MB25_DATA2(val) bfin_write16(CAN1_MB25_DATA2, val) #define bfin_read_CAN1_MB25_DATA3() bfin_read16(CAN1_MB25_DATA3) #define bfin_write_CAN1_MB25_DATA3(val) bfin_write16(CAN1_MB25_DATA3, val) #define bfin_read_CAN1_MB25_LENGTH() bfin_read16(CAN1_MB25_LENGTH) #define bfin_write_CAN1_MB25_LENGTH(val) bfin_write16(CAN1_MB25_LENGTH, val) #define bfin_read_CAN1_MB25_TIMESTAMP() bfin_read16(CAN1_MB25_TIMESTAMP) #define bfin_write_CAN1_MB25_TIMESTAMP(val) bfin_write16(CAN1_MB25_TIMESTAMP, val) #define bfin_read_CAN1_MB25_ID0() bfin_read16(CAN1_MB25_ID0) #define bfin_write_CAN1_MB25_ID0(val) bfin_write16(CAN1_MB25_ID0, val) #define bfin_read_CAN1_MB25_ID1() bfin_read16(CAN1_MB25_ID1) #define bfin_write_CAN1_MB25_ID1(val) bfin_write16(CAN1_MB25_ID1, val) #define 
bfin_read_CAN1_MB26_DATA0() bfin_read16(CAN1_MB26_DATA0) #define bfin_write_CAN1_MB26_DATA0(val) bfin_write16(CAN1_MB26_DATA0, val) #define bfin_read_CAN1_MB26_DATA1() bfin_read16(CAN1_MB26_DATA1) #define bfin_write_CAN1_MB26_DATA1(val) bfin_write16(CAN1_MB26_DATA1, val) #define bfin_read_CAN1_MB26_DATA2() bfin_read16(CAN1_MB26_DATA2) #define bfin_write_CAN1_MB26_DATA2(val) bfin_write16(CAN1_MB26_DATA2, val) #define bfin_read_CAN1_MB26_DATA3() bfin_read16(CAN1_MB26_DATA3) #define bfin_write_CAN1_MB26_DATA3(val) bfin_write16(CAN1_MB26_DATA3, val) #define bfin_read_CAN1_MB26_LENGTH() bfin_read16(CAN1_MB26_LENGTH) #define bfin_write_CAN1_MB26_LENGTH(val) bfin_write16(CAN1_MB26_LENGTH, val) #define bfin_read_CAN1_MB26_TIMESTAMP() bfin_read16(CAN1_MB26_TIMESTAMP) #define bfin_write_CAN1_MB26_TIMESTAMP(val) bfin_write16(CAN1_MB26_TIMESTAMP, val) #define bfin_read_CAN1_MB26_ID0() bfin_read16(CAN1_MB26_ID0) #define bfin_write_CAN1_MB26_ID0(val) bfin_write16(CAN1_MB26_ID0, val) #define bfin_read_CAN1_MB26_ID1() bfin_read16(CAN1_MB26_ID1) #define bfin_write_CAN1_MB26_ID1(val) bfin_write16(CAN1_MB26_ID1, val) #define bfin_read_CAN1_MB27_DATA0() bfin_read16(CAN1_MB27_DATA0) #define bfin_write_CAN1_MB27_DATA0(val) bfin_write16(CAN1_MB27_DATA0, val) #define bfin_read_CAN1_MB27_DATA1() bfin_read16(CAN1_MB27_DATA1) #define bfin_write_CAN1_MB27_DATA1(val) bfin_write16(CAN1_MB27_DATA1, val) #define bfin_read_CAN1_MB27_DATA2() bfin_read16(CAN1_MB27_DATA2) #define bfin_write_CAN1_MB27_DATA2(val) bfin_write16(CAN1_MB27_DATA2, val) #define bfin_read_CAN1_MB27_DATA3() bfin_read16(CAN1_MB27_DATA3) #define bfin_write_CAN1_MB27_DATA3(val) bfin_write16(CAN1_MB27_DATA3, val) #define bfin_read_CAN1_MB27_LENGTH() bfin_read16(CAN1_MB27_LENGTH) #define bfin_write_CAN1_MB27_LENGTH(val) bfin_write16(CAN1_MB27_LENGTH, val) #define bfin_read_CAN1_MB27_TIMESTAMP() bfin_read16(CAN1_MB27_TIMESTAMP) #define bfin_write_CAN1_MB27_TIMESTAMP(val) bfin_write16(CAN1_MB27_TIMESTAMP, val) #define bfin_read_CAN1_MB27_ID0() bfin_read16(CAN1_MB27_ID0) #define bfin_write_CAN1_MB27_ID0(val) bfin_write16(CAN1_MB27_ID0, val) #define bfin_read_CAN1_MB27_ID1() bfin_read16(CAN1_MB27_ID1) #define bfin_write_CAN1_MB27_ID1(val) bfin_write16(CAN1_MB27_ID1, val) #define bfin_read_CAN1_MB28_DATA0() bfin_read16(CAN1_MB28_DATA0) #define bfin_write_CAN1_MB28_DATA0(val) bfin_write16(CAN1_MB28_DATA0, val) #define bfin_read_CAN1_MB28_DATA1() bfin_read16(CAN1_MB28_DATA1) #define bfin_write_CAN1_MB28_DATA1(val) bfin_write16(CAN1_MB28_DATA1, val) #define bfin_read_CAN1_MB28_DATA2() bfin_read16(CAN1_MB28_DATA2) #define bfin_write_CAN1_MB28_DATA2(val) bfin_write16(CAN1_MB28_DATA2, val) #define bfin_read_CAN1_MB28_DATA3() bfin_read16(CAN1_MB28_DATA3) #define bfin_write_CAN1_MB28_DATA3(val) bfin_write16(CAN1_MB28_DATA3, val) #define bfin_read_CAN1_MB28_LENGTH() bfin_read16(CAN1_MB28_LENGTH) #define bfin_write_CAN1_MB28_LENGTH(val) bfin_write16(CAN1_MB28_LENGTH, val) #define bfin_read_CAN1_MB28_TIMESTAMP() bfin_read16(CAN1_MB28_TIMESTAMP) #define bfin_write_CAN1_MB28_TIMESTAMP(val) bfin_write16(CAN1_MB28_TIMESTAMP, val) #define bfin_read_CAN1_MB28_ID0() bfin_read16(CAN1_MB28_ID0) #define bfin_write_CAN1_MB28_ID0(val) bfin_write16(CAN1_MB28_ID0, val) #define bfin_read_CAN1_MB28_ID1() bfin_read16(CAN1_MB28_ID1) #define bfin_write_CAN1_MB28_ID1(val) bfin_write16(CAN1_MB28_ID1, val) #define bfin_read_CAN1_MB29_DATA0() bfin_read16(CAN1_MB29_DATA0) #define bfin_write_CAN1_MB29_DATA0(val) bfin_write16(CAN1_MB29_DATA0, val) #define bfin_read_CAN1_MB29_DATA1() 
bfin_read16(CAN1_MB29_DATA1) #define bfin_write_CAN1_MB29_DATA1(val) bfin_write16(CAN1_MB29_DATA1, val) #define bfin_read_CAN1_MB29_DATA2() bfin_read16(CAN1_MB29_DATA2) #define bfin_write_CAN1_MB29_DATA2(val) bfin_write16(CAN1_MB29_DATA2, val) #define bfin_read_CAN1_MB29_DATA3() bfin_read16(CAN1_MB29_DATA3) #define bfin_write_CAN1_MB29_DATA3(val) bfin_write16(CAN1_MB29_DATA3, val) #define bfin_read_CAN1_MB29_LENGTH() bfin_read16(CAN1_MB29_LENGTH) #define bfin_write_CAN1_MB29_LENGTH(val) bfin_write16(CAN1_MB29_LENGTH, val) #define bfin_read_CAN1_MB29_TIMESTAMP() bfin_read16(CAN1_MB29_TIMESTAMP) #define bfin_write_CAN1_MB29_TIMESTAMP(val) bfin_write16(CAN1_MB29_TIMESTAMP, val) #define bfin_read_CAN1_MB29_ID0() bfin_read16(CAN1_MB29_ID0) #define bfin_write_CAN1_MB29_ID0(val) bfin_write16(CAN1_MB29_ID0, val) #define bfin_read_CAN1_MB29_ID1() bfin_read16(CAN1_MB29_ID1) #define bfin_write_CAN1_MB29_ID1(val) bfin_write16(CAN1_MB29_ID1, val) #define bfin_read_CAN1_MB30_DATA0() bfin_read16(CAN1_MB30_DATA0) #define bfin_write_CAN1_MB30_DATA0(val) bfin_write16(CAN1_MB30_DATA0, val) #define bfin_read_CAN1_MB30_DATA1() bfin_read16(CAN1_MB30_DATA1) #define bfin_write_CAN1_MB30_DATA1(val) bfin_write16(CAN1_MB30_DATA1, val) #define bfin_read_CAN1_MB30_DATA2() bfin_read16(CAN1_MB30_DATA2) #define bfin_write_CAN1_MB30_DATA2(val) bfin_write16(CAN1_MB30_DATA2, val) #define bfin_read_CAN1_MB30_DATA3() bfin_read16(CAN1_MB30_DATA3) #define bfin_write_CAN1_MB30_DATA3(val) bfin_write16(CAN1_MB30_DATA3, val) #define bfin_read_CAN1_MB30_LENGTH() bfin_read16(CAN1_MB30_LENGTH) #define bfin_write_CAN1_MB30_LENGTH(val) bfin_write16(CAN1_MB30_LENGTH, val) #define bfin_read_CAN1_MB30_TIMESTAMP() bfin_read16(CAN1_MB30_TIMESTAMP) #define bfin_write_CAN1_MB30_TIMESTAMP(val) bfin_write16(CAN1_MB30_TIMESTAMP, val) #define bfin_read_CAN1_MB30_ID0() bfin_read16(CAN1_MB30_ID0) #define bfin_write_CAN1_MB30_ID0(val) bfin_write16(CAN1_MB30_ID0, val) #define bfin_read_CAN1_MB30_ID1() bfin_read16(CAN1_MB30_ID1) #define bfin_write_CAN1_MB30_ID1(val) bfin_write16(CAN1_MB30_ID1, val) #define bfin_read_CAN1_MB31_DATA0() bfin_read16(CAN1_MB31_DATA0) #define bfin_write_CAN1_MB31_DATA0(val) bfin_write16(CAN1_MB31_DATA0, val) #define bfin_read_CAN1_MB31_DATA1() bfin_read16(CAN1_MB31_DATA1) #define bfin_write_CAN1_MB31_DATA1(val) bfin_write16(CAN1_MB31_DATA1, val) #define bfin_read_CAN1_MB31_DATA2() bfin_read16(CAN1_MB31_DATA2) #define bfin_write_CAN1_MB31_DATA2(val) bfin_write16(CAN1_MB31_DATA2, val) #define bfin_read_CAN1_MB31_DATA3() bfin_read16(CAN1_MB31_DATA3) #define bfin_write_CAN1_MB31_DATA3(val) bfin_write16(CAN1_MB31_DATA3, val) #define bfin_read_CAN1_MB31_LENGTH() bfin_read16(CAN1_MB31_LENGTH) #define bfin_write_CAN1_MB31_LENGTH(val) bfin_write16(CAN1_MB31_LENGTH, val) #define bfin_read_CAN1_MB31_TIMESTAMP() bfin_read16(CAN1_MB31_TIMESTAMP) #define bfin_write_CAN1_MB31_TIMESTAMP(val) bfin_write16(CAN1_MB31_TIMESTAMP, val) #define bfin_read_CAN1_MB31_ID0() bfin_read16(CAN1_MB31_ID0) #define bfin_write_CAN1_MB31_ID0(val) bfin_write16(CAN1_MB31_ID0, val) #define bfin_read_CAN1_MB31_ID1() bfin_read16(CAN1_MB31_ID1) #define bfin_write_CAN1_MB31_ID1(val) bfin_write16(CAN1_MB31_ID1, val) #endif /* _CDEF_BF548_H */
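The accessor macros above are the tail of the BF548 memory-mapped-register header: each one only wraps a 16-bit MMR read or write, so CAN1 mailboxes 16-31 get exactly the same treatment as 0-15. As a minimal usage sketch, the hypothetical helper below drains the payload of CAN1 mailbox 4 the way a receive path might; the function name, the high-byte-first unpacking, and the DLC clamp are illustrative assumptions, not taken from the Blackfin CAN documentation.

/*
 * Hypothetical sketch (not from the kernel tree): read the DLC and
 * payload of CAN1 mailbox 4 using the accessors defined above.  Each
 * DATAn register carries two payload bytes; LENGTH holds the DLC.
 */
static int can1_mb04_read_payload(unsigned char *buf)
{
        unsigned short data[4];
        unsigned int dlc = bfin_read_CAN1_MB04_LENGTH() & 0xf;
        unsigned int i;

        if (dlc > 8)            /* a classic CAN frame carries at most 8 bytes */
                dlc = 8;

        data[0] = bfin_read_CAN1_MB04_DATA0();
        data[1] = bfin_read_CAN1_MB04_DATA1();
        data[2] = bfin_read_CAN1_MB04_DATA2();
        data[3] = bfin_read_CAN1_MB04_DATA3();

        /* unpack two payload bytes per 16-bit register (assumed order) */
        for (i = 0; i < dlc; i++)
                buf[i] = (data[i / 2] >> ((i & 1) ? 0 : 8)) & 0xff;

        return dlc;
}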
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 88,026
file_name: 9,728
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 174,723
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * linux/arch/arm/mach-pxa/cm-x300.c * * Support for the CompuLab CM-X300 modules * * Copyright (C) 2008,2009 CompuLab Ltd. * * Mike Rapoport <mike@compulab.co.il> * Igor Grinberg <grinberg@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/module.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/dm9000.h> #include <linux/leds.h> #include <linux/platform_data/rtc-v3020.h> #include <linux/pwm.h> #include <linux/pwm_backlight.h> #include <linux/i2c.h> #include <linux/platform_data/pca953x.h> #include <linux/i2c/pxa-i2c.h> #include <linux/mfd/da903x.h> #include <linux/regulator/machine.h> #include <linux/power_supply.h> #include <linux/apm-emulation.h> #include <linux/spi/spi.h> #include <linux/spi/spi_gpio.h> #include <linux/spi/tdo24m.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/setup.h> #include <asm/system_info.h> #include "pxa300.h" #include "pxa27x-udc.h" #include <linux/platform_data/video-pxafb.h> #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/mtd-nand-pxa3xx.h> #include <mach/audio.h> #include <linux/platform_data/usb-pxa3xx-ulpi.h> #include <asm/mach/map.h> #include "generic.h" #include "devices.h" #define CM_X300_ETH_PHYS 0x08000010 #define GPIO82_MMC_IRQ (82) #define GPIO85_MMC_WP (85) #define CM_X300_MMC_IRQ PXA_GPIO_TO_IRQ(GPIO82_MMC_IRQ) #define GPIO95_RTC_CS (95) #define GPIO96_RTC_WR (96) #define GPIO97_RTC_RD (97) #define GPIO98_RTC_IO (98) #define GPIO_ULPI_PHY_RST (127) static mfp_cfg_t cm_x3xx_mfp_cfg[] __initdata = { /* LCD */ GPIO54_LCD_LDD_0, GPIO55_LCD_LDD_1, GPIO56_LCD_LDD_2, GPIO57_LCD_LDD_3, GPIO58_LCD_LDD_4, GPIO59_LCD_LDD_5, GPIO60_LCD_LDD_6, GPIO61_LCD_LDD_7, GPIO62_LCD_LDD_8, GPIO63_LCD_LDD_9, GPIO64_LCD_LDD_10, GPIO65_LCD_LDD_11, GPIO66_LCD_LDD_12, GPIO67_LCD_LDD_13, GPIO68_LCD_LDD_14, GPIO69_LCD_LDD_15, GPIO72_LCD_FCLK, GPIO73_LCD_LCLK, GPIO74_LCD_PCLK, GPIO75_LCD_BIAS, /* BTUART */ GPIO111_UART2_RTS, GPIO112_UART2_RXD | MFP_LPM_EDGE_FALL, GPIO113_UART2_TXD, GPIO114_UART2_CTS | MFP_LPM_EDGE_BOTH, /* STUART */ GPIO109_UART3_TXD, GPIO110_UART3_RXD | MFP_LPM_EDGE_FALL, /* AC97 */ GPIO23_AC97_nACRESET, GPIO24_AC97_SYSCLK, GPIO29_AC97_BITCLK, GPIO25_AC97_SDATA_IN_0, GPIO27_AC97_SDATA_OUT, GPIO28_AC97_SYNC, /* Keypad */ GPIO115_KP_MKIN_0 | MFP_LPM_EDGE_BOTH, GPIO116_KP_MKIN_1 | MFP_LPM_EDGE_BOTH, GPIO117_KP_MKIN_2 | MFP_LPM_EDGE_BOTH, GPIO118_KP_MKIN_3 | MFP_LPM_EDGE_BOTH, GPIO119_KP_MKIN_4 | MFP_LPM_EDGE_BOTH, GPIO120_KP_MKIN_5 | MFP_LPM_EDGE_BOTH, GPIO2_2_KP_MKIN_6 | MFP_LPM_EDGE_BOTH, GPIO3_2_KP_MKIN_7 | MFP_LPM_EDGE_BOTH, GPIO121_KP_MKOUT_0, GPIO122_KP_MKOUT_1, GPIO123_KP_MKOUT_2, GPIO124_KP_MKOUT_3, GPIO125_KP_MKOUT_4, GPIO4_2_KP_MKOUT_5, /* MMC1 */ GPIO3_MMC1_DAT0, GPIO4_MMC1_DAT1 | MFP_LPM_EDGE_BOTH, GPIO5_MMC1_DAT2, GPIO6_MMC1_DAT3, GPIO7_MMC1_CLK, GPIO8_MMC1_CMD, /* CMD0 for slot 0 */ /* MMC2 */ GPIO9_MMC2_DAT0, GPIO10_MMC2_DAT1 | MFP_LPM_EDGE_BOTH, GPIO11_MMC2_DAT2, GPIO12_MMC2_DAT3, GPIO13_MMC2_CLK, GPIO14_MMC2_CMD, /* FFUART */ GPIO30_UART1_RXD | MFP_LPM_EDGE_FALL, GPIO31_UART1_TXD, GPIO32_UART1_CTS, GPIO37_UART1_RTS, GPIO33_UART1_DCD, GPIO34_UART1_DSR | MFP_LPM_EDGE_FALL, GPIO35_UART1_RI, 
GPIO36_UART1_DTR, /* GPIOs */ GPIO82_GPIO | MFP_PULL_HIGH, /* MMC CD */ GPIO85_GPIO, /* MMC WP */ GPIO99_GPIO, /* Ethernet IRQ */ /* RTC GPIOs */ GPIO95_GPIO | MFP_LPM_DRIVE_HIGH, /* RTC CS */ GPIO96_GPIO | MFP_LPM_DRIVE_HIGH, /* RTC WR */ GPIO97_GPIO | MFP_LPM_DRIVE_HIGH, /* RTC RD */ GPIO98_GPIO, /* RTC IO */ /* Standard I2C */ GPIO21_I2C_SCL, GPIO22_I2C_SDA, /* PWM Backlight */ GPIO19_PWM2_OUT, }; static mfp_cfg_t cm_x3xx_rev_lt130_mfp_cfg[] __initdata = { /* GPIOs */ GPIO79_GPIO, /* LED */ GPIO77_GPIO, /* WiFi reset */ GPIO78_GPIO, /* BT reset */ }; static mfp_cfg_t cm_x3xx_rev_ge130_mfp_cfg[] __initdata = { /* GPIOs */ GPIO76_GPIO, /* LED */ GPIO71_GPIO, /* WiFi reset */ GPIO70_GPIO, /* BT reset */ }; static mfp_cfg_t cm_x310_mfp_cfg[] __initdata = { /* USB PORT 2 */ ULPI_STP, ULPI_NXT, ULPI_DIR, GPIO30_ULPI_DATA_OUT_0, GPIO31_ULPI_DATA_OUT_1, GPIO32_ULPI_DATA_OUT_2, GPIO33_ULPI_DATA_OUT_3, GPIO34_ULPI_DATA_OUT_4, GPIO35_ULPI_DATA_OUT_5, GPIO36_ULPI_DATA_OUT_6, GPIO37_ULPI_DATA_OUT_7, GPIO38_ULPI_CLK, /* external PHY reset pin */ GPIO127_GPIO, /* USB PORT 3 */ GPIO77_USB_P3_1, GPIO78_USB_P3_2, GPIO79_USB_P3_3, GPIO80_USB_P3_4, GPIO81_USB_P3_5, GPIO82_USB_P3_6, GPIO0_2_USBH_PEN, }; #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) static struct resource dm9000_resources[] = { [0] = { .start = CM_X300_ETH_PHYS, .end = CM_X300_ETH_PHYS + 0x3, .flags = IORESOURCE_MEM, }, [1] = { .start = CM_X300_ETH_PHYS + 0x4, .end = CM_X300_ETH_PHYS + 0x4 + 500, .flags = IORESOURCE_MEM, }, [2] = { .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO99)), .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO99)), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct dm9000_plat_data cm_x300_dm9000_platdata = { .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM, }; static struct platform_device dm9000_device = { .name = "dm9000", .id = 0, .num_resources = ARRAY_SIZE(dm9000_resources), .resource = dm9000_resources, .dev = { .platform_data = &cm_x300_dm9000_platdata, } }; static void __init cm_x300_init_dm9000(void) { platform_device_register(&dm9000_device); } #else static inline void cm_x300_init_dm9000(void) {} #endif /* LCD */ #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct pxafb_mode_info cm_x300_lcd_modes[] = { [0] = { .pixclock = 38250, .bpp = 16, .xres = 480, .yres = 640, .hsync_len = 8, .vsync_len = 2, .left_margin = 8, .upper_margin = 2, .right_margin = 24, .lower_margin = 4, .cmap_greyscale = 0, }, [1] = { .pixclock = 153800, .bpp = 16, .xres = 240, .yres = 320, .hsync_len = 8, .vsync_len = 2, .left_margin = 8, .upper_margin = 2, .right_margin = 88, .lower_margin = 2, .cmap_greyscale = 0, }, }; static struct pxafb_mach_info cm_x300_lcd = { .modes = cm_x300_lcd_modes, .num_modes = ARRAY_SIZE(cm_x300_lcd_modes), .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, }; static void __init cm_x300_init_lcd(void) { pxa_set_fb_info(NULL, &cm_x300_lcd); } #else static inline void cm_x300_init_lcd(void) {} #endif #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) static struct pwm_lookup cm_x300_pwm_lookup[] = { PWM_LOOKUP("pxa27x-pwm.0", 1, "pwm-backlight.0", NULL, 10000, PWM_POLARITY_NORMAL), }; static struct platform_pwm_backlight_data cm_x300_backlight_data = { .max_brightness = 100, .dft_brightness = 100, .enable_gpio = -1, }; static struct platform_device cm_x300_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &cm_x300_backlight_data, }, }; static void 
cm_x300_init_bl(void) { pwm_add_table(cm_x300_pwm_lookup, ARRAY_SIZE(cm_x300_pwm_lookup)); platform_device_register(&cm_x300_backlight_device); } #else static inline void cm_x300_init_bl(void) {} #endif #if defined(CONFIG_SPI_GPIO) || defined(CONFIG_SPI_GPIO_MODULE) #define GPIO_LCD_BASE (144) #define GPIO_LCD_DIN (GPIO_LCD_BASE + 8) /* aux_gpio3_0 */ #define GPIO_LCD_DOUT (GPIO_LCD_BASE + 9) /* aux_gpio3_1 */ #define GPIO_LCD_SCL (GPIO_LCD_BASE + 10) /* aux_gpio3_2 */ #define GPIO_LCD_CS (GPIO_LCD_BASE + 11) /* aux_gpio3_3 */ #define LCD_SPI_BUS_NUM (1) static struct spi_gpio_platform_data cm_x300_spi_gpio_pdata = { .sck = GPIO_LCD_SCL, .mosi = GPIO_LCD_DIN, .miso = GPIO_LCD_DOUT, .num_chipselect = 1, }; static struct platform_device cm_x300_spi_gpio = { .name = "spi_gpio", .id = LCD_SPI_BUS_NUM, .dev = { .platform_data = &cm_x300_spi_gpio_pdata, }, }; static struct tdo24m_platform_data cm_x300_tdo24m_pdata = { .model = TDO35S, }; static struct spi_board_info cm_x300_spi_devices[] __initdata = { { .modalias = "tdo24m", .max_speed_hz = 1000000, .bus_num = LCD_SPI_BUS_NUM, .chip_select = 0, .controller_data = (void *) GPIO_LCD_CS, .platform_data = &cm_x300_tdo24m_pdata, }, }; static void __init cm_x300_init_spi(void) { spi_register_board_info(cm_x300_spi_devices, ARRAY_SIZE(cm_x300_spi_devices)); platform_device_register(&cm_x300_spi_gpio); } #else static inline void cm_x300_init_spi(void) {} #endif #if defined(CONFIG_SND_PXA2XX_LIB_AC97) static void __init cm_x300_init_ac97(void) { pxa_set_ac97_info(NULL); } #else static inline void cm_x300_init_ac97(void) {} #endif #if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE) static struct mtd_partition cm_x300_nand_partitions[] = { [0] = { .name = "OBM", .offset = 0, .size = SZ_256K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, [1] = { .name = "U-Boot", .offset = MTDPART_OFS_APPEND, .size = SZ_256K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, [2] = { .name = "Environment", .offset = MTDPART_OFS_APPEND, .size = SZ_256K, }, [3] = { .name = "reserved", .offset = MTDPART_OFS_APPEND, .size = SZ_256K + SZ_1M, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, [4] = { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_4M, }, [5] = { .name = "fs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct pxa3xx_nand_platform_data cm_x300_nand_info = { .enable_arbiter = 1, .keep_config = 1, .num_cs = 1, .parts[0] = cm_x300_nand_partitions, .nr_parts[0] = ARRAY_SIZE(cm_x300_nand_partitions), }; static void __init cm_x300_init_nand(void) { pxa3xx_set_nand_info(&cm_x300_nand_info); } #else static inline void cm_x300_init_nand(void) {} #endif #if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE) static struct pxamci_platform_data cm_x300_mci_platform_data = { .detect_delay_ms = 200, .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .gpio_card_detect = GPIO82_MMC_IRQ, .gpio_card_ro = GPIO85_MMC_WP, .gpio_power = -1, }; /* The second MMC slot of CM-X300 is hardwired to Libertas card and has no detection/ro pins */ static int cm_x300_mci2_init(struct device *dev, irq_handler_t cm_x300_detect_int, void *data) { return 0; } static void cm_x300_mci2_exit(struct device *dev, void *data) { } static struct pxamci_platform_data cm_x300_mci2_platform_data = { .detect_delay_ms = 200, .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .init = cm_x300_mci2_init, .exit = cm_x300_mci2_exit, .gpio_card_detect = -1, .gpio_card_ro = -1, .gpio_power = -1, }; static void __init cm_x300_init_mmc(void) { 
pxa_set_mci_info(&cm_x300_mci_platform_data); pxa3xx_set_mci2_info(&cm_x300_mci2_platform_data); } #else static inline void cm_x300_init_mmc(void) {} #endif #if defined(CONFIG_PXA310_ULPI) static struct clk *pout_clk; static int cm_x300_ulpi_phy_reset(void) { int err; /* reset the PHY */ err = gpio_request_one(GPIO_ULPI_PHY_RST, GPIOF_OUT_INIT_LOW, "ulpi reset"); if (err) { pr_err("failed to request ULPI reset GPIO: %d\n", err); return err; } msleep(10); gpio_set_value(GPIO_ULPI_PHY_RST, 1); msleep(10); gpio_free(GPIO_ULPI_PHY_RST); return 0; } static inline int cm_x300_u2d_init(struct device *dev) { int err = 0; if (cpu_is_pxa310()) { /* CLK_POUT is connected to the ULPI PHY */ pout_clk = clk_get(NULL, "CLK_POUT"); if (IS_ERR(pout_clk)) { err = PTR_ERR(pout_clk); pr_err("failed to get CLK_POUT: %d\n", err); return err; } clk_enable(pout_clk); err = cm_x300_ulpi_phy_reset(); if (err) { clk_disable(pout_clk); clk_put(pout_clk); } } return err; } static void cm_x300_u2d_exit(struct device *dev) { if (cpu_is_pxa310()) { clk_disable(pout_clk); clk_put(pout_clk); } } static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = { .ulpi_mode = ULPI_SER_6PIN, .init = cm_x300_u2d_init, .exit = cm_x300_u2d_exit, }; static void cm_x300_init_u2d(void) { pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data); } #else static inline void cm_x300_init_u2d(void) {} #endif #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static int cm_x300_ohci_init(struct device *dev) { if (cpu_is_pxa300()) UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DMPDE | UP2OCR_DPPDE; return 0; } static struct pxaohci_platform_data cm_x300_ohci_platform_data = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW, .init = cm_x300_ohci_init, }; static void __init cm_x300_init_ohci(void) { pxa_set_ohci_info(&cm_x300_ohci_platform_data); } #else static inline void cm_x300_init_ohci(void) {} #endif #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) static struct gpio_led cm_x300_leds[] = { [0] = { .name = "cm-x300:green", .default_trigger = "heartbeat", .active_low = 1, }, }; static struct gpio_led_platform_data cm_x300_gpio_led_pdata = { .num_leds = ARRAY_SIZE(cm_x300_leds), .leds = cm_x300_leds, }; static struct platform_device cm_x300_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &cm_x300_gpio_led_pdata, }, }; static void __init cm_x300_init_leds(void) { if (system_rev < 130) cm_x300_leds[0].gpio = 79; else cm_x300_leds[0].gpio = 76; platform_device_register(&cm_x300_led_device); } #else static inline void cm_x300_init_leds(void) {} #endif #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) /* PCA9555 */ static struct pca953x_platform_data cm_x300_gpio_ext_pdata_0 = { .gpio_base = 128, }; static struct pca953x_platform_data cm_x300_gpio_ext_pdata_1 = { .gpio_base = 144, }; static struct i2c_board_info cm_x300_gpio_ext_info[] = { [0] = { I2C_BOARD_INFO("pca9555", 0x24), .platform_data = &cm_x300_gpio_ext_pdata_0, }, [1] = { I2C_BOARD_INFO("pca9555", 0x25), .platform_data = &cm_x300_gpio_ext_pdata_1, }, }; static void __init cm_x300_init_i2c(void) { pxa_set_i2c_info(NULL); i2c_register_board_info(0, cm_x300_gpio_ext_info, ARRAY_SIZE(cm_x300_gpio_ext_info)); } #else static inline void cm_x300_init_i2c(void) {} #endif #if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE) struct v3020_platform_data cm_x300_v3020_pdata = { .use_gpio = 1, .gpio_cs = GPIO95_RTC_CS, .gpio_wr = GPIO96_RTC_WR, .gpio_rd = GPIO97_RTC_RD, .gpio_io = 
GPIO98_RTC_IO, }; static struct platform_device cm_x300_rtc_device = { .name = "v3020", .id = -1, .dev = { .platform_data = &cm_x300_v3020_pdata, } }; static void __init cm_x300_init_rtc(void) { platform_device_register(&cm_x300_rtc_device); } #else static inline void cm_x300_init_rtc(void) {} #endif /* Battery */ struct power_supply_info cm_x300_psy_info = { .name = "battery", .technology = POWER_SUPPLY_TECHNOLOGY_LIPO, .voltage_max_design = 4200000, .voltage_min_design = 3000000, .use_for_apm = 1, }; static void cm_x300_battery_low(void) { #if defined(CONFIG_APM_EMULATION) apm_queue_event(APM_LOW_BATTERY); #endif } static void cm_x300_battery_critical(void) { #if defined(CONFIG_APM_EMULATION) apm_queue_event(APM_CRITICAL_SUSPEND); #endif } struct da9030_battery_info cm_x300_battery_info = { .battery_info = &cm_x300_psy_info, .charge_milliamp = 1000, .charge_millivolt = 4200, .vbat_low = 3600, .vbat_crit = 3400, .vbat_charge_start = 4100, .vbat_charge_stop = 4200, .vbat_charge_restart = 4000, .vcharge_min = 3200, .vcharge_max = 5500, .tbat_low = 197, .tbat_high = 78, .tbat_restart = 100, .batmon_interval = 0, .battery_low = cm_x300_battery_low, .battery_critical = cm_x300_battery_critical, }; static struct regulator_consumer_supply buck2_consumers[] = { REGULATOR_SUPPLY("vcc_core", NULL), }; static struct regulator_init_data buck2_data = { .constraints = { .min_uV = 1375000, .max_uV = 1375000, .state_mem = { .enabled = 0, }, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .apply_uV = 1, }, .num_consumer_supplies = ARRAY_SIZE(buck2_consumers), .consumer_supplies = buck2_consumers, }; /* DA9030 */ struct da903x_subdev_info cm_x300_da9030_subdevs[] = { { .name = "da903x-battery", .id = DA9030_ID_BAT, .platform_data = &cm_x300_battery_info, }, { .name = "da903x-regulator", .id = DA9030_ID_BUCK2, .platform_data = &buck2_data, }, }; static struct da903x_platform_data cm_x300_da9030_info = { .num_subdevs = ARRAY_SIZE(cm_x300_da9030_subdevs), .subdevs = cm_x300_da9030_subdevs, }; static struct i2c_board_info cm_x300_pmic_info = { I2C_BOARD_INFO("da9030", 0x49), .irq = IRQ_WAKEUP0, .platform_data = &cm_x300_da9030_info, }; static struct i2c_pxa_platform_data cm_x300_pwr_i2c_info = { .use_pio = 1, }; static void __init cm_x300_init_da9030(void) { pxa3xx_set_i2c_power_info(&cm_x300_pwr_i2c_info); i2c_register_board_info(1, &cm_x300_pmic_info, 1); irq_set_irq_wake(IRQ_WAKEUP0, 1); } /* wi2wi gpio setting for system_rev >= 130 */ static struct gpio cm_x300_wi2wi_gpios[] __initdata = { { 71, GPIOF_OUT_INIT_HIGH, "wlan en" }, { 70, GPIOF_OUT_INIT_HIGH, "bt reset" }, }; static void __init cm_x300_init_wi2wi(void) { int err; if (system_rev < 130) { cm_x300_wi2wi_gpios[0].gpio = 77; /* wlan en */ cm_x300_wi2wi_gpios[1].gpio = 78; /* bt reset */ } /* Libertas and CSR reset */ err = gpio_request_array(ARRAY_AND_SIZE(cm_x300_wi2wi_gpios)); if (err) { pr_err("failed to request wifi/bt gpios: %d\n", err); return; } udelay(10); gpio_set_value(cm_x300_wi2wi_gpios[1].gpio, 0); udelay(10); gpio_set_value(cm_x300_wi2wi_gpios[1].gpio, 1); gpio_free_array(ARRAY_AND_SIZE(cm_x300_wi2wi_gpios)); } /* MFP */ static void __init cm_x300_init_mfp(void) { /* board-processor specific GPIO initialization */ pxa3xx_mfp_config(ARRAY_AND_SIZE(cm_x3xx_mfp_cfg)); if (system_rev < 130) pxa3xx_mfp_config(ARRAY_AND_SIZE(cm_x3xx_rev_lt130_mfp_cfg)); else pxa3xx_mfp_config(ARRAY_AND_SIZE(cm_x3xx_rev_ge130_mfp_cfg)); if (cpu_is_pxa310()) pxa3xx_mfp_config(ARRAY_AND_SIZE(cm_x310_mfp_cfg)); } static void __init cm_x300_init(void) { 
cm_x300_init_mfp(); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); if (cpu_is_pxa300()) pxa_set_ffuart_info(NULL); cm_x300_init_da9030(); cm_x300_init_dm9000(); cm_x300_init_lcd(); cm_x300_init_u2d(); cm_x300_init_ohci(); cm_x300_init_mmc(); cm_x300_init_nand(); cm_x300_init_leds(); cm_x300_init_i2c(); cm_x300_init_spi(); cm_x300_init_rtc(); cm_x300_init_ac97(); cm_x300_init_wi2wi(); cm_x300_init_bl(); regulator_has_full_constraints(); } static void __init cm_x300_fixup(struct tag *tags, char **cmdline) { /* Make sure that mi->bank[0].start = PHYS_ADDR */ for (; tags->hdr.size; tags = tag_next(tags)) if (tags->hdr.tag == ATAG_MEM && tags->u.mem.start == 0x80000000) { tags->u.mem.start = 0xa0000000; break; } } MACHINE_START(CM_X300, "CM-X300 module") .atag_offset = 0x100, .map_io = pxa3xx_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa3xx_init_irq, .handle_irq = pxa3xx_handle_irq, .init_time = pxa_timer_init, .init_machine = cm_x300_init, .fixup = cm_x300_fixup, .restart = pxa_restart, MACHINE_END
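MACHINE_END closes the CM-X300 board description. The file's recurring idiom is worth isolating: every peripheral hook is compiled as a real __init function when its driver is configured, and as an empty inline stub otherwise, so cm_x300_init() can call all of the helpers unconditionally. The sketch below restates that pattern with placeholder names; CONFIG_FOO, foo_device, and cm_x300_init_foo are hypothetical, not an extra device from the real board file.

/*
 * Compile-out pattern used throughout cm-x300.c: register the device
 * when the driver is configured, otherwise leave an empty inline stub
 * so the caller needs no #ifdefs of its own.
 */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
static struct platform_device foo_device = {
        .name   = "foo",
        .id     = -1,
};

static void __init cm_x300_init_foo(void)
{
        platform_device_register(&foo_device);
}
#else
static inline void cm_x300_init_foo(void) {}    /* compiled out */
#endif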
CVE: null
CWE: null
commit_link: null
severity: null
__index_level_0__: 83,070
file_name: 34,799
vulnerable_line_numbers: null
dataset_type: train_val
commit_hash: e4311ee51d1e2676001b2d8fcefd92bdd79aad85
unique_id: 199,794
project: linux
target: 0
repo_url: https://github.com/torvalds/linux
date: 2017-05-12 08:32:58+10:00
/* * Empiatech em28x1 audio extension * * Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com> * * Copyright (C) 2007-2016 Mauro Carvalho Chehab * - Port to work with the in-kernel driver * - Cleanups, fixes, alsa-controls, etc. * * This driver is based on my previous au600 usb pstn audio driver * and inherits all the copyrights * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "em28xx.h" #include <linux/kernel.h> #include <linux/usb.h> #include <linux/init.h> #include <linux/sound.h> #include <linux/spinlock.h> #include <linux/soundcard.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/initval.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/ac97_codec.h> #include <media/v4l2-common.h> static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "activates debug info"); #define EM28XX_MAX_AUDIO_BUFS 5 #define EM28XX_MIN_AUDIO_PACKETS 64 #define dprintk(fmt, arg...) do { \ if (debug) \ dev_printk(KERN_DEBUG, &dev->intf->dev, \ "video: %s: " fmt, __func__, ## arg); \ } while (0) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static int em28xx_deinit_isoc_audio(struct em28xx *dev) { int i; dprintk("Stopping isoc\n"); for (i = 0; i < dev->adev.num_urb; i++) { struct urb *urb = dev->adev.urb[i]; if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); } return 0; } static void em28xx_audio_isocirq(struct urb *urb) { struct em28xx *dev = urb->context; int i; unsigned int oldptr; int period_elapsed = 0; int status; unsigned char *cp; unsigned int stride; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; if (dev->disconnected) { dprintk("device disconnected while streaming. 
URB status=%d.\n", urb->status); atomic_set(&dev->adev.stream_started, 0); return; } switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dprintk("urb completition error %d.\n", urb->status); break; } if (atomic_read(&dev->adev.stream_started) == 0) return; if (dev->adev.capture_pcm_substream) { substream = dev->adev.capture_pcm_substream; runtime = substream->runtime; stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { int length = urb->iso_frame_desc[i].actual_length / stride; cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (!length) continue; oldptr = dev->adev.hwptr_done_capture; if (oldptr + length >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride); memcpy(runtime->dma_area, cp + cnt * stride, length * stride - cnt * stride); } else { memcpy(runtime->dma_area + oldptr * stride, cp, length * stride); } snd_pcm_stream_lock(substream); dev->adev.hwptr_done_capture += length; if (dev->adev.hwptr_done_capture >= runtime->buffer_size) dev->adev.hwptr_done_capture -= runtime->buffer_size; dev->adev.capture_transfer_done += length; if (dev->adev.capture_transfer_done >= runtime->period_size) { dev->adev.capture_transfer_done -= runtime->period_size; period_elapsed = 1; } snd_pcm_stream_unlock(substream); } if (period_elapsed) snd_pcm_period_elapsed(substream); } urb->status = 0; status = usb_submit_urb(urb, GFP_ATOMIC); if (status < 0) dev_err(&dev->intf->dev, "resubmit of audio urb failed (error=%i)\n", status); return; } static int em28xx_init_audio_isoc(struct em28xx *dev) { int i, errCode; dprintk("Starting isoc transfers\n"); /* Start streaming */ for (i = 0; i < dev->adev.num_urb; i++) { memset(dev->adev.transfer_buffer[i], 0x80, dev->adev.urb[i]->transfer_buffer_length); errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC); if (errCode) { dev_err(&dev->intf->dev, "submit of audio urb failed (error=%i)\n", errCode); em28xx_deinit_isoc_audio(dev); atomic_set(&dev->adev.stream_started, 0); return errCode; } } return 0; } static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t size) { struct em28xx *dev = snd_pcm_substream_chip(subs); struct snd_pcm_runtime *runtime = subs->runtime; dprintk("Allocating vbuffer\n"); if (runtime->dma_area) { if (runtime->dma_bytes > size) return 0; vfree(runtime->dma_area); } runtime->dma_area = vmalloc(size); if (!runtime->dma_area) return -ENOMEM; runtime->dma_bytes = size; return 0; } static struct snd_pcm_hardware snd_em28xx_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */ /* * The period is 12.288 bytes. Allow a 10% of variation along its * value, in order to avoid overruns/underruns due to some clock * drift. * * FIXME: This period assumes 64 packets, and a 48000 PCM rate. * Calculate it dynamically. 
*/ .period_bytes_min = 11059, .period_bytes_max = 13516, .periods_min = 2, .periods_max = 98, /* 12544, */ }; static int snd_em28xx_capture_open(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int nonblock, ret = 0; if (!dev) { pr_err("em28xx-audio: BUG: em28xx can't find device struct. Can't proceed with open\n"); return -ENODEV; } if (dev->disconnected) return -ENODEV; dprintk("opening device and trying to acquire exclusive lock\n"); nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); runtime->hw = snd_em28xx_hw_capture; if (dev->adev.users == 0) { if (dev->alt == 0 || dev->is_audio_only) { struct usb_device *udev = interface_to_usbdev(dev->intf); if (dev->is_audio_only) /* audio is on a separate interface */ dev->alt = 1; else /* audio is on the same interface as video */ dev->alt = 7; /* * FIXME: The intention seems to be to select * the alt setting with the largest * wMaxPacketSize for the video endpoint. * At least dev->alt should be used instead, but * we should probably not touch it at all if it * is already >0, because wMaxPacketSize of the * audio endpoints seems to be the same for all. */ dprintk("changing alternate number on interface %d to %d\n", dev->ifnum, dev->alt); usb_set_interface(udev, dev->ifnum, dev->alt); } /* Sets volume, mute, etc */ dev->mute = 0; ret = em28xx_audio_analog_set(dev); if (ret < 0) goto err; } kref_get(&dev->ref); dev->adev.users++; mutex_unlock(&dev->lock); /* Dynamically adjust the period size */ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, dev->adev.period * 95 / 100, dev->adev.period * 105 / 100); dev->adev.capture_pcm_substream = substream; return 0; err: mutex_unlock(&dev->lock); dev_err(&dev->intf->dev, "Error while configuring em28xx mixer\n"); return ret; } static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); dprintk("closing device\n"); dev->mute = 1; mutex_lock(&dev->lock); dev->adev.users--; if (atomic_read(&dev->adev.stream_started) > 0) { atomic_set(&dev->adev.stream_started, 0); schedule_work(&dev->adev.wq_trigger); } em28xx_audio_analog_set(dev); if (substream->runtime->dma_area) { dprintk("freeing\n"); vfree(substream->runtime->dma_area); substream->runtime->dma_area = NULL; } mutex_unlock(&dev->lock); kref_put(&dev->ref, em28xx_free_device); return 0; } static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int ret; struct em28xx *dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return -ENODEV; dprintk("Setting capture parameters\n"); ret = snd_pcm_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (ret < 0) return ret; #if 0 /* TODO: set up em28xx audio chip to deliver the correct audio format, current default is 48000hz multiplexed => 96000hz mono which shouldn't matter since analogue TV only supports mono */ unsigned int channels, rate, format; format = params_format(hw_params); rate = params_rate(hw_params); channels = params_channels(hw_params); #endif return 0; } static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); struct em28xx_audio *adev = &dev->adev; dprintk("Stop capture, if needed\n"); if 
(atomic_read(&adev->stream_started) > 0) { atomic_set(&adev->stream_started, 0); schedule_work(&adev->wq_trigger); } return 0; } static int snd_em28xx_prepare(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return -ENODEV; dev->adev.hwptr_done_capture = 0; dev->adev.capture_transfer_done = 0; return 0; } static void audio_trigger(struct work_struct *work) { struct em28xx_audio *adev = container_of(work, struct em28xx_audio, wq_trigger); struct em28xx *dev = container_of(adev, struct em28xx, adev); if (atomic_read(&adev->stream_started)) { dprintk("starting capture"); em28xx_init_audio_isoc(dev); } else { dprintk("stopping capture"); em28xx_deinit_isoc_audio(dev); } } static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct em28xx *dev = snd_pcm_substream_chip(substream); int retval = 0; if (dev->disconnected) return -ENODEV; switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */ case SNDRV_PCM_TRIGGER_START: atomic_set(&dev->adev.stream_started, 1); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */ case SNDRV_PCM_TRIGGER_STOP: atomic_set(&dev->adev.stream_started, 0); break; default: retval = -EINVAL; } schedule_work(&dev->adev.wq_trigger); return retval; } static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream *substream) { unsigned long flags; struct em28xx *dev; snd_pcm_uframes_t hwptr_done; dev = snd_pcm_substream_chip(substream); if (dev->disconnected) return SNDRV_PCM_POS_XRUN; spin_lock_irqsave(&dev->adev.slock, flags); hwptr_done = dev->adev.hwptr_done_capture; spin_unlock_irqrestore(&dev->adev.slock, flags); return hwptr_done; } static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs, unsigned long offset) { void *pageptr = subs->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * AC97 volume control support */ static int em28xx_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); if (dev->disconnected) return -ENODEV; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0; info->value.integer.max = 0x1f; return 0; } static int em28xx_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) | (0x1f - (value->value.integer.value[1] & 0x1f)) << 8; int nonblock = 0; int rc; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; val |= rc & 0x8000; /* Preserve the mute flag */ rc = em28xx_write_ac97(dev, kcontrol->private_value, val); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? 
"muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int val; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); value->value.integer.value[0] = 0x1f - (val & 0x1f); value->value.integer.value[1] = 0x1f - ((val << 8) & 0x1f); return 0; } static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); u16 val = value->value.integer.value[0]; struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int rc; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; if (val) rc &= 0x1f1f; else rc |= 0x8000; rc = em28xx_write_ac97(dev, kcontrol->private_value, rc); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); struct snd_pcm_substream *substream = dev->adev.capture_pcm_substream; int nonblock = 0; int val; if (dev->disconnected) return -ENODEV; if (substream) nonblock = !!(substream->f_flags & O_NONBLOCK); if (nonblock) { if (!mutex_trylock(&dev->lock)) return -EAGAIN; } else mutex_lock(&dev->lock); val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; if (val & 0x8000) value->value.integer.value[0] = 0; else value->value.integer.value[0] = 1; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? 
"muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); return 0; } static const DECLARE_TLV_DB_SCALE(em28xx_db_scale, -3450, 150, 0); static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev, char *name, int id) { int err; char ctl_name[44]; struct snd_kcontrol *kctl; struct snd_kcontrol_new tmp; memset(&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, tmp.private_value = id, tmp.name = ctl_name, /* Add Mute Control */ sprintf(ctl_name, "%s Switch", name); tmp.get = em28xx_vol_get_mute; tmp.put = em28xx_vol_put_mute; tmp.info = snd_ctl_boolean_mono_info; kctl = snd_ctl_new1(&tmp, dev); err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); memset(&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, tmp.private_value = id, tmp.name = ctl_name, /* Add Volume Control */ sprintf(ctl_name, "%s Volume", name); tmp.get = em28xx_vol_get; tmp.put = em28xx_vol_put; tmp.info = em28xx_vol_info; tmp.tlv.p = em28xx_db_scale, kctl = snd_ctl_new1(&tmp, dev); err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); return 0; } /* * register/unregister code and data */ static const struct snd_pcm_ops snd_em28xx_pcm_capture = { .open = snd_em28xx_capture_open, .close = snd_em28xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_em28xx_hw_capture_params, .hw_free = snd_em28xx_hw_capture_free, .prepare = snd_em28xx_prepare, .trigger = snd_em28xx_capture_trigger, .pointer = snd_em28xx_capture_pointer, .page = snd_pcm_get_vmalloc_page, }; static void em28xx_audio_free_urb(struct em28xx *dev) { struct usb_device *udev = interface_to_usbdev(dev->intf); int i; for (i = 0; i < dev->adev.num_urb; i++) { struct urb *urb = dev->adev.urb[i]; if (!urb) continue; usb_free_coherent(udev, urb->transfer_buffer_length, dev->adev.transfer_buffer[i], urb->transfer_dma); usb_free_urb(urb); } kfree(dev->adev.urb); kfree(dev->adev.transfer_buffer); dev->adev.num_urb = 0; } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ static int em28xx_audio_ep_packet_size(struct usb_device *udev, struct usb_endpoint_descriptor *e) { int size = le16_to_cpu(e->wMaxPacketSize); if (udev->speed == USB_SPEED_HIGH) return (size & 0x7ff) * (1 + (((size) >> 11) & 0x03)); return size & 0x7ff; } static int em28xx_audio_urb_init(struct em28xx *dev) { struct usb_interface *intf; struct usb_endpoint_descriptor *e, *ep = NULL; struct usb_device *udev = interface_to_usbdev(dev->intf); int i, ep_size, interval, num_urb, npackets; int urb_size, bytes_per_transfer; u8 alt; if (dev->ifnum) alt = 1; else alt = 7; intf = usb_ifnum_to_if(udev, dev->ifnum); if (intf->num_altsetting <= alt) { dev_err(&dev->intf->dev, "alt %d doesn't exist on interface %d\n", dev->ifnum, alt); return -ENODEV; } for (i = 0; i < intf->altsetting[alt].desc.bNumEndpoints; i++) { e = &intf->altsetting[alt].endpoint[i].desc; if (!usb_endpoint_dir_in(e)) continue; if (e->bEndpointAddress == EM28XX_EP_AUDIO) { ep = e; break; } } if (!ep) { dev_err(&dev->intf->dev, "Couldn't find an audio endpoint"); return -ENODEV; } ep_size = em28xx_audio_ep_packet_size(udev, ep); interval = 1 << (ep->bInterval - 1); dev_info(&dev->intf->dev, "Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n", EM28XX_EP_AUDIO, usb_speed_string(udev->speed), dev->ifnum, alt, interval, ep_size); /* Calculate the number and size of URBs to better fit 
the audio samples */ /* * Estimate the number of bytes per DMA transfer. * * This is given by the bit rate (for now, only 48000 Hz) multiplied * by 2 channels and 2 bytes/sample divided by the number of microframe * intervals and by the microframe rate (125 us) */ bytes_per_transfer = DIV_ROUND_UP(48000 * 2 * 2, 125 * interval); /* * Estimate the number of transfer URBs. Don't let it go past the * maximum number of URBs that is known to be supported by the device. */ num_urb = DIV_ROUND_UP(bytes_per_transfer, ep_size); if (num_urb > EM28XX_MAX_AUDIO_BUFS) num_urb = EM28XX_MAX_AUDIO_BUFS; /* * Now that we know the number of bytes per transfer and the number of * URBs, estimate the typical size of an URB, in order to adjust the * minimal number of packets. */ urb_size = bytes_per_transfer / num_urb; /* * Now, calculate the amount of audio packets to be filled on each * URB. In order to preserve the old behaviour, use a minimal * threshold for this value. */ npackets = EM28XX_MIN_AUDIO_PACKETS; if (urb_size > ep_size * npackets) npackets = DIV_ROUND_UP(urb_size, ep_size); dev_info(&dev->intf->dev, "Number of URBs: %d, with %d packets and %d size\n", num_urb, npackets, urb_size); /* Estimate the bytes per period */ dev->adev.period = urb_size * npackets; /* Allocate space to store the number of URBs to be used */ dev->adev.transfer_buffer = kcalloc(num_urb, sizeof(*dev->adev.transfer_buffer), GFP_ATOMIC); if (!dev->adev.transfer_buffer) { return -ENOMEM; } dev->adev.urb = kcalloc(num_urb, sizeof(*dev->adev.urb), GFP_ATOMIC); if (!dev->adev.urb) { kfree(dev->adev.transfer_buffer); return -ENOMEM; } /* Alloc memory for each URB and for each transfer buffer */ dev->adev.num_urb = num_urb; for (i = 0; i < num_urb; i++) { struct urb *urb; int j, k; void *buf; urb = usb_alloc_urb(npackets, GFP_ATOMIC); if (!urb) { em28xx_audio_free_urb(dev); return -ENOMEM; } dev->adev.urb[i] = urb; buf = usb_alloc_coherent(udev, npackets * ep_size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { dev_err(&dev->intf->dev, "usb_alloc_coherent failed!\n"); em28xx_audio_free_urb(dev); return -ENOMEM; } dev->adev.transfer_buffer[i] = buf; urb->dev = udev; urb->context = dev; urb->pipe = usb_rcvisocpipe(udev, EM28XX_EP_AUDIO); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->transfer_buffer = buf; urb->interval = interval; urb->complete = em28xx_audio_isocirq; urb->number_of_packets = npackets; urb->transfer_buffer_length = ep_size * npackets; for (j = k = 0; j < npackets; j++, k += ep_size) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = ep_size; } } return 0; } static int em28xx_audio_init(struct em28xx *dev) { struct em28xx_audio *adev = &dev->adev; struct usb_device *udev = interface_to_usbdev(dev->intf); struct snd_pcm *pcm; struct snd_card *card; static int devnr; int err; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) { /* This device does not support the extension (in this case the device is expecting the snd-usb-audio module or doesn't have analog audio support at all) */ return 0; } dev_info(&dev->intf->dev, "Binding audio extension\n"); kref_get(&dev->ref); dev_info(&dev->intf->dev, "em28xx-audio.c: Copyright (C) 2006 Markus Rechberger\n"); dev_info(&dev->intf->dev, "em28xx-audio.c: Copyright (C) 2007-2016 Mauro Carvalho Chehab\n"); err = snd_card_new(&dev->intf->dev, index[devnr], "Em28xx Audio", THIS_MODULE, 0, &card); if (err < 0) return err; spin_lock_init(&adev->slock); adev->sndcard = card; adev->udev = udev; err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm); 
if (err < 0) goto card_free; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture); pcm->info_flags = 0; pcm->private_data = dev; strcpy(pcm->name, "Empia 28xx Capture"); strcpy(card->driver, "Em28xx-Audio"); strcpy(card->shortname, "Em28xx Audio"); strcpy(card->longname, "Empia Em28xx Audio"); INIT_WORK(&adev->wq_trigger, audio_trigger); if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { em28xx_cvol_new(card, dev, "Video", AC97_VIDEO); em28xx_cvol_new(card, dev, "Line In", AC97_LINE); em28xx_cvol_new(card, dev, "Phone", AC97_PHONE); em28xx_cvol_new(card, dev, "Microphone", AC97_MIC); em28xx_cvol_new(card, dev, "CD", AC97_CD); em28xx_cvol_new(card, dev, "AUX", AC97_AUX); em28xx_cvol_new(card, dev, "PCM", AC97_PCM); em28xx_cvol_new(card, dev, "Master", AC97_MASTER); em28xx_cvol_new(card, dev, "Line", AC97_HEADPHONE); em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO); em28xx_cvol_new(card, dev, "LFE", AC97_CENTER_LFE_MASTER); em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER); } err = em28xx_audio_urb_init(dev); if (err) goto card_free; err = snd_card_register(card); if (err < 0) goto urb_free; dev_info(&dev->intf->dev, "Audio extension successfully initialized\n"); return 0; urb_free: em28xx_audio_free_urb(dev); card_free: snd_card_free(card); adev->sndcard = NULL; return err; } static int em28xx_audio_fini(struct em28xx *dev) { if (dev == NULL) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) { /* This device does not support the extension (in this case the device is expecting the snd-usb-audio module or doesn't have analog audio support at all) */ return 0; } dev_info(&dev->intf->dev, "Closing audio extension\n"); if (dev->adev.sndcard) { snd_card_disconnect(dev->adev.sndcard); flush_work(&dev->adev.wq_trigger); em28xx_audio_free_urb(dev); snd_card_free(dev->adev.sndcard); dev->adev.sndcard = NULL; } kref_put(&dev->ref, em28xx_free_device); return 0; } static int em28xx_audio_suspend(struct em28xx *dev) { if (dev == NULL) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) return 0; dev_info(&dev->intf->dev, "Suspending audio extension\n"); em28xx_deinit_isoc_audio(dev); atomic_set(&dev->adev.stream_started, 0); return 0; } static int em28xx_audio_resume(struct em28xx *dev) { if (dev == NULL) return 0; if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) return 0; dev_info(&dev->intf->dev, "Resuming audio extension\n"); /* Nothing to do other than schedule_work() ?? */ schedule_work(&dev->adev.wq_trigger); return 0; } static struct em28xx_ops audio_ops = { .id = EM28XX_AUDIO, .name = "Em28xx Audio Extension", .init = em28xx_audio_init, .fini = em28xx_audio_fini, .suspend = em28xx_audio_suspend, .resume = em28xx_audio_resume, }; static int __init em28xx_alsa_register(void) { return em28xx_register_extension(&audio_ops); } static void __exit em28xx_alsa_unregister(void) { em28xx_unregister_extension(&audio_ops); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_DESCRIPTION(DRIVER_DESC " - audio interface"); MODULE_VERSION(EM28XX_VERSION); module_init(em28xx_alsa_register); module_exit(em28xx_alsa_unregister);
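The em28xx audio sample above decodes wMaxPacketSize for high-speed isochronous endpoints and then estimates bytes per transfer. That decoding is the general USB 2.0 rule: bits 10..0 carry the base packet size and bits 12..11 encode up to two additional transactions per microframe. A minimal stand-alone sketch of the same arithmetic, assuming nothing beyond the C standard library; the helper name and the 48 kHz stereo figures mirror the driver but are otherwise illustrative:

#include <stdint.h>
#include <stdio.h>

/* Decode wMaxPacketSize for a USB 2.0 high-speed isoc endpoint:
 * bits 10..0 = base size, bits 12..11 = extra transactions/microframe. */
static unsigned int hs_isoc_packet_size(uint16_t wMaxPacketSize)
{
	return (wMaxPacketSize & 0x7ff) * (1 + ((wMaxPacketSize >> 11) & 0x3));
}

int main(void)
{
	/* 0x1400: base 1024 bytes, 2 extra transactions -> 3072 bytes */
	printf("%u\n", hs_isoc_packet_size(0x1400));

	/* Same heuristic the driver uses: 48000 Hz * 2 ch * 2 bytes,
	 * rounded up over 125 us microframe intervals (DIV_ROUND_UP). */
	unsigned int interval = 1; /* derived from bInterval, as above */
	unsigned int bytes_per_xfer = (48000 * 2 * 2 + 125 * interval - 1) /
				      (125 * interval);
	printf("%u bytes per transfer\n", bytes_per_xfer);
	return 0;
}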
null
null
null
null
108,141
1,053
3
train_val
81cdb259fb6d8c1c4ecfeea389ff5a73c07f5755
166,048
linux
1
https://github.com/torvalds/linux
2016-11-24 18:37:19+01:00
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
}
CVE-2016-9777
CWE-125
https://github.com/torvalds/linux/commit/81cdb259fb6d8c1c4ecfeea389ff5a73c07f5755
Medium
3,942
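The CVE-2016-9777 row above is labeled CWE-125 (out-of-bounds read): per the linked fix commit, the rtc_status.dest_map bitmap is sized and cleared with KVM_MAX_VCPUS while the IDs indexed into it can range up to the larger KVM_MAX_VCPU_ID. A hedged, self-contained C sketch of that bug pattern; the constants and names here are stand-ins, not the KVM definitions:

#include <limits.h>
#include <stdio.h>

#define MAX_UNITS     288	/* stand-in for KVM_MAX_VCPUS */
#define MAX_UNIT_ID   1023	/* stand-in for the larger KVM_MAX_VCPU_ID */
#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct dest_map {
	/* Buggy sizing: holds MAX_UNITS bits although ids reach MAX_UNIT_ID. */
	unsigned long map[BITMAP_LONGS(MAX_UNITS)];
};

static void set_bit_checked(unsigned long *map, unsigned int nbits,
			    unsigned int id)
{
	if (id >= nbits) {	/* the range check the bug class is missing */
		fprintf(stderr, "id %u out of range\n", id);
		return;
	}
	map[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
}

int main(void)
{
	struct dest_map dm = { { 0 } };
	/* Without the check, id 1000 indexes past dm.map: out of bounds. */
	set_bit_checked(dm.map, MAX_UNITS, 1000);
	return 0;
}

The fix in the referenced commit takes the other route: size the bitmap for the full ID range rather than rejecting large IDs.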
21,308
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
21,308
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_RENDERER_MEDIA_RECORDER_AUDIO_TRACK_OPUS_ENCODER_H_ #define CONTENT_RENDERER_MEDIA_RECORDER_AUDIO_TRACK_OPUS_ENCODER_H_ #include <memory> #include "base/macros.h" #include "base/time/time.h" #include "content/renderer/media_recorder/audio_track_encoder.h" #include "media/base/audio_bus.h" #include "media/base/audio_converter.h" #include "media/base/audio_fifo.h" #include "media/base/audio_parameters.h" #include "third_party/opus/src/include/opus.h" namespace content { // Class encapsulating Opus-related encoding details. It contains an // AudioConverter to adapt incoming data to the format Opus likes to have. class AudioTrackOpusEncoder : public AudioTrackEncoder, public media::AudioConverter::InputCallback { public: AudioTrackOpusEncoder(OnEncodedAudioCB on_encoded_audio_cb, int32_t bits_per_second); void OnSetFormat(const media::AudioParameters& params) override; void EncodeAudio(std::unique_ptr<media::AudioBus> input_bus, base::TimeTicks capture_time) override; private: ~AudioTrackOpusEncoder() override; bool is_initialized() const { return !!opus_encoder_; } void DestroyExistingOpusEncoder(); // media::AudioConverted::InputCallback implementation. double ProvideInput(media::AudioBus* audio_bus, uint32_t frames_delayed) override; // Target bitrate for Opus. If 0, Opus provide automatic bitrate is used. const int32_t bits_per_second_; // Output parameters after audio conversion. This differs from the input // parameters only in sample_rate() and frames_per_buffer(): output should be // 48ksamples/s and 2880, respectively. media::AudioParameters converted_params_; // Sample rate adapter from the input audio to what OpusEncoder desires. std::unique_ptr<media::AudioConverter> converter_; // Buffer for holding the original input audio before it goes to the // converter. std::unique_ptr<media::AudioFifo> fifo_; // Buffer for passing AudioBus data from the converter to the encoder. std::unique_ptr<float[]> buffer_; OpusEncoder* opus_encoder_; DISALLOW_COPY_AND_ASSIGN(AudioTrackOpusEncoder); }; } // namespace content #endif // CONTENT_RENDERER_MEDIA_RECORDER_AUDIO_TRACK_OPUS_ENCODER_H_
null
null
null
null
18,171
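The AudioTrackOpusEncoder header in the row above converts incoming audio to 48 kHz with 2880 frames per buffer, i.e. 60 ms Opus frames. A minimal sketch of that configuration against the public libopus C API; the bitrate value and buffer sizes are illustrative, and error handling is trimmed:

#include <opus/opus.h>
#include <stdio.h>

#define SAMPLE_RATE 48000
#define CHANNELS    2
#define FRAME_SIZE  2880	/* 60 ms at 48 kHz, as in the header comment */

int main(void)
{
	int err;
	OpusEncoder *enc = opus_encoder_create(SAMPLE_RATE, CHANNELS,
					       OPUS_APPLICATION_AUDIO, &err);
	if (err != OPUS_OK)
		return 1;

	/* A bitrate of 0 in the header means "let Opus choose";
	 * here we pin 128 kbit/s for the sake of the example. */
	opus_encoder_ctl(enc, OPUS_SET_BITRATE(128000));

	float pcm[FRAME_SIZE * CHANNELS] = { 0 };	/* interleaved input */
	unsigned char packet[4000];			/* generous packet buffer */
	opus_int32 n = opus_encode_float(enc, pcm, FRAME_SIZE,
					 packet, sizeof(packet));
	if (n > 0)
		printf("encoded %d bytes\n", n);

	opus_encoder_destroy(enc);
	return 0;
}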
22,935
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
187,930
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * Driver for CSR SiRFprimaII onboard UARTs. * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of_gpio.h> #include <linux/dmaengine.h> #include <linux/dma-direction.h> #include <linux/dma-mapping.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include "sirfsoc_uart.h" static unsigned int sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count); static unsigned int sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count); static struct uart_driver sirfsoc_uart_drv; static void sirfsoc_uart_tx_dma_complete_callback(void *param); static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = { {4000000, 2359296}, {3500000, 1310721}, {3000000, 1572865}, {2500000, 1245186}, {2000000, 1572866}, {1500000, 1245188}, {1152000, 1638404}, {1000000, 1572869}, {921600, 1114120}, {576000, 1245196}, {500000, 1245198}, {460800, 1572876}, {230400, 1310750}, {115200, 1310781}, {57600, 1310843}, {38400, 1114328}, {19200, 1114545}, {9600, 1114979}, }; static struct sirfsoc_uart_port *sirf_ports[SIRFSOC_UART_NR]; static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port) { return container_of(port, struct sirfsoc_uart_port, port); } static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port) { unsigned long reg; struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status); return (reg & ufifo_st->ff_empty(port)) ? TIOCSER_TEMT : 0; } static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled) goto cts_asserted; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) & SIRFUART_AFC_CTS_STATUS)) goto cts_asserted; else goto cts_deasserted; } else { if (!gpio_get_value(sirfport->cts_gpio)) goto cts_asserted; else goto cts_deasserted; } cts_deasserted: return TIOCM_CAR | TIOCM_DSR; cts_asserted: return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; } static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; unsigned int assert = mctrl & TIOCM_RTS; unsigned int val = assert ? 
SIRFUART_AFC_CTRL_RX_THD : 0x0; unsigned int current_val; if (mctrl & TIOCM_LOOP) { if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) wr_regl(port, ureg->sirfsoc_line_ctrl, rd_regl(port, ureg->sirfsoc_line_ctrl) | SIRFUART_LOOP_BACK); else wr_regl(port, ureg->sirfsoc_mode1, rd_regl(port, ureg->sirfsoc_mode1) | SIRFSOC_USP_LOOP_BACK_CTRL); } else { if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) wr_regl(port, ureg->sirfsoc_line_ctrl, rd_regl(port, ureg->sirfsoc_line_ctrl) & ~SIRFUART_LOOP_BACK); else wr_regl(port, ureg->sirfsoc_mode1, rd_regl(port, ureg->sirfsoc_mode1) & ~SIRFSOC_USP_LOOP_BACK_CTRL); } if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled) return; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF; val |= current_val; wr_regl(port, ureg->sirfsoc_afc_ctrl, val); } else { if (!val) gpio_set_value(sirfport->rts_gpio, 1); else gpio_set_value(sirfport->rts_gpio, 0); } } static void sirfsoc_uart_stop_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (sirfport->tx_dma_chan) { if (sirfport->tx_dma_state == TX_DMA_RUNNING) { dmaengine_pause(sirfport->tx_dma_chan); sirfport->tx_dma_state = TX_DMA_PAUSE; } else { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_clr_reg, uint_en->sirfsoc_txfifo_empty_en); } } else { if (sirfport->uart_reg->uart_type == SIRF_USP_UART) wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port, ureg->sirfsoc_tx_rx_en) & ~SIRFUART_TX_EN); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_clr_reg, uint_en->sirfsoc_txfifo_empty_en); } } static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport) { struct uart_port *port = &sirfport->port; struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; struct circ_buf *xmit = &port->state->xmit; unsigned long tran_size; unsigned long tran_start; unsigned long pio_tx_size; tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); tran_start = (unsigned long)(xmit->buf + xmit->tail); if (uart_circ_empty(xmit) || uart_tx_stopped(port) || !tran_size) return; if (sirfport->tx_dma_state == TX_DMA_PAUSE) { dmaengine_resume(sirfport->tx_dma_chan); return; } if (sirfport->tx_dma_state == TX_DMA_RUNNING) return; if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)& ~(uint_en->sirfsoc_txfifo_empty_en)); else wr_regl(port, ureg->sirfsoc_int_en_clr_reg, uint_en->sirfsoc_txfifo_empty_en); /* * DMA requires buffer address and buffer length are both aligned with * 4 bytes, so we use PIO for * 1. if address is not aligned with 4bytes, use PIO for the first 1~3 * bytes, and move to DMA for the left part aligned with 4bytes * 2. 
if buffer length is not aligned with 4bytes, use DMA for aligned * part first, move to PIO for the left 1~3 bytes */ if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) { wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP); wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)| SIRFUART_IO_MODE); if (BYTES_TO_ALIGN(tran_start)) { pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport, BYTES_TO_ALIGN(tran_start)); tran_size -= pio_tx_size; } if (tran_size < 4) sirfsoc_uart_pio_tx_chars(sirfport, tran_size); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)| uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_txfifo_empty_en); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); } else { /* tx transfer mode switch into dma mode */ wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP); wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)& ~SIRFUART_IO_MODE); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); tran_size &= ~(0x3); sirfport->tx_dma_addr = dma_map_single(port->dev, xmit->buf + xmit->tail, tran_size, DMA_TO_DEVICE); sirfport->tx_dma_desc = dmaengine_prep_slave_single( sirfport->tx_dma_chan, sirfport->tx_dma_addr, tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!sirfport->tx_dma_desc) { dev_err(port->dev, "DMA prep slave single fail\n"); return; } sirfport->tx_dma_desc->callback = sirfsoc_uart_tx_dma_complete_callback; sirfport->tx_dma_desc->callback_param = (void *)sirfport; sirfport->transfer_size = tran_size; dmaengine_submit(sirfport->tx_dma_desc); dma_async_issue_pending(sirfport->tx_dma_chan); sirfport->tx_dma_state = TX_DMA_RUNNING; } } static void sirfsoc_uart_start_tx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (sirfport->tx_dma_chan) sirfsoc_uart_tx_with_dma(sirfport); else { if (sirfport->uart_reg->uart_type == SIRF_USP_UART) wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port, ureg->sirfsoc_tx_rx_en) | SIRFUART_TX_EN); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP); sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)| uint_en->sirfsoc_txfifo_empty_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_txfifo_empty_en); } } static void sirfsoc_uart_stop_rx(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); if (sirfport->rx_dma_chan) { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~(SIRFUART_RX_DMA_INT_EN(uint_en, sirfport->uart_reg->uart_type) | uint_en->sirfsoc_rx_done_en)); else wr_regl(port, ureg->sirfsoc_int_en_clr_reg, SIRFUART_RX_DMA_INT_EN(uint_en, sirfport->uart_reg->uart_type)| uint_en->sirfsoc_rx_done_en); dmaengine_terminate_all(sirfport->rx_dma_chan); } else { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)& ~(SIRFUART_RX_IO_INT_EN(uint_en, sirfport->uart_reg->uart_type))); else 
wr_regl(port, ureg->sirfsoc_int_en_clr_reg, SIRFUART_RX_IO_INT_EN(uint_en, sirfport->uart_reg->uart_type)); } } static void sirfsoc_uart_disable_ms(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (!sirfport->hw_flow_ctrl) return; sirfport->ms_enabled = false; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { wr_regl(port, ureg->sirfsoc_afc_ctrl, rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg)& ~uint_en->sirfsoc_cts_en); else wr_regl(port, ureg->sirfsoc_int_en_clr_reg, uint_en->sirfsoc_cts_en); } else disable_irq(gpio_to_irq(sirfport->cts_gpio)); } static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id) { struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id; struct uart_port *port = &sirfport->port; spin_lock(&port->lock); if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled) uart_handle_cts_change(port, !gpio_get_value(sirfport->cts_gpio)); spin_unlock(&port->lock); return IRQ_HANDLED; } static void sirfsoc_uart_enable_ms(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; if (!sirfport->hw_flow_ctrl) return; sirfport->ms_enabled = true; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { wr_regl(port, ureg->sirfsoc_afc_ctrl, rd_regl(port, ureg->sirfsoc_afc_ctrl) | SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN | SIRFUART_AFC_CTRL_RX_THD); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | uint_en->sirfsoc_cts_en); else wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_cts_en); } else enable_irq(gpio_to_irq(sirfport->cts_gpio)); } static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl); if (break_state) ulcon |= SIRFUART_SET_BREAK; else ulcon &= ~SIRFUART_SET_BREAK; wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon); } } static unsigned int sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; unsigned int ch, rx_count = 0; struct tty_struct *tty; tty = tty_port_tty_get(&port->state->port); if (!tty) return -ENODEV; while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) & ufifo_st->ff_empty(port))) { ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) | SIRFUART_DUMMY_READ; if (unlikely(uart_handle_sysrq_char(port, ch))) continue; uart_insert_char(port, 0, 0, ch, TTY_NORMAL); rx_count++; if (rx_count >= max_rx_count) break; } port->icount.rx += rx_count; return rx_count; } static unsigned int sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count) { struct uart_port *port = &sirfport->port; struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; struct circ_buf *xmit = &port->state->xmit; unsigned int num_tx = 
0; while (!uart_circ_empty(xmit) && !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port)) && count--) { wr_regl(port, ureg->sirfsoc_tx_fifo_data, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; num_tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); return num_tx; } static void sirfsoc_uart_tx_dma_complete_callback(void *param) { struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param; struct uart_port *port = &sirfport->port; struct circ_buf *xmit = &port->state->xmit; unsigned long flags; spin_lock_irqsave(&port->lock, flags); xmit->tail = (xmit->tail + sirfport->transfer_size) & (UART_XMIT_SIZE - 1); port->icount.tx += sirfport->transfer_size; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (sirfport->tx_dma_addr) dma_unmap_single(port->dev, sirfport->tx_dma_addr, sirfport->transfer_size, DMA_TO_DEVICE); sirfport->tx_dma_state = TX_DMA_IDLE; sirfsoc_uart_tx_with_dma(sirfport); spin_unlock_irqrestore(&port->lock, flags); } static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id) { unsigned long intr_status; unsigned long cts_status; unsigned long flag = TTY_NORMAL; struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id; struct uart_port *port = &sirfport->port; struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; struct uart_state *state = port->state; struct circ_buf *xmit = &port->state->xmit; spin_lock(&port->lock); intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg); wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status); intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg); if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st, sirfport->uart_reg->uart_type)))) { if (intr_status & uint_st->sirfsoc_rxd_brk) { port->icount.brk++; if (uart_handle_break(port)) goto recv_char; } if (intr_status & uint_st->sirfsoc_rx_oflow) { port->icount.overrun++; flag = TTY_OVERRUN; } if (intr_status & uint_st->sirfsoc_frm_err) { port->icount.frame++; flag = TTY_FRAME; } if (intr_status & uint_st->sirfsoc_parity_err) { port->icount.parity++; flag = TTY_PARITY; } wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET); wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START); intr_status &= port->read_status_mask; uart_insert_char(port, intr_status, uint_en->sirfsoc_rx_oflow_en, 0, flag); } recv_char: if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) && (intr_status & SIRFUART_CTS_INT_ST(uint_st)) && !sirfport->tx_dma_state) { cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) & SIRFUART_AFC_CTS_STATUS; if (cts_status != 0) cts_status = 0; else cts_status = 1; uart_handle_cts_change(port, cts_status); wake_up_interruptible(&state->port.delta_msr_wait); } if (!sirfport->rx_dma_chan && (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) { /* * chip will trigger continuous RX_TIMEOUT interrupt * in RXFIFO empty and not trigger if RXFIFO recevice * data in limit time, original method use RX_TIMEOUT * will trigger lots of useless interrupt in RXFIFO * empty.RXFIFO received one byte will trigger RX_DONE * interrupt.use RX_DONE to wait for data received * into RXFIFO, use RX_THD/RX_FULL for lots data receive * and use RX_TIMEOUT for the last left data. 
*/ if (intr_status & uint_st->sirfsoc_rx_done) { if (!sirfport->is_atlas7) { wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~(uint_en->sirfsoc_rx_done_en)); wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | (uint_en->sirfsoc_rx_timeout_en)); } else { wr_regl(port, ureg->sirfsoc_int_en_clr_reg, uint_en->sirfsoc_rx_done_en); wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_rx_timeout_en); } } else { if (intr_status & uint_st->sirfsoc_rx_timeout) { if (!sirfport->is_atlas7) { wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) & ~(uint_en->sirfsoc_rx_timeout_en)); wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | (uint_en->sirfsoc_rx_done_en)); } else { wr_regl(port, ureg->sirfsoc_int_en_clr_reg, uint_en->sirfsoc_rx_timeout_en); wr_regl(port, ureg->sirfsoc_int_en_reg, uint_en->sirfsoc_rx_done_en); } } sirfsoc_uart_pio_rx_chars(port, port->fifosize); } } spin_unlock(&port->lock); tty_flip_buffer_push(&state->port); spin_lock(&port->lock); if (intr_status & uint_st->sirfsoc_txfifo_empty) { if (sirfport->tx_dma_chan) sirfsoc_uart_tx_with_dma(sirfport); else { if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { spin_unlock(&port->lock); return IRQ_HANDLED; } else { sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize); if ((uart_circ_empty(xmit)) && (rd_regl(port, ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_empty(port))) sirfsoc_uart_stop_tx(port); } } } spin_unlock(&port->lock); return IRQ_HANDLED; } static void sirfsoc_uart_rx_dma_complete_callback(void *param) { } /* submit rx dma task into dmaengine */ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) & ~SIRFUART_IO_MODE); sirfport->rx_dma_items.xmit.tail = sirfport->rx_dma_items.xmit.head = 0; sirfport->rx_dma_items.desc = dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan, sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE, SIRFSOC_RX_DMA_BUF_SIZE / 2, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) { dev_err(port->dev, "DMA slave single fail\n"); return; } sirfport->rx_dma_items.desc->callback = sirfsoc_uart_rx_dma_complete_callback; sirfport->rx_dma_items.desc->callback_param = sirfport; sirfport->rx_dma_items.cookie = dmaengine_submit(sirfport->rx_dma_items.desc); dma_async_issue_pending(sirfport->rx_dma_chan); if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | SIRFUART_RX_DMA_INT_EN(uint_en, sirfport->uart_reg->uart_type)); else wr_regl(port, ureg->sirfsoc_int_en_reg, SIRFUART_RX_DMA_INT_EN(uint_en, sirfport->uart_reg->uart_type)); } static unsigned int sirfsoc_usp_calc_sample_div(unsigned long set_rate, unsigned long ioclk_rate, unsigned long *sample_reg) { unsigned long min_delta = ~0UL; unsigned short sample_div; unsigned long ioclk_div = 0; unsigned long temp_delta; for (sample_div = SIRF_USP_MIN_SAMPLE_DIV; sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) { temp_delta = ioclk_rate - (ioclk_rate + (set_rate * sample_div) / 2) / (set_rate * sample_div) * set_rate * sample_div; temp_delta = (temp_delta > 0) ? 
temp_delta : -temp_delta; if (temp_delta < min_delta) { ioclk_div = (2 * ioclk_rate / (set_rate * sample_div) + 1) / 2 - 1; if (ioclk_div > SIRF_IOCLK_DIV_MAX) continue; min_delta = temp_delta; *sample_reg = sample_div; if (!temp_delta) break; } } return ioclk_div; } static unsigned int sirfsoc_uart_calc_sample_div(unsigned long baud_rate, unsigned long ioclk_rate, unsigned long *set_baud) { unsigned long min_delta = ~0UL; unsigned short sample_div; unsigned int regv = 0; unsigned long ioclk_div; unsigned long baud_tmp; int temp_delta; for (sample_div = SIRF_MIN_SAMPLE_DIV; sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) { ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1; if (ioclk_div > SIRF_IOCLK_DIV_MAX) continue; baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1)); temp_delta = baud_tmp - baud_rate; temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta; if (temp_delta < min_delta) { regv = regv & (~SIRF_IOCLK_DIV_MASK); regv = regv | ioclk_div; regv = regv & (~SIRF_SAMPLE_DIV_MASK); regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT); min_delta = temp_delta; *set_baud = baud_tmp; } } return regv; } static void sirfsoc_uart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; unsigned long config_reg = 0; unsigned long baud_rate; unsigned long set_baud; unsigned long flags; unsigned long ic; unsigned int clk_div_reg = 0; unsigned long txfifo_op_reg, ioclk_rate; unsigned long rx_time_out; int threshold_div; u32 data_bit_len, stop_bit_len, len_val; unsigned long sample_div_reg = 0xf; ioclk_rate = port->uartclk; switch (termios->c_cflag & CSIZE) { default: case CS8: data_bit_len = 8; config_reg |= SIRFUART_DATA_BIT_LEN_8; break; case CS7: data_bit_len = 7; config_reg |= SIRFUART_DATA_BIT_LEN_7; break; case CS6: data_bit_len = 6; config_reg |= SIRFUART_DATA_BIT_LEN_6; break; case CS5: data_bit_len = 5; config_reg |= SIRFUART_DATA_BIT_LEN_5; break; } if (termios->c_cflag & CSTOPB) { config_reg |= SIRFUART_STOP_BIT_LEN_2; stop_bit_len = 2; } else stop_bit_len = 1; spin_lock_irqsave(&port->lock, flags); port->read_status_mask = uint_en->sirfsoc_rx_oflow_en; port->ignore_status_mask = 0; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { if (termios->c_iflag & INPCK) port->read_status_mask |= uint_en->sirfsoc_frm_err_en | uint_en->sirfsoc_parity_err_en; } else { if (termios->c_iflag & INPCK) port->read_status_mask |= uint_en->sirfsoc_frm_err_en; } if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= uint_en->sirfsoc_frm_err_en | uint_en->sirfsoc_parity_err_en; if (termios->c_cflag & PARENB) { if (termios->c_cflag & CMSPAR) { if (termios->c_cflag & PARODD) config_reg |= SIRFUART_STICK_BIT_MARK; else config_reg |= SIRFUART_STICK_BIT_SPACE; } else { if (termios->c_cflag & PARODD) config_reg |= SIRFUART_STICK_BIT_ODD; else config_reg |= SIRFUART_STICK_BIT_EVEN; } } } else { if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= uint_en->sirfsoc_frm_err_en; if (termios->c_cflag & PARENB) dev_warn(port->dev, "USP-UART not support parity err\n"); } if (termios->c_iflag & IGNBRK) { port->ignore_status_mask |= uint_en->sirfsoc_rxd_brk_en; if (termios->c_iflag & IGNPAR) 
port->ignore_status_mask |= uint_en->sirfsoc_rx_oflow_en; } if ((termios->c_cflag & CREAD) == 0) port->ignore_status_mask |= SIRFUART_DUMMY_READ; /* Hardware Flow Control Settings */ if (UART_ENABLE_MS(port, termios->c_cflag)) { if (!sirfport->ms_enabled) sirfsoc_uart_enable_ms(port); } else { if (sirfport->ms_enabled) sirfsoc_uart_disable_ms(port); } baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000); if (ioclk_rate == 150000000) { for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++) if (baud_rate == baudrate_to_regv[ic].baud_rate) clk_div_reg = baudrate_to_regv[ic].reg_val; } set_baud = baud_rate; if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { if (unlikely(clk_div_reg == 0)) clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate, ioclk_rate, &set_baud); wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg); } else { clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate, ioclk_rate, &sample_div_reg); sample_div_reg--; set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) / (sample_div_reg + 1)); /* setting usp mode 2 */ len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) | (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET)); len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK) << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET); wr_regl(port, ureg->sirfsoc_mode2, len_val); } if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, set_baud, set_baud); /* set receive timeout && data bits len */ rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000); rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out); txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op); wr_regl(port, ureg->sirfsoc_tx_fifo_op, (txfifo_op_reg & ~SIRFUART_FIFO_START)); if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { config_reg |= SIRFUART_UART_RECV_TIMEOUT(rx_time_out); wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg); } else { /*tx frame ctrl*/ len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET; len_val |= (data_bit_len + 1 + stop_bit_len - 1) << SIRFSOC_USP_TX_FRAME_LEN_OFFSET; len_val |= ((data_bit_len - 1) << SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET); len_val |= (((clk_div_reg & 0xc00) >> 10) << SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET); wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val); /*rx frame ctrl*/ len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET; len_val |= (data_bit_len + 1 + stop_bit_len - 1) << SIRFSOC_USP_RX_FRAME_LEN_OFFSET; len_val |= (data_bit_len - 1) << SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET; len_val |= (((clk_div_reg & 0xf000) >> 12) << SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET); wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val); /*async param*/ wr_regl(port, ureg->sirfsoc_async_param_reg, (SIRFUART_USP_RECV_TIMEOUT(rx_time_out)) | (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) << SIRFSOC_USP_ASYNC_DIV2_OFFSET); } if (sirfport->tx_dma_chan) wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE); else wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE); if (sirfport->rx_dma_chan) wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) & ~SIRFUART_IO_MODE); else wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | SIRFUART_IO_MODE); sirfport->rx_period_time = 20000000; /* Reset Rx/Tx FIFO Threshold level for proper baudrate */ if (set_baud < 1000000) threshold_div = 1; else threshold_div = 2; wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port) / threshold_div); wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port) / threshold_div); txfifo_op_reg |= 
SIRFUART_FIFO_START; wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg); uart_update_timeout(port, termios->c_cflag, set_baud); wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN); spin_unlock_irqrestore(&port->lock, flags); } static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); if (!state) clk_prepare_enable(sirfport->clk); else clk_disable_unprepare(sirfport->clk); } static int sirfsoc_uart_startup(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; unsigned int index = port->line; int ret; irq_modify_status(port->irq, IRQ_NOREQUEST, IRQ_NOAUTOEN); ret = request_irq(port->irq, sirfsoc_uart_isr, 0, SIRFUART_PORT_NAME, sirfport); if (ret != 0) { dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n", index, port->irq); goto irq_err; } /* initial hardware settings */ wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) | SIRFUART_IO_MODE); wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | SIRFUART_IO_MODE); wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) & ~SIRFUART_RX_DMA_FLUSH); wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0); wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0); wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN); if (sirfport->uart_reg->uart_type == SIRF_USP_UART) wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_ENDIAN_CTRL_LSBF | SIRFSOC_USP_EN); wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET); wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET); wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port)); wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port)); if (sirfport->rx_dma_chan) wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk, SIRFUART_RX_FIFO_CHK_SC(port->line, 0x1) | SIRFUART_RX_FIFO_CHK_LC(port->line, 0x2) | SIRFUART_RX_FIFO_CHK_HC(port->line, 0x4)); if (sirfport->tx_dma_chan) { sirfport->tx_dma_state = TX_DMA_IDLE; wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk, SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) | SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) | SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4)); } sirfport->ms_enabled = false; if (sirfport->uart_reg->uart_type == SIRF_USP_UART && sirfport->hw_flow_ctrl) { irq_modify_status(gpio_to_irq(sirfport->cts_gpio), IRQ_NOREQUEST, IRQ_NOAUTOEN); ret = request_irq(gpio_to_irq(sirfport->cts_gpio), sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport); if (ret != 0) { dev_err(port->dev, "UART-USP:request gpio irq fail\n"); goto init_rx_err; } } if (sirfport->uart_reg->uart_type == SIRF_REAL_UART && sirfport->rx_dma_chan) wr_regl(port, ureg->sirfsoc_swh_dma_io, SIRFUART_CLEAR_RX_ADDR_EN); if (sirfport->uart_reg->uart_type == SIRF_USP_UART && sirfport->rx_dma_chan) wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | SIRFSOC_USP_FRADDR_CLR_EN); if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) { sirfport->is_hrt_enabled = true; sirfport->rx_period_time = 20000000; sirfport->rx_last_pos = -1; sirfport->pio_fetch_cnt = 0; sirfport->rx_dma_items.xmit.tail = sirfport->rx_dma_items.xmit.head = 0; hrtimer_start(&sirfport->hrt, 
ns_to_ktime(sirfport->rx_period_time), HRTIMER_MODE_REL); } wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START); if (sirfport->rx_dma_chan) sirfsoc_uart_start_next_rx_dma(port); else { if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, rd_regl(port, ureg->sirfsoc_int_en_reg) | SIRFUART_RX_IO_INT_EN(uint_en, sirfport->uart_reg->uart_type)); else wr_regl(port, ureg->sirfsoc_int_en_reg, SIRFUART_RX_IO_INT_EN(uint_en, sirfport->uart_reg->uart_type)); } enable_irq(port->irq); return 0; init_rx_err: free_irq(port->irq, sirfport); irq_err: return ret; } static void sirfsoc_uart_shutdown(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct circ_buf *xmit; xmit = &sirfport->rx_dma_items.xmit; if (!sirfport->is_atlas7) wr_regl(port, ureg->sirfsoc_int_en_reg, 0); else wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL); free_irq(port->irq, sirfport); if (sirfport->ms_enabled) sirfsoc_uart_disable_ms(port); if (sirfport->uart_reg->uart_type == SIRF_USP_UART && sirfport->hw_flow_ctrl) { gpio_set_value(sirfport->rts_gpio, 1); free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport); } if (sirfport->tx_dma_chan) sirfport->tx_dma_state = TX_DMA_IDLE; if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) { while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) & SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) && !CIRC_CNT(xmit->head, xmit->tail, SIRFSOC_RX_DMA_BUF_SIZE)) ; sirfport->is_hrt_enabled = false; hrtimer_cancel(&sirfport->hrt); } } static const char *sirfsoc_uart_type(struct uart_port *port) { return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL; } static int sirfsoc_uart_request_port(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param; void *ret; ret = request_mem_region(port->mapbase, SIRFUART_MAP_SIZE, uart_param->port_name); return ret ? 
0 : -EBUSY; } static void sirfsoc_uart_release_port(struct uart_port *port) { release_mem_region(port->mapbase, SIRFUART_MAP_SIZE); } static void sirfsoc_uart_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { port->type = SIRFSOC_PORT_TYPE; sirfsoc_uart_request_port(port); } } static const struct uart_ops sirfsoc_uart_ops = { .tx_empty = sirfsoc_uart_tx_empty, .get_mctrl = sirfsoc_uart_get_mctrl, .set_mctrl = sirfsoc_uart_set_mctrl, .stop_tx = sirfsoc_uart_stop_tx, .start_tx = sirfsoc_uart_start_tx, .stop_rx = sirfsoc_uart_stop_rx, .enable_ms = sirfsoc_uart_enable_ms, .break_ctl = sirfsoc_uart_break_ctl, .startup = sirfsoc_uart_startup, .shutdown = sirfsoc_uart_shutdown, .set_termios = sirfsoc_uart_set_termios, .pm = sirfsoc_uart_pm, .type = sirfsoc_uart_type, .release_port = sirfsoc_uart_release_port, .request_port = sirfsoc_uart_request_port, .config_port = sirfsoc_uart_config_port, }; #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE static int __init sirfsoc_uart_console_setup(struct console *co, char *options) { unsigned int baud = 115200; unsigned int bits = 8; unsigned int parity = 'n'; unsigned int flow = 'n'; struct sirfsoc_uart_port *sirfport; struct sirfsoc_register *ureg; if (co->index < 0 || co->index >= SIRFSOC_UART_NR) co->index = 1; sirfport = sirf_ports[co->index]; if (!sirfport) return -ENODEV; ureg = &sirfport->uart_reg->uart_reg; if (!sirfport->port.mapbase) return -ENODEV; /* enable usp in mode1 register */ if (sirfport->uart_reg->uart_type == SIRF_USP_UART) wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN | SIRFSOC_USP_ENDIAN_CTRL_LSBF); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); sirfport->port.cons = co; /* default console tx/rx transfer using io mode */ sirfport->rx_dma_chan = NULL; sirfport->tx_dma_chan = NULL; return uart_set_options(&sirfport->port, co, baud, parity, bits, flow); } static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; while (rd_regl(port, ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port)) cpu_relax(); wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch); } static void sirfsoc_uart_console_write(struct console *co, const char *s, unsigned int count) { struct sirfsoc_uart_port *sirfport = sirf_ports[co->index]; uart_console_write(&sirfport->port, s, count, sirfsoc_uart_console_putchar); } static struct console sirfsoc_uart_console = { .name = SIRFSOC_UART_NAME, .device = uart_console_device, .flags = CON_PRINTBUFFER, .index = -1, .write = sirfsoc_uart_console_write, .setup = sirfsoc_uart_console_setup, .data = &sirfsoc_uart_drv, }; static int __init sirfsoc_uart_console_init(void) { register_console(&sirfsoc_uart_console); return 0; } console_initcall(sirfsoc_uart_console_init); #endif static struct uart_driver sirfsoc_uart_drv = { .owner = THIS_MODULE, .driver_name = SIRFUART_PORT_NAME, .nr = SIRFSOC_UART_NR, .dev_name = SIRFSOC_UART_NAME, .major = SIRFSOC_UART_MAJOR, .minor = SIRFSOC_UART_MINOR, #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE .cons = &sirfsoc_uart_console, #else .cons = NULL, #endif }; static enum hrtimer_restart sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt) { struct sirfsoc_uart_port *sirfport; struct uart_port *port; int count, inserted; struct dma_tx_state tx_state; struct tty_struct *tty; struct sirfsoc_register *ureg; struct circ_buf *xmit; struct 
sirfsoc_fifo_status *ufifo_st; int max_pio_cnt; sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt); port = &sirfport->port; inserted = 0; tty = port->state->port.tty; ureg = &sirfport->uart_reg->uart_reg; xmit = &sirfport->rx_dma_items.xmit; ufifo_st = &sirfport->uart_reg->fifo_status; dmaengine_tx_status(sirfport->rx_dma_chan, sirfport->rx_dma_items.cookie, &tx_state); if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue != sirfport->rx_last_pos) { xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue; sirfport->rx_last_pos = xmit->head; sirfport->pio_fetch_cnt = 0; } count = CIRC_CNT_TO_END(xmit->head, xmit->tail, SIRFSOC_RX_DMA_BUF_SIZE); while (count > 0) { inserted = tty_insert_flip_string(tty->port, (const unsigned char *)&xmit->buf[xmit->tail], count); if (!inserted) goto next_hrt; port->icount.rx += inserted; xmit->tail = (xmit->tail + inserted) & (SIRFSOC_RX_DMA_BUF_SIZE - 1); count = CIRC_CNT_TO_END(xmit->head, xmit->tail, SIRFSOC_RX_DMA_BUF_SIZE); tty_flip_buffer_push(tty->port); } /* * if RX DMA buffer data have all push into tty buffer, and there is * only little data(less than a dma transfer unit) left in rxfifo, * fetch it out in pio mode and switch back to dma immediately */ if (!inserted && !count && ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) & SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) { dmaengine_pause(sirfport->rx_dma_chan); /* switch to pio mode */ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | SIRFUART_IO_MODE); /* * UART controller SWH_DMA_IO register have CLEAR_RX_ADDR_EN * When found changing I/O to DMA mode, it clears * two low bits of read point; * USP have similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL. * Fetch data out from rxfifo into DMA buffer in PIO mode, * while switch back to DMA mode, the data fetched will override * by DMA, as hardware have a strange behaviour: * after switch back to DMA mode, check rxfifo status it will * be the number PIO fetched, so record the fetched data count * to avoid the repeated fetch */ max_pio_cnt = 3; while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) & ufifo_st->ff_empty(port)) && max_pio_cnt--) { xmit->buf[xmit->head] = rd_regl(port, ureg->sirfsoc_rx_fifo_data); xmit->head = (xmit->head + 1) & (SIRFSOC_RX_DMA_BUF_SIZE - 1); sirfport->pio_fetch_cnt++; } /* switch back to dma mode */ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) & ~SIRFUART_IO_MODE); dmaengine_resume(sirfport->rx_dma_chan); } next_hrt: hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time)); return HRTIMER_RESTART; } static struct of_device_id sirfsoc_uart_ids[] = { { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,}, { .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart}, { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp}, { .compatible = "sirf,atlas7-usp-uart", .data = &sirfsoc_usp}, {} }; MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids); static int sirfsoc_uart_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct sirfsoc_uart_port *sirfport; struct uart_port *port; struct resource *res; int ret; struct dma_slave_config slv_cfg = { .src_maxburst = 1, }; struct dma_slave_config tx_slv_cfg = { .dst_maxburst = 2, }; const struct of_device_id *match; match = of_match_node(sirfsoc_uart_ids, np); sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL); if (!sirfport) { ret = -ENOMEM; goto err; } sirfport->port.line = of_alias_get_id(np, "serial"); sirf_ports[sirfport->port.line] = 
sirfport; sirfport->port.iotype = UPIO_MEM; sirfport->port.flags = UPF_BOOT_AUTOCONF; port = &sirfport->port; port->dev = &pdev->dev; port->private_data = sirfport; sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data; sirfport->hw_flow_ctrl = of_property_read_bool(np, "uart-has-rtscts") || of_property_read_bool(np, "sirf,uart-has-rtscts") /* deprecated */; if (of_device_is_compatible(np, "sirf,prima2-uart") || of_device_is_compatible(np, "sirf,atlas7-uart")) sirfport->uart_reg->uart_type = SIRF_REAL_UART; if (of_device_is_compatible(np, "sirf,prima2-usp-uart") || of_device_is_compatible(np, "sirf,atlas7-usp-uart")) { sirfport->uart_reg->uart_type = SIRF_USP_UART; if (!sirfport->hw_flow_ctrl) goto usp_no_flow_control; if (of_find_property(np, "cts-gpios", NULL)) sirfport->cts_gpio = of_get_named_gpio(np, "cts-gpios", 0); else sirfport->cts_gpio = -1; if (of_find_property(np, "rts-gpios", NULL)) sirfport->rts_gpio = of_get_named_gpio(np, "rts-gpios", 0); else sirfport->rts_gpio = -1; if ((!gpio_is_valid(sirfport->cts_gpio) || !gpio_is_valid(sirfport->rts_gpio))) { ret = -EINVAL; dev_err(&pdev->dev, "Usp flow control must have cts and rts gpio"); goto err; } ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio, "usp-cts-gpio"); if (ret) { dev_err(&pdev->dev, "Unable request cts gpio"); goto err; } gpio_direction_input(sirfport->cts_gpio); ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio, "usp-rts-gpio"); if (ret) { dev_err(&pdev->dev, "Unable request rts gpio"); goto err; } gpio_direction_output(sirfport->rts_gpio, 1); } usp_no_flow_control: if (of_device_is_compatible(np, "sirf,atlas7-uart") || of_device_is_compatible(np, "sirf,atlas7-usp-uart")) sirfport->is_atlas7 = true; if (of_property_read_u32(np, "fifosize", &port->fifosize)) { dev_err(&pdev->dev, "Unable to find fifosize in uart node.\n"); ret = -EFAULT; goto err; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "Insufficient resources.\n"); ret = -EFAULT; goto err; } port->mapbase = res->start; port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!port->membase) { dev_err(&pdev->dev, "Cannot remap resource.\n"); ret = -ENOMEM; goto err; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(&pdev->dev, "Insufficient resources.\n"); ret = -EFAULT; goto err; } port->irq = res->start; sirfport->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(sirfport->clk)) { ret = PTR_ERR(sirfport->clk); goto err; } port->uartclk = clk_get_rate(sirfport->clk); port->ops = &sirfsoc_uart_ops; spin_lock_init(&port->lock); platform_set_drvdata(pdev, sirfport); ret = uart_add_one_port(&sirfsoc_uart_drv, port); if (ret != 0) { dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id); goto err; } sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx"); sirfport->rx_dma_items.xmit.buf = dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, &sirfport->rx_dma_items.dma_addr, GFP_KERNEL); if (!sirfport->rx_dma_items.xmit.buf) { dev_err(port->dev, "Uart alloc bufa failed\n"); ret = -ENOMEM; goto alloc_coherent_err; } sirfport->rx_dma_items.xmit.head = sirfport->rx_dma_items.xmit.tail = 0; if (sirfport->rx_dma_chan) dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg); sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx"); if (sirfport->tx_dma_chan) dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg); if (sirfport->rx_dma_chan) { hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 
sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback; sirfport->is_hrt_enabled = false; } return 0; alloc_coherent_err: dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, sirfport->rx_dma_items.xmit.buf, sirfport->rx_dma_items.dma_addr); dma_release_channel(sirfport->rx_dma_chan); err: return ret; } static int sirfsoc_uart_remove(struct platform_device *pdev) { struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev); struct uart_port *port = &sirfport->port; uart_remove_one_port(&sirfsoc_uart_drv, port); if (sirfport->rx_dma_chan) { dmaengine_terminate_all(sirfport->rx_dma_chan); dma_release_channel(sirfport->rx_dma_chan); dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, sirfport->rx_dma_items.xmit.buf, sirfport->rx_dma_items.dma_addr); } if (sirfport->tx_dma_chan) { dmaengine_terminate_all(sirfport->tx_dma_chan); dma_release_channel(sirfport->tx_dma_chan); } return 0; } #ifdef CONFIG_PM_SLEEP static int sirfsoc_uart_suspend(struct device *pdev) { struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev); struct uart_port *port = &sirfport->port; uart_suspend_port(&sirfsoc_uart_drv, port); return 0; } static int sirfsoc_uart_resume(struct device *pdev) { struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev); struct uart_port *port = &sirfport->port; uart_resume_port(&sirfsoc_uart_drv, port); return 0; } #endif static const struct dev_pm_ops sirfsoc_uart_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume) }; static struct platform_driver sirfsoc_uart_driver = { .probe = sirfsoc_uart_probe, .remove = sirfsoc_uart_remove, .driver = { .name = SIRFUART_PORT_NAME, .of_match_table = sirfsoc_uart_ids, .pm = &sirfsoc_uart_pm_ops, }, }; static int __init sirfsoc_uart_init(void) { int ret = 0; ret = uart_register_driver(&sirfsoc_uart_drv); if (ret) goto out; ret = platform_driver_register(&sirfsoc_uart_driver); if (ret) uart_unregister_driver(&sirfsoc_uart_drv); out: return ret; } module_init(sirfsoc_uart_init); static void __exit sirfsoc_uart_exit(void) { platform_driver_unregister(&sirfsoc_uart_driver); uart_unregister_driver(&sirfsoc_uart_drv); } module_exit(sirfsoc_uart_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>"); MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");
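sirfsoc_uart_calc_sample_div in the sample above brute-forces a (sample_div, ioclk_div) pair that minimizes the error between the achievable and requested baud rate. The same search extracted into a stand-alone sketch, with the register packing omitted; the divider limits are illustrative, not the SIRF hardware constants:

#include <stdio.h>

#define MIN_SAMPLE_DIV 15
#define MAX_SAMPLE_DIV 35
#define IOCLK_DIV_MAX  0xffff

/* Pick dividers so ioclk / ((ioclk_div + 1) * (sample_div + 1)) ~= baud. */
static unsigned long best_baud(unsigned long ioclk, unsigned long baud,
			       unsigned int *s_out, unsigned long *d_out)
{
	unsigned long best = 0, min_delta = ~0UL;

	for (unsigned int s = MIN_SAMPLE_DIV; s <= MAX_SAMPLE_DIV; s++) {
		unsigned long d = ioclk / (baud * (s + 1));
		if (d == 0 || d - 1 > IOCLK_DIV_MAX)
			continue;
		unsigned long got = ioclk / (d * (s + 1));
		unsigned long delta = got > baud ? got - baud : baud - got;
		if (delta < min_delta) {
			min_delta = delta;
			best = got;
			*s_out = s;
			*d_out = d - 1;	/* register holds divisor - 1 */
		}
	}
	return best;
}

int main(void)
{
	unsigned int s;
	unsigned long d;
	unsigned long got = best_baud(150000000UL, 115200UL, &s, &d);

	printf("baud=%lu sample_div=%u ioclk_div=%lu\n", got, s, d);
	return 0;
}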
null
null
null
null
96,277
42,065
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
42,065
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "google_apis/gcm/engine/instance_id_delete_token_request_handler.h" #include "base/macros.h" #include "base/metrics/histogram_macros.h" #include "base/strings/string_number_conversions.h" #include "google_apis/gcm/base/gcm_util.h" #include "net/url_request/url_fetcher.h" #include "net/url_request/url_request_context_getter.h" namespace gcm { namespace { // Request constants. const char kGMSVersionKey[] = "gmsv"; const char kInstanceIDKey[] = "appid"; const char kSenderKey[] = "sender"; const char kScopeKey[] = "scope"; const char kExtraScopeKey[] = "X-scope"; // Response constants. const char kTokenPrefix[] = "token="; } // namespace InstanceIDDeleteTokenRequestHandler::InstanceIDDeleteTokenRequestHandler( const std::string& instance_id, const std::string& authorized_entity, const std::string& scope, int gcm_version) : instance_id_(instance_id), authorized_entity_(authorized_entity), scope_(scope), gcm_version_(gcm_version) { DCHECK(!instance_id.empty()); DCHECK(!authorized_entity.empty()); DCHECK(!scope.empty()); } InstanceIDDeleteTokenRequestHandler::~InstanceIDDeleteTokenRequestHandler() {} void InstanceIDDeleteTokenRequestHandler::BuildRequestBody(std::string* body){ BuildFormEncoding(kInstanceIDKey, instance_id_, body); BuildFormEncoding(kSenderKey, authorized_entity_, body); BuildFormEncoding(kScopeKey, scope_, body); BuildFormEncoding(kExtraScopeKey, scope_, body); BuildFormEncoding(kGMSVersionKey, base::IntToString(gcm_version_), body); } UnregistrationRequest::Status InstanceIDDeleteTokenRequestHandler::ParseResponse( const std::string& response) { if (response.find(kTokenPrefix) == std::string::npos) return UnregistrationRequest::RESPONSE_PARSING_FAILED; return UnregistrationRequest::SUCCESS; } void InstanceIDDeleteTokenRequestHandler::ReportUMAs( UnregistrationRequest::Status status, int retry_count, base::TimeDelta complete_time) { UMA_HISTOGRAM_ENUMERATION("InstanceID.DeleteToken.RequestStatus", status, UnregistrationRequest::UNREGISTRATION_STATUS_COUNT); // Other UMAs are only reported when the request succeeds. if (status != UnregistrationRequest::SUCCESS) return; UMA_HISTOGRAM_COUNTS("InstanceID.DeleteToken.RetryCount", retry_count); UMA_HISTOGRAM_TIMES("InstanceID.DeleteToken.CompleteTime", complete_time); } } // namespace gcm
null
null
null
null
38,928
24,044
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
189,039
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
/* * psb GEM interface * * Copyright (c) 2011, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: Alan Cox * * TODO: * - we need to work out if the MMU is relevant (eg for * accelerated operations on a GEM object) */ #include <drm/drmP.h> #include <drm/drm.h> #include <drm/gma_drm.h> #include <drm/drm_vma_manager.h> #include "psb_drv.h" void psb_gem_free_object(struct drm_gem_object *obj) { struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); /* Remove the list map if one is present */ drm_gem_free_mmap_offset(obj); drm_gem_object_release(obj); /* This must occur last as it frees up the memory of the GEM object */ psb_gtt_free_range(obj->dev, gtt); } int psb_gem_get_aperture(struct drm_device *dev, void *data, struct drm_file *file) { return -EINVAL; } /** * psb_gem_dumb_map_gtt - buffer mapping for dumb interface * @file: our drm client file * @dev: drm device * @handle: GEM handle to the object (from dumb_create) * * Do the necessary setup to allow the mapping of the frame buffer * into user memory. We don't have to do much here at the moment. */ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset) { int ret = 0; struct drm_gem_object *obj; /* GEM does all our handle to object mapping */ obj = drm_gem_object_lookup(file, handle); if (obj == NULL) return -ENOENT; /* Make it mmapable */ ret = drm_gem_create_mmap_offset(obj); if (ret) goto out; *offset = drm_vma_node_offset_addr(&obj->vma_node); out: drm_gem_object_unreference_unlocked(obj); return ret; } /** * psb_gem_create - create a mappable object * @file: the DRM file of the client * @dev: our device * @size: the size requested * @handlep: returned handle (opaque number) * * Create a GEM object, fill in the boilerplate and attach a handle to * it so that userspace can speak about it. 
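 * (Editor's note: the "stolen" and "align" parameters below are accepted
 * but this path currently hardcodes a non-stolen, PAGE_SIZE-aligned
 * allocation in its psb_gtt_alloc_range() call.)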
This does the core work
 * for the various methods that do/will create GEM objects for things
 */
int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
		   u32 *handlep, int stolen, u32 align)
{
	struct gtt_range *r;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate our object - for now a direct gtt range which is not
	   stolen memory backed */
	r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
	if (r == NULL) {
		dev_err(dev->dev, "no memory for %llu byte GEM object\n", size);
		return -ENOSPC;
	}
	/* Initialize the extra goodies GEM needs to do all the hard work */
	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
		psb_gtt_free_range(dev, r);
		/* GEM doesn't give an error code so use -ENOMEM */
		dev_err(dev->dev, "GEM init failed for %llu\n", size);
		return -ENOMEM;
	}
	/* Limit the object to 32bit mappings */
	mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	/* Give the object a handle so we can carry it more easily */
	ret = drm_gem_handle_create(file, &r->gem, &handle);
	if (ret) {
		dev_err(dev->dev, "GEM handle failed for %p, %llu\n",
			&r->gem, size);
		drm_gem_object_release(&r->gem);
		psb_gtt_free_range(dev, r);
		return ret;
	}
	/* We have the initial and handle reference but need only one now */
	drm_gem_object_unreference_unlocked(&r->gem);
	*handlep = handle;
	return 0;
}

/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return psb_gem_create(file, dev, args->size, &args->handle, 0,
			      PAGE_SIZE);
}

/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GTT and repacking it when we run out of space. We can put
 * that off for now and for our simple uses.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct gtt_range *r;
	int ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = dev->dev_private;

	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */

	/* Make sure we don't parallel update on a fault, nor move or
	   remove something from beneath our feet */
	mutex_lock(&dev_priv->mmap_mutex);

	/* For now the mmap pins the object and it stays pinned.
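	   (Editor's note, an inference from the pin/insert pairing below:
	   pinning keeps the range at a stable GTT offset, so the pfn we
	   compute stays valid for as long as the mapping exists.)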
As things stand that will do us no harm */ if (r->mmapping == 0) { ret = psb_gtt_pin(r); if (ret < 0) { dev_err(dev->dev, "gma500: pin failed: %d\n", ret); goto fail; } r->mmapping = 1; } /* Page relative to the VMA start - we must calculate this ourselves because vmf->pgoff is the fake GEM offset */ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* CPU view of the page, don't go via the GART for CPU writes */ if (r->stolen) pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT; else pfn = page_to_pfn(r->pages[page_offset]); ret = vm_insert_pfn(vma, vmf->address, pfn); fail: mutex_unlock(&dev_priv->mmap_mutex); switch (ret) { case 0: case -ERESTARTSYS: case -EINTR: return VM_FAULT_NOPAGE; case -ENOMEM: return VM_FAULT_OOM; default: return VM_FAULT_SIGBUS; } }
null
null
null
null
97,386
41,287
null
train_val
e4311ee51d1e2676001b2d8fcefd92bdd79aad85
206,282
linux
0
https://github.com/torvalds/linux
2017-05-12 08:32:58+10:00
#ifndef _LINUX_TRACEPOINT_H
#define _LINUX_TRACEPOINT_H

/*
 * Kernel Tracepoint API.
 *
 * See Documentation/trace/tracepoints.txt.
 *
 * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Heavily inspired from the Linux Kernel Markers.
 *
 * This file is released under the GPLv2.
 * See the file COPYING for more details.
 */

#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint-defs.h>

struct module;
struct tracepoint;
struct notifier_block;

struct trace_enum_map {
	const char		*system;
	const char		*enum_string;
	unsigned long		enum_value;
};

#define TRACEPOINT_DEFAULT_PRIO	10

extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
			       int prio);
extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv);

#ifdef CONFIG_MODULES
struct tp_module {
	struct list_head list;
	struct module *mod;
};

bool trace_module_has_bad_taint(struct module *mod);
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
#else
static inline bool trace_module_has_bad_taint(struct module *mod)
{
	return false;
}
static inline
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	return 0;
}
#endif /* CONFIG_MODULES */

/*
 * tracepoint_synchronize_unregister must be called between the last tracepoint
 * probe unregistration and the end of module exit to make sure there is no
 * caller executing a probe when it is freed.
 */
static inline void tracepoint_synchronize_unregister(void)
{
	synchronize_sched();
}

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
extern int syscall_regfunc(void);
extern void syscall_unregfunc(void);
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */

#define PARAMS(args...) args

#define TRACE_DEFINE_ENUM(x)

#endif /* _LINUX_TRACEPOINT_H */

/*
 * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
 * file ifdef protection.
 * This is due to the way trace events work. If a file includes two
 * trace event headers under one "CREATE_TRACE_POINTS" the first include
 * will override the TRACE_EVENT and break the second include.
 */

#ifndef DECLARE_TRACE

#define TP_PROTO(args...)	args
#define TP_ARGS(args...)	args
#define TP_CONDITION(args...)	args

/*
 * Individual subsystems may have a separate configuration to
 * enable their tracepoints. By default, this file will create
 * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem
 * wants to be able to disable its tracepoints from being created
 * it can define NOTRACE before including the tracepoint headers.
 */
#if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE)
#define TRACEPOINTS_ENABLED
#endif

#ifdef TRACEPOINTS_ENABLED

/*
 * it_func[0] is never NULL because there is at least one element in the array
 * when the array itself is non NULL.
 *
 * Note, the proto and args passed in include "__data" as the first parameter.
 * The reason for this is to handle the "void" prototype. If a tracepoint
 * has a "void" prototype, then it is invalid to declare a function
 * as "(void *, void)".
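 * (That is simply the C rule that a "void" parameter list stands alone
 * and cannot be combined with any other parameter.)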
 * The DECLARE_TRACE_NOARGS() will pass in just
 * "void *data", whereas the DECLARE_TRACE() will pass in "void *data, proto".
 */
#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu)		\
	do {								\
		struct tracepoint_func *it_func_ptr;			\
		void *it_func;						\
		void *__data;						\
									\
		if (!(cond))						\
			return;						\
		prercu;							\
		rcu_read_lock_sched_notrace();				\
		it_func_ptr = rcu_dereference_sched((tp)->funcs);	\
		if (it_func_ptr) {					\
			do {						\
				it_func = (it_func_ptr)->func;		\
				__data = (it_func_ptr)->data;		\
				((void(*)(proto))(it_func))(args);	\
			} while ((++it_func_ptr)->func);		\
		}							\
		rcu_read_unlock_sched_notrace();			\
		postrcu;						\
	} while (0)

#ifndef MODULE
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
	static inline void trace_##name##_rcuidle(proto)		\
	{								\
		if (static_key_false(&__tracepoint_##name.key))		\
			__DO_TRACE(&__tracepoint_##name,		\
				TP_PROTO(data_proto),			\
				TP_ARGS(data_args),			\
				TP_CONDITION(cond),			\
				rcu_irq_enter_irqson(),			\
				rcu_irq_exit_irqson());			\
	}
#else
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
#endif

/*
 * Make sure the alignment of the structure in the __tracepoints section will
 * not add unwanted padding between the beginning of the section and the
 * structure. Force alignment to the same alignment as the section start.
 *
 * When lockdep is enabled, we make sure to always do the RCU portions of
 * the tracepoint code, regardless of whether tracing is on. However,
 * don't check if the condition is false, due to interaction with idle
 * instrumentation. This lets us find RCU issues triggered with tracepoints
 * even when this tracepoint is off. This code has no purpose other than
 * poking RCU a bit.
 */
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
	extern struct tracepoint __tracepoint_##name;			\
	static inline void trace_##name(proto)				\
	{								\
		if (static_key_false(&__tracepoint_##name.key))		\
			__DO_TRACE(&__tracepoint_##name,		\
				TP_PROTO(data_proto),			\
				TP_ARGS(data_args),			\
				TP_CONDITION(cond),,);			\
		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
			rcu_read_lock_sched_notrace();			\
			rcu_dereference_sched(__tracepoint_##name.funcs);\
			rcu_read_unlock_sched_notrace();		\
		}							\
	}								\
	__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(cond), PARAMS(data_proto), PARAMS(data_args))	\
	static inline int						\
	register_trace_##name(void (*probe)(data_proto), void *data)	\
	{								\
		return tracepoint_probe_register(&__tracepoint_##name,	\
						(void *)probe, data);	\
	}								\
	static inline int						\
	register_trace_prio_##name(void (*probe)(data_proto), void *data,\
				   int prio)				\
	{								\
		return tracepoint_probe_register_prio(&__tracepoint_##name, \
					      (void *)probe, data, prio); \
	}								\
	static inline int						\
	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
	{								\
		return tracepoint_probe_unregister(&__tracepoint_##name,\
						(void *)probe, data);	\
	}								\
	static inline void						\
	check_trace_callback_type_##name(void (*cb)(data_proto))	\
	{								\
	}								\
	static inline bool						\
	trace_##name##_enabled(void)					\
	{								\
		return static_key_false(&__tracepoint_##name.key);	\
	}

/*
 * We have no guarantee that gcc and the linker won't up-align the tracepoint
 * structures, so we create an array of pointers that will be used for iteration
 * on the tracepoints.
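 * (Editor's note: the linker gathers this "__tracepoints_ptrs" section
 * into one contiguous array, which is what the for_each_kernel_tracepoint()
 * helper declared earlier in this header walks.)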
*/ #define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ __attribute__((section("__tracepoints_strings"))) = #name; \ struct tracepoint __tracepoint_##name \ __attribute__((section("__tracepoints"))) = \ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ static struct tracepoint * const __tracepoint_ptr_##name __used \ __attribute__((section("__tracepoints_ptrs"))) = \ &__tracepoint_##name; #define DEFINE_TRACE(name) \ DEFINE_TRACE_FN(name, NULL, NULL); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ EXPORT_SYMBOL_GPL(__tracepoint_##name) #define EXPORT_TRACEPOINT_SYMBOL(name) \ EXPORT_SYMBOL(__tracepoint_##name) #else /* !TRACEPOINTS_ENABLED */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ static inline void trace_##name##_rcuidle(proto) \ { } \ static inline int \ register_trace_##name(void (*probe)(data_proto), \ void *data) \ { \ return -ENOSYS; \ } \ static inline int \ unregister_trace_##name(void (*probe)(data_proto), \ void *data) \ { \ return -ENOSYS; \ } \ static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ { \ } \ static inline bool \ trace_##name##_enabled(void) \ { \ return false; \ } #define DEFINE_TRACE_FN(name, reg, unreg) #define DEFINE_TRACE(name) #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) #endif /* TRACEPOINTS_ENABLED */ #ifdef CONFIG_TRACING /** * tracepoint_string - register constant persistent string to trace system * @str - a constant persistent string that will be referenced in tracepoints * * If constant strings are being used in tracepoints, it is faster and * more efficient to just save the pointer to the string and reference * that with a printf "%s" instead of saving the string in the ring buffer * and wasting space and time. * * The problem with the above approach is that userspace tools that read * the binary output of the trace buffers do not have access to the string. * Instead they just show the address of the string which is not very * useful to users. * * With tracepoint_string(), the string will be registered to the tracing * system and exported to userspace via the debugfs/tracing/printk_formats * file that maps the string address to the string text. This way userspace * tools that read the binary buffers have a way to map the pointers to * the ASCII strings they represent. * * The @str used must be a constant string and persistent as it would not * make sense to show a string that no longer exists. But it is still fine * to be used with modules, because when modules are unloaded, if they * had tracepoints, the ring buffers are cleared too. As long as the string * does not change during the life of the module, it is fine to use * tracepoint_string() within a module. */ #define tracepoint_string(str) \ ({ \ static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) #define __tracepoint_string __attribute__((section("__tracepoint_str"))) #else /* * tracepoint_string() is used to save the string address for userspace * tracing tools. When tracing isn't configured, there's no need to save * anything. */ # define tracepoint_string(str) str # define __tracepoint_string #endif /* * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype * (void). "void" is a special value in a function prototype and can * not be combined with other arguments. 
Since the DECLARE_TRACE()
 * macro adds a data element at the beginning of the prototype,
 * we need a way to differentiate "(void *data, proto)" from
 * "(void *data, void)". The second prototype is invalid.
 *
 * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
 * and "void *__data" as the callback prototype.
 *
 * DECLARE_TRACE() passes "proto" as the tracepoint prototype and
 * "void *__data, proto" as the callback prototype.
 */
#define DECLARE_TRACE_NOARGS(name)					\
	__DECLARE_TRACE(name, void, ,					\
			cpu_online(raw_smp_processor_id()),		\
			void *__data, __data)

#define DECLARE_TRACE(name, proto, args)				\
	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
			cpu_online(raw_smp_processor_id()),		\
			PARAMS(void *__data, proto),			\
			PARAMS(__data, args))

#define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
			PARAMS(void *__data, proto),			\
			PARAMS(__data, args))

#define TRACE_EVENT_FLAGS(event, flag)

#define TRACE_EVENT_PERF_PERM(event, expr...)

#endif /* DECLARE_TRACE */

#ifndef TRACE_EVENT
/*
 * For use with the TRACE_EVENT macro:
 *
 * We define a tracepoint, its arguments, its printk format
 * and its 'fast binary record' layout.
 *
 * Firstly, name your tracepoint via TRACE_EVENT(name : the
 * 'subsystem_event' notation is fine.
 *
 * Think about this whole construct as the
 * 'trace_sched_switch() function' from now on.
 *
 *
 *  TRACE_EVENT(sched_switch,
 *
 *	*
 *	* A function has a regular function arguments
 *	* prototype, declare it via TP_PROTO():
 *	*
 *
 *	TP_PROTO(struct rq *rq, struct task_struct *prev,
 *		 struct task_struct *next),
 *
 *	*
 *	* Define the call signature of the 'function'.
 *	* (Design sidenote: we use this instead of a
 *	*  TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
 *	*
 *
 *	TP_ARGS(rq, prev, next),
 *
 *	*
 *	* Fast binary tracing: define the trace record via
 *	* TP_STRUCT__entry(). You can think about it like a
 *	* regular C structure local variable definition.
 *	*
 *	* This is how the trace record is structured and will
 *	* be saved into the ring buffer. These are the fields
 *	* that will be exposed to user-space in
 *	* /sys/kernel/debug/tracing/events/<*>/format.
 *	*
 *	* The declared 'local variable' is called '__entry'
 *	*
 *	* __field(pid_t, prev_pid) is equivalent to a standard declaration:
 *	*
 *	*	pid_t	prev_pid;
 *	*
 *	* __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
 *	*
 *	*	char	prev_comm[TASK_COMM_LEN];
 *	*
 *
 *	TP_STRUCT__entry(
 *		__array(	char,	prev_comm,	TASK_COMM_LEN	)
 *		__field(	pid_t,	prev_pid			)
 *		__field(	int,	prev_prio			)
 *		__array(	char,	next_comm,	TASK_COMM_LEN	)
 *		__field(	pid_t,	next_pid			)
 *		__field(	int,	next_prio			)
 *	),
 *
 *	*
 *	* Assign the entry into the trace record, by embedding
 *	* a full C statement block into TP_fast_assign(). You
 *	* can refer to the trace record as '__entry' -
 *	* otherwise you can put arbitrary C code in here.
 *	*
 *	* Note: this C code will execute every time a trace event
 *	* happens, on an active tracepoint.
 *	*
 *
 *	TP_fast_assign(
 *		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
 *		__entry->prev_pid	= prev->pid;
 *		__entry->prev_prio	= prev->prio;
 *		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
 *		__entry->next_pid	= next->pid;
 *		__entry->next_prio	= next->prio;
 *	),
 *
 *	*
 *	* Formatted output of a trace record via TP_printk().
 *	* This is how the tracepoint will appear under ftrace
 *	* plugins that make use of this tracepoint.
 *	*
 *	* (raw-binary tracing won't actually perform this step.)
 *	*
 *
 *	TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
 *		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
 *		__entry->next_comm, __entry->next_pid, __entry->next_prio),
 *
 * );
 *
 * This macro construct is thus used for the regular printk format
 * tracing setup; it is used to construct a function pointer based
 * tracepoint callback (this is used by programmatic plugins and
 * can also be used by generic instrumentation like SystemTap), and
 * it is also used to expose a structured trace record in
 * /sys/kernel/debug/tracing/events/.
 *
 * A set of (un)registration functions can be passed to the variant
 * TRACE_EVENT_FN to perform any (un)registration work.
 */

#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_EVENT(template, name, proto, args)		\
	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)\
	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_CONDITION(template, name, proto,		\
			       args, cond)			\
	DECLARE_TRACE_CONDITION(name, PARAMS(proto),		\
				PARAMS(args), PARAMS(cond))

#define TRACE_EVENT(name, proto, args, struct, assign, print)	\
	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN(name, proto, args, struct,		\
		assign, print, reg, unreg)			\
	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct,	\
		assign, print, reg, unreg)			\
	DECLARE_TRACE_CONDITION(name, PARAMS(proto),		\
			PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_CONDITION(name, proto, args, cond,		\
			      struct, assign, print)		\
	DECLARE_TRACE_CONDITION(name, PARAMS(proto),		\
				PARAMS(args), PARAMS(cond))

#define TRACE_EVENT_FLAGS(event, flag)

#define TRACE_EVENT_PERF_PERM(event, expr...)

#endif /* ifdef TRACE_EVENT (see note above) */
null
null
null
null
114,629
45,946
null
train_val
796a0e014bc3985709c0a35538d606ef1da31e1b
45,946
Chrome
0
https://github.com/chromium/chromium
2018-04-07 23:43:03+00:00
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/wm/test_child_modal_parent.h" #include <memory> #include "base/strings/utf_string_conversions.h" #include "ui/aura/window.h" #include "ui/gfx/canvas.h" #include "ui/views/background.h" #include "ui/views/controls/button/label_button.h" #include "ui/views/controls/native/native_view_host.h" #include "ui/views/controls/textfield/textfield.h" #include "ui/views/widget/widget.h" #include "ui/views/widget/widget_delegate.h" #include "ui/wm/core/window_modality_controller.h" using views::Widget; namespace ash { namespace { // Parent window size and position. const int kWindowLeft = 170; const int kWindowTop = 200; const int kWindowWidth = 400; const int kWindowHeight = 400; // Parent window layout. const int kButtonHeight = 35; const int kTextfieldHeight = 35; // Child window size. const int kChildWindowWidth = 330; const int kChildWindowHeight = 200; // Child window layout. const int kChildTextfieldLeft = 20; const int kChildTextfieldTop = 50; const int kChildTextfieldWidth = 290; const int kChildTextfieldHeight = 35; const SkColor kModalParentColor = SK_ColorWHITE; const SkColor kChildColor = SK_ColorWHITE; } // namespace class ChildModalWindow : public views::WidgetDelegateView { public: ChildModalWindow(); ~ChildModalWindow() override; private: // Overridden from View: void OnPaint(gfx::Canvas* canvas) override; gfx::Size CalculatePreferredSize() const override; // Overridden from WidgetDelegate: base::string16 GetWindowTitle() const override; bool CanResize() const override; ui::ModalType GetModalType() const override; DISALLOW_COPY_AND_ASSIGN(ChildModalWindow); }; ChildModalWindow::ChildModalWindow() { views::Textfield* textfield = new views::Textfield; AddChildView(textfield); textfield->SetBounds(kChildTextfieldLeft, kChildTextfieldTop, kChildTextfieldWidth, kChildTextfieldHeight); } ChildModalWindow::~ChildModalWindow() = default; void ChildModalWindow::OnPaint(gfx::Canvas* canvas) { canvas->FillRect(GetLocalBounds(), kChildColor); } gfx::Size ChildModalWindow::CalculatePreferredSize() const { return gfx::Size(kChildWindowWidth, kChildWindowHeight); } base::string16 ChildModalWindow::GetWindowTitle() const { return base::ASCIIToUTF16("Examples: Child Modal Window"); } bool ChildModalWindow::CanResize() const { return false; } ui::ModalType ChildModalWindow::GetModalType() const { return ui::MODAL_TYPE_CHILD; } // static void TestChildModalParent::Create() { Widget::CreateWindowWithContextAndBounds( new TestChildModalParent(), /*context=*/nullptr, gfx::Rect(kWindowLeft, kWindowTop, kWindowWidth, kWindowHeight)) ->Show(); } TestChildModalParent::TestChildModalParent() : widget_(std::make_unique<Widget>()), button_(new views::LabelButton( this, base::ASCIIToUTF16("Show/Hide Child Modal Window"))), textfield_(new views::Textfield), host_(new views::NativeViewHost), child_(nullptr) { Widget::InitParams params(Widget::InitParams::TYPE_CONTROL); params.ownership = Widget::InitParams::WIDGET_OWNS_NATIVE_WIDGET; widget_->Init(params); widget_->GetRootView()->SetBackground( views::CreateSolidBackground(kModalParentColor)); widget_->GetNativeView()->SetName("ModalParent"); AddChildView(button_); AddChildView(textfield_); AddChildView(host_); } TestChildModalParent::~TestChildModalParent() = default; void TestChildModalParent::ShowChild() { if (!child_) child_ = CreateChild(); child_->Show(); } aura::Window* 
TestChildModalParent::GetModalParent() const {
  return widget_->GetNativeView();
}

aura::Window* TestChildModalParent::GetChild() const {
  if (child_)
    return child_->GetNativeView();
  return nullptr;
}

Widget* TestChildModalParent::CreateChild() {
  Widget* child = Widget::CreateWindowWithParent(new ChildModalWindow,
                                                 GetWidget()->GetNativeView());
  wm::SetModalParent(child->GetNativeView(), GetModalParent());
  child->AddObserver(this);
  child->GetNativeView()->SetName("ChildModalWindow");
  return child;
}

base::string16 TestChildModalParent::GetWindowTitle() const {
  return base::ASCIIToUTF16("Examples: Child Modal Parent");
}

bool TestChildModalParent::CanResize() const {
  return false;
}

void TestChildModalParent::DeleteDelegate() {
  if (child_) {
    child_->RemoveObserver(this);
    child_->Close();
    child_ = nullptr;
  }
  delete this;
}

void TestChildModalParent::Layout() {
  int running_y = y();
  button_->SetBounds(x(), running_y, width(), kButtonHeight);
  running_y += kButtonHeight;
  textfield_->SetBounds(x(), running_y, width(), kTextfieldHeight);
  running_y += kTextfieldHeight;
  host_->SetBounds(x(), running_y, width(), height() - running_y);
}

void TestChildModalParent::ViewHierarchyChanged(
    const ViewHierarchyChangedDetails& details) {
  if (details.is_add && details.child == this) {
    host_->Attach(widget_->GetNativeWindow());
    GetWidget()->GetNativeView()->SetName("Parent");
  }
}

void TestChildModalParent::ButtonPressed(views::Button* sender,
                                         const ui::Event& event) {
  if (sender == button_) {
    if (!child_)
      child_ = CreateChild();
    if (child_->IsVisible())
      child_->Hide();
    else
      child_->Show();
  }
}

void TestChildModalParent::OnWidgetDestroying(Widget* widget) {
  if (child_) {
    DCHECK_EQ(child_, widget);
    child_ = nullptr;
  }
}

}  // namespace ash
null
null
null
null
42,809